From: degomme Date: Mon, 6 Mar 2017 13:10:23 +0000 (+0100) Subject: Merge branch 'smpi_cpp' X-Git-Tag: v3_15~218 X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/commitdiff_plain/ca2e418072d73461d9c4f1e39e77c9f7380eb3fd?hp=a10acf1eeceb5945d636e9210f2e0d39b235d49b Merge branch 'smpi_cpp' --- diff --git a/include/smpi/forward.hpp b/include/smpi/forward.hpp new file mode 100644 index 0000000000..e24b1f0682 --- /dev/null +++ b/include/smpi/forward.hpp @@ -0,0 +1,32 @@ +/* Copyright (c) 2016. The SimGrid Team. All rights reserved. */ + +/* This program is free software; you can redistribute it and/or modify it + * under the terms of the license (GNU LGPL) which comes with this package. */ + +#ifndef SIMGRID_SMPI_FORWARD_HPP +#define SIMGRID_SMPI_FORWARD_HPP + + +#ifdef __cplusplus + +#include +namespace simgrid { +namespace SMPI { + +class Group; +class Comm; + +} +} + +typedef simgrid::SMPI::Group SMPI_Group; +typedef simgrid::SMPI::Comm SMPI_Comm; + +#else + +typedef struct SMPI_Group SMPI_Group; +typedef struct SMPI_Comm SMPI_Comm; + +#endif + +#endif diff --git a/include/smpi/smpi.h b/include/smpi/smpi.h index d87e94dabb..6214488aa5 100644 --- a/include/smpi/smpi.h +++ b/include/smpi/smpi.h @@ -18,6 +18,8 @@ #include #include "simgrid/datatypes.h" +#include "include/smpi/forward.hpp" + #ifdef _WIN32 #define MPI_CALL(type,name,args) \ type name args; \ @@ -363,16 +365,14 @@ XBT_PUBLIC_DATA( MPI_Op ) MPI_REPLACE; struct s_smpi_mpi_topology; typedef struct s_smpi_mpi_topology *MPI_Topology; - -struct s_smpi_mpi_group; -typedef struct s_smpi_mpi_group *MPI_Group; + +typedef SMPI_Group* MPI_Group; #define MPI_GROUP_NULL ((MPI_Group)NULL) XBT_PUBLIC_DATA( MPI_Group ) MPI_GROUP_EMPTY; -struct s_smpi_mpi_communicator; -typedef struct s_smpi_mpi_communicator *MPI_Comm; +typedef SMPI_Comm *MPI_Comm; #define MPI_COMM_NULL ((MPI_Comm)NULL) XBT_PUBLIC_DATA( MPI_Comm ) MPI_COMM_WORLD; diff --git a/src/smpi/colls/allgather-2dmesh.c 
b/src/smpi/colls/allgather-2dmesh.cpp similarity index 98% rename from src/smpi/colls/allgather-2dmesh.c rename to src/smpi/colls/allgather-2dmesh.cpp index 6bec72d967..7452654f66 100644 --- a/src/smpi/colls/allgather-2dmesh.c +++ b/src/smpi/colls/allgather-2dmesh.cpp @@ -118,8 +118,8 @@ smpi_coll_tuned_allgather_2dmesh(void *send_buff, int send_count, MPI_Datatype int my_row_base, my_col_base, src_row_base, block_size, num_reqs; int tag = COLL_TAG_ALLGATHER; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); extent = smpi_datatype_get_extent(send_type); diff --git a/src/smpi/colls/allgather-3dmesh.c b/src/smpi/colls/allgather-3dmesh.cpp similarity index 99% rename from src/smpi/colls/allgather-3dmesh.c rename to src/smpi/colls/allgather-3dmesh.cpp index ab964a1388..7e57df13d6 100644 --- a/src/smpi/colls/allgather-3dmesh.c +++ b/src/smpi/colls/allgather-3dmesh.cpp @@ -105,8 +105,8 @@ int smpi_coll_tuned_allgather_3dmesh(void *send_buff, int send_count, int two_dsize, my_row_base, my_col_base, src_row_base, src_z_base, num_reqs; int tag = COLL_TAG_ALLGATHER; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); extent = smpi_datatype_get_extent(send_type); if (!is_3dmesh(num_procs, &X, &Y, &Z)) diff --git a/src/smpi/colls/allgather-GB.c b/src/smpi/colls/allgather-GB.cpp similarity index 95% rename from src/smpi/colls/allgather-GB.c rename to src/smpi/colls/allgather-GB.cpp index b1bcc881d2..054947a26a 100644 --- a/src/smpi/colls/allgather-GB.c +++ b/src/smpi/colls/allgather-GB.cpp @@ -13,7 +13,7 @@ int smpi_coll_tuned_allgather_GB(void *send_buff, int send_count, MPI_Comm comm) { int num_procs; - num_procs = smpi_comm_size(comm); + num_procs = comm->size(); mpi_coll_gather_fun(send_buff, send_count, send_type, recv_buff, recv_count, recv_type, 0, comm); mpi_coll_bcast_fun(recv_buff, (recv_count * num_procs), recv_type, 0, comm); diff 
--git a/src/smpi/colls/allgather-NTSLR-NB.c b/src/smpi/colls/allgather-NTSLR-NB.cpp similarity index 97% rename from src/smpi/colls/allgather-NTSLR-NB.c rename to src/smpi/colls/allgather-NTSLR-NB.cpp index 2cabbcd81a..cfb643f35b 100644 --- a/src/smpi/colls/allgather-NTSLR-NB.c +++ b/src/smpi/colls/allgather-NTSLR-NB.cpp @@ -18,8 +18,8 @@ smpi_coll_tuned_allgather_NTSLR_NB(void *sbuf, int scount, MPI_Datatype stype, int send_offset, recv_offset; int tag = COLL_TAG_ALLGATHER; - rank = smpi_comm_rank(comm); - size = smpi_comm_size(comm); + rank = comm->rank(); + size = comm->size(); rextent = smpi_datatype_get_extent(rtype); sextent = smpi_datatype_get_extent(stype); MPI_Request *rrequest_array; diff --git a/src/smpi/colls/allgather-NTSLR.c b/src/smpi/colls/allgather-NTSLR.cpp similarity index 96% rename from src/smpi/colls/allgather-NTSLR.c rename to src/smpi/colls/allgather-NTSLR.cpp index 659be11bba..234ceeac56 100644 --- a/src/smpi/colls/allgather-NTSLR.c +++ b/src/smpi/colls/allgather-NTSLR.cpp @@ -18,8 +18,8 @@ smpi_coll_tuned_allgather_NTSLR(void *sbuf, int scount, MPI_Datatype stype, int send_offset, recv_offset; int tag = COLL_TAG_ALLGATHER; - rank = smpi_comm_rank(comm); - size = smpi_comm_size(comm); + rank = comm->rank(); + size = comm->size(); rextent = smpi_datatype_get_extent(rtype); sextent = smpi_datatype_get_extent(stype); diff --git a/src/smpi/colls/allgather-SMP-NTS.c b/src/smpi/colls/allgather-SMP-NTS.cpp similarity index 95% rename from src/smpi/colls/allgather-SMP-NTS.c rename to src/smpi/colls/allgather-SMP-NTS.cpp index d4838ed4a6..f64ca508a9 100644 --- a/src/smpi/colls/allgather-SMP-NTS.c +++ b/src/smpi/colls/allgather-SMP-NTS.cpp @@ -12,8 +12,8 @@ int smpi_coll_tuned_allgather_SMP_NTS(void *sbuf, int scount, MPI_Comm comm) { int src, dst, comm_size, rank; - comm_size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + comm_size = comm->size(); + rank = comm->rank(); MPI_Aint rextent, sextent; rextent = smpi_datatype_get_extent(rtype); 
sextent = smpi_datatype_get_extent(stype); @@ -22,12 +22,12 @@ int smpi_coll_tuned_allgather_SMP_NTS(void *sbuf, int scount, int i, send_offset, recv_offset; int intra_rank, inter_rank; - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } int num_core=1; - if (smpi_comm_is_uniform(comm)){ - num_core = smpi_comm_size(smpi_comm_get_intra_comm(comm)); + if (comm->is_uniform()){ + num_core = comm->get_intra_comm()->size(); } diff --git a/src/smpi/colls/allgather-bruck.c b/src/smpi/colls/allgather-bruck.cpp similarity index 98% rename from src/smpi/colls/allgather-bruck.c rename to src/smpi/colls/allgather-bruck.cpp index e0424e15e1..aaab5e38de 100644 --- a/src/smpi/colls/allgather-bruck.c +++ b/src/smpi/colls/allgather-bruck.cpp @@ -87,8 +87,8 @@ int smpi_coll_tuned_allgather_bruck(void *send_buff, int send_count, char *recv_ptr = (char *) recv_buff; // get size of the communicator, followed by rank - num_procs = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + num_procs = comm->size(); + rank = comm->rank(); // get size of single element's type for recv buffer recv_extent = smpi_datatype_get_extent(recv_type); diff --git a/src/smpi/colls/allgather-loosely-lr.c b/src/smpi/colls/allgather-loosely-lr.cpp similarity index 94% rename from src/smpi/colls/allgather-loosely-lr.c rename to src/smpi/colls/allgather-loosely-lr.cpp index 74df884b27..8724ddafa1 100644 --- a/src/smpi/colls/allgather-loosely-lr.c +++ b/src/smpi/colls/allgather-loosely-lr.cpp @@ -17,20 +17,20 @@ int smpi_coll_tuned_allgather_loosely_lr(void *sbuf, int scount, int intra_rank, inter_rank, inter_comm_size, intra_comm_size; int inter_dst, inter_src; - comm_size = smpi_comm_size(comm); + comm_size = comm->size(); -if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); +if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } int num_core=1; - if 
(smpi_comm_is_uniform(comm)){ - num_core = smpi_comm_size(smpi_comm_get_intra_comm(comm)); + if (comm->is_uniform()){ + num_core = comm->get_intra_comm()->size(); } if(comm_size%num_core) THROWF(arg_error,0, "allgather loosely lr algorithm can't be used with non multiple of NUM_CORE=%d number of processes ! ",num_core); - rank = smpi_comm_rank(comm); + rank = comm->rank(); MPI_Aint rextent, sextent; rextent = smpi_datatype_get_extent(rtype); sextent = smpi_datatype_get_extent(stype); diff --git a/src/smpi/colls/allgather-mvapich-smp.c b/src/smpi/colls/allgather-mvapich-smp.cpp similarity index 86% rename from src/smpi/colls/allgather-mvapich-smp.c rename to src/smpi/colls/allgather-mvapich-smp.cpp index 150798d5eb..5e79deafad 100644 --- a/src/smpi/colls/allgather-mvapich-smp.c +++ b/src/smpi/colls/allgather-mvapich-smp.cpp @@ -49,34 +49,34 @@ int smpi_coll_tuned_allgather_mvapich2_smp(void *sendbuf,int sendcnt, MPI_Dataty MPI_Aint recvtype_extent = 0; /* Datatype extent */ MPI_Comm shmem_comm, leader_comm; - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } - if(!smpi_comm_is_uniform(comm) || !smpi_comm_is_blocked(comm)) + if(!comm->is_uniform() || !comm->is_blocked()) THROWF(arg_error,0, "allgather MVAPICH2 smp algorithm can't be used with irregular deployment. 
Please insure that processes deployed on the same node are contiguous and that each node has the same number of processes"); if (recvcnt == 0) { return MPI_SUCCESS; } - rank = smpi_comm_rank(comm); - size = smpi_comm_size(comm); + rank = comm->rank(); + size = comm->size(); /* extract the rank,size information for the intra-node communicator */ recvtype_extent=smpi_datatype_get_extent(recvtype); - shmem_comm = smpi_comm_get_intra_comm(comm); - local_rank = smpi_comm_rank(shmem_comm); - local_size = smpi_comm_size(shmem_comm); + shmem_comm = comm->get_intra_comm(); + local_rank = shmem_comm->rank(); + local_size = shmem_comm->size(); if (local_rank == 0) { /* Node leader. Extract the rank, size information for the leader communicator */ - leader_comm = smpi_comm_get_leaders_comm(comm); + leader_comm = comm->get_leaders_comm(); if(leader_comm==MPI_COMM_NULL){ leader_comm = MPI_COMM_WORLD; } - leader_comm_size = smpi_comm_size(leader_comm); + leader_comm_size = leader_comm->size(); } /*If there is just one node, after gather itself, @@ -103,17 +103,17 @@ int smpi_coll_tuned_allgather_mvapich2_smp(void *sendbuf,int sendcnt, MPI_Dataty /* Exchange the data between the node leaders*/ if (local_rank == 0 && (leader_comm_size > 1)) { /*When data in each socket is different*/ - if (smpi_comm_is_uniform(comm) != 1) { + if (comm->is_uniform() != 1) { int *displs = NULL; int *recvcnts = NULL; int *node_sizes = NULL; int i = 0; - node_sizes = smpi_comm_get_non_uniform_map(comm); + node_sizes = comm->get_non_uniform_map(); - displs = xbt_malloc(sizeof (int) * leader_comm_size); - recvcnts = xbt_malloc(sizeof (int) * leader_comm_size); + displs = static_cast(xbt_malloc(sizeof (int) * leader_comm_size)); + recvcnts = static_cast(xbt_malloc(sizeof (int) * leader_comm_size)); if (!displs || !recvcnts) { return MPI_ERR_OTHER; } @@ -126,7 +126,7 @@ int smpi_coll_tuned_allgather_mvapich2_smp(void *sendbuf,int sendcnt, MPI_Dataty } - void* 
sendbuf=((char*)recvbuf)+smpi_datatype_get_extent(recvtype)*displs[smpi_comm_rank(leader_comm)]; + void* sendbuf=((char*)recvbuf)+smpi_datatype_get_extent(recvtype)*displs[leader_comm->rank()]; mpi_errno = mpi_coll_allgatherv_fun(sendbuf, (recvcnt*local_size), @@ -137,7 +137,7 @@ int smpi_coll_tuned_allgather_mvapich2_smp(void *sendbuf,int sendcnt, MPI_Dataty xbt_free(displs); xbt_free(recvcnts); } else { - void* sendtmpbuf=((char*)recvbuf)+smpi_datatype_get_extent(recvtype)*(recvcnt*local_size)*smpi_comm_rank(leader_comm); + void* sendtmpbuf=((char*)recvbuf)+smpi_datatype_get_extent(recvtype)*(recvcnt*local_size)*leader_comm->rank(); diff --git a/src/smpi/colls/allgather-ompi-neighborexchange.c b/src/smpi/colls/allgather-ompi-neighborexchange.cpp similarity index 99% rename from src/smpi/colls/allgather-ompi-neighborexchange.c rename to src/smpi/colls/allgather-ompi-neighborexchange.cpp index 9a3ccd9ed0..9227d51dff 100644 --- a/src/smpi/colls/allgather-ompi-neighborexchange.c +++ b/src/smpi/colls/allgather-ompi-neighborexchange.cpp @@ -79,8 +79,8 @@ smpi_coll_tuned_allgather_ompi_neighborexchange(void *sbuf, int scount, ptrdiff_t slb, rlb, sext, rext; char *tmpsend = NULL, *tmprecv = NULL; - size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + size = comm->size(); + rank = comm->rank(); if (size % 2) { XBT_DEBUG( diff --git a/src/smpi/colls/allgather-pair.c b/src/smpi/colls/allgather-pair.cpp similarity index 97% rename from src/smpi/colls/allgather-pair.c rename to src/smpi/colls/allgather-pair.cpp index 6111f2820c..1d1d2b0e52 100644 --- a/src/smpi/colls/allgather-pair.c +++ b/src/smpi/colls/allgather-pair.cpp @@ -79,8 +79,8 @@ smpi_coll_tuned_allgather_pair(void *send_buff, int send_count, char *send_ptr = (char *) send_buff; char *recv_ptr = (char *) recv_buff; - unsigned int rank = smpi_comm_rank(comm); - unsigned int num_procs = smpi_comm_size(comm); + unsigned int rank = comm->rank(); + unsigned int num_procs = comm->size(); 
if((num_procs&(num_procs-1))) THROWF(arg_error,0, "allgather pair algorithm can't be used with non power of two number of processes ! "); diff --git a/src/smpi/colls/allgather-rdb.c b/src/smpi/colls/allgather-rdb.cpp similarity index 97% rename from src/smpi/colls/allgather-rdb.c rename to src/smpi/colls/allgather-rdb.cpp index 84acf3d99f..1d43ca2e38 100644 --- a/src/smpi/colls/allgather-rdb.c +++ b/src/smpi/colls/allgather-rdb.cpp @@ -21,7 +21,7 @@ smpi_coll_tuned_allgather_rdb(void *sbuf, int send_count, int dst_tree_root, rank_tree_root, last_recv_count = 0, num_procs_completed; int offset, tmp_mask; int tag = COLL_TAG_ALLGATHER; - int mask = 1; + unsigned int mask = 1; int success = 0; int curr_count = recv_count; @@ -30,8 +30,8 @@ smpi_coll_tuned_allgather_rdb(void *sbuf, int send_count, char *recv_ptr = (char *) rbuf; // get size of the communicator, followed by rank - unsigned int num_procs = smpi_comm_size(comm); - unsigned int rank = smpi_comm_rank(comm); + unsigned int num_procs = comm->size(); + unsigned int rank = comm->rank(); // get size of single element's type for send buffer and recv buffer send_chunk = smpi_datatype_get_extent(send_type); diff --git a/src/smpi/colls/allgather-rhv.c b/src/smpi/colls/allgather-rhv.cpp similarity index 96% rename from src/smpi/colls/allgather-rhv.c rename to src/smpi/colls/allgather-rhv.cpp index 3aaf479d9a..08edb89fa2 100644 --- a/src/smpi/colls/allgather-rhv.c +++ b/src/smpi/colls/allgather-rhv.cpp @@ -21,16 +21,16 @@ smpi_coll_tuned_allgather_rhv(void *sbuf, int send_count, int i, dst, send_base_offset, recv_base_offset, send_chunk, recv_chunk, send_offset, recv_offset; int tag = COLL_TAG_ALLGATHER; - int mask; + unsigned int mask; int curr_count; // get size of the communicator, followed by rank - unsigned int num_procs = smpi_comm_size(comm); + unsigned int num_procs = comm->size(); if((num_procs&(num_procs-1))) THROWF(arg_error,0, "allgather rhv algorithm can't be used with non power of two number of processes 
! "); - unsigned int rank = smpi_comm_rank(comm); + unsigned int rank = comm->rank(); // get size of single element's type for send buffer and recv buffer s_extent = smpi_datatype_get_extent(send_type); diff --git a/src/smpi/colls/allgather-ring.c b/src/smpi/colls/allgather-ring.cpp similarity index 98% rename from src/smpi/colls/allgather-ring.c rename to src/smpi/colls/allgather-ring.cpp index 0e96a53ebb..2a6d84a63d 100644 --- a/src/smpi/colls/allgather-ring.c +++ b/src/smpi/colls/allgather-ring.cpp @@ -78,8 +78,8 @@ smpi_coll_tuned_allgather_ring(void *send_buff, int send_count, char *sendptr = (char *) send_buff; char *recvptr = (char *) recv_buff; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); extent = smpi_datatype_get_extent(send_type); // local send/recv diff --git a/src/smpi/colls/allgather-smp-simple.c b/src/smpi/colls/allgather-smp-simple.cpp similarity index 94% rename from src/smpi/colls/allgather-smp-simple.c rename to src/smpi/colls/allgather-smp-simple.cpp index e54c75b5b9..1db7bc2ff7 100644 --- a/src/smpi/colls/allgather-smp-simple.c +++ b/src/smpi/colls/allgather-smp-simple.cpp @@ -12,20 +12,20 @@ int smpi_coll_tuned_allgather_smp_simple(void *send_buf, int scount, MPI_Comm comm) { int src, dst, comm_size, rank; - comm_size = smpi_comm_size(comm); + comm_size = comm->size(); - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } int num_core=1; - if (smpi_comm_is_uniform(comm)){ - num_core = smpi_comm_size(smpi_comm_get_intra_comm(comm)); + if (comm->is_uniform()){ + num_core = comm->get_intra_comm()->size(); } if(comm_size%num_core) THROWF(arg_error,0, "allgather SMP simple algorithm can't be used with non multiple of NUM_CORE=%d number of processes ! 
", num_core); - rank = smpi_comm_rank(comm); + rank = comm->rank(); MPI_Aint rextent, sextent; rextent = smpi_datatype_get_extent(rtype); sextent = smpi_datatype_get_extent(stype); diff --git a/src/smpi/colls/allgather-spreading-simple.c b/src/smpi/colls/allgather-spreading-simple.cpp similarity index 98% rename from src/smpi/colls/allgather-spreading-simple.c rename to src/smpi/colls/allgather-spreading-simple.cpp index 1f51ea43cf..671f9fcb96 100644 --- a/src/smpi/colls/allgather-spreading-simple.c +++ b/src/smpi/colls/allgather-spreading-simple.cpp @@ -80,8 +80,8 @@ smpi_coll_tuned_allgather_spreading_simple(void *send_buff, int send_count, MPI_Status status; char *recv_ptr = (char *) recv_buff; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); extent = smpi_datatype_get_extent(send_type); num_reqs = (2 * num_procs) - 2; diff --git a/src/smpi/colls/allgatherv-GB.c b/src/smpi/colls/allgatherv-GB.cpp similarity index 96% rename from src/smpi/colls/allgatherv-GB.c rename to src/smpi/colls/allgatherv-GB.cpp index 1dd6103c6b..7941bcacf4 100644 --- a/src/smpi/colls/allgatherv-GB.c +++ b/src/smpi/colls/allgatherv-GB.cpp @@ -15,7 +15,7 @@ int smpi_coll_tuned_allgatherv_GB(void *send_buff, int send_count, smpi_mpi_gatherv(send_buff, send_count, send_type, recv_buff, recv_counts, recv_disps, recv_type, 0, comm); int num_procs, i, current, max = 0; - num_procs = smpi_comm_size(comm); + num_procs = comm->size(); for (i = 0; i < num_procs; i++) { current = recv_disps[i] + recv_counts[i]; if (current > max) diff --git a/src/smpi/colls/allgatherv-mpich-rdb.c b/src/smpi/colls/allgatherv-mpich-rdb.cpp similarity index 98% rename from src/smpi/colls/allgatherv-mpich-rdb.c rename to src/smpi/colls/allgatherv-mpich-rdb.cpp index a3614e5f69..b2ccc5d9eb 100644 --- a/src/smpi/colls/allgatherv-mpich-rdb.c +++ b/src/smpi/colls/allgatherv-mpich-rdb.cpp @@ -17,17 +17,17 @@ int smpi_coll_tuned_allgatherv_mpich_rdb ( 
MPI_Datatype recvtype, MPI_Comm comm) { - int j, i; + unsigned int j, i; MPI_Status status; MPI_Aint recvtype_extent, recvtype_true_extent, recvtype_true_lb; - int curr_cnt, dst, total_count; + unsigned int curr_cnt, dst, total_count; void *tmp_buf, *tmp_buf_rl; unsigned int mask, dst_tree_root, my_tree_root, position, send_offset, recv_offset, last_recv_cnt=0, nprocs_completed, k, offset, tmp_mask, tree_root; - unsigned int comm_size = smpi_comm_size(comm); - unsigned int rank = smpi_comm_rank(comm); + unsigned int comm_size = comm->size(); + unsigned int rank = comm->rank(); total_count = 0; for (i=0; irank(); + comm_size=comm->size(); recvtype_extent= smpi_datatype_get_extent( recvtype); total_count = 0; diff --git a/src/smpi/colls/allgatherv-ompi-bruck.c b/src/smpi/colls/allgatherv-ompi-bruck.cpp similarity index 98% rename from src/smpi/colls/allgatherv-ompi-bruck.c rename to src/smpi/colls/allgatherv-ompi-bruck.cpp index d87dbab0e7..d41691ca3d 100644 --- a/src/smpi/colls/allgatherv-ompi-bruck.c +++ b/src/smpi/colls/allgatherv-ompi-bruck.cpp @@ -91,8 +91,8 @@ int smpi_coll_tuned_allgatherv_ompi_bruck(void *sbuf, int scount, char *tmpsend = NULL, *tmprecv = NULL; MPI_Datatype new_rdtype = MPI_DATATYPE_NULL, new_sdtype = MPI_DATATYPE_NULL; - unsigned int size = smpi_comm_size(comm); - unsigned int rank = smpi_comm_rank(comm); + unsigned int size = comm->size(); + unsigned int rank = comm->rank(); XBT_DEBUG( "coll:tuned:allgather_ompi_bruck rank %d", rank); diff --git a/src/smpi/colls/allgatherv-ompi-neighborexchange.c b/src/smpi/colls/allgatherv-ompi-neighborexchange.cpp similarity index 99% rename from src/smpi/colls/allgatherv-ompi-neighborexchange.c rename to src/smpi/colls/allgatherv-ompi-neighborexchange.cpp index edf93c32cc..7392c4b73f 100644 --- a/src/smpi/colls/allgatherv-ompi-neighborexchange.c +++ b/src/smpi/colls/allgatherv-ompi-neighborexchange.cpp @@ -82,8 +82,8 @@ smpi_coll_tuned_allgatherv_ompi_neighborexchange(void *sbuf, int scount, char 
*tmpsend = NULL, *tmprecv = NULL; - size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + size = comm->size(); + rank = comm->rank(); if (size % 2) { XBT_DEBUG( diff --git a/src/smpi/colls/allgatherv-pair.c b/src/smpi/colls/allgatherv-pair.cpp similarity index 98% rename from src/smpi/colls/allgatherv-pair.c rename to src/smpi/colls/allgatherv-pair.cpp index b6eea73379..e5a5ac60b0 100644 --- a/src/smpi/colls/allgatherv-pair.c +++ b/src/smpi/colls/allgatherv-pair.cpp @@ -79,8 +79,8 @@ smpi_coll_tuned_allgatherv_pair(void *send_buff, int send_count, char *send_ptr = (char *) send_buff; char *recv_ptr = (char *) recv_buff; - unsigned int rank = smpi_comm_rank(comm); - unsigned int num_procs = smpi_comm_size(comm); + unsigned int rank = comm->rank(); + unsigned int num_procs = comm->size(); if((num_procs&(num_procs-1))) THROWF(arg_error,0, "allgatherv pair algorithm can't be used with non power of two number of processes ! "); diff --git a/src/smpi/colls/allgatherv-ring.c b/src/smpi/colls/allgatherv-ring.cpp similarity index 98% rename from src/smpi/colls/allgatherv-ring.c rename to src/smpi/colls/allgatherv-ring.cpp index 8ac68b76f5..b21f652dee 100644 --- a/src/smpi/colls/allgatherv-ring.c +++ b/src/smpi/colls/allgatherv-ring.cpp @@ -78,8 +78,8 @@ smpi_coll_tuned_allgatherv_ring(void *send_buff, int send_count, char *sendptr = (char *) send_buff; char *recvptr = (char *) recv_buff; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); extent = smpi_datatype_get_extent(send_type); // local send/recv diff --git a/src/smpi/colls/allreduce-lr.c b/src/smpi/colls/allreduce-lr.cpp similarity index 98% rename from src/smpi/colls/allreduce-lr.c rename to src/smpi/colls/allreduce-lr.cpp index 9bcf75c1fb..61ef576de1 100644 --- a/src/smpi/colls/allreduce-lr.c +++ b/src/smpi/colls/allreduce-lr.cpp @@ -29,8 +29,8 @@ smpi_coll_tuned_allreduce_lr(void *sbuf, void *rbuf, int rcount, int send_offset, recv_offset; 
int remainder, remainder_flag, remainder_offset; - rank = smpi_comm_rank(comm); - size = smpi_comm_size(comm); + rank = comm->rank(); + size = comm->size(); /* make it compatible with all data type */ MPI_Aint extent; diff --git a/src/smpi/colls/allreduce-mvapich-rs.c b/src/smpi/colls/allreduce-mvapich-rs.cpp similarity index 97% rename from src/smpi/colls/allreduce-mvapich-rs.c rename to src/smpi/colls/allreduce-mvapich-rs.cpp index 0bd2dbef6d..177b545e60 100644 --- a/src/smpi/colls/allreduce-mvapich-rs.c +++ b/src/smpi/colls/allreduce-mvapich-rs.cpp @@ -31,9 +31,9 @@ int smpi_coll_tuned_allreduce_mvapich2_rs(void *sendbuf, { int mpi_errno = MPI_SUCCESS; int newrank = 0; - unsigned int mask, pof2; - int dst, is_commutative, rem, newdst, i, - send_idx, recv_idx, last_idx, send_cnt, recv_cnt, *cnts, *disps; + int mask, pof2, i, send_idx, recv_idx, last_idx, send_cnt; + int dst, is_commutative, rem, newdst, + recv_cnt, *cnts, *disps; MPI_Aint true_lb, true_extent, extent; void *tmp_buf, *tmp_buf_free; @@ -43,8 +43,8 @@ int smpi_coll_tuned_allreduce_mvapich2_rs(void *sendbuf, /* homogeneous */ - unsigned int comm_size = smpi_comm_size(comm); - unsigned int rank = smpi_comm_rank(comm); + int comm_size = comm->size(); + int rank = comm->rank(); is_commutative = smpi_op_is_commute(op); diff --git a/src/smpi/colls/allreduce-mvapich-two-level.c b/src/smpi/colls/allreduce-mvapich-two-level.cpp similarity index 94% rename from src/smpi/colls/allreduce-mvapich-two-level.c rename to src/smpi/colls/allreduce-mvapich-two-level.cpp index 90ced9a362..def7b4f9bc 100644 --- a/src/smpi/colls/allreduce-mvapich-two-level.c +++ b/src/smpi/colls/allreduce-mvapich-two-level.cpp @@ -93,8 +93,8 @@ int smpi_coll_tuned_allreduce_mvapich2_two_level(void *sendbuf, if(MV2_Allreduce_function==NULL) MV2_Allreduce_function = smpi_coll_tuned_allreduce_rdb; - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + if(comm->get_leaders_comm()==MPI_COMM_NULL){ + 
comm->init_smp(); } if (count == 0) { @@ -103,12 +103,12 @@ int smpi_coll_tuned_allreduce_mvapich2_two_level(void *sendbuf, smpi_datatype_extent(datatype, &true_lb, &true_extent); - total_size = smpi_comm_size(comm); - shmem_comm = smpi_comm_get_intra_comm(comm); - local_rank = smpi_comm_rank(shmem_comm); - local_size = smpi_comm_size(shmem_comm); + total_size = comm->size(); + shmem_comm = comm->get_intra_comm(); + local_rank = shmem_comm->rank(); + local_size = shmem_comm->size(); - leader_comm = smpi_comm_get_leaders_comm(comm); + leader_comm = comm->get_leaders_comm(); if (local_rank == 0) { if (sendbuf != MPI_IN_PLACE) { diff --git a/src/smpi/colls/allreduce-ompi-ring-segmented.c b/src/smpi/colls/allreduce-ompi-ring-segmented.cpp similarity index 99% rename from src/smpi/colls/allreduce-ompi-ring-segmented.c rename to src/smpi/colls/allreduce-ompi-ring-segmented.cpp index a8ea2ac5c2..cbe09f9b5c 100644 --- a/src/smpi/colls/allreduce-ompi-ring-segmented.c +++ b/src/smpi/colls/allreduce-ompi-ring-segmented.cpp @@ -175,8 +175,8 @@ smpi_coll_tuned_allreduce_ompi_ring_segmented(void *sbuf, void *rbuf, int count, ptrdiff_t block_offset, max_real_segsize; MPI_Request reqs[2] = {NULL, NULL}; const size_t segsize = 1 << 20; /* 1 MB */ - unsigned int size = smpi_comm_size(comm); - unsigned int rank = smpi_comm_rank(comm); + int size = comm->size(); + int rank = comm->rank(); XBT_DEBUG("coll:tuned:allreduce_intra_ring_segmented rank %d, count %d", rank, count); diff --git a/src/smpi/colls/allreduce-rab-rdb.c b/src/smpi/colls/allreduce-rab-rdb.cpp similarity index 96% rename from src/smpi/colls/allreduce-rab-rdb.c rename to src/smpi/colls/allreduce-rab-rdb.cpp index 7499785afb..a4de2245e8 100644 --- a/src/smpi/colls/allreduce-rab-rdb.c +++ b/src/smpi/colls/allreduce-rab-rdb.cpp @@ -11,15 +11,15 @@ int smpi_coll_tuned_allreduce_rab_rdb(void *sbuff, void *rbuff, int count, MPI_Comm comm) { int tag = COLL_TAG_ALLREDUCE; - unsigned int mask, pof2; - int dst, newrank, rem, 
newdst, i, - send_idx, recv_idx, last_idx, send_cnt, recv_cnt, *cnts, *disps; + unsigned int mask, pof2, i, recv_idx, last_idx, send_idx, send_cnt; + int dst, newrank, rem, newdst, + recv_cnt, *cnts, *disps; MPI_Aint extent; MPI_Status status; void *tmp_buf = NULL; - unsigned int nprocs = smpi_comm_size(comm); - unsigned int rank = smpi_comm_rank(comm); + unsigned int nprocs = comm->size(); + int rank = comm->rank(); extent = smpi_datatype_get_extent(dtype); tmp_buf = (void *) smpi_get_tmp_sendbuffer(count * extent); diff --git a/src/smpi/colls/allreduce-rab1.c b/src/smpi/colls/allreduce-rab1.cpp similarity index 97% rename from src/smpi/colls/allreduce-rab1.c rename to src/smpi/colls/allreduce-rab1.cpp index 173d3e530d..eaf238d64d 100644 --- a/src/smpi/colls/allreduce-rab1.c +++ b/src/smpi/colls/allreduce-rab1.cpp @@ -20,8 +20,8 @@ int smpi_coll_tuned_allreduce_rab1(void *sbuff, void *rbuff, void *recv, *tmp_buf; - unsigned int rank = smpi_comm_rank(comm); - unsigned int nprocs = smpi_comm_size(comm); + int rank = comm->rank(); + unsigned int nprocs = comm->size(); if((nprocs&(nprocs-1))) THROWF(arg_error,0, "allreduce rab1 algorithm can't be used with non power of two number of processes ! 
"); diff --git a/src/smpi/colls/allreduce-rab2.c b/src/smpi/colls/allreduce-rab2.cpp similarity index 97% rename from src/smpi/colls/allreduce-rab2.c rename to src/smpi/colls/allreduce-rab2.cpp index 372d5e84cc..02f499551a 100644 --- a/src/smpi/colls/allreduce-rab2.c +++ b/src/smpi/colls/allreduce-rab2.cpp @@ -26,8 +26,8 @@ int smpi_coll_tuned_allreduce_rab2(void *sbuff, void *rbuff, uop = op_ptr->op; #endif */ - rank = smpi_comm_rank(comm); - nprocs = smpi_comm_size(comm); + rank = comm->rank(); + nprocs = comm->size(); s_extent = smpi_datatype_get_extent(dtype); diff --git a/src/smpi/colls/allreduce-rdb.c b/src/smpi/colls/allreduce-rdb.cpp similarity index 98% rename from src/smpi/colls/allreduce-rdb.c rename to src/smpi/colls/allreduce-rdb.cpp index 85a31efd54..450b5964d0 100644 --- a/src/smpi/colls/allreduce-rdb.c +++ b/src/smpi/colls/allreduce-rdb.cpp @@ -25,8 +25,8 @@ int smpi_coll_tuned_allreduce_rdb(void *sbuff, void *rbuff, int count, uop = op_ptr->op; #endif */ - nprocs=smpi_comm_size(comm); - rank=smpi_comm_rank(comm); + nprocs=comm->size(); + rank=comm->rank(); smpi_datatype_extent(dtype, &lb, &extent); tmp_buf = (void *) smpi_get_tmp_sendbuffer(count * extent); diff --git a/src/smpi/colls/allreduce-redbcast.c b/src/smpi/colls/allreduce-redbcast.cpp similarity index 100% rename from src/smpi/colls/allreduce-redbcast.c rename to src/smpi/colls/allreduce-redbcast.cpp diff --git a/src/smpi/colls/allreduce-smp-binomial-pipeline.c b/src/smpi/colls/allreduce-smp-binomial-pipeline.cpp similarity index 96% rename from src/smpi/colls/allreduce-smp-binomial-pipeline.c rename to src/smpi/colls/allreduce-smp-binomial-pipeline.cpp index 06a80a7a0c..5af338c33f 100644 --- a/src/smpi/colls/allreduce-smp-binomial-pipeline.c +++ b/src/smpi/colls/allreduce-smp-binomial-pipeline.cpp @@ -48,16 +48,16 @@ int smpi_coll_tuned_allreduce_smp_binomial_pipeline(void *send_buf, int tag = COLL_TAG_ALLREDUCE; int mask, src, dst; MPI_Status status; - 
if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } int num_core=1; - if (smpi_comm_is_uniform(comm)){ - num_core = smpi_comm_size(smpi_comm_get_intra_comm(comm)); + if (comm->is_uniform()){ + num_core = comm->get_intra_comm()->size(); } - comm_size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + comm_size = comm->size(); + rank = comm->rank(); MPI_Aint extent; extent = smpi_datatype_get_extent(dtype); tmp_buf = (void *) smpi_get_tmp_sendbuffer(count * extent); diff --git a/src/smpi/colls/allreduce-smp-binomial.c b/src/smpi/colls/allreduce-smp-binomial.cpp similarity index 95% rename from src/smpi/colls/allreduce-smp-binomial.c rename to src/smpi/colls/allreduce-smp-binomial.cpp index 3d159be30f..37829934e4 100644 --- a/src/smpi/colls/allreduce-smp-binomial.c +++ b/src/smpi/colls/allreduce-smp-binomial.cpp @@ -36,17 +36,17 @@ int smpi_coll_tuned_allreduce_smp_binomial(void *send_buf, void *recv_buf, int tag = COLL_TAG_ALLREDUCE; int mask, src, dst; - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } int num_core=1; - if (smpi_comm_is_uniform(comm)){ - num_core = smpi_comm_size(smpi_comm_get_intra_comm(comm)); + if (comm->is_uniform()){ + num_core = comm->get_intra_comm()->size(); } MPI_Status status; - comm_size=smpi_comm_size(comm); - rank=smpi_comm_rank(comm); + comm_size=comm->size(); + rank=comm->rank(); MPI_Aint extent, lb; smpi_datatype_extent(dtype, &lb, &extent); tmp_buf = (void *) smpi_get_tmp_sendbuffer(count * extent); diff --git a/src/smpi/colls/allreduce-smp-rdb.c b/src/smpi/colls/allreduce-smp-rdb.cpp similarity index 95% rename from src/smpi/colls/allreduce-smp-rdb.c rename to src/smpi/colls/allreduce-smp-rdb.cpp index 9770d2993f..b95bf7f443 100644 --- a/src/smpi/colls/allreduce-smp-rdb.c +++ b/src/smpi/colls/allreduce-smp-rdb.cpp @@ -35,12 +35,12 
@@ int smpi_coll_tuned_allreduce_smp_rdb(void *send_buf, void *recv_buf, int count, int tag = COLL_TAG_ALLREDUCE; int mask, src, dst; MPI_Status status; - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } int num_core=1; - if (smpi_comm_is_uniform(comm)){ - num_core = smpi_comm_size(smpi_comm_get_intra_comm(comm)); + if (comm->is_uniform()){ + num_core = comm->get_intra_comm()->size(); } /* #ifdef MPICH2_REDUCTION @@ -52,8 +52,8 @@ int smpi_coll_tuned_allreduce_smp_rdb(void *send_buf, void *recv_buf, int count, uop = op_ptr->op; #endif */ - comm_size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + comm_size = comm->size(); + rank = comm->rank(); MPI_Aint extent; extent = smpi_datatype_get_extent(dtype); tmp_buf = (void *) smpi_get_tmp_sendbuffer(count * extent); diff --git a/src/smpi/colls/allreduce-smp-rsag-lr.c b/src/smpi/colls/allreduce-smp-rsag-lr.cpp similarity index 96% rename from src/smpi/colls/allreduce-smp-rsag-lr.c rename to src/smpi/colls/allreduce-smp-rsag-lr.cpp index fa9296f2c8..7e3dfefb1d 100644 --- a/src/smpi/colls/allreduce-smp-rsag-lr.c +++ b/src/smpi/colls/allreduce-smp-rsag-lr.cpp @@ -23,12 +23,12 @@ int smpi_coll_tuned_allreduce_smp_rsag_lr(void *send_buf, void *recv_buf, int tag = COLL_TAG_ALLREDUCE; int mask, src, dst; MPI_Status status; - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } int num_core=1; - if (smpi_comm_is_uniform(comm)){ - num_core = smpi_comm_size(smpi_comm_get_intra_comm(comm)); + if (comm->is_uniform()){ + num_core = comm->get_intra_comm()->size(); } /* #ifdef MPICH2_REDUCTION @@ -40,8 +40,8 @@ int smpi_coll_tuned_allreduce_smp_rsag_lr(void *send_buf, void *recv_buf, uop = op_ptr->op; #endif */ - comm_size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + comm_size = comm->size(); + rank = comm->rank(); 
MPI_Aint extent; extent = smpi_datatype_get_extent(dtype); tmp_buf = (void *) smpi_get_tmp_sendbuffer(count * extent); diff --git a/src/smpi/colls/allreduce-smp-rsag-rab.c b/src/smpi/colls/allreduce-smp-rsag-rab.cpp similarity index 96% rename from src/smpi/colls/allreduce-smp-rsag-rab.c rename to src/smpi/colls/allreduce-smp-rsag-rab.cpp index 1ac827e5ed..64c17294a6 100644 --- a/src/smpi/colls/allreduce-smp-rsag-rab.c +++ b/src/smpi/colls/allreduce-smp-rsag-rab.cpp @@ -27,20 +27,20 @@ int smpi_coll_tuned_allreduce_smp_rsag_rab(void *sbuf, void *rbuf, int count, int tag = COLL_TAG_ALLREDUCE; int mask, src, dst; MPI_Status status; - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } int num_core=1; - if (smpi_comm_is_uniform(comm)){ - num_core = smpi_comm_size(smpi_comm_get_intra_comm(comm)); + if (comm->is_uniform()){ + num_core = comm->get_intra_comm()->size(); } - comm_size = smpi_comm_size(comm); + comm_size = comm->size(); if((comm_size&(comm_size-1))) THROWF(arg_error,0, "allreduce smp rsag rab algorithm can't be used with non power of two number of processes ! 
"); - rank = smpi_comm_rank(comm); + rank = comm->rank(); MPI_Aint extent; extent = smpi_datatype_get_extent(dtype); tmp_buf = (void *) smpi_get_tmp_sendbuffer(count * extent); diff --git a/src/smpi/colls/allreduce-smp-rsag.c b/src/smpi/colls/allreduce-smp-rsag.cpp similarity index 96% rename from src/smpi/colls/allreduce-smp-rsag.c rename to src/smpi/colls/allreduce-smp-rsag.cpp index b4a6885266..298c9784d0 100644 --- a/src/smpi/colls/allreduce-smp-rsag.c +++ b/src/smpi/colls/allreduce-smp-rsag.cpp @@ -22,12 +22,12 @@ int smpi_coll_tuned_allreduce_smp_rsag(void *send_buf, void *recv_buf, int tag = COLL_TAG_ALLREDUCE; int mask, src, dst; MPI_Status status; - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } int num_core=1; - if (smpi_comm_is_uniform(comm)){ - num_core = smpi_comm_size(smpi_comm_get_intra_comm(comm)); + if (comm->is_uniform()){ + num_core = comm->get_intra_comm()->size(); } /* #ifdef MPICH2_REDUCTION @@ -39,8 +39,8 @@ int smpi_coll_tuned_allreduce_smp_rsag(void *send_buf, void *recv_buf, uop = op_ptr->op; #endif */ - comm_size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + comm_size = comm->size(); + rank = comm->rank(); MPI_Aint extent; extent = smpi_datatype_get_extent(dtype); tmp_buf = (void *) smpi_get_tmp_sendbuffer(count * extent); diff --git a/src/smpi/colls/alltoall-2dmesh.c b/src/smpi/colls/alltoall-2dmesh.cpp similarity index 98% rename from src/smpi/colls/alltoall-2dmesh.c rename to src/smpi/colls/alltoall-2dmesh.cpp index dc9971627e..784552ab26 100644 --- a/src/smpi/colls/alltoall-2dmesh.c +++ b/src/smpi/colls/alltoall-2dmesh.cpp @@ -68,8 +68,8 @@ int smpi_coll_tuned_alltoall_2dmesh(void *send_buff, int send_count, int my_row_base, my_col_base, src_row_base, block_size; int tag = COLL_TAG_ALLTOALL; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); extent = 
smpi_datatype_get_extent(send_type); if (!alltoall_check_is_2dmesh(num_procs, &X, &Y)) diff --git a/src/smpi/colls/alltoall-3dmesh.c b/src/smpi/colls/alltoall-3dmesh.cpp similarity index 98% rename from src/smpi/colls/alltoall-3dmesh.c rename to src/smpi/colls/alltoall-3dmesh.cpp index 81b1ecdd7c..2c4b10c8f9 100644 --- a/src/smpi/colls/alltoall-3dmesh.c +++ b/src/smpi/colls/alltoall-3dmesh.cpp @@ -60,8 +60,8 @@ int smpi_coll_tuned_alltoall_3dmesh(void *send_buff, int send_count, char *tmp_buff1, *tmp_buff2; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); extent = smpi_datatype_get_extent(send_type); if (!alltoall_check_is_3dmesh(num_procs, &X, &Y, &Z)) diff --git a/src/smpi/colls/alltoall-bruck.c b/src/smpi/colls/alltoall-bruck.cpp similarity index 98% rename from src/smpi/colls/alltoall-bruck.c rename to src/smpi/colls/alltoall-bruck.cpp index 10b4d9cf78..903931ba37 100644 --- a/src/smpi/colls/alltoall-bruck.c +++ b/src/smpi/colls/alltoall-bruck.cpp @@ -43,8 +43,8 @@ smpi_coll_tuned_alltoall_bruck(void *send_buff, int send_count, char *send_ptr = (char *) send_buff; char *recv_ptr = (char *) recv_buff; - num_procs = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + num_procs = comm->size(); + rank = comm->rank(); extent = smpi_datatype_get_extent(recv_type); diff --git a/src/smpi/colls/alltoall-mvapich-scatter-dest.c b/src/smpi/colls/alltoall-mvapich-scatter-dest.cpp similarity index 98% rename from src/smpi/colls/alltoall-mvapich-scatter-dest.c rename to src/smpi/colls/alltoall-mvapich-scatter-dest.cpp index 41946973ba..92b80663ef 100644 --- a/src/smpi/colls/alltoall-mvapich-scatter-dest.c +++ b/src/smpi/colls/alltoall-mvapich-scatter-dest.cpp @@ -61,8 +61,8 @@ int smpi_coll_tuned_alltoall_mvapich2_scatter_dest( if (recvcount == 0) return MPI_SUCCESS; - comm_size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + comm_size = comm->size(); + rank = comm->rank(); /* Get extent of send 
and recv types */ recvtype_extent = smpi_datatype_get_extent(recvtype); diff --git a/src/smpi/colls/alltoall-pair-light-barrier.c b/src/smpi/colls/alltoall-pair-light-barrier.cpp similarity index 97% rename from src/smpi/colls/alltoall-pair-light-barrier.c rename to src/smpi/colls/alltoall-pair-light-barrier.cpp index 0232b51b3f..448fa95254 100644 --- a/src/smpi/colls/alltoall-pair-light-barrier.c +++ b/src/smpi/colls/alltoall-pair-light-barrier.cpp @@ -43,8 +43,8 @@ smpi_coll_tuned_alltoall_pair_light_barrier(void *send_buff, int send_count, char *send_ptr = (char *) send_buff; char *recv_ptr = (char *) recv_buff; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); if((num_procs&(num_procs-1))) THROWF(arg_error,0, "alltoall pair algorithm can't be used with non power of two number of processes ! "); diff --git a/src/smpi/colls/alltoall-pair-mpi-barrier.c b/src/smpi/colls/alltoall-pair-mpi-barrier.cpp similarity index 96% rename from src/smpi/colls/alltoall-pair-mpi-barrier.c rename to src/smpi/colls/alltoall-pair-mpi-barrier.cpp index 42823b5ff5..a324590a61 100644 --- a/src/smpi/colls/alltoall-pair-mpi-barrier.c +++ b/src/smpi/colls/alltoall-pair-mpi-barrier.cpp @@ -40,8 +40,8 @@ smpi_coll_tuned_alltoall_pair_mpi_barrier(void *send_buff, int send_count, char *send_ptr = (char *) send_buff; char *recv_ptr = (char *) recv_buff; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); if((num_procs&(num_procs-1))) THROWF(arg_error,0, "alltoall pair algorithm can't be used with non power of two number of processes ! 
"); diff --git a/src/smpi/colls/alltoall-pair-one-barrier.c b/src/smpi/colls/alltoall-pair-one-barrier.cpp similarity index 96% rename from src/smpi/colls/alltoall-pair-one-barrier.c rename to src/smpi/colls/alltoall-pair-one-barrier.cpp index a0ac128d98..04c9b856b9 100644 --- a/src/smpi/colls/alltoall-pair-one-barrier.c +++ b/src/smpi/colls/alltoall-pair-one-barrier.cpp @@ -41,8 +41,8 @@ smpi_coll_tuned_alltoall_pair_one_barrier(void *send_buff, int send_count, char *send_ptr = (char *) send_buff; char *recv_ptr = (char *) recv_buff; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); if((num_procs&(num_procs-1))) THROWF(arg_error,0, "alltoall pair algorithm can't be used with non power of two number of processes ! "); diff --git a/src/smpi/colls/alltoall-pair.c b/src/smpi/colls/alltoall-pair.cpp similarity index 95% rename from src/smpi/colls/alltoall-pair.c rename to src/smpi/colls/alltoall-pair.cpp index 8a0a76e15a..a7731c1dd6 100644 --- a/src/smpi/colls/alltoall-pair.c +++ b/src/smpi/colls/alltoall-pair.cpp @@ -40,8 +40,8 @@ int smpi_coll_tuned_alltoall_pair_rma(void *send_buff, int send_count, MPI_Datat char *send_ptr = (char *) send_buff; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); send_chunk = smpi_datatype_get_extent(send_type); recv_chunk = smpi_datatype_get_extent(recv_type); @@ -75,8 +75,8 @@ int smpi_coll_tuned_alltoall_pair(void *send_buff, int send_count, char *send_ptr = (char *) send_buff; char *recv_ptr = (char *) recv_buff; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); if((num_procs&(num_procs-1))) THROWF(arg_error,0, "alltoall pair algorithm can't be used with non power of two number of processes ! 
"); diff --git a/src/smpi/colls/alltoall-rdb.c b/src/smpi/colls/alltoall-rdb.cpp similarity index 98% rename from src/smpi/colls/alltoall-rdb.c rename to src/smpi/colls/alltoall-rdb.cpp index cb49bfce2a..ff417b36df 100644 --- a/src/smpi/colls/alltoall-rdb.c +++ b/src/smpi/colls/alltoall-rdb.cpp @@ -45,8 +45,8 @@ int smpi_coll_tuned_alltoall_rdb(void *send_buff, int send_count, char *send_ptr = (char *) send_buff; char *recv_ptr = (char *) recv_buff; - num_procs = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + num_procs = comm->size(); + rank = comm->rank(); send_increment = smpi_datatype_get_extent(send_type); recv_increment = smpi_datatype_get_extent(recv_type); extent = smpi_datatype_get_extent(recv_type); diff --git a/src/smpi/colls/alltoall-ring-light-barrier.c b/src/smpi/colls/alltoall-ring-light-barrier.cpp similarity index 97% rename from src/smpi/colls/alltoall-ring-light-barrier.c rename to src/smpi/colls/alltoall-ring-light-barrier.cpp index eb3de84673..26e91c2466 100644 --- a/src/smpi/colls/alltoall-ring-light-barrier.c +++ b/src/smpi/colls/alltoall-ring-light-barrier.cpp @@ -43,8 +43,8 @@ smpi_coll_tuned_alltoall_ring_light_barrier(void *send_buff, int send_count, char *send_ptr = (char *) send_buff; char *recv_ptr = (char *) recv_buff; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); send_chunk = smpi_datatype_get_extent(send_type); recv_chunk = smpi_datatype_get_extent(recv_type); diff --git a/src/smpi/colls/alltoall-ring-mpi-barrier.c b/src/smpi/colls/alltoall-ring-mpi-barrier.cpp similarity index 96% rename from src/smpi/colls/alltoall-ring-mpi-barrier.c rename to src/smpi/colls/alltoall-ring-mpi-barrier.cpp index 970a2653a7..2c8a9dc5e4 100644 --- a/src/smpi/colls/alltoall-ring-mpi-barrier.c +++ b/src/smpi/colls/alltoall-ring-mpi-barrier.cpp @@ -40,8 +40,8 @@ smpi_coll_tuned_alltoall_ring_mpi_barrier(void *send_buff, int send_count, char *send_ptr = (char *) send_buff; char 
*recv_ptr = (char *) recv_buff; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); send_chunk = smpi_datatype_get_extent(send_type); recv_chunk = smpi_datatype_get_extent(recv_type); diff --git a/src/smpi/colls/alltoall-ring-one-barrier.c b/src/smpi/colls/alltoall-ring-one-barrier.cpp similarity index 96% rename from src/smpi/colls/alltoall-ring-one-barrier.c rename to src/smpi/colls/alltoall-ring-one-barrier.cpp index 49f7802f39..ca0a770203 100644 --- a/src/smpi/colls/alltoall-ring-one-barrier.c +++ b/src/smpi/colls/alltoall-ring-one-barrier.cpp @@ -39,8 +39,8 @@ smpi_coll_tuned_alltoall_ring_one_barrier(void *send_buff, int send_count, char *send_ptr = (char *) send_buff; char *recv_ptr = (char *) recv_buff; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); send_chunk = smpi_datatype_get_extent(send_type); recv_chunk = smpi_datatype_get_extent(recv_type); diff --git a/src/smpi/colls/alltoall-ring.c b/src/smpi/colls/alltoall-ring.cpp similarity index 96% rename from src/smpi/colls/alltoall-ring.c rename to src/smpi/colls/alltoall-ring.cpp index af1cdbfb81..b87117378f 100644 --- a/src/smpi/colls/alltoall-ring.c +++ b/src/smpi/colls/alltoall-ring.cpp @@ -39,8 +39,8 @@ smpi_coll_tuned_alltoall_ring(void *send_buff, int send_count, char *send_ptr = (char *) send_buff; char *recv_ptr = (char *) recv_buff; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); send_chunk = smpi_datatype_get_extent(send_type); recv_chunk = smpi_datatype_get_extent(recv_type); diff --git a/src/smpi/colls/alltoallv-bruck.c b/src/smpi/colls/alltoallv-bruck.cpp similarity index 98% rename from src/smpi/colls/alltoallv-bruck.c rename to src/smpi/colls/alltoallv-bruck.cpp index d7d0c37a76..9d18f95042 100644 --- a/src/smpi/colls/alltoallv-bruck.c +++ b/src/smpi/colls/alltoallv-bruck.cpp @@ -26,8 
+26,8 @@ int smpi_coll_tuned_alltoallv_bruck(void *sendbuf, int *sendcounts, int *senddis MPI_Request *requests; // FIXME: check implementation - rank = smpi_comm_rank(comm); - size = smpi_comm_size(comm); + rank = comm->rank(); + size = comm->size(); XBT_DEBUG("<%d> algorithm alltoall_bruck() called.", rank); smpi_datatype_extent(sendtype, &lb, &sendext); diff --git a/src/smpi/colls/alltoallv-ompi-basic-linear.c b/src/smpi/colls/alltoallv-ompi-basic-linear.cpp similarity index 96% rename from src/smpi/colls/alltoallv-ompi-basic-linear.c rename to src/smpi/colls/alltoallv-ompi-basic-linear.cpp index c4218e8db7..0f55bda157 100644 --- a/src/smpi/colls/alltoallv-ompi-basic-linear.c +++ b/src/smpi/colls/alltoallv-ompi-basic-linear.cpp @@ -26,9 +26,9 @@ smpi_coll_tuned_alltoallv_ompi_basic_linear(void *sbuf, int *scounts, int *sdisp int nreqs; ptrdiff_t sext, rext; MPI_Request *preq; - size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); - MPI_Request *ireqs= xbt_malloc(sizeof(MPI_Request) * size * 2); + size = comm->size(); + rank = comm->rank(); + MPI_Request *ireqs= static_cast(xbt_malloc(sizeof(MPI_Request) * size * 2)); XBT_DEBUG( "coll:tuned:alltoallv_intra_basic_linear rank %d", rank); diff --git a/src/smpi/colls/alltoallv-pair-light-barrier.c b/src/smpi/colls/alltoallv-pair-light-barrier.cpp similarity index 97% rename from src/smpi/colls/alltoallv-pair-light-barrier.c rename to src/smpi/colls/alltoallv-pair-light-barrier.cpp index 20deb834b6..ea05e33ebe 100644 --- a/src/smpi/colls/alltoallv-pair-light-barrier.c +++ b/src/smpi/colls/alltoallv-pair-light-barrier.cpp @@ -43,8 +43,8 @@ smpi_coll_tuned_alltoallv_pair_light_barrier(void *send_buff, int *send_counts, char *send_ptr = (char *) send_buff; char *recv_ptr = (char *) recv_buff; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); if((num_procs&(num_procs-1))) THROWF(arg_error,0, "alltoallv pair algorithm can't be used with non power of 
two number of processes ! "); diff --git a/src/smpi/colls/alltoallv-pair-mpi-barrier.c b/src/smpi/colls/alltoallv-pair-mpi-barrier.cpp similarity index 97% rename from src/smpi/colls/alltoallv-pair-mpi-barrier.c rename to src/smpi/colls/alltoallv-pair-mpi-barrier.cpp index b5b05e803b..95231a187a 100644 --- a/src/smpi/colls/alltoallv-pair-mpi-barrier.c +++ b/src/smpi/colls/alltoallv-pair-mpi-barrier.cpp @@ -40,8 +40,8 @@ smpi_coll_tuned_alltoallv_pair_mpi_barrier(void *send_buff, int *send_counts, in char *send_ptr = (char *) send_buff; char *recv_ptr = (char *) recv_buff; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); if((num_procs&(num_procs-1))) THROWF(arg_error,0, "alltoallv pair algorithm can't be used with non power of two number of processes ! "); diff --git a/src/smpi/colls/alltoallv-pair-one-barrier.c b/src/smpi/colls/alltoallv-pair-one-barrier.cpp similarity index 96% rename from src/smpi/colls/alltoallv-pair-one-barrier.c rename to src/smpi/colls/alltoallv-pair-one-barrier.cpp index 1039812b95..c53ddde2dd 100644 --- a/src/smpi/colls/alltoallv-pair-one-barrier.c +++ b/src/smpi/colls/alltoallv-pair-one-barrier.cpp @@ -40,8 +40,8 @@ smpi_coll_tuned_alltoallv_pair_one_barrier(void *send_buff, int *send_counts, in char *send_ptr = (char *) send_buff; char *recv_ptr = (char *) recv_buff; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); if((num_procs&(num_procs-1))) THROWF(arg_error,0, "alltoallv pair algorithm can't be used with non power of two number of processes ! 
"); diff --git a/src/smpi/colls/alltoallv-pair.c b/src/smpi/colls/alltoallv-pair.cpp similarity index 96% rename from src/smpi/colls/alltoallv-pair.c rename to src/smpi/colls/alltoallv-pair.cpp index 43940e7673..cb2551de37 100644 --- a/src/smpi/colls/alltoallv-pair.c +++ b/src/smpi/colls/alltoallv-pair.cpp @@ -40,8 +40,8 @@ int smpi_coll_tuned_alltoallv_pair(void *send_buff, int *send_counts, int *send_ char *send_ptr = (char *) send_buff; char *recv_ptr = (char *) recv_buff; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); if((num_procs&(num_procs-1))) THROWF(arg_error,0, "alltoallv pair algorithm can't be used with non power of two number of processes ! "); diff --git a/src/smpi/colls/alltoallv-ring-light-barrier.c b/src/smpi/colls/alltoallv-ring-light-barrier.cpp similarity index 97% rename from src/smpi/colls/alltoallv-ring-light-barrier.c rename to src/smpi/colls/alltoallv-ring-light-barrier.cpp index b493dd76dd..0e3086f59e 100644 --- a/src/smpi/colls/alltoallv-ring-light-barrier.c +++ b/src/smpi/colls/alltoallv-ring-light-barrier.cpp @@ -43,8 +43,8 @@ smpi_coll_tuned_alltoallv_ring_light_barrier(void *send_buff, int *send_counts, char *send_ptr = (char *) send_buff; char *recv_ptr = (char *) recv_buff; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); send_chunk = smpi_datatype_get_extent(send_type); recv_chunk = smpi_datatype_get_extent(recv_type); diff --git a/src/smpi/colls/alltoallv-ring-mpi-barrier.c b/src/smpi/colls/alltoallv-ring-mpi-barrier.cpp similarity index 96% rename from src/smpi/colls/alltoallv-ring-mpi-barrier.c rename to src/smpi/colls/alltoallv-ring-mpi-barrier.cpp index 723354b823..ed7030f0e8 100644 --- a/src/smpi/colls/alltoallv-ring-mpi-barrier.c +++ b/src/smpi/colls/alltoallv-ring-mpi-barrier.cpp @@ -40,8 +40,8 @@ smpi_coll_tuned_alltoallv_ring_mpi_barrier(void *send_buff, int *send_counts, in char 
*send_ptr = (char *) send_buff; char *recv_ptr = (char *) recv_buff; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); send_chunk = smpi_datatype_get_extent(send_type); recv_chunk = smpi_datatype_get_extent(recv_type); diff --git a/src/smpi/colls/alltoallv-ring-one-barrier.c b/src/smpi/colls/alltoallv-ring-one-barrier.cpp similarity index 96% rename from src/smpi/colls/alltoallv-ring-one-barrier.c rename to src/smpi/colls/alltoallv-ring-one-barrier.cpp index e4fdbc4739..443bebe74d 100644 --- a/src/smpi/colls/alltoallv-ring-one-barrier.c +++ b/src/smpi/colls/alltoallv-ring-one-barrier.cpp @@ -39,8 +39,8 @@ smpi_coll_tuned_alltoallv_ring_one_barrier(void *send_buff, int *send_counts, in char *send_ptr = (char *) send_buff; char *recv_ptr = (char *) recv_buff; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); send_chunk = smpi_datatype_get_extent(send_type); recv_chunk = smpi_datatype_get_extent(recv_type); diff --git a/src/smpi/colls/alltoallv-ring.c b/src/smpi/colls/alltoallv-ring.cpp similarity index 96% rename from src/smpi/colls/alltoallv-ring.c rename to src/smpi/colls/alltoallv-ring.cpp index 3f2ced2ab7..cc46d4ccd9 100644 --- a/src/smpi/colls/alltoallv-ring.c +++ b/src/smpi/colls/alltoallv-ring.cpp @@ -40,8 +40,8 @@ smpi_coll_tuned_alltoallv_ring(void *send_buff, int *send_counts, int *send_disp char *send_ptr = (char *) send_buff; char *recv_ptr = (char *) recv_buff; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); send_chunk = smpi_datatype_get_extent(send_type); recv_chunk = smpi_datatype_get_extent(recv_type); int pof2 = ((num_procs != 0) && ((num_procs & (~num_procs + 1)) == num_procs)); diff --git a/src/smpi/colls/barrier-mvapich2-pair.c b/src/smpi/colls/barrier-mvapich2-pair.cpp similarity index 98% rename from src/smpi/colls/barrier-mvapich2-pair.c 
rename to src/smpi/colls/barrier-mvapich2-pair.cpp index 2ef6ad1f9a..9683162cdb 100644 --- a/src/smpi/colls/barrier-mvapich2-pair.c +++ b/src/smpi/colls/barrier-mvapich2-pair.cpp @@ -49,12 +49,12 @@ int smpi_coll_tuned_barrier_mvapich2_pair(MPI_Comm comm) int d, dst, src; int mpi_errno = MPI_SUCCESS; - size = smpi_comm_size(comm); + size = comm->size(); /* Trivial barriers return immediately */ if (size == 1) return MPI_SUCCESS; - rank = smpi_comm_rank(comm); + rank = comm->rank(); int N2_prev = 1; /* N2_prev = greatest power of two < size of Comm */ for( N2_prev = 1; N2_prev <= size; N2_prev <<= 1 ); diff --git a/src/smpi/colls/barrier-ompi.c b/src/smpi/colls/barrier-ompi.cpp similarity index 96% rename from src/smpi/colls/barrier-ompi.c rename to src/smpi/colls/barrier-ompi.cpp index d61177d987..3d9f943acc 100644 --- a/src/smpi/colls/barrier-ompi.c +++ b/src/smpi/colls/barrier-ompi.cpp @@ -50,8 +50,8 @@ int smpi_coll_tuned_barrier_ompi_doublering(MPI_Comm comm int left, right; - rank = smpi_comm_rank(comm); - size = smpi_comm_size(comm); + rank = comm->rank(); + size = comm->size(); XBT_DEBUG("ompi_coll_tuned_barrier_ompi_doublering rank %d", rank); @@ -110,8 +110,8 @@ int smpi_coll_tuned_barrier_ompi_recursivedoubling(MPI_Comm comm int rank, size, adjsize; int mask, remote; - rank = smpi_comm_rank(comm); - size = smpi_comm_size(comm); + rank = comm->rank(); + size = comm->size(); XBT_DEBUG( "ompi_coll_tuned_barrier_ompi_recursivedoubling rank %d", rank); @@ -185,8 +185,8 @@ int smpi_coll_tuned_barrier_ompi_bruck(MPI_Comm comm int rank, size; int distance, to, from; - rank = smpi_comm_rank(comm); - size = smpi_comm_size(comm); + rank = comm->rank(); + size = comm->size(); XBT_DEBUG( "ompi_coll_tuned_barrier_ompi_bruck rank %d", rank); @@ -217,7 +217,7 @@ int smpi_coll_tuned_barrier_ompi_two_procs(MPI_Comm comm { int remote; - remote = smpi_comm_rank(comm); + remote = comm->rank(); XBT_DEBUG( "ompi_coll_tuned_barrier_ompi_two_procs rank %d", remote); remote = 
(remote + 1) & 0x1; @@ -248,8 +248,8 @@ int smpi_coll_tuned_barrier_ompi_two_procs(MPI_Comm comm int smpi_coll_tuned_barrier_ompi_basic_linear(MPI_Comm comm) { int i; - int size = smpi_comm_size(comm); - int rank = smpi_comm_rank(comm); + int size = comm->size(); + int rank = comm->rank(); /* All non-root send & receive zero-length message. */ @@ -302,8 +302,8 @@ int smpi_coll_tuned_barrier_ompi_tree(MPI_Comm comm) int rank, size, depth; int jump, partner; - rank = smpi_comm_rank(comm); - size = smpi_comm_size(comm); + rank = comm->rank(); + size = comm->size(); XBT_DEBUG( "ompi_coll_tuned_barrier_ompi_tree %d", rank); diff --git a/src/smpi/colls/bcast-NTSB.c b/src/smpi/colls/bcast-NTSB.cpp similarity index 99% rename from src/smpi/colls/bcast-NTSB.c rename to src/smpi/colls/bcast-NTSB.cpp index ffdcfc2504..5869f15082 100644 --- a/src/smpi/colls/bcast-NTSB.c +++ b/src/smpi/colls/bcast-NTSB.cpp @@ -24,8 +24,8 @@ int smpi_coll_tuned_bcast_NTSB(void *buf, int count, MPI_Datatype datatype, MPI_Aint extent; extent = smpi_datatype_get_extent(datatype); - rank = smpi_comm_rank(comm); - size = smpi_comm_size(comm); + rank = comm->rank(); + size = comm->size(); /* source node and destination nodes (same through out the functions) */ int from = (rank - 1) / 2; diff --git a/src/smpi/colls/bcast-NTSL-Isend.c b/src/smpi/colls/bcast-NTSL-Isend.cpp similarity index 98% rename from src/smpi/colls/bcast-NTSL-Isend.c rename to src/smpi/colls/bcast-NTSL-Isend.cpp index cb1e007423..5304f31c98 100644 --- a/src/smpi/colls/bcast-NTSL-Isend.c +++ b/src/smpi/colls/bcast-NTSL-Isend.cpp @@ -26,8 +26,8 @@ int smpi_coll_tuned_bcast_NTSL_Isend(void *buf, int count, MPI_Datatype datatype MPI_Aint extent; extent = smpi_datatype_get_extent(datatype); - rank = smpi_comm_rank(comm); - size = smpi_comm_size(comm); + rank = comm->rank(); + size = comm->size(); /* source node and destination nodes (same through out the functions) */ int to = (rank + 1) % size; diff --git a/src/smpi/colls/bcast-NTSL.c 
b/src/smpi/colls/bcast-NTSL.cpp similarity index 98% rename from src/smpi/colls/bcast-NTSL.c rename to src/smpi/colls/bcast-NTSL.cpp index c9df1a7c29..146d840326 100644 --- a/src/smpi/colls/bcast-NTSL.c +++ b/src/smpi/colls/bcast-NTSL.cpp @@ -26,8 +26,8 @@ int smpi_coll_tuned_bcast_NTSL(void *buf, int count, MPI_Datatype datatype, MPI_Aint extent; extent = smpi_datatype_get_extent(datatype); - rank = smpi_comm_rank(comm); - size = smpi_comm_size(comm); + rank = comm->rank(); + size = comm->size(); /* source node and destination nodes (same through out the functions) */ int to = (rank + 1) % size; diff --git a/src/smpi/colls/bcast-SMP-binary.c b/src/smpi/colls/bcast-SMP-binary.cpp similarity index 97% rename from src/smpi/colls/bcast-SMP-binary.c rename to src/smpi/colls/bcast-SMP-binary.cpp index 5264ca3648..b551d84d18 100644 --- a/src/smpi/colls/bcast-SMP-binary.c +++ b/src/smpi/colls/bcast-SMP-binary.cpp @@ -23,14 +23,14 @@ int smpi_coll_tuned_bcast_SMP_binary(void *buf, int count, MPI_Aint extent; extent = smpi_datatype_get_extent(datatype); - rank = smpi_comm_rank(comm); - size = smpi_comm_size(comm); - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + rank = comm->rank(); + size = comm->size(); + if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } int host_num_core=1; - if (smpi_comm_is_uniform(comm)){ - host_num_core = smpi_comm_size(smpi_comm_get_intra_comm(comm)); + if (comm->is_uniform()){ + host_num_core = comm->get_intra_comm()->size(); }else{ //implementation buggy in this case return smpi_coll_tuned_bcast_mpich( buf , count, datatype, diff --git a/src/smpi/colls/bcast-SMP-binomial.c b/src/smpi/colls/bcast-SMP-binomial.cpp similarity index 92% rename from src/smpi/colls/bcast-SMP-binomial.c rename to src/smpi/colls/bcast-SMP-binomial.cpp index 6053e49c7a..911721926d 100644 --- a/src/smpi/colls/bcast-SMP-binomial.c +++ b/src/smpi/colls/bcast-SMP-binomial.cpp @@ -16,15 +16,15 @@ int 
smpi_coll_tuned_bcast_SMP_binomial(void *buf, int count, MPI_Status status; int tag = COLL_TAG_BCAST; - size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + size = comm->size(); + rank = comm->rank(); - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } int num_core=1; - if (smpi_comm_is_uniform(comm)){ - num_core = smpi_comm_size(smpi_comm_get_intra_comm(comm)); + if (comm->is_uniform()){ + num_core = comm->get_intra_comm()->size(); }else{ //implementation buggy in this case return smpi_coll_tuned_bcast_mpich( buf , count, datatype, diff --git a/src/smpi/colls/bcast-SMP-linear.c b/src/smpi/colls/bcast-SMP-linear.cpp similarity index 96% rename from src/smpi/colls/bcast-SMP-linear.c rename to src/smpi/colls/bcast-SMP-linear.cpp index b3f9b6a630..049682de39 100644 --- a/src/smpi/colls/bcast-SMP-linear.c +++ b/src/smpi/colls/bcast-SMP-linear.cpp @@ -22,14 +22,14 @@ int smpi_coll_tuned_bcast_SMP_linear(void *buf, int count, MPI_Aint extent; extent = smpi_datatype_get_extent(datatype); - rank = smpi_comm_rank(comm); - size = smpi_comm_size(comm); - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + rank = comm->rank(); + size = comm->size(); + if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } int num_core=1; - if (smpi_comm_is_uniform(comm)){ - num_core = smpi_comm_size(smpi_comm_get_intra_comm(comm)); + if (comm->is_uniform()){ + num_core = comm->get_intra_comm()->size(); }else{ //implementation buggy in this case return smpi_coll_tuned_bcast_mpich( buf , count, datatype, diff --git a/src/smpi/colls/bcast-arrival-pattern-aware-wait.c b/src/smpi/colls/bcast-arrival-pattern-aware-wait.cpp similarity index 99% rename from src/smpi/colls/bcast-arrival-pattern-aware-wait.c rename to src/smpi/colls/bcast-arrival-pattern-aware-wait.cpp index 8e33648fc8..2b3a0a6262 100644 --- 
a/src/smpi/colls/bcast-arrival-pattern-aware-wait.c +++ b/src/smpi/colls/bcast-arrival-pattern-aware-wait.cpp @@ -55,8 +55,8 @@ int smpi_coll_tuned_bcast_arrival_pattern_aware_wait(void *buf, int count, - rank = smpi_comm_rank(comm); - size = smpi_comm_size(comm); + rank = comm->rank(); + size = comm->size(); /* segment is segment size in number of elements (not bytes) */ diff --git a/src/smpi/colls/bcast-arrival-pattern-aware.c b/src/smpi/colls/bcast-arrival-pattern-aware.cpp similarity index 99% rename from src/smpi/colls/bcast-arrival-pattern-aware.c rename to src/smpi/colls/bcast-arrival-pattern-aware.cpp index 09fbbcd571..03e4f300eb 100644 --- a/src/smpi/colls/bcast-arrival-pattern-aware.c +++ b/src/smpi/colls/bcast-arrival-pattern-aware.cpp @@ -45,8 +45,8 @@ int smpi_coll_tuned_bcast_arrival_pattern_aware(void *buf, int count, - rank = smpi_comm_rank(comm); - size = smpi_comm_size(comm); + rank = comm->rank(); + size = comm->size(); /* segment is segment size in number of elements (not bytes) */ diff --git a/src/smpi/colls/bcast-arrival-scatter.c b/src/smpi/colls/bcast-arrival-scatter.cpp similarity index 99% rename from src/smpi/colls/bcast-arrival-scatter.c rename to src/smpi/colls/bcast-arrival-scatter.cpp index ff7b10bc8e..4d0b24a178 100644 --- a/src/smpi/colls/bcast-arrival-scatter.c +++ b/src/smpi/colls/bcast-arrival-scatter.cpp @@ -53,8 +53,8 @@ int smpi_coll_tuned_bcast_arrival_scatter(void *buf, int count, /* source and destination */ int to, from; - rank = smpi_comm_rank(comm); - size = smpi_comm_size(comm); + rank = comm->rank(); + size = comm->size(); /* message too small */ if (count < size) { diff --git a/src/smpi/colls/bcast-binomial-tree.c b/src/smpi/colls/bcast-binomial-tree.cpp similarity index 98% rename from src/smpi/colls/bcast-binomial-tree.c rename to src/smpi/colls/bcast-binomial-tree.cpp index 955a814827..647ce9edaa 100644 --- a/src/smpi/colls/bcast-binomial-tree.c +++ b/src/smpi/colls/bcast-binomial-tree.cpp @@ -76,8 +76,8 @@ 
smpi_coll_tuned_bcast_binomial_tree(void *buff, int count, int src, dst, rank, num_procs, mask, relative_rank; int tag = COLL_TAG_BCAST; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); relative_rank = (rank >= root) ? rank - root : rank - root + num_procs; diff --git a/src/smpi/colls/bcast-flattree-pipeline.c b/src/smpi/colls/bcast-flattree-pipeline.cpp similarity index 96% rename from src/smpi/colls/bcast-flattree-pipeline.c rename to src/smpi/colls/bcast-flattree-pipeline.cpp index 9b94eb214e..9246b51841 100644 --- a/src/smpi/colls/bcast-flattree-pipeline.c +++ b/src/smpi/colls/bcast-flattree-pipeline.cpp @@ -27,8 +27,8 @@ smpi_coll_tuned_bcast_flattree_pipeline(void *buff, int count, XBT_WARN("MPI_bcast_flattree_pipeline use default MPI_bcast_flattree."); return smpi_coll_tuned_bcast_flattree(buff, count, data_type, root, comm); } - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); MPI_Request *request_array; MPI_Status *status_array; diff --git a/src/smpi/colls/bcast-flattree.c b/src/smpi/colls/bcast-flattree.cpp similarity index 94% rename from src/smpi/colls/bcast-flattree.c rename to src/smpi/colls/bcast-flattree.cpp index 5d4708d1e3..bafa90d3b4 100644 --- a/src/smpi/colls/bcast-flattree.c +++ b/src/smpi/colls/bcast-flattree.cpp @@ -16,8 +16,8 @@ smpi_coll_tuned_bcast_flattree(void *buff, int count, MPI_Datatype data_type, int i, rank, num_procs; int tag = COLL_TAG_BCAST; - rank = smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); if (rank != root) { smpi_mpi_recv(buff, count, data_type, root, tag, comm, MPI_STATUS_IGNORE); diff --git a/src/smpi/colls/bcast-mvapich-smp.c b/src/smpi/colls/bcast-mvapich-smp.cpp similarity index 92% rename from src/smpi/colls/bcast-mvapich-smp.c rename to src/smpi/colls/bcast-mvapich-smp.cpp index 5202177afe..083110b9fa 100644 --- 
a/src/smpi/colls/bcast-mvapich-smp.c +++ b/src/smpi/colls/bcast-mvapich-smp.cpp @@ -86,8 +86,8 @@ int smpi_coll_tuned_bcast_mvapich2_inter_node(void *buffer, int leader_root, leader_of_root; - rank = smpi_comm_rank(comm); - //comm_size = smpi_comm_size(comm); + rank = comm->rank(); + //comm_size = comm->size(); if (MV2_Bcast_function==NULL){ @@ -98,23 +98,23 @@ int smpi_coll_tuned_bcast_mvapich2_inter_node(void *buffer, MV2_Bcast_intra_node_function= smpi_coll_tuned_bcast_mpich; } - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } - shmem_comm = smpi_comm_get_intra_comm(comm); - local_rank = smpi_comm_rank(shmem_comm); - local_size = smpi_comm_size(shmem_comm); + shmem_comm = comm->get_intra_comm(); + local_rank = shmem_comm->rank(); + local_size = shmem_comm->size(); - leader_comm = smpi_comm_get_leaders_comm(comm); + leader_comm = comm->get_leaders_comm(); if ((local_rank == 0) && (local_size > 1)) { - global_rank = smpi_comm_rank(leader_comm); + global_rank = leader_comm->rank(); } - int* leaders_map = smpi_comm_get_leaders_map(comm); - leader_of_root = smpi_group_rank(smpi_comm_group(comm),leaders_map[root]); - leader_root = smpi_group_rank(smpi_comm_group(leader_comm),leaders_map[root]); + int* leaders_map = comm->get_leaders_map(); + leader_of_root = comm->group()->rank(leaders_map[root]); + leader_root = leader_comm->group()->rank(leaders_map[root]); if (local_size > 1) { @@ -140,7 +140,7 @@ int smpi_coll_tuned_bcast_mvapich2_inter_node(void *buffer, #endif /* if (local_rank == 0) { - leader_comm = smpi_comm_get_leaders_comm(comm); + leader_comm = comm->get_leaders_comm(); root = leader_root; } @@ -187,12 +187,12 @@ int smpi_coll_tuned_bcast_mvapich2_knomial_intra_node(void *buffer, MV2_Bcast_intra_node_function= smpi_coll_tuned_bcast_mpich; } - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + 
if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } - local_size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + local_size = comm->size(); + rank = comm->rank(); reqarray=(MPI_Request *)xbt_malloc(2 * mv2_intra_node_knomial_factor * sizeof (MPI_Request)); @@ -268,12 +268,12 @@ int smpi_coll_tuned_bcast_mvapich2_intra_node(void *buffer, MV2_Bcast_intra_node_function= smpi_coll_tuned_bcast_mpich; } - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } - comm_size = smpi_comm_size(comm); - // rank = smpi_comm_rank(comm); + comm_size = comm->size(); + // rank = comm->rank(); /* if (HANDLE_GET_KIND(datatype) == HANDLE_KIND_BUILTIN)*/ is_contig = 1; @@ -329,7 +329,7 @@ int smpi_coll_tuned_bcast_mvapich2_intra_node(void *buffer, /* }*/ } - shmem_comm = smpi_comm_get_intra_comm(comm); + shmem_comm = comm->get_intra_comm(); if (!is_contig || !is_homogeneous) { mpi_errno = MPIR_Bcast_inter_node_helper_MV2(tmp_buf, nbytes, MPI_BYTE, diff --git a/src/smpi/colls/bcast-ompi-pipeline.c b/src/smpi/colls/bcast-ompi-pipeline.cpp similarity index 96% rename from src/smpi/colls/bcast-ompi-pipeline.c rename to src/smpi/colls/bcast-ompi-pipeline.cpp index 3aebd8cb50..a55825009d 100644 --- a/src/smpi/colls/bcast-ompi-pipeline.c +++ b/src/smpi/colls/bcast-ompi-pipeline.cpp @@ -18,7 +18,7 @@ int smpi_coll_tuned_bcast_ompi_pipeline( void* buffer, { int count_by_segment = original_count; size_t type_size; - int segsize =1024 << 7; + size_t segsize =1024 << 7; //mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module; //mca_coll_tuned_comm_t *data = tuned_module->tuned_data; @@ -42,8 +42,8 @@ int smpi_coll_tuned_bcast_ompi_pipeline( void* buffer, */ type_size = smpi_datatype_size(datatype); - size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + size = comm->size(); + rank = comm->rank(); if(size==1)return MPI_SUCCESS; @@ -71,8 +71,8 @@ int 
smpi_coll_tuned_bcast_ompi_pipeline( void* buffer, COLL_TUNED_COMPUTED_SEGCOUNT( segsize, type_size, count_by_segment ); - XBT_DEBUG("coll:tuned:bcast_intra_pipeline rank %d ss %5d type_size %lu count_by_segment %d", - smpi_comm_rank(comm), segsize, (unsigned long)type_size, count_by_segment); + XBT_DEBUG("coll:tuned:bcast_intra_pipeline rank %d ss %5zu type_size %lu count_by_segment %d", + comm->rank(), segsize, (unsigned long)type_size, count_by_segment); diff --git a/src/smpi/colls/bcast-ompi-split-bintree.c b/src/smpi/colls/bcast-ompi-split-bintree.cpp similarity index 99% rename from src/smpi/colls/bcast-ompi-split-bintree.c rename to src/smpi/colls/bcast-ompi-split-bintree.cpp index dae4d27926..fdf9b1f08a 100644 --- a/src/smpi/colls/bcast-ompi-split-bintree.c +++ b/src/smpi/colls/bcast-ompi-split-bintree.cpp @@ -66,7 +66,7 @@ smpi_coll_tuned_bcast_ompi_split_bintree ( void* buffer, int root, MPI_Comm comm) { - int segsize ; + unsigned int segsize ; int rank, size; int segindex, i, lr, pair; int segcount[2]; /* Number ompi_request_wait_allof elements sent with each segment */ @@ -84,8 +84,8 @@ smpi_coll_tuned_bcast_ompi_split_bintree ( void* buffer, // mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module; // mca_coll_tuned_comm_t *data = tuned_module->tuned_data; - size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + size = comm->size(); + rank = comm->rank(); //compute again segsize diff --git a/src/smpi/colls/bcast-scatter-LR-allgather.c b/src/smpi/colls/bcast-scatter-LR-allgather.cpp similarity index 99% rename from src/smpi/colls/bcast-scatter-LR-allgather.c rename to src/smpi/colls/bcast-scatter-LR-allgather.cpp index 87d84cae19..d8091d9e98 100644 --- a/src/smpi/colls/bcast-scatter-LR-allgather.c +++ b/src/smpi/colls/bcast-scatter-LR-allgather.cpp @@ -79,8 +79,8 @@ smpi_coll_tuned_bcast_scatter_LR_allgather(void *buff, int count, int scatter_size, left, right, next_src, *recv_counts, *disps; int tag = COLL_TAG_BCAST; - rank = 
smpi_comm_rank(comm); - num_procs = smpi_comm_size(comm); + rank = comm->rank(); + num_procs = comm->size(); extent = smpi_datatype_get_extent(data_type); diff --git a/src/smpi/colls/bcast-scatter-rdb-allgather.c b/src/smpi/colls/bcast-scatter-rdb-allgather.cpp similarity index 98% rename from src/smpi/colls/bcast-scatter-rdb-allgather.c rename to src/smpi/colls/bcast-scatter-rdb-allgather.cpp index d4f8cab05e..fa49229a1f 100644 --- a/src/smpi/colls/bcast-scatter-rdb-allgather.c +++ b/src/smpi/colls/bcast-scatter-rdb-allgather.cpp @@ -13,8 +13,8 @@ static int scatter_for_bcast( int mpi_errno = MPI_SUCCESS; int scatter_size, curr_size, recv_size = 0, send_size; - comm_size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + comm_size = comm->size(); + rank = comm->rank(); relative_rank = (rank >= root) ? rank - root : rank - root + comm_size; /* use long message algorithm: binomial tree scatter followed by an allgather */ @@ -114,8 +114,8 @@ smpi_coll_tuned_bcast_scatter_rdb_allgather ( MPI_Aint true_extent, true_lb; void *tmp_buf; - comm_size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + comm_size = comm->size(); + rank = comm->rank(); relative_rank = (rank >= root) ? 
rank - root : rank - root + comm_size; /* If there is only one process, return */ diff --git a/src/smpi/colls/coll_tuned_topo.c b/src/smpi/colls/coll_tuned_topo.cpp similarity index 98% rename from src/smpi/colls/coll_tuned_topo.c rename to src/smpi/colls/coll_tuned_topo.cpp index a5c53b10f2..b04da38baf 100644 --- a/src/smpi/colls/coll_tuned_topo.c +++ b/src/smpi/colls/coll_tuned_topo.cpp @@ -95,8 +95,8 @@ ompi_coll_tuned_topo_build_tree( int fanout, /* * Get size and rank of the process in this communicator */ - size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + size = comm->size(); + rank = comm->rank(); tree = (ompi_coll_tree_t*)malloc(sizeof(ompi_coll_tree_t)); if (!tree) { @@ -195,8 +195,8 @@ ompi_coll_tuned_topo_build_in_order_bintree( MPI_Comm comm ) /* * Get size and rank of the process in this communicator */ - size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + size = comm->size(); + rank = comm->rank(); tree = (ompi_coll_tree_t*)malloc(sizeof(ompi_coll_tree_t)); if (!tree) { @@ -338,8 +338,8 @@ ompi_coll_tuned_topo_build_bmtree( MPI_Comm comm, /* * Get size and rank of the process in this communicator */ - size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + size = comm->size(); + rank = comm->rank(); index = rank -root; @@ -419,8 +419,8 @@ ompi_coll_tuned_topo_build_in_order_bmtree( MPI_Comm comm, /* * Get size and rank of the process in this communicator */ - size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + size = comm->size(); + rank = comm->rank(); vrank = (rank - root + size) % size; @@ -481,8 +481,8 @@ ompi_coll_tuned_topo_build_chain( int fanout, /* * Get size and rank of the process in this communicator */ - size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + size = comm->size(); + rank = comm->rank(); if( fanout < 1 ) { XBT_DEBUG("coll:tuned:topo:build_chain WARNING invalid fanout of ZERO, forcing to 1 (pipeline)!"); diff --git a/src/smpi/colls/colls_global.c b/src/smpi/colls/colls_global.cpp 
similarity index 100% rename from src/smpi/colls/colls_global.c rename to src/smpi/colls/colls_global.cpp diff --git a/src/smpi/colls/gather-mvapich.c b/src/smpi/colls/gather-mvapich.cpp similarity index 94% rename from src/smpi/colls/gather-mvapich.c rename to src/smpi/colls/gather-mvapich.cpp index 033cdb6e92..4baf4db0da 100644 --- a/src/smpi/colls/gather-mvapich.c +++ b/src/smpi/colls/gather-mvapich.cpp @@ -37,7 +37,6 @@ #include "colls_private.h" - #define MPIR_Gather_MV2_Direct smpi_coll_tuned_gather_ompi_basic_linear #define MPIR_Gather_MV2_two_level_Direct smpi_coll_tuned_gather_ompi_basic_linear #define MPIR_Gather_intra smpi_coll_tuned_gather_mpich @@ -149,11 +148,11 @@ int smpi_coll_tuned_gather_mvapich2_two_level(void *sendbuf, if(MV2_Gather_intra_node_function==NULL) MV2_Gather_intra_node_function=smpi_coll_tuned_gather_mpich; - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } - comm_size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + comm_size = comm->size(); + rank = comm->rank(); if (((rank == root) && (recvcnt == 0)) || ((rank != root) && (sendcnt == 0))) { @@ -175,19 +174,19 @@ int smpi_coll_tuned_gather_mvapich2_two_level(void *sendbuf, /* extract the rank,size information for the intra-node * communicator */ - shmem_comm = smpi_comm_get_intra_comm(comm); - local_rank = smpi_comm_rank(shmem_comm); - local_size = smpi_comm_size(shmem_comm); + shmem_comm = comm->get_intra_comm(); + local_rank = shmem_comm->rank(); + local_size = shmem_comm->size(); if (local_rank == 0) { /* Node leader. 
Extract the rank, size information for the leader * communicator */ - leader_comm = smpi_comm_get_leaders_comm(comm); + leader_comm = comm->get_leaders_comm(); if(leader_comm==MPI_COMM_NULL){ leader_comm = MPI_COMM_WORLD; } - leader_comm_size = smpi_comm_size(leader_comm); - leader_comm_rank = smpi_comm_rank(leader_comm); + leader_comm_size = leader_comm->size(); + leader_comm_rank = leader_comm->rank(); } if (rank == root) { @@ -263,14 +262,14 @@ int smpi_coll_tuned_gather_mvapich2_two_level(void *sendbuf, ); } } - leader_comm = smpi_comm_get_leaders_comm(comm); - int* leaders_map = smpi_comm_get_leaders_map(comm); - leader_of_root = smpi_group_rank(smpi_comm_group(comm),leaders_map[root]); - leader_root = smpi_group_rank(smpi_comm_group(leader_comm),leaders_map[root]); + leader_comm = comm->get_leaders_comm(); + int* leaders_map = comm->get_leaders_map(); + leader_of_root = comm->group()->rank(leaders_map[root]); + leader_root = leader_comm->group()->rank(leaders_map[root]); /* leader_root is the rank of the leader of the root in leader_comm. 
* leader_root is to be used as the root of the inter-leader gather ops */ - if (!smpi_comm_is_uniform(comm)) { + if (!comm->is_uniform()) { if (local_rank == 0) { int *displs = NULL; int *recvcnts = NULL; @@ -302,11 +301,11 @@ int smpi_coll_tuned_gather_mvapich2_two_level(void *sendbuf, } } - node_sizes = smpi_comm_get_non_uniform_map(comm); + node_sizes = comm->get_non_uniform_map(); if (leader_comm_rank == leader_root) { - displs = xbt_malloc(sizeof (int) * leader_comm_size); - recvcnts = xbt_malloc(sizeof (int) * leader_comm_size); + displs = static_cast<int *>(xbt_malloc(sizeof (int) * leader_comm_size)); + recvcnts = static_cast<int *>(xbt_malloc(sizeof (int) * leader_comm_size)); if (!displs || !recvcnts) { mpi_errno = MPI_ERR_OTHER; return mpi_errno; diff --git a/src/smpi/colls/gather-ompi.c b/src/smpi/colls/gather-ompi.cpp similarity index 98% rename from src/smpi/colls/gather-ompi.c rename to src/smpi/colls/gather-ompi.cpp index 46dc36e259..e7d1cff536 100644 --- a/src/smpi/colls/gather-ompi.c +++ b/src/smpi/colls/gather-ompi.cpp @@ -47,8 +47,8 @@ smpi_coll_tuned_gather_ompi_binomial(void *sbuf, int scount, MPI_Aint rextent, rlb, rtrue_lb, rtrue_extent; - size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + size = comm->size(); + rank = comm->rank(); XBT_DEBUG( "smpi_coll_tuned_gather_ompi_binomial rank %d", rank); @@ -211,8 +211,8 @@ smpi_coll_tuned_gather_ompi_linear_sync(void *sbuf, int scount, MPI_Aint lb; int first_segment_size=0; - size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + size = comm->size(); + rank = comm->rank(); size_t dsize, block_size; if (rank == root) { @@ -370,8 +370,8 @@ smpi_coll_tuned_gather_ompi_basic_linear(void *sbuf, int scount, MPI_Aint extent; MPI_Aint lb; - size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + size = comm->size(); + rank = comm->rank(); /* Everyone but root sends data and returns. 
*/ XBT_DEBUG( diff --git a/src/smpi/colls/reduce-NTSL.c b/src/smpi/colls/reduce-NTSL.cpp similarity index 98% rename from src/smpi/colls/reduce-NTSL.c rename to src/smpi/colls/reduce-NTSL.cpp index 60aa06ca08..c2560e5ebe 100644 --- a/src/smpi/colls/reduce-NTSL.c +++ b/src/smpi/colls/reduce-NTSL.cpp @@ -27,8 +27,8 @@ int smpi_coll_tuned_reduce_NTSL(void *buf, void *rbuf, int count, MPI_Aint extent; extent = smpi_datatype_get_extent(datatype); - rank = smpi_comm_rank(comm); - size = smpi_comm_size(comm); + rank = comm->rank(); + size = comm->size(); /* source node and destination nodes (same through out the functions) */ int to = (rank - 1 + size) % size; diff --git a/src/smpi/colls/reduce-arrival-pattern-aware.c b/src/smpi/colls/reduce-arrival-pattern-aware.cpp similarity index 99% rename from src/smpi/colls/reduce-arrival-pattern-aware.c rename to src/smpi/colls/reduce-arrival-pattern-aware.cpp index e528e9a787..7aaf9043cf 100644 --- a/src/smpi/colls/reduce-arrival-pattern-aware.c +++ b/src/smpi/colls/reduce-arrival-pattern-aware.cpp @@ -24,7 +24,7 @@ int smpi_coll_tuned_reduce_arrival_pattern_aware(void *buf, void *rbuf, MPI_Op op, int root, MPI_Comm comm) { - int rank = smpi_comm_rank(comm); + int rank = comm->rank(); int tag = -COLL_TAG_REDUCE; MPI_Status status; MPI_Request request; @@ -35,7 +35,7 @@ int smpi_coll_tuned_reduce_arrival_pattern_aware(void *buf, void *rbuf, MPI_Status temp_status_array[MAX_NODE]; - int size = smpi_comm_size(comm); + int size = comm->size(); int i; int sent_count; diff --git a/src/smpi/colls/reduce-binomial.c b/src/smpi/colls/reduce-binomial.cpp similarity index 97% rename from src/smpi/colls/reduce-binomial.c rename to src/smpi/colls/reduce-binomial.cpp index 180a07de30..d9255a8555 100644 --- a/src/smpi/colls/reduce-binomial.c +++ b/src/smpi/colls/reduce-binomial.cpp @@ -22,8 +22,8 @@ int smpi_coll_tuned_reduce_binomial(void *sendbuf, void *recvbuf, int count, MPI_Aint true_lb, true_extent; if (count == 0) return 0; - rank = 
smpi_comm_rank(comm); - comm_size = smpi_comm_size(comm); + rank = comm->rank(); + comm_size = comm->size(); extent = smpi_datatype_get_extent(datatype); diff --git a/src/smpi/colls/reduce-flat-tree.c b/src/smpi/colls/reduce-flat-tree.cpp similarity index 95% rename from src/smpi/colls/reduce-flat-tree.c rename to src/smpi/colls/reduce-flat-tree.cpp index 8a3140d5e0..a12009c131 100644 --- a/src/smpi/colls/reduce-flat-tree.c +++ b/src/smpi/colls/reduce-flat-tree.cpp @@ -20,8 +20,8 @@ smpi_coll_tuned_reduce_flat_tree(void *sbuf, void *rbuf, int count, char *inbuf; MPI_Status status; - rank = smpi_comm_rank(comm); - size = smpi_comm_size(comm); + rank = comm->rank(); + size = comm->size(); /* If not root, send data to the root. */ extent = smpi_datatype_get_extent(dtype); @@ -49,7 +49,7 @@ smpi_coll_tuned_reduce_flat_tree(void *sbuf, void *rbuf, int count, for (i = size - 2; i >= 0; --i) { if (rank == i) - inbuf = sbuf; + inbuf = static_cast<char*>(sbuf); else { smpi_mpi_recv(origin, count, dtype, i, tag, comm, &status); inbuf = origin; diff --git a/src/smpi/colls/reduce-mvapich-knomial.c b/src/smpi/colls/reduce-mvapich-knomial.cpp similarity index 94% rename from src/smpi/colls/reduce-mvapich-knomial.c rename to src/smpi/colls/reduce-mvapich-knomial.cpp index 7ec5c64797..39f69bb753 100644 --- a/src/smpi/colls/reduce-mvapich-knomial.c +++ b/src/smpi/colls/reduce-mvapich-knomial.cpp @@ -56,8 +56,8 @@ static int MPIR_Reduce_knomial_trace(int root, int reduce_knomial_factor, int orig_mask=0x1; int recv_iter=0, send_iter=0; int *knomial_reduce_src_array=NULL; - comm_size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + comm_size = comm->size(); + rank = comm->rank(); lroot = root; relative_rank = (rank - lroot + comm_size) % comm_size; @@ -90,7 +90,7 @@ static int MPIR_Reduce_knomial_trace(int root, int reduce_knomial_factor, /* Finally, fill up the src array */ if(recv_iter > 0) { - knomial_reduce_src_array = smpi_get_tmp_sendbuffer(sizeof(int)*recv_iter); + 
knomial_reduce_src_array = static_cast(smpi_get_tmp_sendbuffer(sizeof(int)*recv_iter)); } mask = orig_mask; @@ -138,7 +138,7 @@ int smpi_coll_tuned_reduce_mvapich2_knomial ( if (count == 0) return MPI_SUCCESS; - rank = smpi_comm_rank(comm); + rank = comm->rank(); /* Create a temporary buffer */ @@ -172,8 +172,8 @@ int smpi_coll_tuned_reduce_mvapich2_knomial ( &dst, &expected_send_count, &expected_recv_count, &src_array); if(expected_recv_count > 0 ) { - tmp_buf = xbt_malloc(sizeof(void *)*expected_recv_count); - requests = xbt_malloc(sizeof(MPI_Request)*expected_recv_count); + tmp_buf = static_cast(xbt_malloc(sizeof(void *)*expected_recv_count)); + requests = static_cast(xbt_malloc(sizeof(MPI_Request)*expected_recv_count)); for(k=0; k < expected_recv_count; k++ ) { tmp_buf[k] = smpi_get_tmp_sendbuffer(count*(MAX(extent,true_extent))); tmp_buf[k] = (void *)((char*)tmp_buf[k] - true_lb); diff --git a/src/smpi/colls/reduce-mvapich-two-level.c b/src/smpi/colls/reduce-mvapich-two-level.cpp similarity index 93% rename from src/smpi/colls/reduce-mvapich-two-level.c rename to src/smpi/colls/reduce-mvapich-two-level.cpp index 8bb18d00da..548c5ca986 100644 --- a/src/smpi/colls/reduce-mvapich-two-level.c +++ b/src/smpi/colls/reduce-mvapich-two-level.cpp @@ -96,20 +96,20 @@ int smpi_coll_tuned_reduce_mvapich2_two_level( void *sendbuf, if(MV2_Reduce_intra_function==NULL) MV2_Reduce_intra_function=smpi_coll_tuned_reduce_mpich; - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } - my_rank = smpi_comm_rank(comm); - total_size = smpi_comm_size(comm); - shmem_comm = smpi_comm_get_intra_comm(comm); - local_rank = smpi_comm_rank(shmem_comm); - local_size = smpi_comm_size(shmem_comm); + my_rank = comm->rank(); + total_size = comm->size(); + shmem_comm = comm->get_intra_comm(); + local_rank = shmem_comm->rank(); + local_size = shmem_comm->size(); - leader_comm = 
smpi_comm_get_leaders_comm(comm); - int* leaders_map = smpi_comm_get_leaders_map(comm); - leader_of_root = smpi_group_rank(smpi_comm_group(comm),leaders_map[root]); - leader_root = smpi_group_rank(smpi_comm_group(leader_comm),leaders_map[root]); + leader_comm = comm->get_leaders_comm(); + int* leaders_map = comm->get_leaders_map(); + leader_of_root = comm->group()->rank(leaders_map[root]); + leader_root = leader_comm->group()->rank(leaders_map[root]); is_commutative=smpi_op_is_commute(op); @@ -187,12 +187,12 @@ int smpi_coll_tuned_reduce_mvapich2_two_level( void *sendbuf, if (local_rank == 0) { - leader_comm = smpi_comm_get_leaders_comm(comm); + leader_comm = comm->get_leaders_comm(); if(leader_comm==MPI_COMM_NULL){ leader_comm = MPI_COMM_WORLD; } - leader_comm_size = smpi_comm_size(leader_comm); - leader_comm_rank = smpi_comm_rank(leader_comm); + leader_comm_size = leader_comm->size(); + leader_comm_rank = leader_comm->rank(); tmp_buf=(void *)smpi_get_tmp_sendbuffer(count * (MAX(extent, true_extent))); tmp_buf = (void *) ((char *) tmp_buf - true_lb); diff --git a/src/smpi/colls/reduce-ompi.c b/src/smpi/colls/reduce-ompi.cpp similarity index 98% rename from src/smpi/colls/reduce-ompi.c rename to src/smpi/colls/reduce-ompi.cpp index 3c51282749..faf37ef7d5 100644 --- a/src/smpi/colls/reduce-ompi.c +++ b/src/smpi/colls/reduce-ompi.cpp @@ -69,7 +69,7 @@ int smpi_coll_tuned_ompi_reduce_generic( void* sendbuf, void* recvbuf, int origi XBT_DEBUG( "coll:tuned:reduce_generic count %d, msg size %ld, segsize %ld, max_requests %d", original_count, (unsigned long)(num_segments * segment_increment), (unsigned long)segment_increment, max_outstanding_reqs); - rank = smpi_comm_rank(comm); + rank = comm->rank(); /* non-leaf nodes - wait for children to send me data & forward up (if needed) */ @@ -333,9 +333,9 @@ int smpi_coll_tuned_reduce_ompi_chain( void *sendbuf, void *recvbuf, int count, uint32_t segsize=64*1024; int segcount = count; size_t typelng; - int fanout = 
smpi_comm_size(comm)/2; + int fanout = comm->size()/2; - XBT_DEBUG("coll:tuned:reduce_intra_chain rank %d fo %d ss %5d", smpi_comm_rank(comm), fanout, segsize); + XBT_DEBUG("coll:tuned:reduce_intra_chain rank %d fo %d ss %5d", comm->rank(), fanout, segsize); /** * Determine number of segments and number of elements @@ -372,7 +372,7 @@ int smpi_coll_tuned_reduce_ompi_pipeline( void *sendbuf, void *recvbuf, const double a4 = 0.0033 / 1024.0; /* [1/B] */ const double b4 = 1.6761; typelng= smpi_datatype_size( datatype); - int communicator_size = smpi_comm_size(comm); + int communicator_size = comm->size(); size_t message_size = typelng * count; if (communicator_size > (a2 * message_size + b2)) { @@ -387,7 +387,7 @@ int smpi_coll_tuned_reduce_ompi_pipeline( void *sendbuf, void *recvbuf, } XBT_DEBUG("coll:tuned:reduce_intra_pipeline rank %d ss %5d", - smpi_comm_rank(comm), segsize); + comm->rank(), segsize); COLL_TUNED_COMPUTED_SEGCOUNT( segsize, typelng, segcount ); @@ -418,7 +418,7 @@ int smpi_coll_tuned_reduce_ompi_binary( void *sendbuf, void *recvbuf, segsize = 32*1024; XBT_DEBUG("coll:tuned:reduce_intra_binary rank %d ss %5d", - smpi_comm_rank(comm), segsize); + comm->rank(), segsize); COLL_TUNED_COMPUTED_SEGCOUNT( segsize, typelng, segcount ); @@ -448,7 +448,7 @@ int smpi_coll_tuned_reduce_ompi_binomial( void *sendbuf, void *recvbuf, * sent per operation */ typelng= smpi_datatype_size( datatype); - int communicator_size = smpi_comm_size(comm); + int communicator_size = comm->size(); size_t message_size = typelng * count; if (((communicator_size < 8) && (message_size < 20480)) || (message_size < 2048) || (count <= 1)) { @@ -460,7 +460,7 @@ int smpi_coll_tuned_reduce_ompi_binomial( void *sendbuf, void *recvbuf, } XBT_DEBUG("coll:tuned:reduce_intra_binomial rank %d ss %5d", - smpi_comm_rank(comm), segsize); + comm->rank(), segsize); COLL_TUNED_COMPUTED_SEGCOUNT( segsize, typelng, segcount ); return smpi_coll_tuned_ompi_reduce_generic( sendbuf, recvbuf, count, 
datatype, @@ -489,8 +489,8 @@ int smpi_coll_tuned_reduce_ompi_in_order_binary( void *sendbuf, void *recvbuf, void *use_this_sendbuf = NULL, *use_this_recvbuf = NULL; size_t typelng; - rank = smpi_comm_rank(comm); - size = smpi_comm_size(comm); + rank = comm->rank(); + size = comm->size(); XBT_DEBUG("coll:tuned:reduce_intra_in_order_binary rank %d ss %5d", rank, segsize); @@ -604,8 +604,8 @@ smpi_coll_tuned_reduce_ompi_basic_linear(void *sbuf, void *rbuf, int count, /* Initialize */ - rank = smpi_comm_rank(comm); - size = smpi_comm_size(comm); + rank = comm->rank(); + size = comm->size(); XBT_DEBUG("coll:tuned:reduce_intra_basic_linear rank %d", rank); diff --git a/src/smpi/colls/reduce-rab.c b/src/smpi/colls/reduce-rab.cpp similarity index 98% rename from src/smpi/colls/reduce-rab.c rename to src/smpi/colls/reduce-rab.cpp index 61653b7b98..f529bd8364 100644 --- a/src/smpi/colls/reduce-rab.c +++ b/src/smpi/colls/reduce-rab.cpp @@ -594,14 +594,14 @@ static int MPI_I_anyReduce(void* Sendbuf, void* Recvbuf, int count, MPI_Datatype MPI_Type_extent(mpi_datatype, &typelng); scrlng = typelng * count; #ifdef NO_CACHE_OPTIMIZATION - scr1buf = malloc(scrlng); - scr2buf = malloc(scrlng); - scr3buf = malloc(scrlng); + scr1buf = static_cast(xbt_malloc(scrlng)); + scr2buf = static_cast(xbt_malloc(scrlng)); + scr3buf = static_cast(xbt_malloc(scrlng)); #else # ifdef SCR_LNG_OPTIM scrlng = SCR_LNG_OPTIM(scrlng); # endif - scr2buf = malloc(3*scrlng); /* To test cache problems. */ + scr2buf = static_cast(xbt_malloc(3*scrlng)); /* To test cache problems. 
*/ scr1buf = scr2buf + 1*scrlng; /* scr1buf and scr3buf must not*/ scr3buf = scr2buf + 2*scrlng; /* be used for malloc because */ /* they are interchanged below.*/ @@ -913,9 +913,9 @@ static int MPI_I_anyReduce(void* Sendbuf, void* Recvbuf, int count, MPI_Datatype } # ifdef NO_CACHE_TESTING - free(scr1buf); free(scr2buf); free(scr3buf); + xbt_free(scr1buf); xbt_free(scr2buf); xbt_free(scr3buf); # else - free(scr2buf); /* scr1buf and scr3buf are part of scr2buf */ + xbt_free(scr2buf); /* scr1buf and scr3buf are part of scr2buf */ # endif return(MPI_SUCCESS); } /* new_prot */ diff --git a/src/smpi/colls/reduce-scatter-gather.c b/src/smpi/colls/reduce-scatter-gather.cpp similarity index 99% rename from src/smpi/colls/reduce-scatter-gather.c rename to src/smpi/colls/reduce-scatter-gather.cpp index 38db76025b..6256be704c 100644 --- a/src/smpi/colls/reduce-scatter-gather.c +++ b/src/smpi/colls/reduce-scatter-gather.cpp @@ -31,8 +31,8 @@ int smpi_coll_tuned_reduce_scatter_gather(void *sendbuf, void *recvbuf, if (count == 0) return 0; - rank = smpi_comm_rank(comm); - comm_size = smpi_comm_size(comm); + rank = comm->rank(); + comm_size = comm->size(); diff --git a/src/smpi/colls/reduce_scatter-mpich.c b/src/smpi/colls/reduce_scatter-mpich.cpp similarity index 98% rename from src/smpi/colls/reduce_scatter-mpich.c rename to src/smpi/colls/reduce_scatter-mpich.cpp index 11adf04c5d..2692b50a3e 100644 --- a/src/smpi/colls/reduce_scatter-mpich.c +++ b/src/smpi/colls/reduce_scatter-mpich.cpp @@ -32,8 +32,8 @@ int smpi_coll_tuned_reduce_scatter_mpich_pair(void *sendbuf, void *recvbuf, int int mpi_errno = MPI_SUCCESS; int total_count, dst, src; int is_commutative; - comm_size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + comm_size = comm->size(); + rank = comm->rank(); extent =smpi_datatype_get_extent(datatype); smpi_datatype_extent(datatype, &true_lb, &true_extent); @@ -152,8 +152,8 @@ int smpi_coll_tuned_reduce_scatter_mpich_noncomm(void *sendbuf, void *recvbuf, i 
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) { int mpi_errno = MPI_SUCCESS; - int comm_size = smpi_comm_size(comm) ; - int rank = smpi_comm_rank(comm); + int comm_size = comm->size() ; + int rank = comm->rank(); int pof2; int log2_comm_size; int i, k; @@ -209,8 +209,8 @@ int smpi_coll_tuned_reduce_scatter_mpich_noncomm(void *sendbuf, void *recvbuf, i size = total_count; for (k = 0; k < log2_comm_size; ++k) { /* use a double-buffering scheme to avoid local copies */ - char *incoming_data = (buf0_was_inout ? tmp_buf1 : tmp_buf0); - char *outgoing_data = (buf0_was_inout ? tmp_buf0 : tmp_buf1); + char *incoming_data = static_cast(buf0_was_inout ? tmp_buf1 : tmp_buf0); + char *outgoing_data = static_cast(buf0_was_inout ? tmp_buf0 : tmp_buf1); int peer = rank ^ (0x1 << k); size /= 2; @@ -279,8 +279,8 @@ int smpi_coll_tuned_reduce_scatter_mpich_rdb(void *sendbuf, void *recvbuf, int r int received; MPI_Datatype sendtype, recvtype; int nprocs_completed, tmp_mask, tree_root, is_commutative=0; - comm_size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + comm_size = comm->size(); + rank = comm->rank(); extent =smpi_datatype_get_extent(datatype); smpi_datatype_extent(datatype, &true_lb, &true_extent); diff --git a/src/smpi/colls/reduce_scatter-ompi.c b/src/smpi/colls/reduce_scatter-ompi.cpp similarity index 99% rename from src/smpi/colls/reduce_scatter-ompi.c rename to src/smpi/colls/reduce_scatter-ompi.cpp index e303d208f3..81862299dd 100644 --- a/src/smpi/colls/reduce_scatter-ompi.c +++ b/src/smpi/colls/reduce_scatter-ompi.cpp @@ -58,8 +58,8 @@ smpi_coll_tuned_reduce_scatter_ompi_basic_recursivehalving(void *sbuf, char *result_buf = NULL, *result_buf_free = NULL; /* Initialize */ - rank = smpi_comm_rank(comm); - size = smpi_comm_size(comm); + rank = comm->rank(); + size = comm->size(); XBT_DEBUG("coll:tuned:reduce_scatter_ompi_basic_recursivehalving, rank %d", rank); if(!smpi_op_is_commute(op)) @@ -374,8 +374,8 @@ smpi_coll_tuned_reduce_scatter_ompi_ring(void 
*sbuf, void *rbuf, int *rcounts, ptrdiff_t true_lb, true_extent, lb, extent, max_real_segsize; MPI_Request reqs[2] = {NULL, NULL}; - size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + size = comm->size(); + rank = comm->rank(); XBT_DEBUG( "coll:tuned:reduce_scatter_ompi_ring rank %d, size %d", rank, size); diff --git a/src/smpi/colls/scatter-mvapich-two-level.c b/src/smpi/colls/scatter-mvapich-two-level.cpp similarity index 86% rename from src/smpi/colls/scatter-mvapich-two-level.c rename to src/smpi/colls/scatter-mvapich-two-level.cpp index 4cbc64a97c..1edba3662c 100644 --- a/src/smpi/colls/scatter-mvapich-two-level.c +++ b/src/smpi/colls/scatter-mvapich-two-level.cpp @@ -65,11 +65,11 @@ int smpi_coll_tuned_scatter_mvapich2_two_level_direct(void *sendbuf, if(MV2_Scatter_intra_function==NULL) MV2_Scatter_intra_function=smpi_coll_tuned_scatter_mpich; - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } - comm_size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + comm_size = comm->size(); + rank = comm->rank(); if (((rank == root) && (recvcnt == 0)) || ((rank != root) && (sendcnt == 0))) { @@ -78,16 +78,16 @@ int smpi_coll_tuned_scatter_mvapich2_two_level_direct(void *sendbuf, /* extract the rank,size information for the intra-node * communicator */ - shmem_comm = smpi_comm_get_intra_comm(comm); - local_rank = smpi_comm_rank(shmem_comm); - local_size = smpi_comm_size(shmem_comm); + shmem_comm = comm->get_intra_comm(); + local_rank = shmem_comm->rank(); + local_size = shmem_comm->size(); if (local_rank == 0) { /* Node leader. 
Extract the rank, size information for the leader * communicator */ - leader_comm = smpi_comm_get_leaders_comm(comm); - leader_comm_size = smpi_comm_size(leader_comm); - leader_comm_rank = smpi_comm_rank(leader_comm); + leader_comm = comm->get_leaders_comm(); + leader_comm_size = leader_comm->size(); + leader_comm_rank = leader_comm->rank(); } if (local_size == comm_size) { @@ -111,10 +111,10 @@ int smpi_coll_tuned_scatter_mvapich2_two_level_direct(void *sendbuf, tmp_buf = smpi_get_tmp_sendbuffer(nbytes * local_size); } - leader_comm = smpi_comm_get_leaders_comm(comm); - int* leaders_map = smpi_comm_get_leaders_map(comm); - leader_of_root = smpi_group_rank(smpi_comm_group(comm),leaders_map[root]); - leader_root = smpi_group_rank(smpi_comm_group(leader_comm),leaders_map[root]); + leader_comm = comm->get_leaders_comm(); + int* leaders_map = comm->get_leaders_map(); + leader_of_root = comm->group()->rank(leaders_map[root]); + leader_root = leader_comm->group()->rank(leaders_map[root]); /* leader_root is the rank of the leader of the root in leader_comm. 
* leader_root is to be used as the root of the inter-leader gather ops */ @@ -138,17 +138,17 @@ int smpi_coll_tuned_scatter_mvapich2_two_level_direct(void *sendbuf, } if (leader_comm_size > 1 && local_rank == 0) { - if (!smpi_comm_is_uniform(comm)) { + if (!comm->is_uniform()) { int *displs = NULL; int *sendcnts = NULL; int *node_sizes; int i = 0; - node_sizes = smpi_comm_get_non_uniform_map(comm); + node_sizes = comm->get_non_uniform_map(); if (root != leader_of_root) { if (leader_comm_rank == leader_root) { - displs = xbt_malloc(sizeof (int) * leader_comm_size); - sendcnts = xbt_malloc(sizeof (int) * leader_comm_size); + displs = static_cast(xbt_malloc(sizeof (int) * leader_comm_size)); + sendcnts = static_cast(xbt_malloc(sizeof (int) * leader_comm_size)); sendcnts[0] = node_sizes[0] * nbytes; displs[0] = 0; @@ -163,8 +163,8 @@ int smpi_coll_tuned_scatter_mvapich2_two_level_direct(void *sendbuf, MPI_BYTE, leader_root, leader_comm); } else { if (leader_comm_rank == leader_root) { - displs = xbt_malloc(sizeof (int) * leader_comm_size); - sendcnts = xbt_malloc(sizeof (int) * leader_comm_size); + displs = static_cast(xbt_malloc(sizeof (int) * leader_comm_size)); + sendcnts = static_cast(xbt_malloc(sizeof (int) * leader_comm_size)); sendcnts[0] = node_sizes[0] * sendcnt; displs[0] = 0; @@ -249,11 +249,11 @@ int smpi_coll_tuned_scatter_mvapich2_two_level_binomial(void *sendbuf, if(MV2_Scatter_intra_function==NULL) MV2_Scatter_intra_function=smpi_coll_tuned_scatter_mpich; - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } - comm_size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + comm_size = comm->size(); + rank = comm->rank(); if (((rank == root) && (recvcnt == 0)) || ((rank != root) && (sendcnt == 0))) { @@ -262,16 +262,16 @@ int smpi_coll_tuned_scatter_mvapich2_two_level_binomial(void *sendbuf, /* extract the rank,size information for the intra-node * 
communicator */ - shmem_comm = smpi_comm_get_intra_comm(comm); - local_rank = smpi_comm_rank(shmem_comm); - local_size = smpi_comm_size(shmem_comm); + shmem_comm = comm->get_intra_comm(); + local_rank = shmem_comm->rank(); + local_size = shmem_comm->size(); if (local_rank == 0) { /* Node leader. Extract the rank, size information for the leader * communicator */ - leader_comm = smpi_comm_get_leaders_comm(comm); - leader_comm_size = smpi_comm_size(leader_comm); - leader_comm_rank = smpi_comm_rank(leader_comm); + leader_comm = comm->get_leaders_comm(); + leader_comm_size = leader_comm->size(); + leader_comm_rank = leader_comm->rank(); } if (local_size == comm_size) { @@ -294,10 +294,10 @@ int smpi_coll_tuned_scatter_mvapich2_two_level_binomial(void *sendbuf, /* Node leader, allocate tmp_buffer */ tmp_buf = smpi_get_tmp_sendbuffer(nbytes * local_size); } - leader_comm = smpi_comm_get_leaders_comm(comm); - int* leaders_map = smpi_comm_get_leaders_map(comm); - leader_of_root = smpi_group_rank(smpi_comm_group(comm),leaders_map[root]); - leader_root = smpi_group_rank(smpi_comm_group(leader_comm),leaders_map[root]); + leader_comm = comm->get_leaders_comm(); + int* leaders_map = comm->get_leaders_map(); + leader_of_root = comm->group()->rank(leaders_map[root]); + leader_root = leader_comm->group()->rank(leaders_map[root]); /* leader_root is the rank of the leader of the root in leader_comm. 
* leader_root is to be used as the root of the inter-leader gather ops */ @@ -319,17 +319,17 @@ int smpi_coll_tuned_scatter_mvapich2_two_level_binomial(void *sendbuf, } if (leader_comm_size > 1 && local_rank == 0) { - if (!smpi_comm_is_uniform(comm)) { + if (!comm->is_uniform()) { int *displs = NULL; int *sendcnts = NULL; int *node_sizes; int i = 0; - node_sizes = smpi_comm_get_non_uniform_map(comm); + node_sizes = comm->get_non_uniform_map(); if (root != leader_of_root) { if (leader_comm_rank == leader_root) { - displs = xbt_malloc(sizeof (int) * leader_comm_size); - sendcnts = xbt_malloc(sizeof (int) * leader_comm_size); + displs = static_cast(xbt_malloc(sizeof (int) * leader_comm_size)); + sendcnts = static_cast(xbt_malloc(sizeof (int) * leader_comm_size)); sendcnts[0] = node_sizes[0] * nbytes; displs[0] = 0; @@ -344,8 +344,8 @@ int smpi_coll_tuned_scatter_mvapich2_two_level_binomial(void *sendbuf, MPI_BYTE, leader_root, leader_comm); } else { if (leader_comm_rank == leader_root) { - displs = xbt_malloc(sizeof (int) * leader_comm_size); - sendcnts = xbt_malloc(sizeof (int) * leader_comm_size); + displs = static_cast(xbt_malloc(sizeof (int) * leader_comm_size)); + sendcnts = static_cast(xbt_malloc(sizeof (int) * leader_comm_size)); sendcnts[0] = node_sizes[0] * sendcnt; displs[0] = 0; diff --git a/src/smpi/colls/scatter-ompi.c b/src/smpi/colls/scatter-ompi.cpp similarity index 98% rename from src/smpi/colls/scatter-ompi.c rename to src/smpi/colls/scatter-ompi.cpp index 9eace67529..6fb5e0c6fc 100644 --- a/src/smpi/colls/scatter-ompi.c +++ b/src/smpi/colls/scatter-ompi.cpp @@ -47,8 +47,8 @@ smpi_coll_tuned_scatter_ompi_binomial(void *sbuf, int scount, MPI_Aint sextent, slb, strue_lb, strue_extent; MPI_Aint rextent, rlb, rtrue_lb, rtrue_extent; - size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + size = comm->size(); + rank = comm->rank(); XBT_DEBUG( "smpi_coll_tuned_scatter_ompi_binomial rank %d", rank); @@ -205,8 +205,8 @@ 
smpi_coll_tuned_scatter_ompi_basic_linear(void *sbuf, int scount, /* Initialize */ - rank = smpi_comm_rank(comm); - size = smpi_comm_size(comm); + rank = comm->rank(); + size = comm->size(); /* If not root, receive data. */ diff --git a/src/smpi/colls/smpi_automatic_selector.cpp b/src/smpi/colls/smpi_automatic_selector.cpp index c79cea58b0..f6e5240ffc 100644 --- a/src/smpi/colls/smpi_automatic_selector.cpp +++ b/src/smpi/colls/smpi_automatic_selector.cpp @@ -51,17 +51,17 @@ min_coll=i;\ time_min=time2-time1;\ }\ - if(smpi_comm_rank(comm)==0){\ + if(comm->rank()==0){\ if(buf_inrank()==0){\ XBT_WARN("For rank 0, the quickest was %s : %f , but global was %s : %f at max",mpi_coll_##cat##_description[min_coll].name, time_min,mpi_coll_##cat##_description[global_coll].name, max_min);\ }else\ - XBT_WARN("The quickest %s was %s on rank %d and took %f",#cat,mpi_coll_##cat##_description[min_coll].name, smpi_comm_rank(comm), time_min);\ + XBT_WARN("The quickest %s was %s on rank %d and took %f",#cat,mpi_coll_##cat##_description[min_coll].name, comm->rank(), time_min);\ return (min_coll!=-1)?MPI_SUCCESS:MPI_ERR_INTERN;\ }\ diff --git a/src/smpi/colls/smpi_intel_mpi_selector.c b/src/smpi/colls/smpi_intel_mpi_selector.cpp similarity index 99% rename from src/smpi/colls/smpi_intel_mpi_selector.c rename to src/smpi/colls/smpi_intel_mpi_selector.cpp index bdcb1b70cd..43138e341f 100644 --- a/src/smpi/colls/smpi_intel_mpi_selector.c +++ b/src/smpi/colls/smpi_intel_mpi_selector.cpp @@ -15,7 +15,7 @@ #define INTEL_MAX_NB_PPN 5 /* 1 2 4 8 16 ppn */ typedef struct { - int max_size; + unsigned int max_size; int algo; } intel_tuning_table_size_element; @@ -2246,13 +2246,13 @@ intel_tuning_table_element intel_alltoallv_table[] = size_t block_dsize = total_message_size*smpi_datatype_size(recv_type); #define SIZECOMP_gather\ - int rank = smpi_comm_rank(comm);\ + int rank = comm->rank();\ size_t block_dsize = (send_buff == MPI_IN_PLACE || rank ==root) ?\ recv_count * 
smpi_datatype_size(recv_type) :\ send_count * smpi_datatype_size(send_type); #define SIZECOMP_scatter\ - int rank = smpi_comm_rank(comm);\ + int rank = comm->rank();\ size_t block_dsize = (sendbuf == MPI_IN_PLACE || rank !=root ) ?\ recvcount * smpi_datatype_size(recvtype) :\ sendcount * smpi_datatype_size(sendtype); @@ -2263,17 +2263,17 @@ intel_tuning_table_element intel_alltoallv_table[] = #define IMPI_COLL_SELECT(cat, ret, args, args2)\ ret smpi_coll_tuned_ ## cat ## _impi (COLL_UNPAREN args)\ {\ - int comm_size = smpi_comm_size(comm);\ + int comm_size = comm->size();\ int i =0;\ SIZECOMP_ ## cat\ i=0;\ int j =0, k=0;\ - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){\ - smpi_comm_init_smp(comm);\ + if(comm->get_leaders_comm()==MPI_COMM_NULL){\ + comm->init_smp();\ }\ int local_size=1;\ - if (smpi_comm_is_uniform(comm)) {\ - local_size = smpi_comm_size(smpi_comm_get_intra_comm(comm));\ + if (comm->is_uniform()) {\ + local_size = comm->get_intra_comm()->size();\ }\ while(i < INTEL_MAX_NB_PPN &&\ local_size!=intel_ ## cat ## _table[i].ppn)\ diff --git a/src/smpi/colls/smpi_mpich_selector.c b/src/smpi/colls/smpi_mpich_selector.cpp similarity index 98% rename from src/smpi/colls/smpi_mpich_selector.c rename to src/smpi/colls/smpi_mpich_selector.cpp index a43b6d8214..7e6aa5250b 100644 --- a/src/smpi/colls/smpi_mpich_selector.c +++ b/src/smpi/colls/smpi_mpich_selector.cpp @@ -60,7 +60,7 @@ int smpi_coll_tuned_allreduce_mpich(void *sbuf, void *rbuf, int count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm) { size_t dsize, block_dsize; - int comm_size = smpi_comm_size(comm); + int comm_size = comm->size(); const size_t large_message = 2048; //MPIR_PARAM_ALLREDUCE_SHORT_MSG_SIZE dsize = smpi_datatype_size(dtype); @@ -140,10 +140,10 @@ int smpi_coll_tuned_alltoall_mpich( void *sbuf, int scount, { int communicator_size; size_t dsize, block_dsize; - communicator_size = smpi_comm_size(comm); + communicator_size = comm->size(); - int short_size=256; - int 
medium_size=32768; + unsigned int short_size=256; + unsigned int medium_size=32768; //short size and comm_size >=8 -> bruck // medium size messages and (short messages for comm_size < 8), we @@ -257,7 +257,7 @@ int smpi_coll_tuned_bcast_mpich(void *buff, int count, //int segsize = 0; size_t message_size, dsize; - communicator_size = smpi_comm_size(comm); + communicator_size = comm->size(); /* else we need data size for decision function */ dsize = smpi_datatype_size(datatype); @@ -348,7 +348,7 @@ int smpi_coll_tuned_reduce_mpich( void *sendbuf, void *recvbuf, int communicator_size=0; //int segsize = 0; size_t message_size, dsize; - communicator_size = smpi_comm_size(comm); + communicator_size = comm->size(); /* need data size for decision function */ dsize=smpi_datatype_size(datatype); @@ -430,7 +430,7 @@ int smpi_coll_tuned_reduce_scatter_mpich( void *sbuf, void *rbuf, XBT_DEBUG("smpi_coll_tuned_reduce_scatter_mpich"); - comm_size = smpi_comm_size(comm); + comm_size = comm->size(); // We need data size for decision function total_message_size = 0; for (i = 0; i < comm_size; i++) { @@ -521,7 +521,7 @@ int smpi_coll_tuned_allgather_mpich(void *sbuf, int scount, int communicator_size, pow2_size; size_t dsize, total_dsize; - communicator_size = smpi_comm_size(comm); + communicator_size = comm->size(); /* Determine complete data size */ dsize=smpi_datatype_size(sdtype); @@ -600,7 +600,7 @@ int smpi_coll_tuned_allgatherv_mpich(void *sbuf, int scount, int communicator_size, pow2_size,i; size_t total_dsize; - communicator_size = smpi_comm_size(comm); + communicator_size = comm->size(); /* Determine complete data size */ total_dsize = 0; @@ -689,7 +689,7 @@ int smpi_coll_tuned_scatter_mpich(void *sbuf, int scount, int root, MPI_Comm comm ) { - if(smpi_comm_rank(comm)!=root){ + if(comm->rank()!=root){ sbuf=xbt_malloc(rcount*smpi_datatype_get_extent(rdtype)); scount=rcount; sdtype=rdtype; @@ -697,7 +697,7 @@ int smpi_coll_tuned_scatter_mpich(void *sbuf, int scount, int ret= 
smpi_coll_tuned_scatter_ompi_binomial (sbuf, scount, sdtype, rbuf, rcount, rdtype, root, comm); - if(smpi_comm_rank(comm)!=root){ + if(comm->rank()!=root){ xbt_free(sbuf); } return ret; diff --git a/src/smpi/colls/smpi_mvapich2_selector.c b/src/smpi/colls/smpi_mvapich2_selector.cpp similarity index 95% rename from src/smpi/colls/smpi_mvapich2_selector.c rename to src/smpi/colls/smpi_mvapich2_selector.cpp index 9e64ac3456..4dc8c579c0 100644 --- a/src/smpi/colls/smpi_mvapich2_selector.c +++ b/src/smpi/colls/smpi_mvapich2_selector.cpp @@ -22,17 +22,17 @@ int smpi_coll_tuned_alltoall_mvapich2( void *sendbuf, int sendcount, if(mv2_alltoall_table_ppn_conf==NULL) init_mv2_alltoall_tables_stampede(); - int sendtype_size, recvtype_size, nbytes, comm_size; + int sendtype_size, recvtype_size, comm_size; char * tmp_buf = NULL; int mpi_errno=MPI_SUCCESS; int range = 0; int range_threshold = 0; int conf_index = 0; - comm_size = smpi_comm_size(comm); + comm_size = comm->size(); sendtype_size=smpi_datatype_size(sendtype); recvtype_size=smpi_datatype_size(recvtype); - nbytes = sendtype_size * sendcount; + long nbytes = sendtype_size * sendcount; /* check if safe to use partial subscription mode */ @@ -88,7 +88,7 @@ int smpi_coll_tuned_allgather_mvapich2(void *sendbuf, int sendcount, MPI_Datatyp { int mpi_errno = MPI_SUCCESS; - int nbytes = 0, comm_size, recvtype_size; + long nbytes = 0, comm_size, recvtype_size; int range = 0; int partial_sub_ok = 0; int conf_index = 0; @@ -98,21 +98,21 @@ int smpi_coll_tuned_allgather_mvapich2(void *sendbuf, int sendcount, MPI_Datatyp MPI_Comm shmem_comm; //MPI_Comm *shmem_commptr=NULL; /* Get the size of the communicator */ - comm_size = smpi_comm_size(comm); + comm_size = comm->size(); recvtype_size=smpi_datatype_size(recvtype); nbytes = recvtype_size * recvcount; if(mv2_allgather_table_ppn_conf==NULL) init_mv2_allgather_tables_stampede(); - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + 
if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } int i; - if (smpi_comm_is_uniform(comm)){ - shmem_comm = smpi_comm_get_intra_comm(comm); - local_size = smpi_comm_size(shmem_comm); + if (comm->is_uniform()){ + shmem_comm = comm->get_intra_comm(); + local_size = shmem_comm->size(); i = 0; if (mv2_allgather_table_ppn_conf[0] == -1) { // Indicating user defined tuning @@ -158,7 +158,7 @@ int smpi_coll_tuned_allgather_mvapich2(void *sendbuf, int sendcount, MPI_Datatyp /* intracommunicator */ if(is_two_level ==1){ if(partial_sub_ok ==1){ - if (smpi_comm_is_blocked(comm)){ + if (comm->is_blocked()){ mpi_errno = MPIR_2lvl_Allgather_MV2(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm); @@ -201,12 +201,12 @@ int smpi_coll_tuned_gather_mvapich2(void *sendbuf, int range = 0; int range_threshold = 0; int range_intra_threshold = 0; - int nbytes = 0; + long nbytes = 0; int comm_size = 0; int recvtype_size, sendtype_size; int rank = -1; - comm_size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + comm_size = comm->size(); + rank = comm->rank(); if (rank == root) { recvtype_size=smpi_datatype_size(recvtype); @@ -239,7 +239,7 @@ int smpi_coll_tuned_gather_mvapich2(void *sendbuf, range_intra_threshold++; } - if (smpi_comm_is_blocked(comm) ) { + if (comm->is_blocked() ) { // Set intra-node function pt for gather_two_level MV2_Gather_intra_node_function = mv2_gather_thresholds_table[range].intra_node[range_intra_threshold]. 
@@ -271,12 +271,12 @@ int smpi_coll_tuned_allgatherv_mvapich2(void *sendbuf, int sendcount, MPI_Dataty int mpi_errno = MPI_SUCCESS; int range = 0, comm_size, total_count, recvtype_size, i; int range_threshold = 0; - int nbytes = 0; + long nbytes = 0; if(mv2_allgatherv_thresholds_table==NULL) init_mv2_allgatherv_tables_stampede(); - comm_size = smpi_comm_size(comm); + comm_size = comm->size(); total_count = 0; for (i = 0; i < comm_size; i++) total_count += recvcounts[i]; @@ -341,8 +341,8 @@ int smpi_coll_tuned_allreduce_mvapich2(void *sendbuf, //int rank = 0, int comm_size = 0; - comm_size = smpi_comm_size(comm); - //rank = smpi_comm_rank(comm); + comm_size = comm->size(); + //rank = comm->rank(); if (count == 0) { return MPI_SUCCESS; @@ -354,7 +354,7 @@ int smpi_coll_tuned_allreduce_mvapich2(void *sendbuf, /* check if multiple threads are calling this collective function */ MPI_Aint sendtype_size = 0; - int nbytes = 0; + long nbytes = 0; int range = 0, range_threshold = 0, range_threshold_intra = 0; int is_two_level = 0; int is_commutative = 0; @@ -426,8 +426,8 @@ int smpi_coll_tuned_allreduce_mvapich2(void *sendbuf, if(is_two_level == 1){ // check if shm is ready, if not use other algorithm first if (is_commutative) { - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } mpi_errno = MPIR_Allreduce_two_level_MV2(sendbuf, recvbuf, count, datatype, op, comm); @@ -484,7 +484,7 @@ int smpi_coll_tuned_bcast_mvapich2(void *buffer, int mpi_errno = MPI_SUCCESS; int comm_size/*, rank*/; int two_level_bcast = 1; - size_t nbytes = 0; + long nbytes = 0; int range = 0; int range_threshold = 0; int range_threshold_intra = 0; @@ -497,13 +497,13 @@ int smpi_coll_tuned_bcast_mvapich2(void *buffer, if (count == 0) return MPI_SUCCESS; - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } 
if(!mv2_bcast_thresholds_table) init_mv2_bcast_tables_stampede(); - comm_size = smpi_comm_size(comm); - //rank = smpi_comm_rank(comm); + comm_size = comm->size(); + //rank = comm->rank(); is_contig=1; /* if (HANDLE_GET_KIND(datatype) == HANDLE_KIND_BUILTIN)*/ @@ -528,7 +528,7 @@ int smpi_coll_tuned_bcast_mvapich2(void *buffer, /* } else { MPIR_Pack_size_impl(1, datatype, &type_size); }*/ - nbytes = (size_t) (count) * (type_size); + nbytes = (count) * (type_size); /* Search for the corresponding system size inside the tuning table */ while ((range < (mv2_size_bcast_tuning_table - 1)) && @@ -625,7 +625,7 @@ int smpi_coll_tuned_bcast_mvapich2(void *buffer, } else #endif /* defined(CHANNEL_MRAIL_GEN2) */ { - shmem_comm = smpi_comm_get_intra_comm(comm); + shmem_comm = comm->get_intra_comm(); if (!is_contig || !is_homogeneous) { mpi_errno = MPIR_Bcast_tune_inter_node_helper_MV2(tmp_buf, nbytes, MPI_BYTE, @@ -693,11 +693,11 @@ int smpi_coll_tuned_reduce_mvapich2( void *sendbuf, int range_intra_threshold = 0; int is_commutative, pof2; int comm_size = 0; - int nbytes = 0; + long nbytes = 0; int sendtype_size; int is_two_level = 0; - comm_size = smpi_comm_size(comm); + comm_size = comm->size(); sendtype_size=smpi_datatype_size(datatype); nbytes = count * sendtype_size; @@ -758,8 +758,8 @@ int smpi_coll_tuned_reduce_mvapich2( void *sendbuf, if(is_two_level == 1) { if (is_commutative == 1) { - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } mpi_errno = MPIR_Reduce_two_level_helper_MV2(sendbuf, recvbuf, count, datatype, op, root, comm); @@ -801,12 +801,12 @@ int smpi_coll_tuned_reduce_scatter_mvapich2(void *sendbuf, void *recvbuf, int *r MPI_Comm comm) { int mpi_errno = MPI_SUCCESS; - int i = 0, comm_size = smpi_comm_size(comm), total_count = 0, type_size = + int i = 0, comm_size = comm->size(), total_count = 0, type_size = 0, nbytes = 0; int range = 0; int range_threshold = 0; 
int is_commutative = 0; - int *disps = xbt_malloc(comm_size * sizeof (int)); + int *disps = static_cast(xbt_malloc(comm_size * sizeof (int))); if(mv2_red_scat_thresholds_table==NULL) init_mv2_reduce_scatter_tables_stampede(); @@ -893,13 +893,13 @@ int smpi_coll_tuned_scatter_mvapich2(void *sendbuf, if(mv2_scatter_thresholds_table==NULL) init_mv2_scatter_tables_stampede(); - if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - smpi_comm_init_smp(comm); + if(comm->get_leaders_comm()==MPI_COMM_NULL){ + comm->init_smp(); } - comm_size = smpi_comm_size(comm); + comm_size = comm->size(); - rank = smpi_comm_rank(comm); + rank = comm->rank(); if (rank == root) { sendtype_size=smpi_datatype_size(sendtype); @@ -910,10 +910,10 @@ int smpi_coll_tuned_scatter_mvapich2(void *sendbuf, } // check if safe to use partial subscription mode - if (smpi_comm_is_uniform(comm)) { + if (comm->is_uniform()) { - shmem_comm = smpi_comm_get_intra_comm(comm); - local_size = smpi_comm_size(shmem_comm); + shmem_comm = comm->get_intra_comm(); + local_size = shmem_comm->size(); i = 0; if (mv2_scatter_table_ppn_conf[0] == -1) { // Indicating user defined tuning @@ -982,7 +982,7 @@ int smpi_coll_tuned_scatter_mvapich2(void *sendbuf, if( (MV2_Scatter_function == &MPIR_Scatter_MV2_two_level_Direct) || (MV2_Scatter_function == &MPIR_Scatter_MV2_two_level_Binomial)) { - if( smpi_comm_is_blocked(comm)) { + if( comm->is_blocked()) { MV2_Scatter_intra_function = mv2_scatter_thresholds_table[conf_index][range].intra_node[range_threshold_intra] .MV2_pt_Scatter_function; diff --git a/src/smpi/colls/smpi_mvapich2_selector_stampede.h b/src/smpi/colls/smpi_mvapich2_selector_stampede.h index 6c5601e581..7245090080 100644 --- a/src/smpi/colls/smpi_mvapich2_selector_stampede.h +++ b/src/smpi/colls/smpi_mvapich2_selector_stampede.h @@ -51,13 +51,13 @@ static void init_mv2_alltoall_tables_stampede(){ mv2_alltoall_num_ppn_conf = 3; if(smpi_coll_cleanup_callback==NULL) 
smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; - mv2_alltoall_thresholds_table = xbt_malloc(sizeof(mv2_alltoall_tuning_table *) - * mv2_alltoall_num_ppn_conf); - table_ptrs = xbt_malloc(sizeof(mv2_alltoall_tuning_table *) - * mv2_alltoall_num_ppn_conf); - mv2_size_alltoall_tuning_table = xbt_malloc(sizeof(int) * - mv2_alltoall_num_ppn_conf); - mv2_alltoall_table_ppn_conf = xbt_malloc(mv2_alltoall_num_ppn_conf * sizeof(int)); + mv2_alltoall_thresholds_table = static_cast(xbt_malloc(sizeof(mv2_alltoall_tuning_table *) + * mv2_alltoall_num_ppn_conf)); + table_ptrs = static_cast(xbt_malloc(sizeof(mv2_alltoall_tuning_table *) + * mv2_alltoall_num_ppn_conf)); + mv2_size_alltoall_tuning_table = static_cast(xbt_malloc(sizeof(int) * + mv2_alltoall_num_ppn_conf)); + mv2_alltoall_table_ppn_conf = static_cast(xbt_malloc(mv2_alltoall_num_ppn_conf * sizeof(int))); mv2_alltoall_table_ppn_conf[0] = 1; mv2_size_alltoall_tuning_table[0] = 6; mv2_alltoall_tuning_table mv2_tmp_alltoall_thresholds_table_1ppn[] = { @@ -272,7 +272,7 @@ static void init_mv2_alltoall_tables_stampede(){ agg_table_sum += mv2_size_alltoall_tuning_table[i]; } mv2_alltoall_thresholds_table[0] = - xbt_malloc(agg_table_sum * sizeof (mv2_alltoall_tuning_table)); + static_cast(xbt_malloc(agg_table_sum * sizeof (mv2_alltoall_tuning_table))); memcpy(mv2_alltoall_thresholds_table[0], table_ptrs[0], (sizeof(mv2_alltoall_tuning_table) * mv2_size_alltoall_tuning_table[0])); @@ -346,14 +346,14 @@ static void init_mv2_allgather_tables_stampede(){ mv2_allgather_tuning_table **table_ptrs = NULL; mv2_allgather_num_ppn_conf = 3; mv2_allgather_thresholds_table - = xbt_malloc(sizeof(mv2_allgather_tuning_table *) - * mv2_allgather_num_ppn_conf); - table_ptrs = xbt_malloc(sizeof(mv2_allgather_tuning_table *) - * mv2_allgather_num_ppn_conf); - mv2_size_allgather_tuning_table = xbt_malloc(sizeof(int) * - mv2_allgather_num_ppn_conf); + = static_cast(xbt_malloc(sizeof(mv2_allgather_tuning_table *) + * 
mv2_allgather_num_ppn_conf)); + table_ptrs = static_cast(xbt_malloc(sizeof(mv2_allgather_tuning_table *) + * mv2_allgather_num_ppn_conf)); + mv2_size_allgather_tuning_table = static_cast(xbt_malloc(sizeof(int) * + mv2_allgather_num_ppn_conf)); mv2_allgather_table_ppn_conf - = xbt_malloc(mv2_allgather_num_ppn_conf * sizeof(int)); + = static_cast(xbt_malloc(mv2_allgather_num_ppn_conf * sizeof(int))); mv2_allgather_table_ppn_conf[0] = 1; mv2_size_allgather_tuning_table[0] = 6; mv2_allgather_tuning_table mv2_tmp_allgather_thresholds_table_1ppn[] = { @@ -541,7 +541,7 @@ static void init_mv2_allgather_tables_stampede(){ agg_table_sum += mv2_size_allgather_tuning_table[i]; } mv2_allgather_thresholds_table[0] = - xbt_malloc(agg_table_sum * sizeof (mv2_allgather_tuning_table)); + static_cast(xbt_malloc(agg_table_sum * sizeof (mv2_allgather_tuning_table))); memcpy(mv2_allgather_thresholds_table[0], table_ptrs[0], (sizeof(mv2_allgather_tuning_table) * mv2_size_allgather_tuning_table[0])); @@ -601,8 +601,8 @@ static void init_mv2_gather_tables_stampede(){ if(smpi_coll_cleanup_callback==NULL) smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; mv2_size_gather_tuning_table=7; - mv2_gather_thresholds_table = xbt_malloc(mv2_size_gather_tuning_table* - sizeof (mv2_gather_tuning_table)); + mv2_gather_thresholds_table = static_cast(xbt_malloc(mv2_size_gather_tuning_table* + sizeof (mv2_gather_tuning_table))); mv2_gather_tuning_table mv2_tmp_gather_thresholds_table[]={ {16, 2,{{0, 524288, &MPIR_Gather_MV2_Direct}, @@ -688,8 +688,8 @@ static void init_mv2_allgatherv_tables_stampede(){ if(smpi_coll_cleanup_callback==NULL) smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; mv2_size_allgatherv_tuning_table = 6; - mv2_allgatherv_thresholds_table = xbt_malloc(mv2_size_allgatherv_tuning_table * - sizeof (mv2_allgatherv_tuning_table)); + mv2_allgatherv_thresholds_table = static_cast(xbt_malloc(mv2_size_allgatherv_tuning_table * + sizeof (mv2_allgatherv_tuning_table))); 
mv2_allgatherv_tuning_table mv2_tmp_allgatherv_thresholds_table[] = { { 16, @@ -836,8 +836,8 @@ static void init_mv2_allreduce_tables_stampede(){ if(smpi_coll_cleanup_callback==NULL) smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; mv2_size_allreduce_tuning_table = 8; - mv2_allreduce_thresholds_table = xbt_malloc(mv2_size_allreduce_tuning_table * - sizeof (mv2_allreduce_tuning_table)); + mv2_allreduce_thresholds_table = static_cast(xbt_malloc(mv2_size_allreduce_tuning_table * + sizeof (mv2_allreduce_tuning_table))); mv2_allreduce_tuning_table mv2_tmp_allreduce_thresholds_table[] = { { 16, @@ -1036,8 +1036,8 @@ static void init_mv2_bcast_tables_stampede(){ if(smpi_coll_cleanup_callback==NULL) smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; mv2_size_bcast_tuning_table=8; - mv2_bcast_thresholds_table = xbt_malloc(mv2_size_bcast_tuning_table * - sizeof (mv2_bcast_tuning_table)); + mv2_bcast_thresholds_table = static_cast(xbt_malloc(mv2_size_bcast_tuning_table * + sizeof (mv2_bcast_tuning_table))); mv2_bcast_tuning_table mv2_tmp_bcast_thresholds_table[]={ { @@ -1305,8 +1305,8 @@ static void init_mv2_reduce_tables_stampede(){ smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; /*Stampede*/ mv2_size_reduce_tuning_table = 8; - mv2_reduce_thresholds_table = xbt_malloc(mv2_size_reduce_tuning_table * - sizeof (mv2_reduce_tuning_table)); + mv2_reduce_thresholds_table = static_cast(xbt_malloc(mv2_size_reduce_tuning_table * + sizeof (mv2_reduce_tuning_table))); mv2_reduce_tuning_table mv2_tmp_reduce_thresholds_table[] = { { 16, @@ -1548,8 +1548,8 @@ static void init_mv2_reduce_scatter_tables_stampede(){ if(smpi_coll_cleanup_callback==NULL) smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; mv2_size_red_scat_tuning_table = 6; - mv2_red_scat_thresholds_table = xbt_malloc(mv2_size_red_scat_tuning_table * - sizeof (mv2_red_scat_tuning_table)); + mv2_red_scat_thresholds_table = static_cast(xbt_malloc(mv2_size_red_scat_tuning_table * + sizeof 
(mv2_red_scat_tuning_table))); mv2_red_scat_tuning_table mv2_tmp_red_scat_thresholds_table[] = { { 16, @@ -1679,14 +1679,14 @@ static void init_mv2_scatter_tables_stampede(){ mv2_scatter_tuning_table **table_ptrs = NULL; mv2_scatter_num_ppn_conf = 3; mv2_scatter_thresholds_table - = xbt_malloc(sizeof(mv2_scatter_tuning_table *) - * mv2_scatter_num_ppn_conf); - table_ptrs = xbt_malloc(sizeof(mv2_scatter_tuning_table *) - * mv2_scatter_num_ppn_conf); - mv2_size_scatter_tuning_table = xbt_malloc(sizeof(int) * - mv2_scatter_num_ppn_conf); + = static_cast(xbt_malloc(sizeof(mv2_scatter_tuning_table *) + * mv2_scatter_num_ppn_conf)); + table_ptrs = static_cast(xbt_malloc(sizeof(mv2_scatter_tuning_table *) + * mv2_scatter_num_ppn_conf)); + mv2_size_scatter_tuning_table = static_cast(xbt_malloc(sizeof(int) * + mv2_scatter_num_ppn_conf)); mv2_scatter_table_ppn_conf - = xbt_malloc(mv2_scatter_num_ppn_conf * sizeof(int)); + = static_cast(xbt_malloc(mv2_scatter_num_ppn_conf * sizeof(int))); mv2_scatter_table_ppn_conf[0] = 1; mv2_size_scatter_tuning_table[0] = 6; mv2_scatter_tuning_table mv2_tmp_scatter_thresholds_table_1ppn[] = { @@ -1967,7 +1967,7 @@ static void init_mv2_scatter_tables_stampede(){ agg_table_sum += mv2_size_scatter_tuning_table[i]; } mv2_scatter_thresholds_table[0] = - xbt_malloc(agg_table_sum * sizeof (mv2_scatter_tuning_table)); + static_cast(xbt_malloc(agg_table_sum * sizeof (mv2_scatter_tuning_table))); memcpy(mv2_scatter_thresholds_table[0], table_ptrs[0], (sizeof(mv2_scatter_tuning_table) * mv2_size_scatter_tuning_table[0])); diff --git a/src/smpi/colls/smpi_openmpi_selector.c b/src/smpi/colls/smpi_openmpi_selector.cpp similarity index 98% rename from src/smpi/colls/smpi_openmpi_selector.c rename to src/smpi/colls/smpi_openmpi_selector.cpp index b0fb6662dc..a0acf0c44e 100644 --- a/src/smpi/colls/smpi_openmpi_selector.c +++ b/src/smpi/colls/smpi_openmpi_selector.cpp @@ -13,7 +13,7 @@ int smpi_coll_tuned_allreduce_ompi(void *sbuf, void *rbuf, int count, 
MPI_Datatype dtype, MPI_Op op, MPI_Comm comm) { size_t dsize, block_dsize; - int comm_size = smpi_comm_size(comm); + int comm_size = comm->size(); const size_t intermediate_message = 10000; /** @@ -61,7 +61,7 @@ int smpi_coll_tuned_alltoall_ompi( void *sbuf, int scount, { int communicator_size; size_t dsize, block_dsize; - communicator_size = smpi_comm_size(comm); + communicator_size = comm->size(); /* Decision function based on measurement on Grig cluster at the University of Tennessee (2GB MX) up to 64 nodes. @@ -101,7 +101,7 @@ int smpi_coll_tuned_alltoallv_ompi(void *sbuf, int *scounts, int *sdisps, int smpi_coll_tuned_barrier_ompi(MPI_Comm comm) -{ int communicator_size = smpi_comm_size(comm); +{ int communicator_size = comm->size(); if( 2 == communicator_size ) return smpi_coll_tuned_barrier_ompi_two_procs(comm); @@ -141,7 +141,7 @@ int smpi_coll_tuned_bcast_ompi(void *buff, int count, //int segsize = 0; size_t message_size, dsize; - communicator_size = smpi_comm_size(comm); + communicator_size = comm->size(); /* else we need data size for decision function */ dsize = smpi_datatype_size(datatype); @@ -243,7 +243,7 @@ int smpi_coll_tuned_reduce_ompi( void *sendbuf, void *recvbuf, /* no limit on # of outstanding requests */ //const int max_requests = 0; - communicator_size = smpi_comm_size(comm); + communicator_size = comm->size(); /* need data size for decision function */ dsize=smpi_datatype_size(datatype); @@ -342,7 +342,7 @@ int smpi_coll_tuned_reduce_scatter_ompi( void *sbuf, void *rbuf, XBT_DEBUG("smpi_coll_tuned_reduce_scatter_ompi"); - comm_size = smpi_comm_size(comm); + comm_size = comm->size(); // We need data size for decision function dsize=smpi_datatype_size(dtype); total_message_size = 0; @@ -391,7 +391,7 @@ int smpi_coll_tuned_allgather_ompi(void *sbuf, int scount, int communicator_size, pow2_size; size_t dsize, total_dsize; - communicator_size = smpi_comm_size(comm); + communicator_size = comm->size(); /* Special case for 2 processes */ if 
(communicator_size == 2) { @@ -473,7 +473,7 @@ int smpi_coll_tuned_allgatherv_ompi(void *sbuf, int scount, int communicator_size; size_t dsize, total_dsize; - communicator_size = smpi_comm_size(comm); + communicator_size = comm->size(); /* Special case for 2 processes */ if (communicator_size == 2) { @@ -534,8 +534,8 @@ int smpi_coll_tuned_gather_ompi(void *sbuf, int scount, XBT_DEBUG("smpi_coll_tuned_gather_ompi"); - communicator_size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + communicator_size = comm->size(); + rank = comm->rank(); // Determine block size if (rank == root) { @@ -584,8 +584,8 @@ int smpi_coll_tuned_scatter_ompi(void *sbuf, int scount, XBT_DEBUG("smpi_coll_tuned_scatter_ompi"); - communicator_size = smpi_comm_size(comm); - rank = smpi_comm_rank(comm); + communicator_size = comm->size(); + rank = comm->rank(); // Determine block size if (root == rank) { dsize=smpi_datatype_size(sdtype); diff --git a/src/smpi/private.h b/src/smpi/private.h index 30d679f891..d600eb7ad3 100644 --- a/src/smpi/private.h +++ b/src/smpi/private.h @@ -9,6 +9,8 @@ #include "simgrid/simix.h" #include "smpi/smpi.h" +#include "src/smpi/smpi_group.hpp" +#include "src/smpi/smpi_comm.hpp" #include "src/include/smpi/smpi_interface.h" #include "src/instr/instr_private.h" #include "src/internal_config.h" @@ -186,7 +188,7 @@ XBT_PRIVATE bool smpi_process_get_replaying(); XBT_PRIVATE void smpi_deployment_register_process(const char* instance_id, int rank, int index, MPI_Comm** comm, msg_bar_t* bar); XBT_PRIVATE void smpi_deployment_cleanup_instances(); - + XBT_PRIVATE void smpi_comm_copy_buffer_callback(smx_activity_t comm, void *buff, size_t buff_size); XBT_PRIVATE void smpi_comm_null_copy_buffer_callback(smx_activity_t comm, void *buff, size_t buff_size); @@ -245,40 +247,6 @@ XBT_PRIVATE void smpi_op_destroy(MPI_Op op); XBT_PRIVATE void smpi_op_set_fortran(MPI_Op op); XBT_PRIVATE void smpi_op_apply(MPI_Op op, void *invec, void *inoutvec, int *len, MPI_Datatype * 
datatype); -XBT_PRIVATE MPI_Group smpi_group_new(int size); -XBT_PRIVATE MPI_Group smpi_group_copy(MPI_Group origin); -XBT_PRIVATE void smpi_group_destroy(MPI_Group group); -XBT_PRIVATE void smpi_group_set_mapping(MPI_Group group, int index, int rank); -XBT_PRIVATE int smpi_group_index(MPI_Group group, int rank); -XBT_PRIVATE int smpi_group_rank(MPI_Group group, int index); -XBT_PRIVATE int smpi_group_use(MPI_Group group); -XBT_PRIVATE int smpi_group_unuse(MPI_Group group); -XBT_PRIVATE int smpi_group_size(MPI_Group group); -XBT_PRIVATE int smpi_group_compare(MPI_Group group1, MPI_Group group2); -XBT_PRIVATE int smpi_group_incl(MPI_Group group, int n, int* ranks, MPI_Group* newgroup); - -XBT_PRIVATE MPI_Topology smpi_comm_topo(MPI_Comm comm); -XBT_PRIVATE MPI_Comm smpi_comm_new(MPI_Group group, MPI_Topology topo); -XBT_PRIVATE void smpi_comm_destroy(MPI_Comm comm); -XBT_PRIVATE MPI_Group smpi_comm_group(MPI_Comm comm); -XBT_PRIVATE int smpi_comm_size(MPI_Comm comm); -XBT_PRIVATE void smpi_comm_get_name(MPI_Comm comm, char* name, int* len); -XBT_PRIVATE int smpi_comm_rank(MPI_Comm comm); -XBT_PRIVATE MPI_Comm smpi_comm_split(MPI_Comm comm, int color, int key); -XBT_PRIVATE int smpi_comm_dup(MPI_Comm comm, MPI_Comm* newcomm); -XBT_PRIVATE void smpi_comm_use(MPI_Comm comm); -XBT_PRIVATE void smpi_comm_unuse(MPI_Comm comm); -XBT_PRIVATE void smpi_comm_cleanup_attributes(MPI_Comm comm); -XBT_PRIVATE void smpi_comm_cleanup_smp(MPI_Comm comm); -XBT_PRIVATE void smpi_comm_set_leaders_comm(MPI_Comm comm, MPI_Comm leaders); -XBT_PRIVATE void smpi_comm_set_intra_comm(MPI_Comm comm, MPI_Comm leaders); -XBT_PRIVATE int* smpi_comm_get_non_uniform_map(MPI_Comm comm); -XBT_PRIVATE int* smpi_comm_get_leaders_map(MPI_Comm comm); -XBT_PRIVATE MPI_Comm smpi_comm_get_leaders_comm(MPI_Comm comm); -XBT_PRIVATE MPI_Comm smpi_comm_get_intra_comm(MPI_Comm comm); -XBT_PRIVATE int smpi_comm_is_uniform(MPI_Comm comm); -XBT_PRIVATE int smpi_comm_is_blocked(MPI_Comm comm); -XBT_PRIVATE void 
smpi_comm_init_smp(MPI_Comm comm); XBT_PRIVATE int smpi_comm_c2f(MPI_Comm comm); XBT_PRIVATE int smpi_comm_add_f(MPI_Comm comm); @@ -396,13 +364,10 @@ XBT_PRIVATE int smpi_coll_tuned_alltoall_basic_linear(void *sendbuf, int sendcou void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm); XBT_PRIVATE int smpi_coll_basic_alltoallv(void *sendbuf, int *sendcounts, int *senddisps, MPI_Datatype sendtype, void *recvbuf, int *recvcounts, int *recvdisps, MPI_Datatype recvtype, MPI_Comm comm); - XBT_PRIVATE int smpi_comm_keyval_create(MPI_Comm_copy_attr_function* copy_fn, MPI_Comm_delete_attr_function* delete_fn, int* keyval, void* extra_state); XBT_PRIVATE int smpi_comm_keyval_free(int* keyval); -XBT_PRIVATE int smpi_comm_attr_get(MPI_Comm comm, int keyval, void* attr_value, int* flag); -XBT_PRIVATE int smpi_comm_attr_delete(MPI_Comm comm, int keyval); -XBT_PRIVATE int smpi_comm_attr_put(MPI_Comm comm, int keyval, void* attr_value); + XBT_PRIVATE int smpi_type_attr_delete(MPI_Datatype type, int keyval); XBT_PRIVATE int smpi_type_attr_get(MPI_Datatype type, int keyval, void* attr_value, int* flag); XBT_PRIVATE int smpi_type_attr_put(MPI_Datatype type, int keyval, void* attr_value); @@ -434,9 +399,6 @@ XBT_PRIVATE void* smpi_get_tmp_sendbuffer(int size); XBT_PRIVATE void* smpi_get_tmp_recvbuffer(int size); XBT_PRIVATE void smpi_free_tmp_buffer(void* buf); -XBT_PRIVATE int smpi_comm_attr_get(MPI_Comm comm, int keyval, void* attr_value, int* flag); -XBT_PRIVATE XBT_PRIVATE int smpi_comm_attr_delete(MPI_Comm comm, int keyval); -XBT_PRIVATE int smpi_comm_attr_put(MPI_Comm comm, int keyval, void* attr_value); // f77 wrappers void mpi_init_(int* ierr); diff --git a/src/smpi/smpi_base.cpp b/src/smpi/smpi_base.cpp index b64113ca98..7201d37d2f 100644 --- a/src/smpi/smpi_base.cpp +++ b/src/smpi/smpi_base.cpp @@ -218,7 +218,7 @@ static MPI_Request build_request(void *buf, int count, MPI_Datatype datatype, in request->dst = dst; request->tag = tag; request->comm = comm; - 
smpi_comm_use(request->comm); + request->comm->use(); request->action = nullptr; request->flags = flags; request->detached = 0; @@ -259,7 +259,7 @@ MPI_Request smpi_mpi_send_init(void *buf, int count, MPI_Datatype datatype, int { MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */ request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(), - smpi_group_index(smpi_comm_group(comm), dst), tag, comm, PERSISTENT | SEND | PREPARED); + comm->group()->index(dst), tag, comm, PERSISTENT | SEND | PREPARED); return request; } @@ -267,7 +267,7 @@ MPI_Request smpi_mpi_ssend_init(void *buf, int count, MPI_Datatype datatype, int { MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */ request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(), - smpi_group_index(smpi_comm_group(comm), dst), tag, comm, PERSISTENT | SSEND | SEND | PREPARED); + comm->group()->index(dst), tag, comm, PERSISTENT | SSEND | SEND | PREPARED); return request; } @@ -275,7 +275,7 @@ MPI_Request smpi_mpi_recv_init(void *buf, int count, MPI_Datatype datatype, int { MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */ request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, - src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : smpi_group_index(smpi_comm_group(comm), src), + src == MPI_ANY_SOURCE ? 
MPI_ANY_SOURCE : comm->group()->index(src), smpi_process_index(), tag, comm, PERSISTENT | RECV | PREPARED); return request; } @@ -459,7 +459,7 @@ void smpi_mpi_request_free(MPI_Request * request) if((*request)->refcount==0){ smpi_datatype_unuse((*request)->old_type); - smpi_comm_unuse((*request)->comm); + (*request)->comm->unuse(); print_request("Destroying", (*request)); xbt_free(*request); *request = MPI_REQUEST_NULL; @@ -505,7 +505,7 @@ MPI_Request smpi_isend_init(void *buf, int count, MPI_Datatype datatype, int dst { MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */ request = build_request(buf==MPI_BOTTOM ? nullptr : buf , count, datatype, smpi_process_index(), - smpi_group_index(smpi_comm_group(comm), dst), tag,comm, PERSISTENT | ISEND | SEND | PREPARED); + comm->group()->index(dst), tag,comm, PERSISTENT | ISEND | SEND | PREPARED); return request; } @@ -513,7 +513,7 @@ MPI_Request smpi_mpi_isend(void *buf, int count, MPI_Datatype datatype, int dst, { MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */ request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(), - smpi_group_index(smpi_comm_group(comm), dst), tag, comm, NON_PERSISTENT | ISEND | SEND); + comm->group()->index(dst), tag, comm, NON_PERSISTENT | ISEND | SEND); smpi_mpi_start(request); return request; } @@ -522,7 +522,7 @@ MPI_Request smpi_mpi_issend(void *buf, int count, MPI_Datatype datatype, int dst { MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */ request = build_request(buf==MPI_BOTTOM ? 
nullptr : buf, count, datatype, smpi_process_index(), - smpi_group_index(smpi_comm_group(comm), dst), tag,comm, NON_PERSISTENT | ISEND | SSEND | SEND); + comm->group()->index(dst), tag,comm, NON_PERSISTENT | ISEND | SSEND | SEND); smpi_mpi_start(request); return request; } @@ -531,7 +531,7 @@ MPI_Request smpi_irecv_init(void *buf, int count, MPI_Datatype datatype, int src { MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */ request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : - smpi_group_index(smpi_comm_group(comm), src), smpi_process_index(), tag, + comm->group()->index(src), smpi_process_index(), tag, comm, PERSISTENT | RECV | PREPARED); return request; } @@ -540,7 +540,7 @@ MPI_Request smpi_mpi_irecv(void *buf, int count, MPI_Datatype datatype, int src, { MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */ request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, src == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : - smpi_group_index(smpi_comm_group(comm), src), smpi_process_index(), tag, comm, + comm->group()->index(src), smpi_process_index(), tag, comm, NON_PERSISTENT | RECV); smpi_mpi_start(request); return request; @@ -558,7 +558,7 @@ void smpi_mpi_send(void *buf, int count, MPI_Datatype datatype, int dst, int tag { MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */ request = build_request(buf==MPI_BOTTOM ? 
nullptr : buf, count, datatype, smpi_process_index(), - smpi_group_index(smpi_comm_group(comm), dst), tag, comm, NON_PERSISTENT | SEND); + comm->group()->index(dst), tag, comm, NON_PERSISTENT | SEND); smpi_mpi_start(request); smpi_mpi_wait(&request, MPI_STATUS_IGNORE); @@ -569,7 +569,7 @@ void smpi_mpi_ssend(void *buf, int count, MPI_Datatype datatype, int dst, int ta { MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */ request = build_request(buf==MPI_BOTTOM ? nullptr : buf, count, datatype, smpi_process_index(), - smpi_group_index(smpi_comm_group(comm), dst), tag, comm, NON_PERSISTENT | SSEND | SEND); + comm->group()->index(dst), tag, comm, NON_PERSISTENT | SSEND | SEND); smpi_mpi_start(request); smpi_mpi_wait(&request, MPI_STATUS_IGNORE); @@ -583,7 +583,7 @@ void smpi_mpi_sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype,int d MPI_Request requests[2]; MPI_Status stats[2]; int myid=smpi_process_index(); - if ((smpi_group_index(smpi_comm_group(comm), dst) == myid) && (smpi_group_index(smpi_comm_group(comm), src) == myid)){ + if ((comm->group()->index(dst) == myid) && (comm->group()->index(src) == myid)){ smpi_datatype_copy(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype); return; } @@ -612,7 +612,7 @@ static void finish_wait(MPI_Request * request, MPI_Status * status) if(!((req->detached != 0) && ((req->flags & SEND) != 0)) && ((req->flags & PREPARED) == 0)){ if(status != MPI_STATUS_IGNORE) { int src = req->src == MPI_ANY_SOURCE ? req->real_src : req->src; - status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(req->comm), src); + status->MPI_SOURCE = req->comm->group()->rank(src); status->MPI_TAG = req->tag == MPI_ANY_TAG ? req->real_tag : req->tag; status->MPI_ERROR = req->truncated != 0 ? 
MPI_ERR_TRUNCATE : MPI_SUCCESS; // this handles the case were size in receive differs from size in send @@ -775,7 +775,7 @@ void smpi_mpi_probe(int source, int tag, MPI_Comm comm, MPI_Status* status){ void smpi_mpi_iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status){ MPI_Request request = build_request(nullptr, 0, MPI_CHAR, source == MPI_ANY_SOURCE ? MPI_ANY_SOURCE : - smpi_group_index(smpi_comm_group(comm), source), smpi_comm_rank(comm), tag, comm, PERSISTENT | RECV); + comm->group()->index(source), comm->rank(), tag, comm, PERSISTENT | RECV); // to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls // (especially when used as a break condition, such as while(MPI_Iprobe(...)) ... ) @@ -812,7 +812,7 @@ void smpi_mpi_iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* MPI_Request req = static_cast(sync_comm->src_data); *flag = 1; if(status != MPI_STATUS_IGNORE && (req->flags & PREPARED) == 0) { - status->MPI_SOURCE = smpi_group_rank(smpi_comm_group(comm), req->src); + status->MPI_SOURCE = comm->group()->rank(req->src); status->MPI_TAG = req->tag; status->MPI_ERROR = MPI_SUCCESS; status->count = req->real_size; @@ -1030,8 +1030,8 @@ void smpi_mpi_gather(void *sendbuf, int sendcount, MPI_Datatype sendtype, MPI_Aint lb = 0; MPI_Aint recvext = 0; - int rank = smpi_comm_rank(comm); - int size = smpi_comm_size(comm); + int rank = comm->rank(); + int size = comm->size(); if(rank != root) { // Send buffer to root smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm); @@ -1063,10 +1063,10 @@ void smpi_mpi_gather(void *sendbuf, int sendcount, MPI_Datatype sendtype, void smpi_mpi_reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) { - int rank = smpi_comm_rank(comm); + int rank = comm->rank(); /* arbitrarily choose root as rank 0 */ - int size = smpi_comm_size(comm); + int size = comm->size(); int count = 0; 
int *displs = xbt_new(int, size); for (int i = 0; i < size; i++) { @@ -1088,8 +1088,8 @@ void smpi_mpi_gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void MPI_Aint lb = 0; MPI_Aint recvext = 0; - int rank = smpi_comm_rank(comm); - int size = smpi_comm_size(comm); + int rank = comm->rank(); + int size = comm->size(); if (rank != root) { // Send buffer to root smpi_mpi_send(sendbuf, sendcount, sendtype, root, system_tag, comm); @@ -1126,8 +1126,8 @@ void smpi_mpi_allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype, MPI_Aint recvext = 0; MPI_Request *requests; - int rank = smpi_comm_rank(comm); - int size = smpi_comm_size(comm); + int rank = comm->rank(); + int size = comm->size(); // FIXME: check for errors smpi_datatype_extent(recvtype, &lb, &recvext); // Local copy from self @@ -1161,8 +1161,8 @@ void smpi_mpi_allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, vo MPI_Aint lb = 0; MPI_Aint recvext = 0; - int rank = smpi_comm_rank(comm); - int size = smpi_comm_size(comm); + int rank = comm->rank(); + int size = comm->size(); smpi_datatype_extent(recvtype, &lb, &recvext); // Local copy from self smpi_datatype_copy(sendbuf, sendcount, sendtype, @@ -1197,8 +1197,8 @@ void smpi_mpi_scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype, MPI_Aint sendext = 0; MPI_Request *requests; - int rank = smpi_comm_rank(comm); - int size = smpi_comm_size(comm); + int rank = comm->rank(); + int size = comm->size(); if(rank != root) { // Recv buffer from root smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm, MPI_STATUS_IGNORE); @@ -1236,8 +1236,8 @@ void smpi_mpi_scatterv(void *sendbuf, int *sendcounts, int *displs, MPI_Datatype MPI_Aint lb = 0; MPI_Aint sendext = 0; - int rank = smpi_comm_rank(comm); - int size = smpi_comm_size(comm); + int rank = comm->rank(); + int size = comm->size(); if(rank != root) { // Recv buffer from root smpi_mpi_recv(recvbuf, recvcount, recvtype, root, system_tag, comm, MPI_STATUS_IGNORE); @@ 
-1277,8 +1277,8 @@ void smpi_mpi_reduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datat char* sendtmpbuf = static_cast(sendbuf); - int rank = smpi_comm_rank(comm); - int size = smpi_comm_size(comm); + int rank = comm->rank(); + int size = comm->size(); //non commutative case, use a working algo from openmpi if(!smpi_op_is_commute(op)){ smpi_coll_tuned_reduce_ompi_basic_linear(sendtmpbuf, recvbuf, count, datatype, op, root, comm); @@ -1350,8 +1350,8 @@ void smpi_mpi_scan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatyp MPI_Aint lb = 0; MPI_Aint dataext = 0; - int rank = smpi_comm_rank(comm); - int size = smpi_comm_size(comm); + int rank = comm->rank(); + int size = comm->size(); smpi_datatype_extent(datatype, &lb, &dataext); @@ -1410,8 +1410,8 @@ void smpi_mpi_exscan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datat MPI_Aint lb = 0; MPI_Aint dataext = 0; int recvbuf_is_empty=1; - int rank = smpi_comm_rank(comm); - int size = smpi_comm_size(comm); + int rank = comm->rank(); + int size = comm->size(); smpi_datatype_extent(datatype, &lb, &dataext); diff --git a/src/smpi/smpi_bench.cpp b/src/smpi/smpi_bench.cpp index 7873514ec5..db2431f9e7 100644 --- a/src/smpi/smpi_bench.cpp +++ b/src/smpi/smpi_bench.cpp @@ -340,7 +340,7 @@ static unsigned int private_sleep(double secs) smpi_bench_end(); XBT_DEBUG("Sleep for: %lf secs", secs); - int rank = smpi_comm_rank(MPI_COMM_WORLD); + int rank = MPI_COMM_WORLD->rank(); instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); extra->type=TRACING_SLEEPING; extra->sleep_duration=secs; diff --git a/src/smpi/smpi_coll.cpp b/src/smpi/smpi_coll.cpp index e6fae05f0e..1fbc744d2a 100644 --- a/src/smpi/smpi_coll.cpp +++ b/src/smpi/smpi_coll.cpp @@ -125,7 +125,7 @@ void (*smpi_coll_cleanup_callback)(); int smpi_coll_tuned_alltoall_ompi2(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm) { - int size = smpi_comm_size(comm); + int size = 
comm->size(); int sendsize = smpi_datatype_size(sendtype) * sendcount; if (sendsize < 200 && size > 12) { return smpi_coll_tuned_alltoall_bruck(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm); @@ -154,8 +154,8 @@ int smpi_coll_tuned_alltoall_bruck(void *sendbuf, int sendcount, MPI_Datatype se MPI_Request *requests; // FIXME: check implementation - int rank = smpi_comm_rank(comm); - int size = smpi_comm_size(comm); + int rank = comm->rank(); + int size = comm->size(); XBT_DEBUG("<%d> algorithm alltoall_bruck() called.", rank); smpi_datatype_extent(sendtype, &lb, &sendext); smpi_datatype_extent(recvtype, &lb, &recvext); @@ -212,8 +212,8 @@ int smpi_coll_tuned_alltoall_basic_linear(void *sendbuf, int sendcount, MPI_Data MPI_Request *requests; /* Initialize. */ - int rank = smpi_comm_rank(comm); - int size = smpi_comm_size(comm); + int rank = comm->rank(); + int size = comm->size(); XBT_DEBUG("<%d> algorithm alltoall_basic_linear() called.", rank); smpi_datatype_extent(sendtype, &lb, &sendext); smpi_datatype_extent(recvtype, &lb, &recvext); @@ -265,8 +265,8 @@ int smpi_coll_basic_alltoallv(void *sendbuf, int *sendcounts, int *senddisps, MP MPI_Request *requests; /* Initialize. */ - int rank = smpi_comm_rank(comm); - int size = smpi_comm_size(comm); + int rank = comm->rank(); + int size = comm->size(); XBT_DEBUG("<%d> algorithm basic_alltoallv() called.", rank); smpi_datatype_extent(sendtype, &lb, &sendext); smpi_datatype_extent(recvtype, &lb, &recvext); diff --git a/src/smpi/smpi_comm.cpp b/src/smpi/smpi_comm.cpp index d3657510d0..58748d92b1 100644 --- a/src/smpi/smpi_comm.cpp +++ b/src/smpi/smpi_comm.cpp @@ -26,21 +26,6 @@ int comm_keyval_id = 0;//avoid collisions /* Support for cartesian topology was added, but there are 2 other types of topology, graph et dist graph. In order to * support them, we have to add a field MPIR_Topo_type, and replace the MPI_Topology field by an union. 
*/ -typedef struct s_smpi_mpi_communicator { - MPI_Group group; - MPIR_Topo_type topoType; - MPI_Topology topo; // to be replaced by an union - int refcount; - MPI_Comm leaders_comm;//inter-node communicator - MPI_Comm intra_comm;//intra-node communicator . For MPI_COMM_WORLD this can't be used, as var is global. - //use an intracomm stored in the process data instead - int* leaders_map; //who is the leader of each process - int is_uniform; - int* non_uniform_map; //set if smp nodes have a different number of processes allocated - int is_blocked;// are ranks allocated on the same smp node contiguous ? - xbt_dict_t attributes; -} s_smpi_mpi_communicator_t; - static int smpi_compare_rankmap(const void *a, const void *b) { const int* x = static_cast(a); @@ -61,164 +46,163 @@ static int smpi_compare_rankmap(const void *a, const void *b) return 1; } -MPI_Comm smpi_comm_new(MPI_Group group, MPI_Topology topo) +namespace simgrid{ +namespace SMPI{ + +Comm::Comm(MPI_Group group, MPI_Topology topo) { - MPI_Comm comm; - - comm = xbt_new(s_smpi_mpi_communicator_t, 1); - comm->group = group; - comm->refcount=1; - comm->topoType = MPI_INVALID_TOPO; - comm->topo = topo; - comm->intra_comm = MPI_COMM_NULL; - comm->leaders_comm = MPI_COMM_NULL; - comm->is_uniform=1; - comm->non_uniform_map = nullptr; - comm->leaders_map = nullptr; - comm->is_blocked=0; - comm->attributes=nullptr; - return comm; + m_group = group; + m_refcount=1; + m_topoType = MPI_INVALID_TOPO; + m_topo = topo; + m_intra_comm = MPI_COMM_NULL; + m_leaders_comm = MPI_COMM_NULL; + m_is_uniform=1; + m_non_uniform_map = nullptr; + m_leaders_map = nullptr; + m_is_blocked=0; + m_attributes=nullptr; } -void smpi_comm_destroy(MPI_Comm comm) +void Comm::destroy() { - if (comm == MPI_COMM_UNINITIALIZED) - comm = smpi_process_comm_world(); - smpi_topo_destroy(comm->topo); // there's no use count on topos - smpi_comm_unuse(comm); + if (this == MPI_COMM_UNINITIALIZED) + return smpi_process_comm_world()->destroy(); + 
smpi_topo_destroy(m_topo); // there's no use count on topos + this->unuse(); } -int smpi_comm_dup(MPI_Comm comm, MPI_Comm* newcomm){ +int Comm::dup(MPI_Comm* newcomm){ if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables smpi_switch_data_segment(smpi_process_index()); } - MPI_Group cp=smpi_group_copy(smpi_comm_group(comm)); - (*newcomm) = smpi_comm_new(cp, smpi_comm_topo(comm)); + MPI_Group cp = new simgrid::SMPI::Group(this->group()); + (*newcomm) = new simgrid::SMPI::Comm(cp, this->topo()); int ret = MPI_SUCCESS; - if(comm->attributes !=nullptr){ - (*newcomm)->attributes = xbt_dict_new_homogeneous(nullptr); + if(m_attributes !=nullptr){ + (*newcomm)->m_attributes = xbt_dict_new_homogeneous(nullptr); xbt_dict_cursor_t cursor = nullptr; char* key; int flag; void* value_in; void* value_out; - xbt_dict_foreach (comm->attributes, cursor, key, value_in) { + xbt_dict_foreach (m_attributes, cursor, key, value_in) { smpi_comm_key_elem elem = static_cast(xbt_dict_get_or_null_ext(smpi_comm_keyvals, key, sizeof(int))); if (elem != nullptr && elem->copy_fn != MPI_NULL_COPY_FN) { - ret = elem->copy_fn(comm, atoi(key), nullptr, value_in, &value_out, &flag); + ret = elem->copy_fn(this, atoi(key), nullptr, value_in, &value_out, &flag); if (ret != MPI_SUCCESS) { - smpi_comm_destroy(*newcomm); + (*newcomm)->destroy(); *newcomm = MPI_COMM_NULL; xbt_dict_cursor_free(&cursor); return ret; } if (flag) - xbt_dict_set_ext((*newcomm)->attributes, key, sizeof(int), value_out, nullptr); + xbt_dict_set_ext((*newcomm)->m_attributes, key, sizeof(int), value_out, nullptr); } } } return ret; } -MPI_Group smpi_comm_group(MPI_Comm comm) +MPI_Group Comm::group() { - if (comm == MPI_COMM_UNINITIALIZED) - comm = smpi_process_comm_world(); - return comm->group; + if (this == MPI_COMM_UNINITIALIZED) + return smpi_process_comm_world()->group(); + return m_group; } -MPI_Topology smpi_comm_topo(MPI_Comm comm) { - if (comm != MPI_COMM_NULL) - 
return comm->topo; +MPI_Topology Comm::topo() { + if (this != MPI_COMM_NULL) + return m_topo; return nullptr; } -int smpi_comm_size(MPI_Comm comm) +int Comm::size() { - if (comm == MPI_COMM_UNINITIALIZED) - comm = smpi_process_comm_world(); - return smpi_group_size(smpi_comm_group(comm)); + if (this == MPI_COMM_UNINITIALIZED) + return smpi_process_comm_world()->size(); + return m_group->size(); } -int smpi_comm_rank(MPI_Comm comm) +int Comm::rank() { - if (comm == MPI_COMM_UNINITIALIZED) - comm = smpi_process_comm_world(); - return smpi_group_rank(smpi_comm_group(comm), smpi_process_index()); + if (this == MPI_COMM_UNINITIALIZED) + return smpi_process_comm_world()->rank(); + return m_group->rank(smpi_process_index()); } -void smpi_comm_get_name (MPI_Comm comm, char* name, int* len) +void Comm::get_name (char* name, int* len) { - if (comm == MPI_COMM_UNINITIALIZED) - comm = smpi_process_comm_world(); - if(comm == MPI_COMM_WORLD) { + if (this == MPI_COMM_UNINITIALIZED) + return smpi_process_comm_world()->get_name(name, len); + if(this == MPI_COMM_WORLD) { strncpy(name, "WORLD",5); *len = 5; } else { - *len = snprintf(name, MPI_MAX_NAME_STRING, "%p", comm); + *len = snprintf(name, MPI_MAX_NAME_STRING, "%p", this); } } -void smpi_comm_set_leaders_comm(MPI_Comm comm, MPI_Comm leaders){ - if (comm == MPI_COMM_UNINITIALIZED) - comm = smpi_process_comm_world(); - comm->leaders_comm=leaders; +void Comm::set_leaders_comm(MPI_Comm leaders){ + if (this == MPI_COMM_UNINITIALIZED) + return smpi_process_comm_world()->set_leaders_comm(leaders); + m_leaders_comm=leaders; } -void smpi_comm_set_intra_comm(MPI_Comm comm, MPI_Comm leaders){ - comm->intra_comm=leaders; +void Comm::set_intra_comm(MPI_Comm leaders){ + m_intra_comm=leaders; } -int* smpi_comm_get_non_uniform_map(MPI_Comm comm){ - if (comm == MPI_COMM_UNINITIALIZED) - comm = smpi_process_comm_world(); - return comm->non_uniform_map; +int* Comm::get_non_uniform_map(){ + if (this == MPI_COMM_UNINITIALIZED) + return 
smpi_process_comm_world()->get_non_uniform_map(); + return m_non_uniform_map; } -int* smpi_comm_get_leaders_map(MPI_Comm comm){ - if (comm == MPI_COMM_UNINITIALIZED) - comm = smpi_process_comm_world(); - return comm->leaders_map; +int* Comm::get_leaders_map(){ + if (this == MPI_COMM_UNINITIALIZED) + return smpi_process_comm_world()->get_leaders_map(); + return m_leaders_map; } -MPI_Comm smpi_comm_get_leaders_comm(MPI_Comm comm){ - if (comm == MPI_COMM_UNINITIALIZED) - comm = smpi_process_comm_world(); - return comm->leaders_comm; +MPI_Comm Comm::get_leaders_comm(){ + if (this == MPI_COMM_UNINITIALIZED) + return smpi_process_comm_world()->get_leaders_comm(); + return m_leaders_comm; } -MPI_Comm smpi_comm_get_intra_comm(MPI_Comm comm){ - if (comm == MPI_COMM_UNINITIALIZED || comm==MPI_COMM_WORLD) +MPI_Comm Comm::get_intra_comm(){ + if (this == MPI_COMM_UNINITIALIZED || this==MPI_COMM_WORLD) return smpi_process_get_comm_intra(); - else return comm->intra_comm; + else return m_intra_comm; } -int smpi_comm_is_uniform(MPI_Comm comm){ - if (comm == MPI_COMM_UNINITIALIZED) - comm = smpi_process_comm_world(); - return comm->is_uniform; +int Comm::is_uniform(){ + if (this == MPI_COMM_UNINITIALIZED) + return smpi_process_comm_world()->is_uniform(); + return m_is_uniform; } -int smpi_comm_is_blocked(MPI_Comm comm){ - if (comm == MPI_COMM_UNINITIALIZED) - comm = smpi_process_comm_world(); - return comm->is_blocked; +int Comm::is_blocked(){ + if (this == MPI_COMM_UNINITIALIZED) + return smpi_process_comm_world()->is_blocked(); + return m_is_blocked; } -MPI_Comm smpi_comm_split(MPI_Comm comm, int color, int key) +MPI_Comm Comm::split(int color, int key) { - if (comm == MPI_COMM_UNINITIALIZED) - comm = smpi_process_comm_world(); + if (this == MPI_COMM_UNINITIALIZED) + return smpi_process_comm_world()->split(color, key); int system_tag = 123; int* recvbuf; MPI_Group group_root = nullptr; MPI_Group group_out = nullptr; - MPI_Group group = smpi_comm_group(comm); - int rank = 
smpi_comm_rank(comm); - int size = smpi_comm_size(comm); + MPI_Group group = this->group(); + int rank = this->rank(); + int size = this->size(); /* Gather all colors and keys on rank 0 */ int* sendbuf = xbt_new(int, 2); sendbuf[0] = color; @@ -228,7 +212,7 @@ MPI_Comm smpi_comm_split(MPI_Comm comm, int color, int key) } else { recvbuf = nullptr; } - smpi_mpi_gather(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0, comm); + smpi_mpi_gather(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0, this); xbt_free(sendbuf); /* Do the actual job */ if(rank == 0) { @@ -251,25 +235,25 @@ MPI_Comm smpi_comm_split(MPI_Comm comm, int color, int key) rankmap[2 * count + 1] = recvbuf[2 * i + 1]; count++; qsort(rankmap, count, 2 * sizeof(int), &smpi_compare_rankmap); - group_out = smpi_group_new(count); + group_out = new simgrid::SMPI::Group(count); if (i == 0) { group_root = group_out; /* Save root's group */ } for (int j = 0; j < count; j++) { - int index = smpi_group_index(group, rankmap[2 * j]); - smpi_group_set_mapping(group_out, index, j); + int index = group->index(rankmap[2 * j]); + group_out->set_mapping(index, j); } MPI_Request* requests = xbt_new(MPI_Request, count); int reqs = 0; for (int j = 0; j < count; j++) { if(rankmap[2 * j] != 0) { - group_snd[reqs]=smpi_group_copy(group_out); - requests[reqs] = smpi_mpi_isend(&(group_snd[reqs]), 1, MPI_PTR, rankmap[2 * j], system_tag, comm); + group_snd[reqs]=new simgrid::SMPI::Group(group_out); + requests[reqs] = smpi_mpi_isend(&(group_snd[reqs]), 1, MPI_PTR, rankmap[2 * j], system_tag, this); reqs++; } } if(i != 0) { - smpi_group_destroy(group_out); + group_out->destroy(); } smpi_mpi_waitall(reqs, requests, MPI_STATUS_IGNORE); xbt_free(requests); @@ -281,55 +265,55 @@ MPI_Comm smpi_comm_split(MPI_Comm comm, int color, int key) group_out = group_root; /* exit with root's group */ } else { if(color != MPI_UNDEFINED) { - smpi_mpi_recv(&group_out, 1, MPI_PTR, 0, system_tag, comm, MPI_STATUS_IGNORE); + smpi_mpi_recv(&group_out, 1, MPI_PTR, 0, 
system_tag, this, MPI_STATUS_IGNORE); } /* otherwise, exit with group_out == nullptr */ } - return group_out!=nullptr ? smpi_comm_new(group_out, nullptr) : MPI_COMM_NULL; + return group_out!=nullptr ? new simgrid::SMPI::Comm(group_out, nullptr) : MPI_COMM_NULL; } -void smpi_comm_use(MPI_Comm comm){ - if (comm == MPI_COMM_UNINITIALIZED) - comm = smpi_process_comm_world(); - smpi_group_use(comm->group); - comm->refcount++; +void Comm::use(){ + if (this == MPI_COMM_UNINITIALIZED) + return smpi_process_comm_world()->use(); + m_group->use(); + m_refcount++; } -void smpi_comm_cleanup_attributes(MPI_Comm comm){ - if(comm->attributes !=nullptr){ +void Comm::cleanup_attributes(){ + if(m_attributes !=nullptr){ xbt_dict_cursor_t cursor = nullptr; char* key; void* value; int flag; - xbt_dict_foreach (comm->attributes, cursor, key, value) { + xbt_dict_foreach (m_attributes, cursor, key, value) { smpi_comm_key_elem elem = static_cast(xbt_dict_get_or_null(smpi_comm_keyvals, key)); if (elem != nullptr && elem->delete_fn != nullptr) - elem->delete_fn(comm, atoi(key), value, &flag); + elem->delete_fn(this, atoi(key), value, &flag); } - xbt_dict_free(&comm->attributes); + xbt_dict_free(&m_attributes); } } -void smpi_comm_cleanup_smp(MPI_Comm comm){ - if (comm->intra_comm != MPI_COMM_NULL) - smpi_comm_unuse(comm->intra_comm); - if (comm->leaders_comm != MPI_COMM_NULL) - smpi_comm_unuse(comm->leaders_comm); - if (comm->non_uniform_map != nullptr) - xbt_free(comm->non_uniform_map); - if (comm->leaders_map != nullptr) - xbt_free(comm->leaders_map); +void Comm::cleanup_smp(){ + if (m_intra_comm != MPI_COMM_NULL) + m_intra_comm->unuse(); + if (m_leaders_comm != MPI_COMM_NULL) + m_leaders_comm->unuse(); + if (m_non_uniform_map != nullptr) + xbt_free(m_non_uniform_map); + if (m_leaders_map != nullptr) + xbt_free(m_leaders_map); } -void smpi_comm_unuse(MPI_Comm comm){ - if (comm == MPI_COMM_UNINITIALIZED) - comm = smpi_process_comm_world(); - comm->refcount--; - smpi_group_unuse(comm->group); 
+void Comm::unuse(){ + if (this == MPI_COMM_UNINITIALIZED) + return smpi_process_comm_world()->unuse(); + m_refcount--; + m_group->unuse(); - if(comm->refcount==0){ - smpi_comm_cleanup_smp(comm); - smpi_comm_cleanup_attributes(comm); - xbt_free(comm); + if(m_refcount==0){ + this->cleanup_smp(); + this->cleanup_attributes(); + delete this; } } @@ -341,13 +325,13 @@ static int compare_ints (const void *a, const void *b) return static_cast(*da > *db) - static_cast(*da < *db); } -void smpi_comm_init_smp(MPI_Comm comm){ +void Comm::init_smp(){ int leader = -1; - if (comm == MPI_COMM_UNINITIALIZED) - comm = smpi_process_comm_world(); + if (this == MPI_COMM_UNINITIALIZED) + return smpi_process_comm_world()->init_smp(); - int comm_size =smpi_comm_size(comm); + int comm_size = this->size(); // If we are in replay - perform an ugly hack // tell SimGrid we are not in replay for a while, because we need the buffers to be copied for the following calls @@ -370,7 +354,7 @@ void smpi_comm_init_smp(MPI_Comm comm){ xbt_swag_foreach(process, process_list) { int index = process->pid -1; - if(smpi_group_rank(smpi_comm_group(comm), index)!=MPI_UNDEFINED){ + if(this->group()->rank(index)!=MPI_UNDEFINED){ intra_comm_size++; //the process is in the comm if(index < min_index) @@ -379,18 +363,18 @@ void smpi_comm_init_smp(MPI_Comm comm){ } } XBT_DEBUG("number of processes deployed on my node : %d", intra_comm_size); - MPI_Group group_intra = smpi_group_new(intra_comm_size); + MPI_Group group_intra = new simgrid::SMPI::Group(intra_comm_size); i=0; process = nullptr; xbt_swag_foreach(process, process_list) { int index = process->pid -1; - if(smpi_group_rank(smpi_comm_group(comm), index)!=MPI_UNDEFINED){ - smpi_group_set_mapping(group_intra, index, i); + if(this->group()->rank(index)!=MPI_UNDEFINED){ + group_intra->set_mapping(index, i); i++; } } - MPI_Comm comm_intra = smpi_comm_new(group_intra, nullptr); + MPI_Comm comm_intra = new simgrid::SMPI::Comm(group_intra, nullptr); leader=min_index; 
int * leaders_map= static_cast(xbt_malloc0(sizeof(int)*comm_size)); @@ -399,14 +383,14 @@ void smpi_comm_init_smp(MPI_Comm comm){ leader_list[i]=-1; } - smpi_coll_tuned_allgather_mpich(&leader, 1, MPI_INT , leaders_map, 1, MPI_INT, comm); + smpi_coll_tuned_allgather_mpich(&leader, 1, MPI_INT , leaders_map, 1, MPI_INT, this); if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables smpi_switch_data_segment(smpi_process_index()); } - if(comm->leaders_map==nullptr){ - comm->leaders_map= leaders_map; + if(m_leaders_map==nullptr){ + m_leaders_map= leaders_map; }else{ xbt_free(leaders_map); } @@ -415,39 +399,39 @@ void smpi_comm_init_smp(MPI_Comm comm){ for(i=0; ileaders_map[i]==leader_list[j]){ + if(m_leaders_map[i]==leader_list[j]){ already_done=1; } } if(already_done==0){ - leader_list[leader_group_size]=comm->leaders_map[i]; + leader_list[leader_group_size]=m_leaders_map[i]; leader_group_size++; } } qsort(leader_list, leader_group_size, sizeof(int),compare_ints); - MPI_Group leaders_group = smpi_group_new(leader_group_size); + MPI_Group leaders_group = new simgrid::SMPI::Group(leader_group_size); MPI_Comm leader_comm = MPI_COMM_NULL; - if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && comm!=MPI_COMM_WORLD){ + if(MPI_COMM_WORLD!=MPI_COMM_UNINITIALIZED && this!=MPI_COMM_WORLD){ //create leader_communicator for (i=0; i< leader_group_size;i++) - smpi_group_set_mapping(leaders_group, leader_list[i], i); - leader_comm = smpi_comm_new(leaders_group, nullptr); - smpi_comm_set_leaders_comm(comm, leader_comm); - smpi_comm_set_intra_comm(comm, comm_intra); + leaders_group->set_mapping(leader_list[i], i); + leader_comm = new simgrid::SMPI::Comm(leaders_group, nullptr); + this->set_leaders_comm(leader_comm); + this->set_intra_comm(comm_intra); //create intracommunicator }else{ for (i=0; i< leader_group_size;i++) - smpi_group_set_mapping(leaders_group, leader_list[i], i); + leaders_group->set_mapping(leader_list[i], i); - 
if(smpi_comm_get_leaders_comm(comm)==MPI_COMM_NULL){ - leader_comm = smpi_comm_new(leaders_group, nullptr); - smpi_comm_set_leaders_comm(comm, leader_comm); + if(this->get_leaders_comm()==MPI_COMM_NULL){ + leader_comm = new simgrid::SMPI::Comm(leaders_group, nullptr); + this->set_leaders_comm(leader_comm); }else{ - leader_comm=smpi_comm_get_leaders_comm(comm); - smpi_group_unuse(leaders_group); + leader_comm=this->get_leaders_comm(); + leaders_group->unuse(); } smpi_process_set_comm_intra(comm_intra); } @@ -455,8 +439,8 @@ void smpi_comm_init_smp(MPI_Comm comm){ int is_uniform = 1; // Are the nodes uniform ? = same number of process/node - int my_local_size=smpi_comm_size(comm_intra); - if(smpi_comm_rank(comm_intra)==0) { + int my_local_size=comm_intra->size(); + if(comm_intra->rank()==0) { int* non_uniform_map = xbt_new0(int,leader_group_size); smpi_coll_tuned_allgather_mpich(&my_local_size, 1, MPI_INT, non_uniform_map, 1, MPI_INT, leader_comm); @@ -466,23 +450,23 @@ void smpi_comm_init_smp(MPI_Comm comm){ break; } } - if(is_uniform==0 && smpi_comm_is_uniform(comm)!=0){ - comm->non_uniform_map= non_uniform_map; + if(is_uniform==0 && this->is_uniform()!=0){ + m_non_uniform_map= non_uniform_map; }else{ xbt_free(non_uniform_map); } - comm->is_uniform=is_uniform; + m_is_uniform=is_uniform; } - smpi_coll_tuned_bcast_mpich(&(comm->is_uniform),1, MPI_INT, 0, comm_intra ); + smpi_coll_tuned_bcast_mpich(&(m_is_uniform),1, MPI_INT, 0, comm_intra ); if(smpi_privatize_global_variables){ //we need to switch as the called function may silently touch global variables smpi_switch_data_segment(smpi_process_index()); } // Are the ranks blocked ? 
= allocated contiguously on the SMP nodes int is_blocked=1; - int prev=smpi_group_rank(smpi_comm_group(comm), smpi_group_index(smpi_comm_group(comm_intra), 0)); + int prev=this->group()->rank(comm_intra->group()->index(0)); for (i=1; igroup()->rank(comm_intra->group()->index(i)); if(that!=prev+1){ is_blocked=0; break; @@ -491,14 +475,14 @@ void smpi_comm_init_smp(MPI_Comm comm){ } int global_blocked; - smpi_mpi_allreduce(&is_blocked, &(global_blocked), 1, MPI_INT, MPI_LAND, comm); + smpi_mpi_allreduce(&is_blocked, &(global_blocked), 1, MPI_INT, MPI_LAND, this); - if(MPI_COMM_WORLD==MPI_COMM_UNINITIALIZED || comm==MPI_COMM_WORLD){ - if(smpi_comm_rank(comm)==0){ - comm->is_blocked=global_blocked; + if(MPI_COMM_WORLD==MPI_COMM_UNINITIALIZED || this==MPI_COMM_WORLD){ + if(this->rank()==0){ + m_is_blocked=global_blocked; } }else{ - comm->is_blocked=global_blocked; + m_is_blocked=global_blocked; } xbt_free(leader_list); @@ -506,7 +490,7 @@ void smpi_comm_init_smp(MPI_Comm comm){ smpi_process_set_replaying(true); } -int smpi_comm_attr_delete(MPI_Comm comm, int keyval){ +int Comm::attr_delete(int keyval){ smpi_comm_key_elem elem = static_cast(xbt_dict_get_or_null_ext(smpi_comm_keyvals, reinterpret_cast(&keyval), sizeof(int))); if(elem==nullptr) @@ -514,31 +498,31 @@ int smpi_comm_attr_delete(MPI_Comm comm, int keyval){ if(elem->delete_fn!=MPI_NULL_DELETE_FN){ void* value = nullptr; int flag; - if(smpi_comm_attr_get(comm, keyval, &value, &flag)==MPI_SUCCESS){ - int ret = elem->delete_fn(comm, keyval, value, &flag); + if(this->attr_get(keyval, &value, &flag)==MPI_SUCCESS){ + int ret = elem->delete_fn(this, keyval, value, &flag); if(ret!=MPI_SUCCESS) return ret; } } - if(comm->attributes==nullptr) + if(m_attributes==nullptr) return MPI_ERR_ARG; - xbt_dict_remove_ext(comm->attributes, reinterpret_cast(&keyval), sizeof(int)); + xbt_dict_remove_ext(m_attributes, reinterpret_cast(&keyval), sizeof(int)); return MPI_SUCCESS; } -int smpi_comm_attr_get(MPI_Comm comm, int keyval, 
void* attr_value, int* flag){ +int Comm::attr_get(int keyval, void* attr_value, int* flag){ smpi_comm_key_elem elem = static_cast(xbt_dict_get_or_null_ext(smpi_comm_keyvals, reinterpret_cast(&keyval), sizeof(int))); if(elem==nullptr) return MPI_ERR_ARG; - if(comm->attributes==nullptr){ + if(m_attributes==nullptr){ *flag=0; return MPI_SUCCESS; } try { *static_cast(attr_value) = - xbt_dict_get_ext(comm->attributes, reinterpret_cast(&keyval), sizeof(int)); + xbt_dict_get_ext(m_attributes, reinterpret_cast(&keyval), sizeof(int)); *flag=1; } catch (xbt_ex& ex) { @@ -547,7 +531,7 @@ int smpi_comm_attr_get(MPI_Comm comm, int keyval, void* attr_value, int* flag){ return MPI_SUCCESS; } -int smpi_comm_attr_put(MPI_Comm comm, int keyval, void* attr_value){ +int Comm::attr_put(int keyval, void* attr_value){ if(smpi_comm_keyvals==nullptr) smpi_comm_keyvals = xbt_dict_new_homogeneous(nullptr); smpi_comm_key_elem elem = @@ -556,19 +540,22 @@ int smpi_comm_attr_put(MPI_Comm comm, int keyval, void* attr_value){ return MPI_ERR_ARG; int flag; void* value = nullptr; - smpi_comm_attr_get(comm, keyval, &value, &flag); + this->attr_get(keyval, &value, &flag); if(flag!=0 && elem->delete_fn!=MPI_NULL_DELETE_FN){ - int ret = elem->delete_fn(comm, keyval, value, &flag); + int ret = elem->delete_fn(this, keyval, value, &flag); if(ret!=MPI_SUCCESS) return ret; } - if(comm->attributes==nullptr) - comm->attributes = xbt_dict_new_homogeneous(nullptr); + if(m_attributes==nullptr) + m_attributes = xbt_dict_new_homogeneous(nullptr); - xbt_dict_set_ext(comm->attributes, reinterpret_cast(&keyval), sizeof(int), attr_value, nullptr); + xbt_dict_set_ext(m_attributes, reinterpret_cast(&keyval), sizeof(int), attr_value, nullptr); return MPI_SUCCESS; } +} +} + int smpi_comm_keyval_create(MPI_Comm_copy_attr_function* copy_fn, MPI_Comm_delete_attr_function* delete_fn, int* keyval, void* extra_state){ if(smpi_comm_keyvals==nullptr) diff --git a/src/smpi/smpi_comm.hpp b/src/smpi/smpi_comm.hpp new file mode 
100644 index 0000000000..1b71731bdb --- /dev/null +++ b/src/smpi/smpi_comm.hpp @@ -0,0 +1,66 @@ +/* Copyright (c) 2010-2015. The SimGrid Team. + * All rights reserved. */ + +/* This program is free software; you can redistribute it and/or modify it + * under the terms of the license (GNU LGPL) which comes with this package. */ + +#ifndef SMPI_COMM_HPP_INCLUDED +#define SMPI_COMM_HPP_INCLUDED + +#include "private.h" + +namespace simgrid{ +namespace SMPI{ + +class Comm { + + private: + MPI_Group m_group; + MPIR_Topo_type m_topoType; + MPI_Topology m_topo; // to be replaced by an union + int m_refcount; + MPI_Comm m_leaders_comm;//inter-node communicator + MPI_Comm m_intra_comm;//intra-node communicator . For MPI_COMM_WORLD this can't be used, as var is global. + //use an intracomm stored in the process data instead + int* m_leaders_map; //who is the leader of each process + int m_is_uniform; + int* m_non_uniform_map; //set if smp nodes have a different number of processes allocated + int m_is_blocked;// are ranks allocated on the same smp node contiguous ? 
+ xbt_dict_t m_attributes; + + public: + + Comm(MPI_Group group, MPI_Topology topo); + + void destroy(); + int dup(MPI_Comm* newcomm); + MPI_Group group(); + MPI_Topology topo(); + int size(); + int rank(); + void get_name (char* name, int* len); + void set_leaders_comm(MPI_Comm leaders); + void set_intra_comm(MPI_Comm leaders); + int* get_non_uniform_map(); + int* get_leaders_map(); + MPI_Comm get_leaders_comm(); + MPI_Comm get_intra_comm(); + int is_uniform(); + int is_blocked(); + MPI_Comm split(int color, int key); + void use(); + void cleanup_attributes(); + void cleanup_smp(); + void unuse(); + void init_smp(); + int attr_delete(int keyval); + int attr_get(int keyval, void* attr_value, int* flag); + int attr_put(int keyval, void* attr_value); + +}; + +} +} + + +#endif diff --git a/src/smpi/smpi_deployment.cpp b/src/smpi/smpi_deployment.cpp index d9ad5ac1ff..d854e78f45 100644 --- a/src/smpi/smpi_deployment.cpp +++ b/src/smpi/smpi_deployment.cpp @@ -69,12 +69,12 @@ void smpi_deployment_register_process(const char* instance_id, int rank, int ind xbt_assert(instance, "Error, unknown instance %s", instance_id); if(instance->comm_world == MPI_COMM_NULL){ - MPI_Group group = smpi_group_new(instance->size); - instance->comm_world = smpi_comm_new(group, nullptr); + MPI_Group group = new simgrid::SMPI::Group(instance->size); + instance->comm_world = new simgrid::SMPI::Comm(group, nullptr); } instance->present_processes++; index_to_process_data[index]=instance->index+rank; - smpi_group_set_mapping(smpi_comm_group(instance->comm_world), index, rank); + instance->comm_world->group()->set_mapping(index, rank); *bar = instance->finalization_barrier; *comm = &instance->comm_world; } @@ -85,7 +85,7 @@ void smpi_deployment_cleanup_instances(){ char *name = nullptr; xbt_dict_foreach(smpi_instances, cursor, name, instance) { if(instance->comm_world!=MPI_COMM_NULL) - while (smpi_group_unuse(smpi_comm_group(instance->comm_world)) > 0); + while 
(instance->comm_world->group()->unuse() > 0); xbt_free(instance->comm_world); MSG_barrier_destroy(instance->finalization_barrier); } diff --git a/src/smpi/smpi_global.cpp b/src/smpi/smpi_global.cpp index bcc611cae3..924e89b011 100644 --- a/src/smpi/smpi_global.cpp +++ b/src/smpi/smpi_global.cpp @@ -352,9 +352,9 @@ MPI_Comm smpi_process_comm_self() { smpi_process_data_t data = smpi_process_data(); if(data->comm_self==MPI_COMM_NULL){ - MPI_Group group = smpi_group_new(1); - data->comm_self = smpi_comm_new(group, nullptr); - smpi_group_set_mapping(group, smpi_process_index(), 0); + MPI_Group group = new simgrid::SMPI::Group(1); + data->comm_self = new simgrid::SMPI::Comm(group, nullptr); + group->set_mapping(smpi_process_index(), 0); } return data->comm_self; @@ -604,13 +604,13 @@ void smpi_global_init() //if the process was launched through smpirun script we generate a global mpi_comm_world //if not, we let MPI_COMM_NULL, and the comm world will be private to each mpi instance if(smpirun){ - group = smpi_group_new(process_count); - MPI_COMM_WORLD = smpi_comm_new(group, nullptr); + group = new simgrid::SMPI::Group(process_count); + MPI_COMM_WORLD = new simgrid::SMPI::Comm(group, nullptr); MPI_Attr_put(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, reinterpret_cast(process_count)); msg_bar_t bar = MSG_barrier_init(process_count); for (i = 0; i < process_count; i++) { - smpi_group_set_mapping(group, i, i); + group->set_mapping(i, i); process_data[i]->finalization_barrier = bar; } } @@ -622,17 +622,17 @@ void smpi_global_destroy() smpi_bench_destroy(); if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){ - while (smpi_group_unuse(smpi_comm_group(MPI_COMM_WORLD)) > 0); + while (MPI_COMM_WORLD->group()->unuse() > 0); MSG_barrier_destroy(process_data[0]->finalization_barrier); }else{ smpi_deployment_cleanup_instances(); } for (int i = 0; i < count; i++) { if(process_data[i]->comm_self!=MPI_COMM_NULL){ - smpi_comm_destroy(process_data[i]->comm_self); + process_data[i]->comm_self->destroy(); } 
if(process_data[i]->comm_intra!=MPI_COMM_NULL){ - smpi_comm_destroy(process_data[i]->comm_intra); + process_data[i]->comm_intra->destroy(); } xbt_os_timer_free(process_data[i]->timer); xbt_mutex_destroy(process_data[i]->mailboxes_mutex); @@ -642,8 +642,8 @@ void smpi_global_destroy() process_data = nullptr; if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){ - smpi_comm_cleanup_smp(MPI_COMM_WORLD); - smpi_comm_cleanup_attributes(MPI_COMM_WORLD); + MPI_COMM_WORLD->cleanup_smp(); + MPI_COMM_WORLD->cleanup_attributes(); if(smpi_coll_cleanup_callback!=nullptr) smpi_coll_cleanup_callback(); xbt_free(MPI_COMM_WORLD); diff --git a/src/smpi/smpi_group.cpp b/src/smpi/smpi_group.cpp index 41eb2456a1..d92277b609 100644 --- a/src/smpi/smpi_group.cpp +++ b/src/smpi/smpi_group.cpp @@ -7,43 +7,35 @@ #include "private.h" XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_group, smpi, "Logging specific to SMPI (group)"); +simgrid::SMPI::Group mpi_MPI_GROUP_EMPTY; +MPI_Group MPI_GROUP_EMPTY=&mpi_MPI_GROUP_EMPTY; -typedef struct s_smpi_mpi_group { - int size; - int *rank_to_index_map; - xbt_dict_t index_to_rank_map; - int refcount; -} s_smpi_mpi_group_t; +namespace simgrid{ +namespace SMPI{ -static s_smpi_mpi_group_t mpi_MPI_GROUP_EMPTY = { - 0, /* size */ - nullptr, /* rank_to_index_map */ - nullptr, /* index_to_rank_map */ - 1, /* refcount: start > 0 so that this group never gets freed */ -}; - -MPI_Group MPI_GROUP_EMPTY = &mpi_MPI_GROUP_EMPTY; +Group::Group() +{ + m_size=0; /* size */ + m_rank_to_index_map=nullptr; /* m_rank_to_index_map */ + m_index_to_rank_map=nullptr; /* m_index_to_rank_map */ + m_refcount=1; /* m_refcount: start > 0 so that this group never gets freed */ +} -MPI_Group smpi_group_new(int size) +Group::Group(int n) { - MPI_Group group; int i; - group = xbt_new(s_smpi_mpi_group_t, 1); - group->size = size; - group->rank_to_index_map = xbt_new(int, size); - group->index_to_rank_map = xbt_dict_new_homogeneous(xbt_free_f); - group->refcount = 1; - for (i = 0; i < size; i++) { - 
group->rank_to_index_map[i] = MPI_UNDEFINED; + m_size = n; + m_rank_to_index_map = xbt_new(int, m_size); + m_index_to_rank_map = xbt_dict_new_homogeneous(xbt_free_f); + m_refcount = 1; + for (i = 0; i < m_size; i++) { + m_rank_to_index_map[i] = MPI_UNDEFINED; } - - return group; } -MPI_Group smpi_group_copy(MPI_Group origin) +Group::Group(MPI_Group origin) { - MPI_Group group=origin; char *key; char *ptr_rank; xbt_dict_cursor_t cursor = nullptr; @@ -52,67 +44,70 @@ MPI_Group smpi_group_copy(MPI_Group origin) if(origin != MPI_GROUP_NULL && origin != MPI_GROUP_EMPTY) { - group = xbt_new(s_smpi_mpi_group_t, 1); - group->size = origin->size; - group->rank_to_index_map = xbt_new(int, group->size); - group->index_to_rank_map = xbt_dict_new_homogeneous(xbt_free_f); - group->refcount = 1; - for (i = 0; i < group->size; i++) { - group->rank_to_index_map[i] = origin->rank_to_index_map[i]; + m_size = origin->size(); + m_rank_to_index_map = xbt_new(int, m_size); + m_index_to_rank_map = xbt_dict_new_homogeneous(xbt_free_f); + m_refcount = 1; + for (i = 0; i < m_size; i++) { + m_rank_to_index_map[i] = origin->m_rank_to_index_map[i]; } - xbt_dict_foreach(origin->index_to_rank_map, cursor, key, ptr_rank) { + xbt_dict_foreach(origin->m_index_to_rank_map, cursor, key, ptr_rank) { int * cp = static_cast(xbt_malloc(sizeof(int))); *cp=*reinterpret_cast(ptr_rank); - xbt_dict_set(group->index_to_rank_map, key, cp, nullptr); + xbt_dict_set(m_index_to_rank_map, key, cp, nullptr); } } +} - return group; +Group::~Group() +{ + xbt_free(m_rank_to_index_map); + xbt_dict_free(&m_index_to_rank_map); } -void smpi_group_destroy(MPI_Group group) +void Group::destroy() { - if(group!= smpi_comm_group(MPI_COMM_WORLD) - && group != MPI_GROUP_NULL - && group != MPI_GROUP_EMPTY) - smpi_group_unuse(group); + if(this != MPI_COMM_WORLD->group() + && this != MPI_GROUP_NULL + && this != MPI_GROUP_EMPTY) + this->unuse(); } -void smpi_group_set_mapping(MPI_Group group, int index, int rank) +void 
Group::set_mapping(int index, int rank) { int * val_rank; - if (rank < group->size) { - group->rank_to_index_map[rank] = index; + if (rank < m_size) { + m_rank_to_index_map[rank] = index; if (index!=MPI_UNDEFINED ) { val_rank = static_cast(xbt_malloc(sizeof(int))); *val_rank = rank; char * key = bprintf("%d", index); - xbt_dict_set(group->index_to_rank_map, key, val_rank, nullptr); + xbt_dict_set(m_index_to_rank_map, key, val_rank, nullptr); xbt_free(key); } } } -int smpi_group_index(MPI_Group group, int rank) +int Group::index(int rank) { int index = MPI_UNDEFINED; - if (0 <= rank && rank < group->size) { - index = group->rank_to_index_map[rank]; + if (0 <= rank && rank < m_size) { + index = m_rank_to_index_map[rank]; } return index; } -int smpi_group_rank(MPI_Group group, int index) +int Group::rank(int index) { int * ptr_rank = nullptr; - if (group==MPI_GROUP_EMPTY) + if (this==MPI_GROUP_EMPTY) return MPI_UNDEFINED; char * key = bprintf("%d", index); - ptr_rank = static_cast(xbt_dict_get_or_null(group->index_to_rank_map, key)); + ptr_rank = static_cast(xbt_dict_get_or_null(m_index_to_rank_map, key)); xbt_free(key); if (ptr_rank==nullptr) @@ -120,42 +115,40 @@ int smpi_group_rank(MPI_Group group, int index) return *ptr_rank; } -int smpi_group_use(MPI_Group group) +int Group::use() { - group->refcount++; - return group->refcount; + m_refcount++; + return m_refcount; } -int smpi_group_unuse(MPI_Group group) +int Group::unuse() { - group->refcount--; - if (group->refcount <= 0) { - xbt_free(group->rank_to_index_map); - xbt_dict_free(&group->index_to_rank_map); - xbt_free(group); + m_refcount--; + if (m_refcount <= 0) { + delete this; return 0; } - return group->refcount; + return m_refcount; } -int smpi_group_size(MPI_Group group) +int Group::size() { - return group->size; + return m_size; } -int smpi_group_compare(MPI_Group group1, MPI_Group group2) +int Group::compare(MPI_Group group2) { int result; - int i, index, rank, size; + int i, index, rank, sz; result = 
MPI_IDENT; - if (smpi_group_size(group1) != smpi_group_size(group2)) { + if (m_size != group2->size()) { result = MPI_UNEQUAL; } else { - size = smpi_group_size(group2); - for (i = 0; i < size; i++) { - index = smpi_group_index(group1, i); - rank = smpi_group_rank(group2, index); + sz = group2->size(); + for (i = 0; i < sz; i++) { + index = this->index(i); + rank = group2->rank(index); if (rank == MPI_UNDEFINED) { result = MPI_UNEQUAL; break; @@ -168,24 +161,233 @@ int smpi_group_compare(MPI_Group group1, MPI_Group group2) return result; } -int smpi_group_incl(MPI_Group group, int n, int* ranks, MPI_Group* newgroup) +int Group::incl(int n, int* ranks, MPI_Group* newgroup) { int i=0, index=0; if (n == 0) { *newgroup = MPI_GROUP_EMPTY; - } else if (n == smpi_group_size(group)) { - *newgroup = group; - if(group!= smpi_comm_group(MPI_COMM_WORLD) - && group != MPI_GROUP_NULL - && group != smpi_comm_group(MPI_COMM_SELF) - && group != MPI_GROUP_EMPTY) - smpi_group_use(group); + } else if (n == m_size) { + *newgroup = this; + if(this!= MPI_COMM_WORLD->group() + && this != MPI_GROUP_NULL + && this != MPI_COMM_SELF->group() + && this != MPI_GROUP_EMPTY) + this->use(); } else { - *newgroup = smpi_group_new(n); + *newgroup = new Group(n); for (i = 0; i < n; i++) { - index = smpi_group_index(group, ranks[i]); - smpi_group_set_mapping(*newgroup, index, i); + index = this->index(ranks[i]); + (*newgroup)->set_mapping(index, i); + } + } + return MPI_SUCCESS; +} + +int Group::group_union(MPI_Group group2, MPI_Group* newgroup) +{ + int size1 = m_size; + int size2 = group2->size(); + for (int i = 0; i < size2; i++) { + int proc2 = group2->index(i); + int proc1 = this->rank(proc2); + if (proc1 == MPI_UNDEFINED) { + size1++; + } + } + if (size1 == 0) { + *newgroup = MPI_GROUP_EMPTY; + } else { + *newgroup = new simgrid::SMPI::Group(size1); + size2 = this->size(); + for (int i = 0; i < size2; i++) { + int proc1 = this->index(i); + (*newgroup)->set_mapping(proc1, i); + } + for (int i = 
size2; i < size1; i++) { + int proc2 = group2->index(i - size2); + (*newgroup)->set_mapping(proc2, i); + } + } + return MPI_SUCCESS; +} + +int Group::intersection(MPI_Group group2, MPI_Group* newgroup) +{ + int size2 = group2->size(); + for (int i = 0; i < size2; i++) { + int proc2 = group2->index(i); + int proc1 = this->rank(proc2); + if (proc1 == MPI_UNDEFINED) { + size2--; + } + } + if (size2 == 0) { + *newgroup = MPI_GROUP_EMPTY; + } else { + *newgroup = new simgrid::SMPI::Group(size2); + int j=0; + for (int i = 0; i < group2->size(); i++) { + int proc2 = group2->index(i); + int proc1 = this->rank(proc2); + if (proc1 != MPI_UNDEFINED) { + (*newgroup)->set_mapping(proc2, j); + j++; + } } } return MPI_SUCCESS; } + +int Group::difference(MPI_Group group2, MPI_Group* newgroup) +{ + int newsize = m_size; + int size2 = m_size; + for (int i = 0; i < size2; i++) { + int proc1 = this->index(i); + int proc2 = group2->rank(proc1); + if (proc2 != MPI_UNDEFINED) { + newsize--; + } + } + if (newsize == 0) { + *newgroup = MPI_GROUP_EMPTY; + } else { + *newgroup = new simgrid::SMPI::Group(newsize); + for (int i = 0; i < size2; i++) { + int proc1 = this->index(i); + int proc2 = group2->rank(proc1); + if (proc2 == MPI_UNDEFINED) { + (*newgroup)->set_mapping(proc1, i); + } + } + } + return MPI_SUCCESS; +} + +int Group::excl(int n, int *ranks, MPI_Group * newgroup){ + int oldsize = m_size; + int newsize = oldsize - n; + *newgroup = new simgrid::SMPI::Group(newsize); + int* to_exclude=xbt_new0(int, m_size); + for (int i = 0; i < oldsize; i++) + to_exclude[i]=0; + for (int i = 0; i < n; i++) + to_exclude[ranks[i]]=1; + int j = 0; + for (int i = 0; i < oldsize; i++) { + if(to_exclude[i]==0){ + int index = this->index(i); + (*newgroup)->set_mapping(index, j); + j++; + } + } + xbt_free(to_exclude); + return MPI_SUCCESS; + +} + +int Group::range_incl(int n, int ranges[][3], MPI_Group * newgroup){ + int newsize = 0; + for (int i = 0; i < n; i++) { + for (int rank = ranges[i][0]; /* First 
*/ + rank >= 0 && rank < m_size; /* Last */ + ) { + newsize++; + if(rank == ranges[i][1]){/*already last ?*/ + break; + } + rank += ranges[i][2]; /* Stride */ + if (ranges[i][0] < ranges[i][1]) { + if (rank > ranges[i][1]) + break; + } else { + if (rank < ranges[i][1]) + break; + } + } + } + *newgroup = new simgrid::SMPI::Group(newsize); + int j = 0; + for (int i = 0; i < n; i++) { + for (int rank = ranges[i][0]; /* First */ + rank >= 0 && rank < m_size; /* Last */ + ) { + int index = this->index(rank); + (*newgroup)->set_mapping(index, j); + j++; + if(rank == ranges[i][1]){/*already last ?*/ + break; + } + rank += ranges[i][2]; /* Stride */ + if (ranges[i][0] < ranges[i][1]) { + if (rank > ranges[i][1]) + break; + } else { + if (rank < ranges[i][1]) + break; + } + } + } + return MPI_SUCCESS; +} + +int Group::range_excl(int n, int ranges[][3], MPI_Group * newgroup){ + int newsize = m_size; + for (int i = 0; i < n; i++) { + for (int rank = ranges[i][0]; /* First */ + rank >= 0 && rank < m_size; /* Last */ + ) { + newsize--; + if(rank == ranges[i][1]){/*already last ?*/ + break; + } + rank += ranges[i][2]; /* Stride */ + if (ranges[i][0] < ranges[i][1]) { + if (rank > ranges[i][1]) + break; + } else { + if (rank < ranges[i][1]) + break; + } + } + } + if (newsize == 0) { + *newgroup = MPI_GROUP_EMPTY; + } else { + *newgroup = new simgrid::SMPI::Group(newsize); + int newrank = 0; + int oldrank = 0; + while (newrank < newsize) { + int add = 1; + for (int i = 0; i < n; i++) { + for (int rank = ranges[i][0]; rank >= 0 && rank < m_size;) { + if(rank==oldrank){ + add = 0; + break; + } + if(rank == ranges[i][1]){/*already last ?*/ + break; + } + rank += ranges[i][2]; /* Stride */ + if (ranges[i][0] ranges[i][1]) + break; + }else{ + if (rank < ranges[i][1]) + break; + } + } + } + if(add==1){ + int index = this->index(oldrank); + (*newgroup)->set_mapping(index, newrank); + newrank++; + } + oldrank++; + } + } + return MPI_SUCCESS; +} + +} +} diff --git a/src/smpi/smpi_group.hpp 
b/src/smpi/smpi_group.hpp new file mode 100644 index 0000000000..466d7c2f47 --- /dev/null +++ b/src/smpi/smpi_group.hpp @@ -0,0 +1,47 @@ +/* Copyright (c) 2010, 2013-2015. The SimGrid Team. + * All rights reserved. */ + +/* This program is free software; you can redistribute it and/or modify it + * under the terms of the license (GNU LGPL) which comes with this package. */ + +#ifndef SMPI_GROUP_HPP_INCLUDED +#define SMPI_GROUP_HPP_INCLUDED + +#include "private.h" + +namespace simgrid{ +namespace SMPI{ + +class Group { + private: + int m_size; + int *m_rank_to_index_map; + xbt_dict_t m_index_to_rank_map; + int m_refcount; + public: + + Group(); + Group(int size); + Group(Group* origin); + ~Group(); + + void destroy(); + void set_mapping(int index, int rank); + int index(int rank); + int rank(int index); + int use(); + int unuse(); + int size(); + int compare(MPI_Group group2); + int incl(int n, int* ranks, MPI_Group* newgroup); + int excl(int n, int *ranks, MPI_Group * newgroup); + int group_union(MPI_Group group2, MPI_Group* newgroup); + int intersection(MPI_Group group2, MPI_Group* newgroup); + int difference(MPI_Group group2, MPI_Group* newgroup); + int range_incl(int n, int ranges[][3], MPI_Group * newgroup); + int range_excl(int n, int ranges[][3], MPI_Group * newgroup); +}; +} +} + +#endif diff --git a/src/smpi/smpi_pmpi.cpp b/src/smpi/smpi_pmpi.cpp index 658588d36f..57075dfe69 100644 --- a/src/smpi/smpi_pmpi.cpp +++ b/src/smpi/smpi_pmpi.cpp @@ -270,7 +270,7 @@ int PMPI_Group_free(MPI_Group * group) if (group == nullptr) { return MPI_ERR_ARG; } else { - smpi_group_destroy(*group); + (*group)->destroy(); *group = MPI_GROUP_NULL; return MPI_SUCCESS; } @@ -283,7 +283,7 @@ int PMPI_Group_size(MPI_Group group, int *size) } else if (size == nullptr) { return MPI_ERR_ARG; } else { - *size = smpi_group_size(group); + *size = group->size(); return MPI_SUCCESS; } } @@ -295,7 +295,7 @@ int PMPI_Group_rank(MPI_Group group, int *rank) } else if (rank == nullptr) { return 
MPI_ERR_ARG; } else { - *rank = smpi_group_rank(group, smpi_process_index()); + *rank = group->rank(smpi_process_index()); return MPI_SUCCESS; } } @@ -309,8 +309,8 @@ int PMPI_Group_translate_ranks(MPI_Group group1, int n, int *ranks1, MPI_Group g if(ranks1[i]==MPI_PROC_NULL){ ranks2[i]=MPI_PROC_NULL; }else{ - int index = smpi_group_index(group1, ranks1[i]); - ranks2[i] = smpi_group_rank(group2, index); + int index = group1->index(ranks1[i]); + ranks2[i] = group2->rank(index); } } return MPI_SUCCESS; @@ -324,7 +324,7 @@ int PMPI_Group_compare(MPI_Group group1, MPI_Group group2, int *result) } else if (result == nullptr) { return MPI_ERR_ARG; } else { - *result = smpi_group_compare(group1, group2); + *result = group1->compare(group2); return MPI_SUCCESS; } } @@ -337,30 +337,7 @@ int PMPI_Group_union(MPI_Group group1, MPI_Group group2, MPI_Group * newgroup) } else if (newgroup == nullptr) { return MPI_ERR_ARG; } else { - int size = smpi_group_size(group1); - int size2 = smpi_group_size(group2); - for (int i = 0; i < size2; i++) { - int proc2 = smpi_group_index(group2, i); - int proc1 = smpi_group_rank(group1, proc2); - if (proc1 == MPI_UNDEFINED) { - size++; - } - } - if (size == 0) { - *newgroup = MPI_GROUP_EMPTY; - } else { - *newgroup = smpi_group_new(size); - size2 = smpi_group_size(group1); - for (int i = 0; i < size2; i++) { - int proc1 = smpi_group_index(group1, i); - smpi_group_set_mapping(*newgroup, proc1, i); - } - for (int i = size2; i < size; i++) { - int proc2 = smpi_group_index(group2, i - size2); - smpi_group_set_mapping(*newgroup, proc2, i); - } - } - return MPI_SUCCESS; + return group1->group_union(group2, newgroup); } } @@ -372,29 +349,7 @@ int PMPI_Group_intersection(MPI_Group group1, MPI_Group group2, MPI_Group * newg } else if (newgroup == nullptr) { return MPI_ERR_ARG; } else { - int size = smpi_group_size(group2); - for (int i = 0; i < size; i++) { - int proc2 = smpi_group_index(group2, i); - int proc1 = smpi_group_rank(group1, proc2); - if 
(proc1 == MPI_UNDEFINED) { - size--; - } - } - if (size == 0) { - *newgroup = MPI_GROUP_EMPTY; - } else { - *newgroup = smpi_group_new(size); - int j=0; - for (int i = 0; i < smpi_group_size(group2); i++) { - int proc2 = smpi_group_index(group2, i); - int proc1 = smpi_group_rank(group1, proc2); - if (proc1 != MPI_UNDEFINED) { - smpi_group_set_mapping(*newgroup, proc2, j); - j++; - } - } - } - return MPI_SUCCESS; + return group1->intersection(group2,newgroup); } } @@ -405,28 +360,7 @@ int PMPI_Group_difference(MPI_Group group1, MPI_Group group2, MPI_Group * newgro } else if (newgroup == nullptr) { return MPI_ERR_ARG; } else { - int size = smpi_group_size(group1); - int size2 = size; - for (int i = 0; i < size2; i++) { - int proc1 = smpi_group_index(group1, i); - int proc2 = smpi_group_rank(group2, proc1); - if (proc2 != MPI_UNDEFINED) { - size--; - } - } - if (size == 0) { - *newgroup = MPI_GROUP_EMPTY; - } else { - *newgroup = smpi_group_new(size); - for (int i = 0; i < size2; i++) { - int proc1 = smpi_group_index(group1, i); - int proc2 = smpi_group_rank(group2, proc1); - if (proc2 == MPI_UNDEFINED) { - smpi_group_set_mapping(*newgroup, proc1, i); - } - } - } - return MPI_SUCCESS; + return group1->difference(group2,newgroup); } } @@ -437,7 +371,7 @@ int PMPI_Group_incl(MPI_Group group, int n, int *ranks, MPI_Group * newgroup) } else if (newgroup == nullptr) { return MPI_ERR_ARG; } else { - return smpi_group_incl(group, n, ranks, newgroup); + return group->incl(n, ranks, newgroup); } } @@ -450,34 +384,16 @@ int PMPI_Group_excl(MPI_Group group, int n, int *ranks, MPI_Group * newgroup) } else { if (n == 0) { *newgroup = group; - if (group != smpi_comm_group(MPI_COMM_WORLD) - && group != smpi_comm_group(MPI_COMM_SELF) && group != MPI_GROUP_EMPTY) - smpi_group_use(group); - } else if (n == smpi_group_size(group)) { + if (group != MPI_COMM_WORLD->group() + && group != MPI_COMM_SELF->group() && group != MPI_GROUP_EMPTY) + group->use(); + return MPI_SUCCESS; + } else if 
(n == group->size()) { *newgroup = MPI_GROUP_EMPTY; + return MPI_SUCCESS; } else { - int oldsize = smpi_group_size(group); - int newsize = oldsize - n; - *newgroup = smpi_group_new(newsize); - - int* to_exclude=xbt_new0(int, smpi_group_size(group)); - for (int i = 0; i < oldsize; i++) - to_exclude[i]=0; - for (int i = 0; i < n; i++) - to_exclude[ranks[i]]=1; - - int j = 0; - for (int i = 0; i < oldsize; i++) { - if(to_exclude[i]==0){ - int index = smpi_group_index(group, i); - smpi_group_set_mapping(*newgroup, index, j); - j++; - } - } - - xbt_free(to_exclude); + return group->excl(n,ranks,newgroup); } - return MPI_SUCCESS; } } @@ -490,51 +406,10 @@ int PMPI_Group_range_incl(MPI_Group group, int n, int ranges[][3], MPI_Group * n } else { if (n == 0) { *newgroup = MPI_GROUP_EMPTY; + return MPI_SUCCESS; } else { - int size = 0; - for (int i = 0; i < n; i++) { - for (int rank = ranges[i][0]; /* First */ - rank >= 0 && rank < smpi_group_size(group); /* Last */ - ) { - size++; - if(rank == ranges[i][1]){/*already last ?*/ - break; - } - rank += ranges[i][2]; /* Stride */ - if (ranges[i][0] < ranges[i][1]) { - if (rank > ranges[i][1]) - break; - } else { - if (rank < ranges[i][1]) - break; - } - } - } - - *newgroup = smpi_group_new(size); - int j = 0; - for (int i = 0; i < n; i++) { - for (int rank = ranges[i][0]; /* First */ - rank >= 0 && rank < smpi_group_size(group); /* Last */ - ) { - int index = smpi_group_index(group, rank); - smpi_group_set_mapping(*newgroup, index, j); - j++; - if(rank == ranges[i][1]){/*already last ?*/ - break; - } - rank += ranges[i][2]; /* Stride */ - if (ranges[i][0] < ranges[i][1]) { - if (rank > ranges[i][1]) - break; - } else { - if (rank < ranges[i][1]) - break; - } - } - } + return group->range_incl(n,ranges,newgroup); } - return MPI_SUCCESS; } } @@ -547,66 +422,13 @@ int PMPI_Group_range_excl(MPI_Group group, int n, int ranges[][3], MPI_Group * n } else { if (n == 0) { *newgroup = group; - if (group != smpi_comm_group(MPI_COMM_WORLD) 
&& group != smpi_comm_group(MPI_COMM_SELF) && + if (group != MPI_COMM_WORLD->group() && group != MPI_COMM_SELF->group() && group != MPI_GROUP_EMPTY) - smpi_group_use(group); + group->use(); + return MPI_SUCCESS; } else { - int size = smpi_group_size(group); - for (int i = 0; i < n; i++) { - for (int rank = ranges[i][0]; /* First */ - rank >= 0 && rank < smpi_group_size(group); /* Last */ - ) { - size--; - if(rank == ranges[i][1]){/*already last ?*/ - break; - } - rank += ranges[i][2]; /* Stride */ - if (ranges[i][0] < ranges[i][1]) { - if (rank > ranges[i][1]) - break; - } else { - if (rank < ranges[i][1]) - break; - } - } - } - if (size == 0) { - *newgroup = MPI_GROUP_EMPTY; - } else { - *newgroup = smpi_group_new(size); - int newrank = 0; - int oldrank = 0; - while (newrank < size) { - int add = 1; - for (int i = 0; i < n; i++) { - for (int rank = ranges[i][0]; rank >= 0 && rank < smpi_group_size(group);) { - if(rank==oldrank){ - add = 0; - break; - } - if(rank == ranges[i][1]){/*already last ?*/ - break; - } - rank += ranges[i][2]; /* Stride */ - if (ranges[i][0] ranges[i][1]) - break; - }else{ - if (rank < ranges[i][1]) - break; - } - } - } - if(add==1){ - int index = smpi_group_index(group, oldrank); - smpi_group_set_mapping(*newgroup, index, newrank); - newrank++; - } - oldrank++; - } - } + return group->range_excl(n,ranges,newgroup); } - return MPI_SUCCESS; } } @@ -617,7 +439,7 @@ int PMPI_Comm_rank(MPI_Comm comm, int *rank) } else if (rank == nullptr) { return MPI_ERR_ARG; } else { - *rank = smpi_comm_rank(comm); + *rank = comm->rank(); return MPI_SUCCESS; } } @@ -629,7 +451,7 @@ int PMPI_Comm_size(MPI_Comm comm, int *size) } else if (size == nullptr) { return MPI_ERR_ARG; } else { - *size = smpi_comm_size(comm); + *size = comm->size(); return MPI_SUCCESS; } } @@ -641,7 +463,7 @@ int PMPI_Comm_get_name (MPI_Comm comm, char* name, int* len) } else if (name == nullptr || len == nullptr) { return MPI_ERR_ARG; } else { - smpi_comm_get_name(comm, name, len); + 
comm->get_name(name, len); return MPI_SUCCESS; } } @@ -653,9 +475,9 @@ int PMPI_Comm_group(MPI_Comm comm, MPI_Group * group) } else if (group == nullptr) { return MPI_ERR_ARG; } else { - *group = smpi_comm_group(comm); - if (*group != smpi_comm_group(MPI_COMM_WORLD) && *group != MPI_GROUP_NULL && *group != MPI_GROUP_EMPTY) - smpi_group_use(*group); + *group = comm->group(); + if (*group != MPI_COMM_WORLD->group() && *group != MPI_GROUP_NULL && *group != MPI_GROUP_EMPTY) + (*group)->use(); return MPI_SUCCESS; } } @@ -670,7 +492,7 @@ int PMPI_Comm_compare(MPI_Comm comm1, MPI_Comm comm2, int *result) if (comm1 == comm2) { /* Same communicators means same groups */ *result = MPI_IDENT; } else { - *result = smpi_group_compare(smpi_comm_group(comm1), smpi_comm_group(comm2)); + *result = comm1->group()->compare(comm2->group()); if (*result == MPI_IDENT) { *result = MPI_CONGRUENT; } @@ -686,7 +508,7 @@ int PMPI_Comm_dup(MPI_Comm comm, MPI_Comm * newcomm) } else if (newcomm == nullptr) { return MPI_ERR_ARG; } else { - return smpi_comm_dup(comm, newcomm); + return comm->dup(newcomm); } } @@ -698,12 +520,12 @@ int PMPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm * newcomm) return MPI_ERR_GROUP; } else if (newcomm == nullptr) { return MPI_ERR_ARG; - } else if(smpi_group_rank(group,smpi_process_index())==MPI_UNDEFINED){ + } else if(group->rank(smpi_process_index())==MPI_UNDEFINED){ *newcomm= MPI_COMM_NULL; return MPI_SUCCESS; }else{ - smpi_group_use(group); - *newcomm = smpi_comm_new(group, nullptr); + group->use(); + *newcomm = new simgrid::SMPI::Comm(group, nullptr); return MPI_SUCCESS; } } @@ -715,7 +537,7 @@ int PMPI_Comm_free(MPI_Comm * comm) } else if (*comm == MPI_COMM_NULL) { return MPI_ERR_COMM; } else { - smpi_comm_destroy(*comm); + (*comm)->destroy(); *comm = MPI_COMM_NULL; return MPI_SUCCESS; } @@ -729,7 +551,7 @@ int PMPI_Comm_disconnect(MPI_Comm * comm) } else if (*comm == MPI_COMM_NULL) { return MPI_ERR_COMM; } else { - smpi_comm_destroy(*comm); + 
(*comm)->destroy(); *comm = MPI_COMM_NULL; return MPI_SUCCESS; } @@ -745,7 +567,7 @@ int PMPI_Comm_split(MPI_Comm comm, int color, int key, MPI_Comm* comm_out) } else if (comm == MPI_COMM_NULL) { retval = MPI_ERR_COMM; } else { - *comm_out = smpi_comm_split(comm, color, key); + *comm_out = comm->split(color, key); retval = MPI_SUCCESS; } smpi_bench_begin(); @@ -903,7 +725,7 @@ int PMPI_Irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MP } else if (src == MPI_PROC_NULL) { *request = MPI_REQUEST_NULL; retval = MPI_SUCCESS; - } else if (src!=MPI_ANY_SOURCE && (src >= smpi_group_size(smpi_comm_group(comm)) || src <0)){ + } else if (src!=MPI_ANY_SOURCE && (src >= comm->group()->size() || src <0)){ retval = MPI_ERR_RANK; } else if ((count < 0) || (buf==nullptr && count > 0)) { retval = MPI_ERR_COUNT; @@ -914,7 +736,7 @@ int PMPI_Irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MP } else { int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1; - int src_traced = smpi_group_index(smpi_comm_group(comm), src); + int src_traced = comm->group()->index(src); instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); extra->type = TRACING_IRECV; @@ -954,7 +776,7 @@ int PMPI_Isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MP } else if (dst == MPI_PROC_NULL) { *request = MPI_REQUEST_NULL; retval = MPI_SUCCESS; - } else if (dst >= smpi_group_size(smpi_comm_group(comm)) || dst <0){ + } else if (dst >= comm->group()->size() || dst <0){ retval = MPI_ERR_RANK; } else if ((count < 0) || (buf==nullptr && count > 0)) { retval = MPI_ERR_COUNT; @@ -964,7 +786,7 @@ int PMPI_Isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MP retval = MPI_ERR_TAG; } else { int rank = comm != MPI_COMM_NULL ? 
smpi_process_index() : -1; - int dst_traced = smpi_group_index(smpi_comm_group(comm), dst); + int dst_traced = comm->group()->index(dst); instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); extra->type = TRACING_ISEND; extra->src = rank; @@ -1003,7 +825,7 @@ int PMPI_Issend(void* buf, int count, MPI_Datatype datatype, int dst, int tag, M } else if (dst == MPI_PROC_NULL) { *request = MPI_REQUEST_NULL; retval = MPI_SUCCESS; - } else if (dst >= smpi_group_size(smpi_comm_group(comm)) || dst <0){ + } else if (dst >= comm->group()->size() || dst <0){ retval = MPI_ERR_RANK; } else if ((count < 0)|| (buf==nullptr && count > 0)) { retval = MPI_ERR_COUNT; @@ -1013,7 +835,7 @@ int PMPI_Issend(void* buf, int count, MPI_Datatype datatype, int dst, int tag, M retval = MPI_ERR_TAG; } else { int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1; - int dst_traced = smpi_group_index(smpi_comm_group(comm), dst); + int dst_traced = comm->group()->index(dst); instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); extra->type = TRACING_ISSEND; extra->src = rank; @@ -1051,7 +873,7 @@ int PMPI_Recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI smpi_empty_status(status); status->MPI_SOURCE = MPI_PROC_NULL; retval = MPI_SUCCESS; - } else if (src!=MPI_ANY_SOURCE && (src >= smpi_group_size(smpi_comm_group(comm)) || src <0)){ + } else if (src!=MPI_ANY_SOURCE && (src >= comm->group()->size() || src <0)){ retval = MPI_ERR_RANK; } else if ((count < 0) || (buf==nullptr && count > 0)) { retval = MPI_ERR_COUNT; @@ -1061,7 +883,7 @@ int PMPI_Recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI retval = MPI_ERR_TAG; } else { int rank = comm != MPI_COMM_NULL ? 
smpi_process_index() : -1; - int src_traced = smpi_group_index(smpi_comm_group(comm), src); + int src_traced = comm->group()->index(src); instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); extra->type = TRACING_RECV; extra->src = src_traced; @@ -1079,7 +901,7 @@ int PMPI_Recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI // the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE) if (status != MPI_STATUS_IGNORE) { - src_traced = smpi_group_index(smpi_comm_group(comm), status->MPI_SOURCE); + src_traced = comm->group()->index(status->MPI_SOURCE); if (!TRACE_smpi_view_internals()) { TRACE_smpi_recv(rank, src_traced, rank, tag); } @@ -1101,7 +923,7 @@ int PMPI_Send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI retval = MPI_ERR_COMM; } else if (dst == MPI_PROC_NULL) { retval = MPI_SUCCESS; - } else if (dst >= smpi_group_size(smpi_comm_group(comm)) || dst <0){ + } else if (dst >= comm->group()->size() || dst <0){ retval = MPI_ERR_RANK; } else if ((count < 0) || (buf == nullptr && count > 0)) { retval = MPI_ERR_COUNT; @@ -1111,7 +933,7 @@ int PMPI_Send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI retval = MPI_ERR_TAG; } else { int rank = comm != MPI_COMM_NULL ? 
smpi_process_index() : -1; - int dst_traced = smpi_group_index(smpi_comm_group(comm), dst); + int dst_traced = comm->group()->index(dst); instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); extra->type = TRACING_SEND; extra->src = rank; @@ -1147,7 +969,7 @@ int PMPI_Ssend(void* buf, int count, MPI_Datatype datatype, int dst, int tag, MP retval = MPI_ERR_COMM; } else if (dst == MPI_PROC_NULL) { retval = MPI_SUCCESS; - } else if (dst >= smpi_group_size(smpi_comm_group(comm)) || dst <0){ + } else if (dst >= comm->group()->size() || dst <0){ retval = MPI_ERR_RANK; } else if ((count < 0) || (buf==nullptr && count > 0)) { retval = MPI_ERR_COUNT; @@ -1157,7 +979,7 @@ int PMPI_Ssend(void* buf, int count, MPI_Datatype datatype, int dst, int tag, MP retval = MPI_ERR_TAG; } else { int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1; - int dst_traced = smpi_group_index(smpi_comm_group(comm), dst); + int dst_traced = comm->group()->index(dst); instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); extra->type = TRACING_SSEND; extra->src = rank; @@ -1197,8 +1019,8 @@ int PMPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dst, smpi_empty_status(status); status->MPI_SOURCE = MPI_PROC_NULL; retval = MPI_SUCCESS; - }else if (dst >= smpi_group_size(smpi_comm_group(comm)) || dst <0 || - (src!=MPI_ANY_SOURCE && (src >= smpi_group_size(smpi_comm_group(comm)) || src <0))){ + }else if (dst >= comm->group()->size() || dst <0 || + (src!=MPI_ANY_SOURCE && (src >= comm->group()->size() || src <0))){ retval = MPI_ERR_RANK; } else if ((sendcount < 0 || recvcount<0) || (sendbuf==nullptr && sendcount > 0) || (recvbuf==nullptr && recvcount>0)) { @@ -1208,8 +1030,8 @@ int PMPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dst, } else { int rank = comm != MPI_COMM_NULL ? 
smpi_process_index() : -1; - int dst_traced = smpi_group_index(smpi_comm_group(comm), dst); - int src_traced = smpi_group_index(smpi_comm_group(comm), src); + int dst_traced = comm->group()->index(dst); + int src_traced = comm->group()->index(src); instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); extra->type = TRACING_SENDRECV; extra->src = src_traced; @@ -1392,7 +1214,7 @@ int PMPI_Wait(MPI_Request * request, MPI_Status * status) if (is_wait_for_receive) { if(src_traced==MPI_ANY_SOURCE) src_traced = (status!=MPI_STATUS_IGNORE) ? - smpi_group_rank(smpi_comm_group(comm), status->MPI_SOURCE) : + comm->group()->rank(status->MPI_SOURCE) : src_traced; TRACE_smpi_recv(rank, src_traced, dst_traced, tag_traced); } @@ -1442,7 +1264,7 @@ int PMPI_Waitany(int count, MPI_Request requests[], int *index, MPI_Status * sta if (is_wait_for_receive) { if(savedvals[*index].src==MPI_ANY_SOURCE) src_traced = (status != MPI_STATUSES_IGNORE) - ? smpi_group_rank(smpi_comm_group(savedvals[*index].comm), status->MPI_SOURCE) + ? savedvals[*index].comm->group()->rank(status->MPI_SOURCE) : savedvals[*index].src; TRACE_smpi_recv(rank_traced, src_traced, dst_traced, savedvals[*index].tag); } @@ -1493,7 +1315,7 @@ int PMPI_Waitall(int count, MPI_Request requests[], MPI_Status status[]) if (is_wait_for_receive) { if(src_traced==MPI_ANY_SOURCE) src_traced = (status!=MPI_STATUSES_IGNORE) ? - smpi_group_rank(smpi_comm_group(savedvals[i].comm), status[i].MPI_SOURCE) : savedvals[i].src; + savedvals[i].comm->group()->rank(status[i].MPI_SOURCE) : savedvals[i].src; TRACE_smpi_recv(rank_traced, src_traced, dst_traced,savedvals[i].tag); } } @@ -1548,7 +1370,7 @@ int PMPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm c retval = MPI_ERR_ARG; } else { int rank = comm != MPI_COMM_NULL ? 
smpi_process_index() : -1; - int root_traced = smpi_group_index(smpi_comm_group(comm), root); + int root_traced = comm->group()->index(root); instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); extra->type = TRACING_BCAST; @@ -1560,7 +1382,7 @@ int PMPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm c dt_size_send = smpi_datatype_size(datatype); extra->send_size = count * dt_size_send; TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra); - if (smpi_comm_size(comm) > 1) + if (comm->size() > 1) mpi_coll_bcast_fun(buf, count, datatype, root, comm); retval = MPI_SUCCESS; @@ -1604,21 +1426,21 @@ int PMPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,void *recvbu if (comm == MPI_COMM_NULL) { retval = MPI_ERR_COMM; } else if ((( sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) || - ((smpi_comm_rank(comm) == root) && (recvtype == MPI_DATATYPE_NULL))){ + ((comm->rank() == root) && (recvtype == MPI_DATATYPE_NULL))){ retval = MPI_ERR_TYPE; - } else if ((( sendbuf != MPI_IN_PLACE) && (sendcount <0)) || ((smpi_comm_rank(comm) == root) && (recvcount <0))){ + } else if ((( sendbuf != MPI_IN_PLACE) && (sendcount <0)) || ((comm->rank() == root) && (recvcount <0))){ retval = MPI_ERR_COUNT; } else { char* sendtmpbuf = static_cast(sendbuf); int sendtmpcount = sendcount; MPI_Datatype sendtmptype = sendtype; - if( (smpi_comm_rank(comm) == root) && (sendbuf == MPI_IN_PLACE )) { + if( (comm->rank() == root) && (sendbuf == MPI_IN_PLACE )) { sendtmpcount=0; sendtmptype=recvtype; } int rank = comm != MPI_COMM_NULL ? 
smpi_process_index() : -1; - int root_traced = smpi_group_index(smpi_comm_group(comm), root); + int root_traced = comm->group()->index(root); instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); extra->type = TRACING_GATHER; extra->root = root_traced; @@ -1630,7 +1452,7 @@ int PMPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,void *recvbu extra->send_size = sendtmpcount * dt_size_send; extra->datatype2 = encode_datatype(recvtype, &known); int dt_size_recv = 1; - if ((smpi_comm_rank(comm) == root) && known == 0) + if ((comm->rank() == root) && known == 0) dt_size_recv = smpi_datatype_size(recvtype); extra->recv_size = recvcount * dt_size_recv; @@ -1656,7 +1478,7 @@ int PMPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recv if (comm == MPI_COMM_NULL) { retval = MPI_ERR_COMM; } else if ((( sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) || - ((smpi_comm_rank(comm) == root) && (recvtype == MPI_DATATYPE_NULL))){ + ((comm->rank() == root) && (recvtype == MPI_DATATYPE_NULL))){ retval = MPI_ERR_TYPE; } else if (( sendbuf != MPI_IN_PLACE) && (sendcount <0)){ retval = MPI_ERR_COUNT; @@ -1666,15 +1488,15 @@ int PMPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recv char* sendtmpbuf = static_cast(sendbuf); int sendtmpcount = sendcount; MPI_Datatype sendtmptype = sendtype; - if( (smpi_comm_rank(comm) == root) && (sendbuf == MPI_IN_PLACE )) { + if( (comm->rank() == root) && (sendbuf == MPI_IN_PLACE )) { sendtmpcount=0; sendtmptype=recvtype; } int rank = comm != MPI_COMM_NULL ? 
smpi_process_index() : -1; - int root_traced = smpi_group_index(smpi_comm_group(comm), root); + int root_traced = comm->group()->index(root); int i = 0; - int size = smpi_comm_size(comm); + int size = comm->size(); instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); extra->type = TRACING_GATHERV; extra->num_processes = size; @@ -1689,7 +1511,7 @@ int PMPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recv int dt_size_recv = 1; if (known == 0) dt_size_recv = smpi_datatype_size(recvtype); - if ((smpi_comm_rank(comm) == root)) { + if ((comm->rank() == root)) { extra->recvcounts = xbt_new(int, size); for (i = 0; i < size; i++) // copy data to avoid bad free extra->recvcounts[i] = recvcounts[i] * dt_size_recv; @@ -1722,7 +1544,7 @@ int PMPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype, retval = MPI_ERR_COUNT; } else { if(sendbuf == MPI_IN_PLACE) { - sendbuf=static_cast(recvbuf)+smpi_datatype_get_extent(recvtype)*recvcount*smpi_comm_rank(comm); + sendbuf=static_cast(recvbuf)+smpi_datatype_get_extent(recvtype)*recvcount*comm->rank(); sendcount=recvcount; sendtype=recvtype; } @@ -1769,13 +1591,13 @@ int PMPI_Allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, } else { if(sendbuf == MPI_IN_PLACE) { - sendbuf=static_cast(recvbuf)+smpi_datatype_get_extent(recvtype)*displs[smpi_comm_rank(comm)]; - sendcount=recvcounts[smpi_comm_rank(comm)]; + sendbuf=static_cast(recvbuf)+smpi_datatype_get_extent(recvtype)*displs[comm->rank()]; + sendcount=recvcounts[comm->rank()]; sendtype=recvtype; } int rank = comm != MPI_COMM_NULL ? 
smpi_process_index() : -1; int i = 0; - int size = smpi_comm_size(comm); + int size = comm->size(); instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); extra->type = TRACING_ALLGATHERV; extra->num_processes = size; @@ -1813,11 +1635,11 @@ int PMPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype, if (comm == MPI_COMM_NULL) { retval = MPI_ERR_COMM; - } else if (((smpi_comm_rank(comm) == root) && (!is_datatype_valid(sendtype))) || + } else if (((comm->rank() == root) && (!is_datatype_valid(sendtype))) || ((recvbuf != MPI_IN_PLACE) && (!is_datatype_valid(recvtype)))) { retval = MPI_ERR_TYPE; } else if ((sendbuf == recvbuf) || - ((smpi_comm_rank(comm)==root) && sendcount>0 && (sendbuf == nullptr))){ + ((comm->rank()==root) && sendcount>0 && (sendbuf == nullptr))){ retval = MPI_ERR_BUFFER; }else { @@ -1826,14 +1648,14 @@ int PMPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype, recvcount = sendcount; } int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1; - int root_traced = smpi_group_index(smpi_comm_group(comm), root); + int root_traced = comm->group()->index(root); instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); extra->type = TRACING_SCATTER; extra->root = root_traced; int known = 0; extra->datatype1 = encode_datatype(sendtype, &known); int dt_size_send = 1; - if ((smpi_comm_rank(comm) == root) && known == 0) + if ((comm->rank() == root) && known == 0) dt_size_send = smpi_datatype_size(sendtype); extra->send_size = sendcount * dt_size_send; extra->datatype2 = encode_datatype(recvtype, &known); @@ -1863,18 +1685,18 @@ int PMPI_Scatterv(void *sendbuf, int *sendcounts, int *displs, retval = MPI_ERR_COMM; } else if (sendcounts == nullptr || displs == nullptr) { retval = MPI_ERR_ARG; - } else if (((smpi_comm_rank(comm) == root) && (sendtype == MPI_DATATYPE_NULL)) || + } else if (((comm->rank() == root) && (sendtype == MPI_DATATYPE_NULL)) || ((recvbuf != MPI_IN_PLACE) && (recvtype == MPI_DATATYPE_NULL))) { retval = 
MPI_ERR_TYPE; } else { if (recvbuf == MPI_IN_PLACE) { recvtype = sendtype; - recvcount = sendcounts[smpi_comm_rank(comm)]; + recvcount = sendcounts[comm->rank()]; } int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1; - int root_traced = smpi_group_index(smpi_comm_group(comm), root); + int root_traced = comm->group()->index(root); int i = 0; - int size = smpi_comm_size(comm); + int size = comm->size(); instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); extra->type = TRACING_SCATTERV; extra->num_processes = size; @@ -1884,7 +1706,7 @@ int PMPI_Scatterv(void *sendbuf, int *sendcounts, int *displs, int dt_size_send = 1; if (known == 0) dt_size_send = smpi_datatype_size(sendtype); - if ((smpi_comm_rank(comm) == root)) { + if ((comm->rank() == root)) { extra->sendcounts = xbt_new(int, size); for (i = 0; i < size; i++) // copy data to avoid bad free extra->sendcounts[i] = sendcounts[i] * dt_size_send; @@ -1918,7 +1740,7 @@ int PMPI_Reduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, retval = MPI_ERR_ARG; } else { int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1; - int root_traced = smpi_group_index(smpi_comm_group(comm), root); + int root_traced = comm->group()->index(root); instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); extra->type = TRACING_REDUCE; int known = 0; @@ -2089,7 +1911,7 @@ int PMPI_Reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts, MPI_Datat } else { int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1; int i = 0; - int size = smpi_comm_size(comm); + int size = comm->size(); instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); extra->type = TRACING_REDUCE_SCATTER; extra->num_processes = size; @@ -2140,7 +1962,7 @@ int PMPI_Reduce_scatter_block(void *sendbuf, void *recvbuf, int recvcount, } else if (recvcount < 0) { retval = MPI_ERR_ARG; } else { - int count = smpi_comm_size(comm); + int count = comm->size(); int rank = comm != MPI_COMM_NULL ? 
smpi_process_index() : -1; instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); @@ -2199,8 +2021,8 @@ int PMPI_Alltoall(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* rec int sendtmpcount = sendcount; MPI_Datatype sendtmptype = sendtype; if (sendbuf == MPI_IN_PLACE) { - sendtmpbuf = static_cast(xbt_malloc(recvcount * smpi_comm_size(comm) * smpi_datatype_size(recvtype))); - memcpy(sendtmpbuf, recvbuf, recvcount * smpi_comm_size(comm) * smpi_datatype_size(recvtype)); + sendtmpbuf = static_cast(xbt_malloc(recvcount * comm->size() * smpi_datatype_size(recvtype))); + memcpy(sendtmpbuf, recvbuf, recvcount * comm->size() * smpi_datatype_size(recvtype)); sendtmpcount = recvcount; sendtmptype = recvtype; } @@ -2248,7 +2070,7 @@ int PMPI_Alltoallv(void* sendbuf, int* sendcounts, int* senddisps, MPI_Datatype } else { int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1; int i = 0; - int size = smpi_comm_size(comm); + int size = comm->size(); instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); extra->type = TRACING_ALLTOALLV; extra->send_size = 0; @@ -2489,7 +2311,7 @@ int PMPI_Cart_create(MPI_Comm comm_old, int ndims, int* dims, int* periodic, int } int PMPI_Cart_rank(MPI_Comm comm, int* coords, int* rank) { - if(comm == MPI_COMM_NULL || smpi_comm_topo(comm) == nullptr) { + if(comm == MPI_COMM_NULL || comm->topo() == nullptr) { return MPI_ERR_TOPOLOGY; } if (coords == nullptr) { @@ -2499,7 +2321,7 @@ int PMPI_Cart_rank(MPI_Comm comm, int* coords, int* rank) { } int PMPI_Cart_shift(MPI_Comm comm, int direction, int displ, int* source, int* dest) { - if(comm == MPI_COMM_NULL || smpi_comm_topo(comm) == nullptr) { + if(comm == MPI_COMM_NULL || comm->topo() == nullptr) { return MPI_ERR_TOPOLOGY; } if (source == nullptr || dest == nullptr || direction < 0 ) { @@ -2509,10 +2331,10 @@ int PMPI_Cart_shift(MPI_Comm comm, int direction, int displ, int* source, int* d } int PMPI_Cart_coords(MPI_Comm comm, int rank, int maxdims, int* coords) { - if(comm 
== MPI_COMM_NULL || smpi_comm_topo(comm) == nullptr) { + if(comm == MPI_COMM_NULL || comm->topo() == nullptr) { return MPI_ERR_TOPOLOGY; } - if (rank < 0 || rank >= smpi_comm_size(comm)) { + if (rank < 0 || rank >= comm->size()) { return MPI_ERR_RANK; } if (maxdims <= 0) { @@ -2525,7 +2347,7 @@ int PMPI_Cart_coords(MPI_Comm comm, int rank, int maxdims, int* coords) { } int PMPI_Cart_get(MPI_Comm comm, int maxdims, int* dims, int* periods, int* coords) { - if(comm == nullptr || smpi_comm_topo(comm) == nullptr) { + if(comm == nullptr || comm->topo() == nullptr) { return MPI_ERR_TOPOLOGY; } if(maxdims <= 0 || dims == nullptr || periods == nullptr || coords == nullptr) { @@ -2535,7 +2357,7 @@ int PMPI_Cart_get(MPI_Comm comm, int maxdims, int* dims, int* periods, int* coor } int PMPI_Cartdim_get(MPI_Comm comm, int* ndims) { - if (comm == MPI_COMM_NULL || smpi_comm_topo(comm) == nullptr) { + if (comm == MPI_COMM_NULL || comm->topo() == nullptr) { return MPI_ERR_TOPOLOGY; } if (ndims == nullptr) { @@ -2556,7 +2378,7 @@ int PMPI_Dims_create(int nnodes, int ndims, int* dims) { } int PMPI_Cart_sub(MPI_Comm comm, int* remain_dims, MPI_Comm* comm_new) { - if(comm == MPI_COMM_NULL || smpi_comm_topo(comm) == nullptr) { + if(comm == MPI_COMM_NULL || comm->topo() == nullptr) { return MPI_ERR_TOPOLOGY; } if (comm_new == nullptr) { @@ -2636,7 +2458,7 @@ int PMPI_Win_get_group(MPI_Win win, MPI_Group * group){ return MPI_ERR_WIN; }else { smpi_mpi_win_get_group(win, group); - smpi_group_use(*group); + (*group)->use(); return MPI_SUCCESS; } } @@ -2677,7 +2499,7 @@ int PMPI_Get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int rank = smpi_process_index(); MPI_Group group; smpi_mpi_win_get_group(win, &group); - int src_traced = smpi_group_index(group, target_rank); + int src_traced = group->index(target_rank); TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr); retval = smpi_mpi_get( origin_addr, origin_count, origin_datatype, target_rank, target_disp, 
target_count, @@ -2710,7 +2532,7 @@ int PMPI_Put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int rank = smpi_process_index(); MPI_Group group; smpi_mpi_win_get_group(win, &group); - int dst_traced = smpi_group_index(group, target_rank); + int dst_traced = group->index(target_rank); TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, nullptr); TRACE_smpi_send(rank, rank, dst_traced, SMPI_RMA_TAG, origin_count*smpi_datatype_size(origin_datatype)); @@ -2747,7 +2569,7 @@ int PMPI_Accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_da int rank = smpi_process_index(); MPI_Group group; smpi_mpi_win_get_group(win, &group); - int src_traced = smpi_group_index(group, target_rank); + int src_traced = group->index(target_rank); TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, nullptr); retval = smpi_mpi_accumulate( origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count, @@ -2937,7 +2759,7 @@ int PMPI_Attr_delete(MPI_Comm comm, int keyval) { else if (comm==MPI_COMM_NULL) return MPI_ERR_COMM; else - return smpi_comm_attr_delete(comm, keyval); + return comm->attr_delete(keyval); } int PMPI_Attr_get(MPI_Comm comm, int keyval, void* attr_value, int* flag) { @@ -2975,7 +2797,7 @@ int PMPI_Attr_get(MPI_Comm comm, int keyval, void* attr_value, int* flag) { *static_cast(attr_value) = &one; return MPI_SUCCESS; default: - return smpi_comm_attr_get(comm, keyval, attr_value, flag); + return comm->attr_get(keyval, attr_value, flag); } } @@ -2986,7 +2808,7 @@ int PMPI_Attr_put(MPI_Comm comm, int keyval, void* attr_value) { else if (comm==MPI_COMM_NULL) return MPI_ERR_COMM; else - return smpi_comm_attr_put(comm, keyval, attr_value); + return comm->attr_put(keyval, attr_value); } int PMPI_Comm_get_attr (MPI_Comm comm, int comm_keyval, void *attribute_val, int *flag) diff --git a/src/smpi/smpi_replay.cpp b/src/smpi/smpi_replay.cpp index 1b6dd415b7..6d7d878e8d 100644 --- a/src/smpi/smpi_replay.cpp +++ 
b/src/smpi/smpi_replay.cpp @@ -217,7 +217,7 @@ static void action_send(const char *const *action) int rank = smpi_process_index(); - int dst_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), to); + int dst_traced = MPI_COMM_WORLD->group()->rank(to); instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); extra->type = TRACING_SEND; extra->send_size = size; @@ -248,7 +248,7 @@ static void action_Isend(const char *const *action) MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE; int rank = smpi_process_index(); - int dst_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), to); + int dst_traced = MPI_COMM_WORLD->group()->rank(to); instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); extra->type = TRACING_ISEND; extra->send_size = size; @@ -282,7 +282,7 @@ static void action_recv(const char *const *action) { MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE; int rank = smpi_process_index(); - int src_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), from); + int src_traced = MPI_COMM_WORLD->group()->rank(from); instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); extra->type = TRACING_RECV; @@ -321,7 +321,7 @@ static void action_Irecv(const char *const *action) MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE; int rank = smpi_process_index(); - int src_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), from); + int src_traced = MPI_COMM_WORLD->group()->rank(from); instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); extra->type = TRACING_IRECV; extra->send_size = size; @@ -387,11 +387,11 @@ static void action_wait(const char *const *action){ return; } - int rank = request->comm != MPI_COMM_NULL ? smpi_comm_rank(request->comm) : -1; + int rank = request->comm != MPI_COMM_NULL ? 
request->comm->rank() : -1; - MPI_Group group = smpi_comm_group(request->comm); - int src_traced = smpi_group_rank(group, request->src); - int dst_traced = smpi_group_rank(group, request->dst); + MPI_Group group = request->comm->group(); + int src_traced = group->rank(request->src); + int dst_traced = group->rank(request->dst); int is_wait_for_receive = request->recv; instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); extra->type = TRACING_WAIT; @@ -469,7 +469,7 @@ static void action_bcast(const char *const *action) } int rank = smpi_process_index(); - int root_traced = smpi_group_index(smpi_comm_group(MPI_COMM_WORLD), root); + int root_traced = MPI_COMM_WORLD->group()->index(root); instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); extra->type = TRACING_BCAST; @@ -501,7 +501,7 @@ static void action_reduce(const char *const *action) } int rank = smpi_process_index(); - int root_traced = smpi_group_rank(smpi_comm_group(MPI_COMM_WORLD), root); + int root_traced = MPI_COMM_WORLD->group()->rank(root); instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1); extra->type = TRACING_REDUCE; extra->send_size = comm_size; @@ -551,7 +551,7 @@ static void action_allReduce(const char *const *action) { static void action_allToAll(const char *const *action) { CHECK_ACTION_PARAMS(action, 2, 2) //two mandatory (send and recv volumes) and two optional (corresponding datatypes) double clock = smpi_process_simulated_elapsed(); - int comm_size = smpi_comm_size(MPI_COMM_WORLD); + int comm_size = MPI_COMM_WORLD->size(); int send_size = parse_double(action[2]); int recv_size = parse_double(action[3]); MPI_Datatype MPI_CURRENT_TYPE2 = MPI_DEFAULT_TYPE; @@ -594,7 +594,7 @@ static void action_gather(const char *const *action) { */ CHECK_ACTION_PARAMS(action, 2, 3) double clock = smpi_process_simulated_elapsed(); - int comm_size = smpi_comm_size(MPI_COMM_WORLD); + int comm_size = MPI_COMM_WORLD->size(); int send_size = parse_double(action[2]); int recv_size = 
parse_double(action[3]); MPI_Datatype MPI_CURRENT_TYPE2 = MPI_DEFAULT_TYPE; @@ -609,7 +609,7 @@ static void action_gather(const char *const *action) { int root=0; if(action[4]) root=atoi(action[4]); - int rank = smpi_comm_rank(MPI_COMM_WORLD); + int rank = MPI_COMM_WORLD->rank(); if(rank==root) recv = smpi_get_tmp_recvbuffer(recv_size*comm_size* smpi_datatype_size(MPI_CURRENT_TYPE2)); @@ -641,7 +641,7 @@ static void action_gatherv(const char *const *action) { 5) 0 is the recv datatype id, see decode_datatype() */ double clock = smpi_process_simulated_elapsed(); - int comm_size = smpi_comm_size(MPI_COMM_WORLD); + int comm_size = MPI_COMM_WORLD->size(); CHECK_ACTION_PARAMS(action, comm_size+1, 2) int send_size = parse_double(action[2]); int disps[comm_size]; @@ -664,7 +664,7 @@ static void action_gatherv(const char *const *action) { } int root=atoi(action[3+comm_size]); - int rank = smpi_comm_rank(MPI_COMM_WORLD); + int rank = MPI_COMM_WORLD->rank(); if(rank==root) recv = smpi_get_tmp_recvbuffer(recv_sum* smpi_datatype_size(MPI_CURRENT_TYPE2)); @@ -697,7 +697,7 @@ static void action_reducescatter(const char *const *action) { 3) The last value corresponds to the datatype, see decode_datatype(). 
*/ double clock = smpi_process_simulated_elapsed(); - int comm_size = smpi_comm_size(MPI_COMM_WORLD); + int comm_size = MPI_COMM_WORLD->size(); CHECK_ACTION_PARAMS(action, comm_size+1, 1) int comp_size = parse_double(action[2+comm_size]); int recvcounts[comm_size]; @@ -767,7 +767,7 @@ static void action_allgather(const char *const *action) { extra->recv_size= recvcount; extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE, nullptr); extra->datatype2 = encode_datatype(MPI_CURRENT_TYPE2, nullptr); - extra->num_processes = smpi_comm_size(MPI_COMM_WORLD); + extra->num_processes = MPI_COMM_WORLD->size(); TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra); @@ -787,7 +787,7 @@ static void action_allgatherv(const char *const *action) { */ double clock = smpi_process_simulated_elapsed(); - int comm_size = smpi_comm_size(MPI_COMM_WORLD); + int comm_size = MPI_COMM_WORLD->size(); CHECK_ACTION_PARAMS(action, comm_size+1, 2) int sendcount=atoi(action[2]); int recvcounts[comm_size]; @@ -841,7 +841,7 @@ static void action_allToAllv(const char *const *action) { */ double clock = smpi_process_simulated_elapsed(); - int comm_size = smpi_comm_size(MPI_COMM_WORLD); + int comm_size = MPI_COMM_WORLD->size(); CHECK_ACTION_PARAMS(action, 2*comm_size+2, 2) int sendcounts[comm_size]; int recvcounts[comm_size]; diff --git a/src/smpi/smpi_rma.cpp b/src/smpi/smpi_rma.cpp index 159bbc1c9a..28dc6f96e1 100644 --- a/src/smpi/smpi_rma.cpp +++ b/src/smpi/smpi_rma.cpp @@ -28,8 +28,8 @@ typedef struct s_smpi_mpi_win{ MPI_Win smpi_mpi_win_create( void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm){ - int comm_size = smpi_comm_size(comm); - int rank = smpi_comm_rank(comm); + int comm_size = comm->size(); + int rank = comm->rank(); XBT_DEBUG("Creating window"); MPI_Win win = xbt_new(s_smpi_mpi_win_t, 1); @@ -77,7 +77,7 @@ int smpi_mpi_win_free( MPI_Win* win){ } mpi_coll_barrier_fun((*win)->comm); - int rank=smpi_comm_rank((*win)->comm); + int rank=(*win)->comm->rank(); if(rank == 
0) MSG_barrier_destroy((*win)->bar); xbt_mutex_destroy((*win)->mut); @@ -98,7 +98,7 @@ void smpi_mpi_win_get_name(MPI_Win win, char* name, int* length){ void smpi_mpi_win_get_group(MPI_Win win, MPI_Group* group){ if(win->comm != MPI_COMM_NULL){ - *group = smpi_comm_group(win->comm); + *group = win->comm->group(); } else { *group = MPI_GROUP_NULL; } @@ -156,14 +156,14 @@ int smpi_mpi_put( void *origin_addr, int origin_count, MPI_Datatype origin_datat void* recv_addr = static_cast ( static_cast(recv_win->base) + target_disp * recv_win->disp_unit); XBT_DEBUG("Entering MPI_Put to %d", target_rank); - if(target_rank != smpi_comm_rank(win->comm)){ + if(target_rank != win->comm->rank()){ //prepare send_request MPI_Request sreq = smpi_rma_send_init(origin_addr, origin_count, origin_datatype, smpi_process_index(), - smpi_group_index(smpi_comm_group(win->comm),target_rank), SMPI_RMA_TAG+1, win->comm, MPI_OP_NULL); + win->comm->group()->index(target_rank), SMPI_RMA_TAG+1, win->comm, MPI_OP_NULL); //prepare receiver request MPI_Request rreq = smpi_rma_recv_init(recv_addr, target_count, target_datatype, smpi_process_index(), - smpi_group_index(smpi_comm_group(win->comm),target_rank), SMPI_RMA_TAG+1, recv_win->comm, MPI_OP_NULL); + win->comm->group()->index(target_rank), SMPI_RMA_TAG+1, recv_win->comm, MPI_OP_NULL); //push request to receiver's win xbt_mutex_acquire(recv_win->mut); @@ -194,15 +194,15 @@ int smpi_mpi_get( void *origin_addr, int origin_count, MPI_Datatype origin_datat void* send_addr = static_cast(static_cast(send_win->base) + target_disp * send_win->disp_unit); XBT_DEBUG("Entering MPI_Get from %d", target_rank); - if(target_rank != smpi_comm_rank(win->comm)){ + if(target_rank != win->comm->rank()){ //prepare send_request MPI_Request sreq = smpi_rma_send_init(send_addr, target_count, target_datatype, - smpi_group_index(smpi_comm_group(win->comm),target_rank), smpi_process_index(), SMPI_RMA_TAG+2, send_win->comm, + win->comm->group()->index(target_rank), 
smpi_process_index(), SMPI_RMA_TAG+2, send_win->comm, MPI_OP_NULL); //prepare receiver request MPI_Request rreq = smpi_rma_recv_init(origin_addr, origin_count, origin_datatype, - smpi_group_index(smpi_comm_group(win->comm),target_rank), smpi_process_index(), SMPI_RMA_TAG+2, win->comm, + win->comm->group()->index(target_rank), smpi_process_index(), SMPI_RMA_TAG+2, win->comm, MPI_OP_NULL); //start the send, with another process than us as sender. @@ -240,11 +240,11 @@ int smpi_mpi_accumulate( void *origin_addr, int origin_count, MPI_Datatype origi //As the tag will be used for ordering of the operations, add count to it //prepare send_request MPI_Request sreq = smpi_rma_send_init(origin_addr, origin_count, origin_datatype, - smpi_process_index(), smpi_group_index(smpi_comm_group(win->comm),target_rank), SMPI_RMA_TAG+3+win->count, win->comm, op); + smpi_process_index(), win->comm->group()->index(target_rank), SMPI_RMA_TAG+3+win->count, win->comm, op); //prepare receiver request MPI_Request rreq = smpi_rma_recv_init(recv_addr, target_count, target_datatype, - smpi_process_index(), smpi_group_index(smpi_comm_group(win->comm),target_rank), SMPI_RMA_TAG+3+win->count, recv_win->comm, op); + smpi_process_index(), win->comm->group()->index(target_rank), SMPI_RMA_TAG+3+win->count, recv_win->comm, op); win->count++; //push request to receiver's win @@ -278,11 +278,11 @@ int smpi_mpi_win_start(MPI_Group group, int assert, MPI_Win win){ //naive, blocking implementation. 
int i = 0; int j = 0; - int size = smpi_group_size(group); + int size = group->size(); MPI_Request* reqs = xbt_new0(MPI_Request, size); while (j != size) { - int src = smpi_group_index(group, j); + int src = group->index(j); if (src != smpi_process_index() && src != MPI_UNDEFINED) { reqs[i] = smpi_irecv_init(nullptr, 0, MPI_CHAR, src, SMPI_RMA_TAG + 4, MPI_COMM_WORLD); i++; @@ -298,7 +298,7 @@ int smpi_mpi_win_start(MPI_Group group, int assert, MPI_Win win){ xbt_free(reqs); win->opened++; //we're open for business ! win->group=group; - smpi_group_use(group); + group->use(); return MPI_SUCCESS; } @@ -306,11 +306,11 @@ int smpi_mpi_win_post(MPI_Group group, int assert, MPI_Win win){ //let's make a synchronous send here int i = 0; int j = 0; - int size = smpi_group_size(group); + int size = group->size(); MPI_Request* reqs = xbt_new0(MPI_Request, size); while(j!=size){ - int dst=smpi_group_index(group,j); + int dst=group->index(j); if(dst!=smpi_process_index() && dst!=MPI_UNDEFINED){ reqs[i]=smpi_mpi_send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG+4, MPI_COMM_WORLD); i++; @@ -327,7 +327,7 @@ int smpi_mpi_win_post(MPI_Group group, int assert, MPI_Win win){ xbt_free(reqs); win->opened++; //we're open for business ! win->group=group; - smpi_group_use(group); + group->use(); return MPI_SUCCESS; } @@ -338,11 +338,11 @@ int smpi_mpi_win_complete(MPI_Win win){ XBT_DEBUG("Entering MPI_Win_Complete"); int i = 0; int j = 0; - int size = smpi_group_size(win->group); + int size = win->group->size(); MPI_Request* reqs = xbt_new0(MPI_Request, size); while(j!=size){ - int dst=smpi_group_index(win->group,j); + int dst=win->group->index(j); if(dst!=smpi_process_index() && dst!=MPI_UNDEFINED){ reqs[i]=smpi_mpi_send_init(nullptr, 0, MPI_CHAR, dst, SMPI_RMA_TAG+5, MPI_COMM_WORLD); i++; @@ -378,7 +378,7 @@ int smpi_mpi_win_complete(MPI_Win win){ } xbt_mutex_release(win->mut); - smpi_group_unuse(win->group); + win->group->unuse(); win->opened--; //we're closed for business ! 
return MPI_SUCCESS; } @@ -387,11 +387,11 @@ int smpi_mpi_win_wait(MPI_Win win){ //naive, blocking implementation. XBT_DEBUG("Entering MPI_Win_Wait"); int i=0,j=0; - int size = smpi_group_size(win->group); + int size = win->group->size(); MPI_Request* reqs = xbt_new0(MPI_Request, size); while(j!=size){ - int src=smpi_group_index(win->group,j); + int src=win->group->index(j); if(src!=smpi_process_index() && src!=MPI_UNDEFINED){ reqs[i]=smpi_irecv_init(nullptr, 0, MPI_CHAR, src,SMPI_RMA_TAG+5, MPI_COMM_WORLD); i++; @@ -424,7 +424,7 @@ int smpi_mpi_win_wait(MPI_Win win){ } xbt_mutex_release(win->mut); - smpi_group_unuse(win->group); + win->group->unuse(); win->opened--; //we're opened for business ! return MPI_SUCCESS; } diff --git a/src/smpi/smpi_topo.cpp b/src/smpi/smpi_topo.cpp index 57b45a5a49..d7912c796d 100644 --- a/src/smpi/smpi_topo.cpp +++ b/src/smpi/smpi_topo.cpp @@ -124,7 +124,7 @@ int smpi_mpi_cart_create(MPI_Comm comm_old, int ndims, int dims[], int periods[] MPI_Group oldGroup; int nranks; - int rank = smpi_comm_rank(comm_old); + int rank = comm_old->rank(); int newSize = 1; if(ndims != 0) { @@ -136,10 +136,10 @@ int smpi_mpi_cart_create(MPI_Comm comm_old, int ndims, int dims[], int periods[] return retval; } newCart = smpi_cart_topo_create(ndims); - oldGroup = smpi_comm_group(comm_old); - newGroup = smpi_group_new(newSize); + oldGroup = comm_old->group(); + newGroup = new simgrid::SMPI::Group(newSize); for (int i = 0 ; i < newSize ; i++) { - smpi_group_set_mapping(newGroup, smpi_group_index(oldGroup, i), i); + newGroup->set_mapping(oldGroup->index(i), i); } newCart->topo.cart->nnodes = newSize; @@ -155,11 +155,11 @@ int smpi_mpi_cart_create(MPI_Comm comm_old, int ndims, int dims[], int periods[] rank = rank % nranks; } - *comm_cart = smpi_comm_new(newGroup, newCart); + *comm_cart = new simgrid::SMPI::Comm(newGroup, newCart); } else { if (rank == 0) { newCart = smpi_cart_topo_create(ndims); - *comm_cart = 
smpi_comm_new(smpi_group_copy(smpi_comm_group(MPI_COMM_SELF)), newCart); + *comm_cart = new simgrid::SMPI::Comm(new simgrid::SMPI::Group(MPI_COMM_SELF->group()), newCart); } else { *comm_cart = MPI_COMM_NULL; } @@ -168,7 +168,7 @@ int smpi_mpi_cart_create(MPI_Comm comm_old, int ndims, int dims[], int periods[] } int smpi_mpi_cart_sub(MPI_Comm comm, const int remain_dims[], MPI_Comm *newcomm) { - MPI_Topology oldTopo = smpi_comm_topo(comm); + MPI_Topology oldTopo = comm->topo(); int oldNDims = oldTopo->topo.cart->ndims; int j = 0; int *newDims = nullptr; @@ -200,7 +200,7 @@ int smpi_mpi_cart_sub(MPI_Comm comm, const int remain_dims[], MPI_Comm *newcomm) } int smpi_mpi_cart_coords(MPI_Comm comm, int rank, int maxdims, int coords[]) { - MPI_Topology topo = smpi_comm_topo(comm); + MPI_Topology topo = comm->topo(); int nnodes = topo->topo.cart->nnodes; for (int i = 0; i< topo->topo.cart->ndims; i++ ) { nnodes = nnodes / topo->topo.cart->dims[i]; @@ -211,7 +211,7 @@ int smpi_mpi_cart_coords(MPI_Comm comm, int rank, int maxdims, int coords[]) { } int smpi_mpi_cart_get(MPI_Comm comm, int maxdims, int* dims, int* periods, int* coords) { - MPI_Topology topo = smpi_comm_topo(comm); + MPI_Topology topo = comm->topo(); int ndims=topo->topo.cart->ndims < maxdims ? 
topo->topo.cart->ndims : maxdims; for(int i = 0 ; i < ndims ; i++) { dims[i] = topo->topo.cart->dims[i]; @@ -222,7 +222,7 @@ int smpi_mpi_cart_get(MPI_Comm comm, int maxdims, int* dims, int* periods, int* } int smpi_mpi_cart_rank(MPI_Comm comm, int* coords, int* rank) { - MPI_Topology topo = smpi_comm_topo(comm); + MPI_Topology topo = comm->topo(); int ndims = topo->topo.cart->ndims; int coord; *rank = 0; @@ -260,7 +260,7 @@ int smpi_mpi_cart_rank(MPI_Comm comm, int* coords, int* rank) { } int smpi_mpi_cart_shift(MPI_Comm comm, int direction, int disp, int *rank_source, int *rank_dest) { - MPI_Topology topo = smpi_comm_topo(comm); + MPI_Topology topo = comm->topo(); int position[topo->topo.cart->ndims]; if(topo->topo.cart->ndims == 0) { @@ -270,7 +270,7 @@ int smpi_mpi_cart_shift(MPI_Comm comm, int direction, int disp, int *rank_source return MPI_ERR_DIMS; } - smpi_mpi_cart_coords(comm, smpi_comm_rank(comm), topo->topo.cart->ndims, position); + smpi_mpi_cart_coords(comm, comm->rank(), topo->topo.cart->ndims, position); position[direction] += disp; if(position[direction] < 0 || @@ -301,7 +301,7 @@ int smpi_mpi_cart_shift(MPI_Comm comm, int direction, int disp, int *rank_source } int smpi_mpi_cartdim_get(MPI_Comm comm, int *ndims) { - MPI_Topology topo = smpi_comm_topo(comm); + MPI_Topology topo = comm->topo(); *ndims = topo->topo.cart->ndims; return MPI_SUCCESS; diff --git a/tools/cmake/DefinePackages.cmake b/tools/cmake/DefinePackages.cmake index 4be1c2fabc..f4f901ee8f 100644 --- a/tools/cmake/DefinePackages.cmake +++ b/tools/cmake/DefinePackages.cmake @@ -104,108 +104,108 @@ set(EXTRA_DIST ) set(SMPI_SRC - src/smpi/colls/allgather-2dmesh.c - src/smpi/colls/allgather-3dmesh.c - src/smpi/colls/allgather-GB.c - src/smpi/colls/allgather-NTSLR-NB.c - src/smpi/colls/allgather-NTSLR.c - src/smpi/colls/allgather-SMP-NTS.c - src/smpi/colls/allgather-bruck.c - src/smpi/colls/allgather-loosely-lr.c - src/smpi/colls/allgather-ompi-neighborexchange.c - 
src/smpi/colls/allgather-pair.c - src/smpi/colls/allgather-mvapich-smp.c - src/smpi/colls/allgather-rdb.c - src/smpi/colls/allgather-rhv.c - src/smpi/colls/allgather-ring.c - src/smpi/colls/allgather-smp-simple.c - src/smpi/colls/allgather-spreading-simple.c - src/smpi/colls/allgatherv-GB.c - src/smpi/colls/allgatherv-mpich-rdb.c - src/smpi/colls/allgatherv-mpich-ring.c - src/smpi/colls/allgatherv-ompi-bruck.c - src/smpi/colls/allgatherv-ompi-neighborexchange.c - src/smpi/colls/allgatherv-pair.c - src/smpi/colls/allgatherv-ring.c - src/smpi/colls/allreduce-lr.c - src/smpi/colls/allreduce-ompi-ring-segmented.c - src/smpi/colls/allreduce-rab-rdb.c - src/smpi/colls/allreduce-rab1.c - src/smpi/colls/allreduce-rab2.c - src/smpi/colls/allreduce-rdb.c - src/smpi/colls/allreduce-redbcast.c - src/smpi/colls/allreduce-smp-binomial-pipeline.c - src/smpi/colls/allreduce-smp-binomial.c - src/smpi/colls/allreduce-smp-rdb.c - src/smpi/colls/allreduce-smp-rsag-lr.c - src/smpi/colls/allreduce-smp-rsag-rab.c - src/smpi/colls/allreduce-smp-rsag.c - src/smpi/colls/allreduce-mvapich-rs.c - src/smpi/colls/allreduce-mvapich-two-level.c - src/smpi/colls/alltoall-2dmesh.c - src/smpi/colls/alltoall-3dmesh.c -# src/smpi/colls/alltoall-bruck.c - src/smpi/colls/alltoall-pair-light-barrier.c - src/smpi/colls/alltoall-pair-mpi-barrier.c - src/smpi/colls/alltoall-pair-one-barrier.c - src/smpi/colls/alltoall-pair.c - src/smpi/colls/alltoall-rdb.c - src/smpi/colls/alltoall-ring-light-barrier.c - src/smpi/colls/alltoall-ring-mpi-barrier.c - src/smpi/colls/alltoall-ring-one-barrier.c - src/smpi/colls/alltoall-ring.c - src/smpi/colls/alltoall-mvapich-scatter-dest.c - src/smpi/colls/alltoallv-bruck.c - src/smpi/colls/alltoallv-ompi-basic-linear.c - src/smpi/colls/alltoallv-pair-light-barrier.c - src/smpi/colls/alltoallv-pair-mpi-barrier.c - src/smpi/colls/alltoallv-pair-one-barrier.c - src/smpi/colls/alltoallv-pair.c - src/smpi/colls/alltoallv-ring-light-barrier.c - 
src/smpi/colls/alltoallv-ring-mpi-barrier.c - src/smpi/colls/alltoallv-ring-one-barrier.c - src/smpi/colls/alltoallv-ring.c - src/smpi/colls/barrier-ompi.c - src/smpi/colls/barrier-mvapich2-pair.c - src/smpi/colls/bcast-NTSB.c - src/smpi/colls/bcast-NTSL-Isend.c - src/smpi/colls/bcast-NTSL.c - src/smpi/colls/bcast-SMP-binary.c - src/smpi/colls/bcast-SMP-binomial.c - src/smpi/colls/bcast-SMP-linear.c - src/smpi/colls/bcast-arrival-pattern-aware-wait.c - src/smpi/colls/bcast-arrival-pattern-aware.c - src/smpi/colls/bcast-arrival-scatter.c - src/smpi/colls/bcast-binomial-tree.c - src/smpi/colls/bcast-flattree-pipeline.c - src/smpi/colls/bcast-flattree.c - src/smpi/colls/bcast-ompi-pipeline.c - src/smpi/colls/bcast-ompi-split-bintree.c - src/smpi/colls/bcast-mvapich-smp.c - src/smpi/colls/bcast-scatter-LR-allgather.c - src/smpi/colls/bcast-scatter-rdb-allgather.c - src/smpi/colls/coll_tuned_topo.c - src/smpi/colls/colls_global.c - src/smpi/colls/gather-ompi.c - src/smpi/colls/gather-mvapich.c - src/smpi/colls/reduce-NTSL.c - src/smpi/colls/reduce-arrival-pattern-aware.c - src/smpi/colls/reduce-binomial.c - src/smpi/colls/reduce-flat-tree.c - src/smpi/colls/reduce-ompi.c - src/smpi/colls/reduce-scatter-gather.c - src/smpi/colls/reduce_scatter-mpich.c - src/smpi/colls/reduce_scatter-ompi.c - src/smpi/colls/reduce-mvapich-knomial.c - src/smpi/colls/reduce-mvapich-two-level.c - src/smpi/colls/reduce-rab.c - src/smpi/colls/scatter-ompi.c - src/smpi/colls/scatter-mvapich-two-level.c + src/smpi/colls/allgather-2dmesh.cpp + src/smpi/colls/allgather-3dmesh.cpp + src/smpi/colls/allgather-GB.cpp + src/smpi/colls/allgather-NTSLR-NB.cpp + src/smpi/colls/allgather-NTSLR.cpp + src/smpi/colls/allgather-SMP-NTS.cpp + src/smpi/colls/allgather-bruck.cpp + src/smpi/colls/allgather-loosely-lr.cpp + src/smpi/colls/allgather-ompi-neighborexchange.cpp + src/smpi/colls/allgather-pair.cpp + src/smpi/colls/allgather-mvapich-smp.cpp + src/smpi/colls/allgather-rdb.cpp + 
src/smpi/colls/allgather-rhv.cpp + src/smpi/colls/allgather-ring.cpp + src/smpi/colls/allgather-smp-simple.cpp + src/smpi/colls/allgather-spreading-simple.cpp + src/smpi/colls/allgatherv-GB.cpp + src/smpi/colls/allgatherv-mpich-rdb.cpp + src/smpi/colls/allgatherv-mpich-ring.cpp + src/smpi/colls/allgatherv-ompi-bruck.cpp + src/smpi/colls/allgatherv-ompi-neighborexchange.cpp + src/smpi/colls/allgatherv-pair.cpp + src/smpi/colls/allgatherv-ring.cpp + src/smpi/colls/allreduce-lr.cpp + src/smpi/colls/allreduce-ompi-ring-segmented.cpp + src/smpi/colls/allreduce-rab-rdb.cpp + src/smpi/colls/allreduce-rab1.cpp + src/smpi/colls/allreduce-rab2.cpp + src/smpi/colls/allreduce-rdb.cpp + src/smpi/colls/allreduce-redbcast.cpp + src/smpi/colls/allreduce-smp-binomial-pipeline.cpp + src/smpi/colls/allreduce-smp-binomial.cpp + src/smpi/colls/allreduce-smp-rdb.cpp + src/smpi/colls/allreduce-smp-rsag-lr.cpp + src/smpi/colls/allreduce-smp-rsag-rab.cpp + src/smpi/colls/allreduce-smp-rsag.cpp + src/smpi/colls/allreduce-mvapich-rs.cpp + src/smpi/colls/allreduce-mvapich-two-level.cpp + src/smpi/colls/alltoall-2dmesh.cpp + src/smpi/colls/alltoall-3dmesh.cpp +# src/smpi/colls/alltoall-bruck.cpp + src/smpi/colls/alltoall-pair-light-barrier.cpp + src/smpi/colls/alltoall-pair-mpi-barrier.cpp + src/smpi/colls/alltoall-pair-one-barrier.cpp + src/smpi/colls/alltoall-pair.cpp + src/smpi/colls/alltoall-rdb.cpp + src/smpi/colls/alltoall-ring-light-barrier.cpp + src/smpi/colls/alltoall-ring-mpi-barrier.cpp + src/smpi/colls/alltoall-ring-one-barrier.cpp + src/smpi/colls/alltoall-ring.cpp + src/smpi/colls/alltoall-mvapich-scatter-dest.cpp + src/smpi/colls/alltoallv-bruck.cpp + src/smpi/colls/alltoallv-ompi-basic-linear.cpp + src/smpi/colls/alltoallv-pair-light-barrier.cpp + src/smpi/colls/alltoallv-pair-mpi-barrier.cpp + src/smpi/colls/alltoallv-pair-one-barrier.cpp + src/smpi/colls/alltoallv-pair.cpp + src/smpi/colls/alltoallv-ring-light-barrier.cpp + src/smpi/colls/alltoallv-ring-mpi-barrier.cpp + 
src/smpi/colls/alltoallv-ring-one-barrier.cpp + src/smpi/colls/alltoallv-ring.cpp + src/smpi/colls/barrier-ompi.cpp + src/smpi/colls/barrier-mvapich2-pair.cpp + src/smpi/colls/bcast-NTSB.cpp + src/smpi/colls/bcast-NTSL-Isend.cpp + src/smpi/colls/bcast-NTSL.cpp + src/smpi/colls/bcast-SMP-binary.cpp + src/smpi/colls/bcast-SMP-binomial.cpp + src/smpi/colls/bcast-SMP-linear.cpp + src/smpi/colls/bcast-arrival-pattern-aware-wait.cpp + src/smpi/colls/bcast-arrival-pattern-aware.cpp + src/smpi/colls/bcast-arrival-scatter.cpp + src/smpi/colls/bcast-binomial-tree.cpp + src/smpi/colls/bcast-flattree-pipeline.cpp + src/smpi/colls/bcast-flattree.cpp + src/smpi/colls/bcast-ompi-pipeline.cpp + src/smpi/colls/bcast-ompi-split-bintree.cpp + src/smpi/colls/bcast-mvapich-smp.cpp + src/smpi/colls/bcast-scatter-LR-allgather.cpp + src/smpi/colls/bcast-scatter-rdb-allgather.cpp + src/smpi/colls/coll_tuned_topo.cpp + src/smpi/colls/colls_global.cpp + src/smpi/colls/gather-ompi.cpp + src/smpi/colls/gather-mvapich.cpp + src/smpi/colls/reduce-NTSL.cpp + src/smpi/colls/reduce-arrival-pattern-aware.cpp + src/smpi/colls/reduce-binomial.cpp + src/smpi/colls/reduce-flat-tree.cpp + src/smpi/colls/reduce-ompi.cpp + src/smpi/colls/reduce-scatter-gather.cpp + src/smpi/colls/reduce_scatter-mpich.cpp + src/smpi/colls/reduce_scatter-ompi.cpp + src/smpi/colls/reduce-mvapich-knomial.cpp + src/smpi/colls/reduce-mvapich-two-level.cpp + src/smpi/colls/reduce-rab.cpp + src/smpi/colls/scatter-ompi.cpp + src/smpi/colls/scatter-mvapich-two-level.cpp src/smpi/colls/smpi_automatic_selector.cpp - src/smpi/colls/smpi_mpich_selector.c - src/smpi/colls/smpi_intel_mpi_selector.c - src/smpi/colls/smpi_openmpi_selector.c - src/smpi/colls/smpi_mvapich2_selector.c + src/smpi/colls/smpi_mpich_selector.cpp + src/smpi/colls/smpi_intel_mpi_selector.cpp + src/smpi/colls/smpi_openmpi_selector.cpp + src/smpi/colls/smpi_mvapich2_selector.cpp src/smpi/instr_smpi.cpp src/smpi/smpi_base.cpp src/smpi/smpi_bench.cpp @@ -213,10 +213,12 
@@ set(SMPI_SRC src/smpi/smpi_static_variables.cpp src/smpi/smpi_coll.cpp src/smpi/smpi_comm.cpp + src/smpi/smpi_comm.hpp src/smpi/smpi_deployment.cpp src/smpi/smpi_dvfs.cpp src/smpi/smpi_global.cpp src/smpi/smpi_group.cpp + src/smpi/smpi_group.hpp src/smpi/smpi_mpi.cpp src/smpi/smpi_mpi_dt.cpp src/smpi/smpi_pmpi.cpp @@ -668,6 +670,7 @@ set(headers_to_install include/smpi/smpi_main.h include/smpi/smpi_extended_traces.h include/smpi/smpi_extended_traces_fortran.h + include/smpi/forward.hpp include/surf/surf_routing.h include/xbt.h include/xbt/RngStream.h