int i, send_offset, recv_offset;
int intra_rank, inter_rank;
- intra_rank = rank % NUM_CORE;
- inter_rank = rank / NUM_CORE;
- int inter_comm_size = (comm_size + NUM_CORE - 1) / NUM_CORE;
- int num_core_in_current_smp = NUM_CORE;
- if(comm_size%NUM_CORE)
- THROWF(arg_error,0, "allgather SMP NTS algorithm can't be used with non multiple of NUM_CORE=%d number of processes ! ", NUM_CORE);
+ int num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (num_core == 1) num_core = NUM_CORE;
+
+
+ intra_rank = rank % num_core;
+ inter_rank = rank / num_core;
+ int inter_comm_size = (comm_size + num_core - 1) / num_core;
+ int num_core_in_current_smp = num_core;
+
+ if(comm_size%num_core)
+ THROWF(arg_error,0, "allgather SMP NTS algorithm can't be used with non multiple of NUM_CORE=%d number of processes ! ", num_core);
/* for too small number of processes, use default implementation */
- if (comm_size <= NUM_CORE) {
+ if (comm_size <= num_core) {
XBT_WARN("MPI_allgather_SMP_NTS use default MPI_allgather.");
smpi_mpi_allgather(sbuf, scount, stype, rbuf, rcount, rtype, comm);
return MPI_SUCCESS;
// the last SMP node may have fewer number of running processes than all others
if (inter_rank == (inter_comm_size - 1)) {
- num_core_in_current_smp = comm_size - (inter_rank * NUM_CORE);
+ num_core_in_current_smp = comm_size - (inter_rank * num_core);
}
//copy corresponding message from sbuf to rbuf
recv_offset = rank * rextent * rcount;
for (i = 1; i < num_core_in_current_smp; i++) {
dst =
- (inter_rank * NUM_CORE) + (intra_rank + i) % (num_core_in_current_smp);
+ (inter_rank * num_core) + (intra_rank + i) % (num_core_in_current_smp);
src =
- (inter_rank * NUM_CORE) + (intra_rank - i +
+ (inter_rank * num_core) + (intra_rank - i +
num_core_in_current_smp) %
(num_core_in_current_smp);
recv_offset = src * rextent * rcount;
MPI_Request *rrequest_array = xbt_new(MPI_Request, inter_comm_size - 1);
MPI_Request *srequest_array = xbt_new(MPI_Request, inter_comm_size - 1);
- src = ((inter_rank - 1 + inter_comm_size) % inter_comm_size) * NUM_CORE;
- dst = ((inter_rank + 1) % inter_comm_size) * NUM_CORE;
+ src = ((inter_rank - 1 + inter_comm_size) % inter_comm_size) * num_core;
+ dst = ((inter_rank + 1) % inter_comm_size) * num_core;
// post all inter Irecv
for (i = 0; i < inter_comm_size - 1; i++) {
recv_offset =
((inter_rank - i - 1 +
- inter_comm_size) % inter_comm_size) * NUM_CORE * sextent * scount;
- rrequest_array[i] = smpi_mpi_irecv((char *)rbuf + recv_offset, rcount * NUM_CORE,
+ inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
+ rrequest_array[i] = smpi_mpi_irecv((char *)rbuf + recv_offset, rcount * num_core,
rtype, src, tag + i, comm);
}
// send first message
send_offset =
((inter_rank +
- inter_comm_size) % inter_comm_size) * NUM_CORE * sextent * scount;
- srequest_array[0] = smpi_mpi_isend((char *)rbuf + send_offset, scount * NUM_CORE,
+ inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
+ srequest_array[0] = smpi_mpi_isend((char *)rbuf + send_offset, scount * num_core,
stype, dst, tag, comm);
// loop : recv-inter , send-inter, send-intra (linear-bcast)
for (i = 0; i < inter_comm_size - 2; i++) {
recv_offset =
((inter_rank - i - 1 +
- inter_comm_size) % inter_comm_size) * NUM_CORE * sextent * scount;
+ inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
smpi_mpi_wait(&rrequest_array[i], MPI_STATUS_IGNORE);
- srequest_array[i + 1] = smpi_mpi_isend((char *)rbuf + recv_offset, scount * NUM_CORE,
+ srequest_array[i + 1] = smpi_mpi_isend((char *)rbuf + recv_offset, scount * num_core,
stype, dst, tag + i + 1, comm);
if (num_core_in_current_smp > 1) {
- smpi_mpi_send((char *)rbuf + recv_offset, scount * NUM_CORE,
+ smpi_mpi_send((char *)rbuf + recv_offset, scount * num_core,
stype, (rank + 1), tag + i + 1, comm);
}
}
// recv last message and send_intra
recv_offset =
((inter_rank - i - 1 +
- inter_comm_size) % inter_comm_size) * NUM_CORE * sextent * scount;
- //recv_offset = ((inter_rank + 1) % inter_comm_size) * NUM_CORE * sextent * scount;
+ inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
+ //recv_offset = ((inter_rank + 1) % inter_comm_size) * num_core * sextent * scount;
//i=inter_comm_size-2;
smpi_mpi_wait(&rrequest_array[i], MPI_STATUS_IGNORE);
if (num_core_in_current_smp > 1) {
- smpi_mpi_send((char *)rbuf + recv_offset, scount * NUM_CORE,
+ smpi_mpi_send((char *)rbuf + recv_offset, scount * num_core,
stype, (rank + 1), tag + i + 1, comm);
}
for (i = 0; i < inter_comm_size - 1; i++) {
recv_offset =
((inter_rank - i - 1 +
- inter_comm_size) % inter_comm_size) * NUM_CORE * sextent * scount;
- smpi_mpi_recv((char *) rbuf + recv_offset, (rcount * NUM_CORE), rtype,
+ inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
+ smpi_mpi_recv((char *) rbuf + recv_offset, (rcount * num_core), rtype,
rank - 1, tag + i + 1, comm, MPI_STATUS_IGNORE);
}
}
for (i = 0; i < inter_comm_size - 1; i++) {
recv_offset =
((inter_rank - i - 1 +
- inter_comm_size) % inter_comm_size) * NUM_CORE * sextent * scount;
- smpi_mpi_recv((char *) rbuf + recv_offset, (rcount * NUM_CORE), rtype,
+ inter_comm_size) % inter_comm_size) * num_core * sextent * scount;
+ smpi_mpi_recv((char *) rbuf + recv_offset, (rcount * num_core), rtype,
rank - 1, tag + i + 1, comm, MPI_STATUS_IGNORE);
- smpi_mpi_send((char *) rbuf + recv_offset, (scount * NUM_CORE), stype,
+ smpi_mpi_send((char *) rbuf + recv_offset, (scount * num_core), stype,
(rank + 1), tag + i + 1, comm);
}
}
comm_size = smpi_comm_size(comm);
- if(comm_size%4)
- THROWF(arg_error,0, "allgather loosely lr algorithm can't be used with non multiple of NUM_CORE=4 number of processes ! ");
+ int num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (num_core == 1) num_core = NUM_CORE;
+
+ if(comm_size%num_core)
+ THROWF(arg_error,0, "allgather loosely lr algorithm can't be used with non multiple of NUM_CORE=%d number of processes ! ",num_core);
rank = smpi_comm_rank(comm);
MPI_Aint rextent, sextent;
MPI_Status status;
- intra_rank = rank % NUM_CORE;
- inter_rank = rank / NUM_CORE;
- inter_comm_size = (comm_size + NUM_CORE - 1) / NUM_CORE;
- intra_comm_size = NUM_CORE;
+ intra_rank = rank % num_core;
+ inter_rank = rank / num_core;
+ inter_comm_size = (comm_size + num_core - 1) / num_core;
+ intra_comm_size = num_core;
int src_seg, dst_seg;
} // intra loop
- // wait for inter communication to finish for these rounds (# of round equals NUM_CORE)
+ // wait for inter communication to finish for these rounds (# of round equals num_core)
if (i != inter_comm_size - 1) {
smpi_mpi_wait(&inter_rrequest, &status);
}
int src, dst, comm_size, rank;
comm_size = smpi_comm_size(comm);
- if(comm_size%NUM_CORE)
- THROWF(arg_error,0, "allgather SMP simple algorithm can't be used with non multiple of NUM_CORE=%d number of processes ! ", NUM_CORE);
+ int num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (num_core == 1) num_core = NUM_CORE;
+
+ if(comm_size%num_core)
+ THROWF(arg_error,0, "allgather SMP simple algorithm can't be used with non multiple of NUM_CORE=%d number of processes ! ", num_core);
rank = smpi_comm_rank(comm);
MPI_Aint rextent, sextent;
MPI_Status status;
int i, send_offset, recv_offset;
int intra_rank, inter_rank;
- int num_core = NUM_CORE;
intra_rank = rank % num_core;
inter_rank = rank / num_core;
int inter_comm_size = (comm_size + num_core - 1) / num_core;
int tag = COLL_TAG_ALLREDUCE;
int mask, src, dst;
MPI_Status status;
- int num_core = NUM_CORE;
+ int num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (num_core == 1) num_core = NUM_CORE;
comm_size = smpi_comm_size(comm);
rank = smpi_comm_rank(comm);
void *tmp_buf;
int tag = COLL_TAG_ALLREDUCE;
int mask, src, dst;
- int num_core = NUM_CORE;
+
+
+ int num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (num_core == 1) num_core = NUM_CORE;
MPI_Status status;
comm_size=smpi_comm_size(comm);
int tag = COLL_TAG_ALLREDUCE;
int mask, src, dst;
MPI_Status status;
- int num_core = NUM_CORE;
+ int num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (num_core == 1) num_core = NUM_CORE;
/*
#ifdef MPICH2_REDUCTION
MPI_User_function * uop = MPIR_Op_table[op % 16 - 1];
int tag = COLL_TAG_ALLREDUCE;
int mask, src, dst;
MPI_Status status;
- int num_core = NUM_CORE;
+ int num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (num_core == 1) num_core = NUM_CORE;
/*
#ifdef MPICH2_REDUCTION
MPI_User_function * uop = MPIR_Op_table[op % 16 - 1];
int tag = COLL_TAG_ALLREDUCE;
int mask, src, dst;
MPI_Status status;
- int num_core = NUM_CORE;
+ int num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (num_core == 1) num_core = NUM_CORE;
comm_size = smpi_comm_size(comm);
int tag = COLL_TAG_ALLREDUCE;
int mask, src, dst;
MPI_Status status;
- int num_core = NUM_CORE;
+ int num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (num_core == 1) num_core = NUM_CORE;
/*
#ifdef MPICH2_REDUCTION
MPI_User_function * uop = MPIR_Op_table[op % 16 - 1];
rank = smpi_comm_rank(comm);
size = smpi_comm_size(comm);
+ int host_num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (host_num_core == 1) host_num_core = NUM_CORE;
- if(size%NUM_CORE)
- THROWF(arg_error,0, "bcast SMP binary can't be used with non multiple of NUM_CORE=%d number of processes ! ",NUM_CORE);
+ if(size%host_num_core)
+ THROWF(arg_error,0, "bcast SMP binary can't be used with non multiple of NUM_CORE=%d number of processes ! ",host_num_core);
int segment = bcast_SMP_binary_segment_byte / extent;
int pipe_length = count / segment;
int remainder = count % segment;
- int to_intra_left = (rank / NUM_CORE) * NUM_CORE + (rank % NUM_CORE) * 2 + 1;
- int to_intra_right = (rank / NUM_CORE) * NUM_CORE + (rank % NUM_CORE) * 2 + 2;
- int to_inter_left = ((rank / NUM_CORE) * 2 + 1) * NUM_CORE;
- int to_inter_right = ((rank / NUM_CORE) * 2 + 2) * NUM_CORE;
- int from_inter = (((rank / NUM_CORE) - 1) / 2) * NUM_CORE;
- int from_intra = (rank / NUM_CORE) * NUM_CORE + ((rank % NUM_CORE) - 1) / 2;
+ int to_intra_left = (rank / host_num_core) * host_num_core + (rank % host_num_core) * 2 + 1;
+ int to_intra_right = (rank / host_num_core) * host_num_core + (rank % host_num_core) * 2 + 2;
+ int to_inter_left = ((rank / host_num_core) * 2 + 1) * host_num_core;
+ int to_inter_right = ((rank / host_num_core) * 2 + 2) * host_num_core;
+ int from_inter = (((rank / host_num_core) - 1) / 2) * host_num_core;
+ int from_intra = (rank / host_num_core) * host_num_core + ((rank % host_num_core) - 1) / 2;
int increment = segment * extent;
- int base = (rank / NUM_CORE) * NUM_CORE;
- int num_core = NUM_CORE;
- if (((rank / NUM_CORE) * NUM_CORE) == ((size / NUM_CORE) * NUM_CORE))
- num_core = size - (rank / NUM_CORE) * NUM_CORE;
+ int base = (rank / host_num_core) * host_num_core;
+ int num_core = host_num_core;
+ if (((rank / host_num_core) * host_num_core) == ((size / host_num_core) * host_num_core))
+ num_core = size - (rank / host_num_core) * host_num_core;
// if root is not zero send to rank zero first
if (root != 0) {
// when a message is smaller than a block size => no pipeline
if (count <= segment) {
// case ROOT-of-each-SMP
- if (rank % NUM_CORE == 0) {
+ if (rank % host_num_core == 0) {
// case ROOT
if (rank == 0) {
//printf("node %d left %d right %d\n",rank,to_inter_left,to_inter_right);
(MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
// case ROOT-of-each-SMP
- if (rank % NUM_CORE == 0) {
+ if (rank % host_num_core == 0) {
// case ROOT
if (rank == 0) {
for (i = 0; i < pipe_length; i++) {
size = smpi_comm_size(comm);
rank = smpi_comm_rank(comm);
- if(size%NUM_CORE)
- THROWF(arg_error,0, "bcast SMP binomial can't be used with non multiple of NUM_CORE=%d number of processes ! ",NUM_CORE);
+ int num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (num_core == 1) num_core = NUM_CORE;
+
+ if(size%num_core)
+ THROWF(arg_error,0, "bcast SMP binomial can't be used with non multiple of NUM_CORE=%d number of processes ! ",num_core);
int to_intra, to_inter;
int from_intra, from_inter;
- int inter_rank = rank / NUM_CORE;
- int inter_size = (size - 1) / NUM_CORE + 1;
- int intra_rank = rank % NUM_CORE;
- int intra_size = NUM_CORE;
- if (((rank / NUM_CORE) * NUM_CORE) == ((size / NUM_CORE) * NUM_CORE))
- intra_size = size - (rank / NUM_CORE) * NUM_CORE;
+ int inter_rank = rank / num_core;
+ int inter_size = (size - 1) / num_core + 1;
+ int intra_rank = rank % num_core;
+ int intra_size = num_core;
+ if (((rank / num_core) * num_core) == ((size / num_core) * num_core))
+ intra_size = size - (rank / num_core) * num_core;
// if root is not zero send to rank zero first
if (root != 0) {
mask = 1;
while (mask < inter_size) {
if (inter_rank & mask) {
- from_inter = (inter_rank - mask) * NUM_CORE;
+ from_inter = (inter_rank - mask) * num_core;
//printf("Node %d recv from node %d when mask is %d\n", rank, from_inter, mask);
smpi_mpi_recv(buf, count, datatype, from_inter, tag, comm, &status);
break;
while (mask > 0) {
if (inter_rank < inter_size) {
- to_inter = (inter_rank + mask) * NUM_CORE;
+ to_inter = (inter_rank + mask) * num_core;
if (to_inter < size) {
//printf("Node %d send to node %d when mask is %d\n", rank, to_inter, mask);
smpi_mpi_send(buf, count, datatype, to_inter, tag, comm);
}
// SECOND STEP every root-of-each-SMP send to all children with binomial tree
// base is a rank of root-of-each-SMP
- int base = (rank / NUM_CORE) * NUM_CORE;
+ int base = (rank / num_core) * num_core;
mask = 1;
while (mask < intra_size) {
if (intra_rank & mask) {
rank = smpi_comm_rank(comm);
size = smpi_comm_size(comm);
+ int num_core = simcall_host_get_core(SIMIX_host_self());
+ // do we use the default one or the number of cores in the platform ?
+ // if the number of cores is one, the platform may be simulated with 1 node = 1 core
+ if (num_core == 1) num_core = NUM_CORE;
- if(size%NUM_CORE)
- THROWF(arg_error,0, "bcast SMP linear can't be used with non multiple of NUM_CORE=%d number of processes ! ",NUM_CORE);
+ if(size%num_core)
+   THROWF(arg_error,0, "bcast SMP linear can't be used with non multiple of NUM_CORE=%d number of processes ! ",num_core);
int segment = bcast_SMP_linear_segment_byte / extent;
int pipe_length = count / segment;
/* leader of each SMP do inter-communication
and act as a root for intra-communication */
- int to_inter = (rank + NUM_CORE) % size;
+ int to_inter = (rank + num_core) % size;
int to_intra = (rank + 1) % size;
- int from_inter = (rank - NUM_CORE + size) % size;
+ int from_inter = (rank - num_core + size) % size;
int from_intra = (rank + size - 1) % size;
// call native when MPI communication size is too small
- if (size <= NUM_CORE) {
+ if (size <= num_core) {
XBT_WARN("MPI_bcast_SMP_linear use default MPI_bcast.");
smpi_mpi_bcast(buf, count, datatype, root, comm);
return MPI_SUCCESS;
smpi_mpi_send(buf, count, datatype, to_intra, tag, comm);
}
// case last ROOT of each SMP
- else if (rank == (((size - 1) / NUM_CORE) * NUM_CORE)) {
+ else if (rank == (((size - 1) / num_core) * num_core)) {
request = smpi_mpi_irecv(buf, count, datatype, from_inter, tag, comm);
smpi_mpi_wait(&request, &status);
smpi_mpi_send(buf, count, datatype, to_intra, tag, comm);
}
// case intermediate ROOT of each SMP
- else if (rank % NUM_CORE == 0) {
+ else if (rank % num_core == 0) {
request = smpi_mpi_irecv(buf, count, datatype, from_inter, tag, comm);
smpi_mpi_wait(&request, &status);
smpi_mpi_send(buf, count, datatype, to_inter, tag, comm);
smpi_mpi_send(buf, count, datatype, to_intra, tag, comm);
}
// case last non-ROOT of each SMP
- else if (((rank + 1) % NUM_CORE == 0) || (rank == (size - 1))) {
+ else if (((rank + 1) % num_core == 0) || (rank == (size - 1))) {
request = smpi_mpi_irecv(buf, count, datatype, from_intra, tag, comm);
smpi_mpi_wait(&request, &status);
}
(MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
// case ROOT of each SMP
- if (rank % NUM_CORE == 0) {
+ if (rank % num_core == 0) {
// case real root
if (rank == 0) {
for (i = 0; i < pipe_length; i++) {
}
}
// case last ROOT of each SMP
- else if (rank == (((size - 1) / NUM_CORE) * NUM_CORE)) {
+ else if (rank == (((size - 1) / num_core) * num_core)) {
for (i = 0; i < pipe_length; i++) {
request_array[i] = smpi_mpi_irecv((char *) buf + (i * increment), segment, datatype,
from_inter, (tag + i), comm);
}
}
} else { // case last non-ROOT of each SMP
- if (((rank + 1) % NUM_CORE == 0) || (rank == (size - 1))) {
+ if (((rank + 1) % num_core == 0) || (rank == (size - 1))) {
for (i = 0; i < pipe_length; i++) {
request_array[i] = smpi_mpi_irecv((char *) buf + (i * increment), segment, datatype,
from_intra, (tag + i), comm);