/* PMPI User level calls */
-int PMPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm)
-{
- return PMPI_Ibcast(buf, count, datatype, root, comm, MPI_REQUEST_IGNORED);
-}
-
int PMPI_Barrier(MPI_Comm comm)
{
return PMPI_Ibarrier(comm, MPI_REQUEST_IGNORED);
return retval;
}
+int PMPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm)
+{
+ return PMPI_Ibcast(buf, count, datatype, root, comm, MPI_REQUEST_IGNORED);
+}
+
int PMPI_Ibcast(void *buf, int count, MPI_Datatype datatype,
int root, MPI_Comm comm, MPI_Request* request)
{
- int retval = 0;
- smpi_bench_end();
if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
+ return MPI_ERR_COMM;
+ } else if (buf == nullptr) {
+ return MPI_ERR_BUFFER;
} else if (datatype == MPI_DATATYPE_NULL || not datatype->is_valid()) {
- retval = MPI_ERR_TYPE;
+ return MPI_ERR_TYPE;
} else if (count < 0){
- retval = MPI_ERR_COUNT;
+ return MPI_ERR_COUNT;
} else if (root < 0 || root >= comm->size()){
- retval = MPI_ERR_ROOT;
+ return MPI_ERR_ROOT;
} else if (request == nullptr){
- retval = MPI_ERR_ARG;
+ return MPI_ERR_ARG;
} else {
+ smpi_bench_end();
int rank = simgrid::s4u::this_actor::get_pid();
TRACE_smpi_comm_in(rank, request==MPI_REQUEST_IGNORED?"PMPI_Bcast":"PMPI_Ibcast",
new simgrid::instr::CollTIData(request==MPI_REQUEST_IGNORED?"bcast":"ibcast", root, -1.0,
if(request!=MPI_REQUEST_IGNORED)
*request = MPI_REQUEST_NULL;
}
- retval = MPI_SUCCESS;
-
TRACE_smpi_comm_out(rank);
+ smpi_bench_begin();
+ return MPI_SUCCESS;
}
- smpi_bench_begin();
- return retval;
}
int PMPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,void *recvbuf, int recvcount, MPI_Datatype recvtype,
int PMPI_Igather(void *sendbuf, int sendcount, MPI_Datatype sendtype,void *recvbuf, int recvcount, MPI_Datatype recvtype,
int root, MPI_Comm comm, MPI_Request *request)
{
- int retval = 0;
-
- smpi_bench_end();
-
if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
+ return MPI_ERR_COMM;
+ } else if ((sendbuf == nullptr) || ((comm->rank() == root) && recvbuf == nullptr)) {
+ return MPI_ERR_BUFFER;
} else if ((( sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) ||
((comm->rank() == root) && (recvtype == MPI_DATATYPE_NULL))){
- retval = MPI_ERR_TYPE;
+ return MPI_ERR_TYPE;
} else if ((( sendbuf != MPI_IN_PLACE) && (sendcount <0)) || ((comm->rank() == root) && (recvcount <0))){
- retval = MPI_ERR_COUNT;
+ return MPI_ERR_COUNT;
} else if (root < 0 || root >= comm->size()){
- retval = MPI_ERR_ROOT;
+ return MPI_ERR_ROOT;
} else if (request == nullptr){
- retval = MPI_ERR_ARG;
+ return MPI_ERR_ARG;
} else {
-
+ smpi_bench_end();
char* sendtmpbuf = static_cast<char*>(sendbuf);
int sendtmpcount = sendcount;
MPI_Datatype sendtmptype = sendtype;
else
simgrid::smpi::Colls::igather(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcount, recvtype, root, comm, request);
- retval = MPI_SUCCESS;
TRACE_smpi_comm_out(rank);
+ smpi_bench_begin();
+ return MPI_SUCCESS;
}
-
- smpi_bench_begin();
- return retval;
}
int PMPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int *recvcounts, int *displs,
int PMPI_Igatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int *recvcounts, int *displs,
MPI_Datatype recvtype, int root, MPI_Comm comm, MPI_Request *request)
{
- int retval = 0;
-
- smpi_bench_end();
-
if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
+ return MPI_ERR_COMM;
+ } else if ((sendbuf == nullptr) || ((comm->rank() == root) && recvbuf == nullptr)) {
+ return MPI_ERR_BUFFER;
} else if ((( sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) ||
((comm->rank() == root) && (recvtype == MPI_DATATYPE_NULL))){
- retval = MPI_ERR_TYPE;
+ return MPI_ERR_TYPE;
} else if (( sendbuf != MPI_IN_PLACE) && (sendcount <0)){
- retval = MPI_ERR_COUNT;
+ return MPI_ERR_COUNT;
} else if ((comm->rank() == root) && (recvcounts == nullptr || displs == nullptr)) {
- retval = MPI_ERR_ARG;
+ return MPI_ERR_ARG;
} else if (root < 0 || root >= comm->size()){
- retval = MPI_ERR_ROOT;
+ return MPI_ERR_ROOT;
} else if (request == nullptr){
- retval = MPI_ERR_ARG;
+ return MPI_ERR_ARG;
} else {
+ for (int i = 0; i < comm->size(); i++){
+ if((comm->rank() == root) && (recvcounts[i]<0))
+ return MPI_ERR_COUNT;
+ }
+
+ smpi_bench_end();
char* sendtmpbuf = static_cast<char*>(sendbuf);
int sendtmpcount = sendcount;
MPI_Datatype sendtmptype = sendtype;
dt_size_recv, trace_recvcounts, simgrid::smpi::Datatype::encode(sendtmptype),
simgrid::smpi::Datatype::encode(recvtype)));
if(request == MPI_REQUEST_IGNORED)
- retval = simgrid::smpi::Colls::gatherv(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcounts, displs, recvtype, root, comm);
+ simgrid::smpi::Colls::gatherv(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcounts, displs, recvtype, root, comm);
else
- retval = simgrid::smpi::Colls::igatherv(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcounts, displs, recvtype, root, comm, request);
+ simgrid::smpi::Colls::igatherv(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcounts, displs, recvtype, root, comm, request);
TRACE_smpi_comm_out(rank);
+ smpi_bench_begin();
+ return MPI_SUCCESS;
}
-
- smpi_bench_begin();
- return retval;
}
int PMPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
if (comm == MPI_COMM_NULL) {
retval = MPI_ERR_COMM;
+ } else if ((sendbuf == nullptr) || (recvbuf == nullptr)){
+ retval = MPI_ERR_BUFFER;
} else if ((( sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) ||
(recvtype == MPI_DATATYPE_NULL)){
retval = MPI_ERR_TYPE;
int PMPI_Iallgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int *recvcounts, int *displs, MPI_Datatype recvtype, MPI_Comm comm, MPI_Request* request)
{
- int retval = 0;
-
- smpi_bench_end();
-
if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
+ return MPI_ERR_COMM;
+ } else if ((sendbuf == nullptr) || (recvbuf == nullptr)){
+ return MPI_ERR_BUFFER;
} else if (((sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) || (recvtype == MPI_DATATYPE_NULL)) {
- retval = MPI_ERR_TYPE;
+ return MPI_ERR_TYPE;
} else if (( sendbuf != MPI_IN_PLACE) && (sendcount <0)){
- retval = MPI_ERR_COUNT;
+ return MPI_ERR_COUNT;
} else if (recvcounts == nullptr || displs == nullptr) {
- retval = MPI_ERR_ARG;
+ return MPI_ERR_ARG;
} else if (request == nullptr){
- retval = MPI_ERR_ARG;
+ return MPI_ERR_ARG;
} else {
+ for (int i = 0; i < comm->size(); i++){ // reject negative recvcounts before starting the collective
+ if (recvcounts[i] < 0)
+ return MPI_ERR_COUNT;
+ }
+ smpi_bench_end();
if(sendbuf == MPI_IN_PLACE) {
sendbuf=static_cast<char*>(recvbuf)+recvtype->get_extent()*displs[comm->rank()];
sendcount=recvcounts[comm->rank()];
int dt_size_recv = recvtype->is_replayable() ? 1 : recvtype->size();
std::vector<int>* trace_recvcounts = new std::vector<int>;
- for (int i = 0; i < comm->size(); i++) // copy data to avoid bad free
+ for (int i = 0; i < comm->size(); i++){ // copy data to avoid bad free
trace_recvcounts->push_back(recvcounts[i] * dt_size_recv);
+ }
TRACE_smpi_comm_in(rank, request==MPI_REQUEST_IGNORED?"PMPI_Allgatherv":"PMPI_Iallgatherv",
new simgrid::instr::VarCollTIData(
simgrid::smpi::Colls::allgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm);
else
simgrid::smpi::Colls::iallgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm, request);
- retval = MPI_SUCCESS;
TRACE_smpi_comm_out(rank);
+ smpi_bench_begin();
+ return MPI_SUCCESS;
}
-
- smpi_bench_begin();
- return retval;
}
int PMPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
int PMPI_Iscatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm, MPI_Request* request)
{
- int retval = 0;
-
- smpi_bench_end();
-
if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
+ return MPI_ERR_COMM;
} else if (((comm->rank() == root) && (sendtype == MPI_DATATYPE_NULL || not sendtype->is_valid())) ||
((recvbuf != MPI_IN_PLACE) && (recvtype == MPI_DATATYPE_NULL || not recvtype->is_valid()))) {
- retval = MPI_ERR_TYPE;
+ return MPI_ERR_TYPE;
} else if (((comm->rank() == root) && (sendcount < 0)) ||
((recvbuf != MPI_IN_PLACE) && (recvcount < 0))) {
- retval = MPI_ERR_COUNT;
+ return MPI_ERR_COUNT;
} else if ((sendbuf == recvbuf) ||
- ((comm->rank()==root) && sendcount>0 && (sendbuf == nullptr))){
- retval = MPI_ERR_BUFFER;
+ ((comm->rank()==root) && sendcount>0 && (sendbuf == nullptr)) || (recvbuf == nullptr)){
+ return MPI_ERR_BUFFER;
} else if (root < 0 || root >= comm->size()){
- retval = MPI_ERR_ROOT;
+ return MPI_ERR_ROOT;
} else if (request == nullptr){
- retval = MPI_ERR_ARG;
+ return MPI_ERR_ARG;
} else {
-
+ smpi_bench_end();
if (recvbuf == MPI_IN_PLACE) {
recvtype = sendtype;
recvcount = sendcount;
simgrid::smpi::Colls::scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm);
else
simgrid::smpi::Colls::iscatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm, request);
- retval = MPI_SUCCESS;
TRACE_smpi_comm_out(rank);
+ smpi_bench_begin();
+ return MPI_SUCCESS;
}
-
- smpi_bench_begin();
- return retval;
}
int PMPI_Scatterv(void *sendbuf, int *sendcounts, int *displs,
int PMPI_Iscatterv(void *sendbuf, int *sendcounts, int *displs,
MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm, MPI_Request *request)
{
- int retval = 0;
-
- smpi_bench_end();
-
if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
+ return MPI_ERR_COMM;
} else if (sendcounts == nullptr || displs == nullptr) {
- retval = MPI_ERR_ARG;
+ return MPI_ERR_ARG;
} else if (((comm->rank() == root) && (sendtype == MPI_DATATYPE_NULL)) ||
((recvbuf != MPI_IN_PLACE) && (recvtype == MPI_DATATYPE_NULL))) {
- retval = MPI_ERR_TYPE;
+ return MPI_ERR_TYPE;
} else if (request == nullptr){
- retval = MPI_ERR_ARG;
+ return MPI_ERR_ARG;
} else if (recvbuf != MPI_IN_PLACE && recvcount < 0){
- retval = MPI_ERR_COUNT;
+ return MPI_ERR_COUNT;
} else if (root < 0 || root >= comm->size()){
- retval = MPI_ERR_ROOT;
+ return MPI_ERR_ROOT;
} else {
- if (recvbuf == MPI_IN_PLACE) {
+ if (comm->rank() == root){
+ if(recvbuf == MPI_IN_PLACE) {
recvtype = sendtype;
- if(sendcounts[comm->rank()]<0)
- return MPI_ERR_COUNT;
recvcount = sendcounts[comm->rank()];
+ }
+ for (int i = 0; i < comm->size(); i++){
+ if(sendcounts[i]<0)
+ return MPI_ERR_COUNT;
+ }
}
+ smpi_bench_end();
+
int rank = simgrid::s4u::this_actor::get_pid();
int dt_size_send = sendtype->is_replayable() ? 1 : sendtype->size();
if (comm->rank() == root) {
for (int i = 0; i < comm->size(); i++){ // copy data to avoid bad free
trace_sendcounts->push_back(sendcounts[i] * dt_size_send);
- if(sendcounts[i]<0)
- return MPI_ERR_COUNT;
- }
+ }
}
TRACE_smpi_comm_in(rank, request==MPI_REQUEST_IGNORED?"PMPI_Scatterv":"PMPI_Iscatterv",
recvtype->is_replayable() ? recvcount : recvcount * recvtype->size(), nullptr,
simgrid::smpi::Datatype::encode(sendtype), simgrid::smpi::Datatype::encode(recvtype)));
if(request == MPI_REQUEST_IGNORED)
- retval = simgrid::smpi::Colls::scatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm);
+ simgrid::smpi::Colls::scatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm);
else
- retval = simgrid::smpi::Colls::iscatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm, request);
-
+ simgrid::smpi::Colls::iscatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm, request);
TRACE_smpi_comm_out(rank);
+ smpi_bench_begin();
+ return MPI_SUCCESS;
}
- smpi_bench_begin();
- return retval;
}
int PMPI_Reduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
int PMPI_Ireduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm, MPI_Request* request)
{
- int retval = 0;
-
- smpi_bench_end();
-
if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
+ return MPI_ERR_COMM;
+ } else if ((sendbuf == nullptr) || ((comm->rank() == root) && recvbuf == nullptr)) {
+ return MPI_ERR_BUFFER;
} else if (datatype == MPI_DATATYPE_NULL || not datatype->is_valid()){
- retval = MPI_ERR_TYPE;
+ return MPI_ERR_TYPE;
} else if (op == MPI_OP_NULL) {
- retval = MPI_ERR_OP;
+ return MPI_ERR_OP;
} else if (request == nullptr){
- retval = MPI_ERR_ARG;
+ return MPI_ERR_ARG;
} else if (root < 0 || root >= comm->size()){
- retval = MPI_ERR_ROOT;
+ return MPI_ERR_ROOT;
} else if (count < 0){
- retval = MPI_ERR_COUNT;
+ return MPI_ERR_COUNT;
} else {
+ smpi_bench_end();
int rank = simgrid::s4u::this_actor::get_pid();
TRACE_smpi_comm_in(rank, request==MPI_REQUEST_IGNORED ? "PMPI_Reduce":"PMPI_Ireduce",
else
simgrid::smpi::Colls::ireduce(sendbuf, recvbuf, count, datatype, op, root, comm, request);
-
- retval = MPI_SUCCESS;
TRACE_smpi_comm_out(rank);
+ smpi_bench_begin();
+ return MPI_SUCCESS;
}
-
- smpi_bench_begin();
- return retval;
}
int PMPI_Reduce_local(void *inbuf, void *inoutbuf, int count, MPI_Datatype datatype, MPI_Op op){
int PMPI_Iallreduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request)
{
- int retval = 0;
-
- smpi_bench_end();
-
if (comm == MPI_COMM_NULL) {
- retval = MPI_ERR_COMM;
+ return MPI_ERR_COMM;
+ } else if ((sendbuf == nullptr) || (recvbuf == nullptr)) {
+ return MPI_ERR_BUFFER;
} else if (datatype == MPI_DATATYPE_NULL || not datatype->is_valid()) {
- retval = MPI_ERR_TYPE;
+ return MPI_ERR_TYPE;
} else if (count < 0){
- retval = MPI_ERR_COUNT;
+ return MPI_ERR_COUNT;
} else if (op == MPI_OP_NULL) {
- retval = MPI_ERR_OP;
+ return MPI_ERR_OP;
} else if (request == nullptr){
- retval = MPI_ERR_ARG;
+ return MPI_ERR_ARG;
} else {
+ smpi_bench_end();
char* sendtmpbuf = static_cast<char*>(sendbuf);
if( sendbuf == MPI_IN_PLACE ) {
sendtmpbuf = static_cast<char*>(xbt_malloc(count*datatype->get_extent()));
if( sendbuf == MPI_IN_PLACE )
xbt_free(sendtmpbuf);
- retval = MPI_SUCCESS;
TRACE_smpi_comm_out(rank);
+ smpi_bench_begin();
+ return MPI_SUCCESS;
}
-
- smpi_bench_begin();
- return retval;
}
int PMPI_Scan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
int PMPI_Ireduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm, MPI_Request *request)
{
int retval = 0;
- smpi_bench_end();
if (comm == MPI_COMM_NULL) {
retval = MPI_ERR_COMM;
+ } else if ((sendbuf == nullptr) || (recvbuf == nullptr)) {
+ retval = MPI_ERR_BUFFER;
} else if (datatype == MPI_DATATYPE_NULL || not datatype->is_valid()){
retval = MPI_ERR_TYPE;
} else if (op == MPI_OP_NULL) {
retval = MPI_ERR_OP;
} else if (recvcounts == nullptr) {
retval = MPI_ERR_ARG;
- } else if (request == nullptr){
+ } else if (request == nullptr){
retval = MPI_ERR_ARG;
} else {
+ for (int i = 0; i < comm->size(); i++) { // reject negative recvcounts before starting the collective
+ if(recvcounts[i]<0)
+ return MPI_ERR_COUNT;
+ }
+ smpi_bench_end();
+
int rank = simgrid::s4u::this_actor::get_pid();
std::vector<int>* trace_recvcounts = new std::vector<int>;
int dt_send_size = datatype->is_replayable() ? 1 : datatype->size();
int totalcount = 0;
for (int i = 0; i < comm->size(); i++) { // copy data to avoid bad free
- if(recvcounts[i]<0)
- return MPI_ERR_COUNT;
trace_recvcounts->push_back(recvcounts[i] * dt_send_size);
totalcount += recvcounts[i];
}
if (sendbuf == MPI_IN_PLACE)
xbt_free(sendtmpbuf);
+ smpi_bench_begin();
}
- smpi_bench_begin();
return retval;
}
if (comm == MPI_COMM_NULL) {
retval = MPI_ERR_COMM;
+ } else if (sendbuf == nullptr || recvbuf == nullptr) {
+ retval = MPI_ERR_BUFFER;
} else if ((sendbuf != MPI_IN_PLACE && sendtype == MPI_DATATYPE_NULL) || recvtype == MPI_DATATYPE_NULL) {
retval = MPI_ERR_TYPE;
} else if ((sendbuf != MPI_IN_PLACE && sendcount < 0) || recvcount < 0){
int* recvcounts, int* recvdisps, MPI_Datatype recvtype, MPI_Comm comm, MPI_Request *request)
{
int retval = 0;
-
- smpi_bench_end();
-
if (comm == MPI_COMM_NULL) {
retval = MPI_ERR_COMM;
+ } else if (sendbuf == nullptr || recvbuf == nullptr) {
+ retval = MPI_ERR_BUFFER;
} else if ((sendbuf != MPI_IN_PLACE && sendtype == MPI_DATATYPE_NULL) || recvtype == MPI_DATATYPE_NULL) {
retval = MPI_ERR_TYPE;
} else if ((sendbuf != MPI_IN_PLACE && (sendcounts == nullptr || senddisps == nullptr)) || recvcounts == nullptr ||
} else {
int rank = simgrid::s4u::this_actor::get_pid();
int size = comm->size();
+ for (int i = 0; i < size; i++) {
+ if (recvcounts[i] <0 || (sendbuf != MPI_IN_PLACE && sendcounts[i]<0))
+ return MPI_ERR_COUNT;
+ }
+ smpi_bench_end();
int send_size = 0;
int recv_size = 0;
std::vector<int>* trace_sendcounts = new std::vector<int>;
MPI_Datatype sendtmptype = sendtype;
int maxsize = 0;
for (int i = 0; i < size; i++) { // copy data to avoid bad free
- if (recvcounts[i] <0 || (sendbuf != MPI_IN_PLACE && sendcounts[i]<0))
- return MPI_ERR_COUNT;
recv_size += recvcounts[i] * dt_size_recv;
trace_recvcounts->push_back(recvcounts[i] * dt_size_recv);
if (((recvdisps[i] + recvcounts[i]) * dt_size_recv) > maxsize)
xbt_free(sendtmpcounts);
xbt_free(sendtmpdisps);
}
+ smpi_bench_begin();
}
-
- smpi_bench_begin();
return retval;
}
int* recvcounts, int* recvdisps, MPI_Datatype* recvtypes, MPI_Comm comm, MPI_Request *request)
{
int retval = 0;
-
- smpi_bench_end();
-
if (comm == MPI_COMM_NULL) {
retval = MPI_ERR_COMM;
+ } else if (sendbuf == nullptr || recvbuf == nullptr) {
+ retval = MPI_ERR_BUFFER;
} else if ((sendbuf != MPI_IN_PLACE && sendtypes == nullptr) || recvtypes == nullptr) {
retval = MPI_ERR_TYPE;
} else if ((sendbuf != MPI_IN_PLACE && (sendcounts == nullptr || senddisps == nullptr)) || recvcounts == nullptr ||
} else if (request == nullptr){
retval = MPI_ERR_ARG;
} else {
+ smpi_bench_end();
int rank = simgrid::s4u::this_actor::get_pid();
int size = comm->size();
+ for (int i = 0; i < size; i++) { // reject negative counts before starting the collective
+ if (recvcounts[i] <0 || (sendbuf != MPI_IN_PLACE && sendcounts[i]<0))
+ return MPI_ERR_COUNT;
+ }
int send_size = 0;
int recv_size = 0;
std::vector<int>* trace_sendcounts = new std::vector<int>;
xbt_free(sendtmpdisps);
xbt_free(sendtmptypes);
}
+ smpi_bench_begin();
}
-
- smpi_bench_begin();
return retval;
}
for (int i = 0; i < count * size; ++i)
rb[i] = 0;
+ status = MPI_Allgather(NULL, count, MPI_INT, rb, count, MPI_INT, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_BUFFER)
+ printf("MPI_Allgather did not return MPI_ERR_BUFFER for empty sendbuf\n");
+ status = MPI_Allgather(sb, -1, MPI_INT, rb, count, MPI_INT, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_COUNT)
+ printf("MPI_Allgather did not return MPI_ERR_COUNT for -1 sendcount\n");
+ status = MPI_Allgather(sb, count, MPI_DATATYPE_NULL, rb, count, MPI_INT, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_TYPE)
+ printf("MPI_Allgather did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL sendtype\n");
+ status = MPI_Allgather(sb, count, MPI_INT, NULL, count, MPI_INT, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_BUFFER)
+ printf("MPI_Allgather did not return MPI_ERR_BUFFER for empty recvbuf\n");
+ status = MPI_Allgather(sb, count, MPI_INT, rb, -1, MPI_INT, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_COUNT)
+ printf("MPI_Allgather did not return MPI_ERR_COUNT for -1 recvcount\n");
+ status = MPI_Allgather(sb, count, MPI_INT, rb, count, MPI_DATATYPE_NULL, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_TYPE)
+ printf("MPI_Allgather did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL recvtype\n");
+ status = MPI_Allgather(sb, count, MPI_INT, rb, count, MPI_INT, MPI_COMM_NULL);
+ if(status!=MPI_ERR_COMM)
+ printf("MPI_Allgather did not return MPI_ERR_COMM for MPI_COMM_NULL comm\n");
+
printf("[%d] sndbuf=[", rank);
for (int i = 0; i < count; i++)
printf("%d ", sb[i]);
! output sort
p Test allgather
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allgather --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allgather --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
int* sb = (int *) xbt_malloc(recv_counts[rank] * sizeof(int));
int* rb = (int *) xbt_malloc(recv_sb_size * sizeof(int));
+ status = MPI_Allgatherv(NULL, recv_counts[rank], MPI_INT, rb, recv_counts, recv_disps, MPI_INT, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_BUFFER)
+ printf("MPI_Allgatherv did not return MPI_ERR_BUFFER for empty sendbuf\n");
+ status = MPI_Allgatherv(sb, -1, MPI_INT, rb, recv_counts, recv_disps, MPI_INT, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_COUNT)
+ printf("MPI_Allgatherv did not return MPI_ERR_COUNT for -1 sendcount\n");
+ status = MPI_Allgatherv(sb, recv_counts[rank], MPI_DATATYPE_NULL, rb, recv_counts, recv_disps, MPI_INT, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_TYPE)
+ printf("MPI_Allgatherv did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL sendtype\n");
+ status = MPI_Allgatherv(sb, recv_counts[rank], MPI_INT, NULL, recv_counts, recv_disps, MPI_INT, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_BUFFER)
+ printf("MPI_Allgatherv did not return MPI_ERR_BUFFER for empty recvbuf\n");
+ status = MPI_Allgatherv(sb, recv_counts[rank], MPI_INT, rb, NULL, recv_disps, MPI_INT, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_ARG)
+ printf("MPI_Allgatherv did not return MPI_ERR_ARG for NULL recvcounts\n");
+ status = MPI_Allgatherv(sb, recv_counts[rank], MPI_INT, rb, recv_counts, NULL, MPI_INT, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_ARG)
+ printf("MPI_Allgatherv did not return MPI_ERR_ARG for NULL recvdisps\n");
+ status = MPI_Allgatherv(sb, recv_counts[rank], MPI_INT, rb, recv_counts, recv_disps, MPI_DATATYPE_NULL, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_TYPE)
+ printf("MPI_Allgatherv did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL recvtype\n");
+ status = MPI_Allgatherv(sb, recv_counts[rank], MPI_INT, rb, recv_counts, recv_disps, MPI_INT, MPI_COMM_NULL);
+ if(status!=MPI_ERR_COMM)
+ printf("MPI_Allgatherv did not return MPI_ERR_COMM for MPI_COMM_NULL comm\n");
+
printf("[%d] sndbuf=[", rank);
for (i = 0; i < recv_counts[rank]; i++){
sb[i] = recv_disps[rank] + i;
! output sort
p Test allgatherv
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allgatherv --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allgatherv --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
p Test allreduce
! output sort
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --cfg=smpi/allreduce:automatic --cfg=smpi/async-small-thresh:65536 --cfg=smpi/send-is-detached-thresh:128000 --cfg=smpi/simulate-computation:no "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --cfg=smpi/allreduce:automatic --cfg=smpi/async-small-thresh:65536 --cfg=smpi/send-is-detached-thresh:128000 --cfg=smpi/simulate-computation:no "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n" --log=smpi_mpi.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
! timeout 20
p Test allreduce
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce 300000 --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile -platform ../../../examples/platforms/small_platform.xml -np 4 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce 300000 --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Jupiter
> [rank 2] -> Fafard
rb[i] = 0;
}
+ status = MPI_Allreduce(NULL, rb, size, MPI_UNSIGNED_LONG_LONG, MPI_SUM, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_BUFFER)
+ printf("MPI_Allreduce did not return MPI_ERR_BUFFER for empty sendbuf\n");
+ status = MPI_Allreduce(sb, NULL, size, MPI_UNSIGNED_LONG_LONG, MPI_SUM, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_BUFFER)
+ printf("MPI_Allreduce did not return MPI_ERR_BUFFER for empty recvbuf\n");
+ status = MPI_Allreduce(sb, rb, -1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_COUNT)
+ printf("MPI_Allreduce did not return MPI_ERR_COUNT for -1 count\n");
+ status = MPI_Allreduce(sb, rb, size, MPI_DATATYPE_NULL, MPI_SUM, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_TYPE)
+ printf("MPI_Allreduce did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL type\n");
+ status = MPI_Allreduce(sb, rb, size, MPI_UNSIGNED_LONG_LONG, MPI_OP_NULL, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_OP)
+ printf("MPI_Allreduce did not return MPI_ERR_OP for MPI_OP_NULL op\n");
+ status = MPI_Allreduce(sb, rb, size, MPI_UNSIGNED_LONG_LONG, MPI_SUM, MPI_COMM_NULL);
+ if(status!=MPI_ERR_COMM)
+ printf("MPI_Allreduce did not return MPI_ERR_COMM for MPI_COMM_NULL comm\n");
+
printf("[%d] sndbuf=[", rank);
for (i = 0; i < size *mult; i++)
printf("%d ", sb[i]);
! output sort
p Test allreduce
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-allreduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
! output sort
p Test classic - backbone
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ${bindir}/../hostfile_cluster -platform ${platfdir:=.}/cluster_backbone.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ${bindir}/../hostfile_cluster -platform ${platfdir:=.}/cluster_backbone.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error
> [rank 0] -> node-0.simgrid.org
> [rank 1] -> node-1.simgrid.org
> [rank 2] -> node-2.simgrid.org
! output sort
p Test separate clusters
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -platform ../../../examples/platforms/cluster_multi.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -platform ../../../examples/platforms/cluster_multi.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error
> [rank 0] -> node-0.1core.org
> [rank 1] -> node-1.1core.org
> [rank 2] -> node-2.1core.org
! output sort
p Test torus
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_torus.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_torus.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error
> [rank 0] -> node-0.simgrid.org
> [rank 1] -> node-1.simgrid.org
> [rank 2] -> node-2.simgrid.org
! output sort
p Test fat tree
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_fat_tree.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_fat_tree.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error
> [rank 0] -> node-0.simgrid.org
> [rank 1] -> node-1.simgrid.org
> [rank 2] -> node-2.simgrid.org
! output sort
p Test fat tree IB
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_fat_tree.xml -np 12 --cfg=network/model:IB --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_fat_tree.xml -np 12 --cfg=network/model:IB --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error
> [rank 0] -> node-0.simgrid.org
> [rank 1] -> node-1.simgrid.org
> [rank 2] -> node-2.simgrid.org
! output sort
p Test Dragonfly
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_dragonfly.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_cluster -platform ../../../examples/platforms/cluster_dragonfly.xml -np 12 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error
> [rank 0] -> node-0.simgrid.org
> [rank 1] -> node-1.simgrid.org
> [rank 2] -> node-2.simgrid.org
for (i = 0; i < size; i++)
printf("%d ", sb[i]);
printf("]\n");
+ int count=1;
- status = MPI_Alltoall(sb, 1, MPI_INT, rb, 1, MPI_INT, MPI_COMM_WORLD);
+ status = MPI_Alltoall(NULL, count, MPI_INT, rb, count, MPI_INT, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_BUFFER)
+ printf("MPI_Alltoall did not return MPI_ERR_BUFFER for empty sendbuf\n");
+ status = MPI_Alltoall(sb, -1, MPI_INT, rb, count, MPI_INT, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_COUNT)
+ printf("MPI_Alltoall did not return MPI_ERR_COUNT for -1 sendcount\n");
+ status = MPI_Alltoall(sb, count, MPI_DATATYPE_NULL, rb, count, MPI_INT, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_TYPE)
+ printf("MPI_Alltoall did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL sendtype\n");
+ status = MPI_Alltoall(sb, count, MPI_INT, NULL, count, MPI_INT, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_BUFFER)
+ printf("MPI_Alltoall did not return MPI_ERR_BUFFER for empty recvbuf\n");
+ status = MPI_Alltoall(sb, count, MPI_INT, rb, -1, MPI_INT, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_COUNT)
+ printf("MPI_Alltoall did not return MPI_ERR_COUNT for -1 recvcount\n");
+ status = MPI_Alltoall(sb, count, MPI_INT, rb, count, MPI_DATATYPE_NULL, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_TYPE)
+ printf("MPI_Alltoall did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL recvtype\n");
+ status = MPI_Alltoall(sb, count, MPI_INT, rb, count, MPI_INT, MPI_COMM_NULL);
+ if(status!=MPI_ERR_COMM)
+ printf("MPI_Alltoall did not return MPI_ERR_COMM for MPI_COMM_NULL comm\n");
+
+ status = MPI_Alltoall(sb, count, MPI_INT, rb, count, MPI_INT, MPI_COMM_WORLD);
printf("[%d] rcvbuf=[", rank);
for (i = 0; i < size; i++)
! output sort
p Test all to all
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ${bindir:=.}/../hostfile_coll -platform ${platfdir}/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ${bindir:=.}/../hostfile_coll -platform ${platfdir}/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoall -q --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
<2> sdisp: (#3): [0][1][3]
<2> rdisp: (#3): [0][2][4]
- after MPI_Alltoallv :
+ after MPI_Alltoallvv :
<0> rbuf: (#9): [-1][-1][-1][-1][-1][-1][-1][-1][-1]
<1> rbuf: (#9): [1][101][201][-1][-1][-1][-1][-1][-1]
<2> rbuf: (#9): [3][4][103][104][203][204][-1][-1][-1]
rdispls[i] = i * rank;
sdispls[i] = (i * (i + 1)) / 2;
}
+ int status;
+
+ status = MPI_Alltoallv(NULL, sendcounts, sdispls, MPI_INT, rbuf, recvcounts, rdispls, MPI_INT, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_BUFFER)
+ printf("MPI_Alltoallv did not return MPI_ERR_BUFFER for empty sendbuf\n");
+ status = MPI_Alltoallv(sbuf, NULL, sdispls, MPI_INT, rbuf, recvcounts, rdispls, MPI_INT, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_ARG)
+ printf("MPI_Alltoallv did not return MPI_ERR_ARG for NULL sendcounts\n");
+ status = MPI_Alltoallv(sbuf, sendcounts, NULL, MPI_INT, rbuf, recvcounts, rdispls, MPI_INT, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_ARG)
+ printf("MPI_Alltoallv did not return MPI_ERR_ARG for NULL senddispl\n");
+ status = MPI_Alltoallv(sbuf, sendcounts, sdispls, MPI_DATATYPE_NULL, rbuf, recvcounts, rdispls, MPI_INT, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_TYPE)
+ printf("MPI_Alltoallv did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL sendtype\n");
+ status = MPI_Alltoallv(sbuf, sendcounts, sdispls, MPI_INT, NULL, recvcounts, rdispls, MPI_INT, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_BUFFER)
+ printf("MPI_Alltoallv did not return MPI_ERR_BUFFER for empty recvbuf\n");
+ status = MPI_Alltoallv(sbuf, sendcounts, sdispls, MPI_INT, rbuf, NULL, rdispls, MPI_INT, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_ARG)
+ printf("MPI_Alltoallv did not return MPI_ERR_ARG for NULL recvcounts\n");
+ status = MPI_Alltoallv(sbuf, sendcounts, sdispls, MPI_INT, rbuf, recvcounts, NULL, MPI_INT, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_ARG)
+ printf("MPI_Alltoallv did not return MPI_ERR_ARG for NULL recvdispl\n");
+ status = MPI_Alltoallv(sbuf, sendcounts, sdispls, MPI_INT, rbuf, recvcounts, rdispls, MPI_DATATYPE_NULL, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_TYPE)
+ printf("MPI_Alltoallv did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL recvtype\n");
+ status = MPI_Alltoallv(sbuf, sendcounts, sdispls, MPI_INT, rbuf, recvcounts, rdispls, MPI_INT, MPI_COMM_NULL);
+ if(status!=MPI_ERR_COMM)
+ printf("MPI_Alltoallv did not return MPI_ERR_COMM for MPI_COMM_NULL comm\n");
print_buffer_int(sbuf, size2, "sbuf:", rank);
print_buffer_int(sendcounts, size, "scount:", rank);
! output sort
p Test all to all
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoallv --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-alltoallv --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ int status = MPI_Barrier(MPI_COMM_NULL);
+ if(status!=MPI_ERR_COMM)
+ printf("MPI_Barrier did not return MPI_ERR_COMM for MPI_COMM_NULL comm\n");
+
MPI_Barrier(MPI_COMM_WORLD);
if (0 == rank) {
! output sort
p Test barrier
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-barrier --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-barrier --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error
> ... Barrier ....
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
int size;
int rank;
int count = 2048;
+ int status;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
for (i = 0; i < count; i++)
values[i] = (0 == rank) ? 17 : 3;
+ status = MPI_Bcast(NULL, count, MPI_INT, 0, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_BUFFER)
+ printf("MPI_Bcast did not return MPI_ERR_BUFFER for empty sendbuf\n");
+ status = MPI_Bcast(values, -1, MPI_INT, 0, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_COUNT)
+ printf("MPI_Bcast did not return MPI_ERR_COUNT for -1 sendcount\n");
+ status = MPI_Bcast(values, count, MPI_DATATYPE_NULL, 0, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_TYPE)
+ printf("MPI_Bcast did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL sendtype\n");
+ status = MPI_Bcast(values, count, MPI_INT, -1, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_ROOT)
+ printf("MPI_Bcast did not return MPI_ERR_ROOT for -1 root\n");
+ status = MPI_Bcast(values, count, MPI_INT, size, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_ROOT)
+    printf("MPI_Bcast did not return MPI_ERR_ROOT for root >= size\n");
+ status = MPI_Bcast(values, count, MPI_INT, 0, MPI_COMM_NULL);
+ if(status!=MPI_ERR_COMM)
+ printf("MPI_Bcast did not return MPI_ERR_COMM for MPI_COMM_NULL comm\n");
+
MPI_Bcast(values, count, MPI_INT, 0, MPI_COMM_WORLD);
int good = 0;
for (i = 0; i < count; i++)
values[i] = (size -1 == rank) ? 17 : 3;
- int status = MPI_Bcast(values, count, MPI_INT, size-1, MPI_COMM_WORLD);
+ status = MPI_Bcast(values, count, MPI_INT, size-1, MPI_COMM_WORLD);
good = 0;
for (i = 0; i < count; i++)
p Test Broadcast with more processes than hosts
! output sort
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-bcast --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-bcast --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
printf("%d ", sb[i]);
printf("]\n");
+ status = MPI_Gather(NULL, count, MPI_INT, rb, count, MPI_INT, root, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_BUFFER)
+ printf("MPI_Gather did not return MPI_ERR_BUFFER for empty sendbuf\n");
+ status = MPI_Gather(sb, -1, MPI_INT, rb, count, MPI_INT, root, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_COUNT)
+ printf("MPI_Gather did not return MPI_ERR_COUNT for -1 sendcount\n");
+ status = MPI_Gather(sb, count, MPI_DATATYPE_NULL, rb, count, MPI_INT, root, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_TYPE)
+ printf("MPI_Gather did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL sendtype\n");
+ status = MPI_Gather(sb, count, MPI_INT, rb, count, MPI_INT, -1, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_ROOT)
+ printf("MPI_Gather did not return MPI_ERR_ROOT for root -1\n");
+ status = MPI_Gather(sb, count, MPI_INT, rb, count, MPI_INT, size+1, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_ROOT)
+ printf("MPI_Gather did not return MPI_ERR_ROOT for root > size\n");
+ status = MPI_Gather(sb, count, MPI_INT, rb, count, MPI_INT, root, MPI_COMM_NULL);
+ if(status!=MPI_ERR_COMM)
+ printf("MPI_Gather did not return MPI_ERR_COMM for MPI_COMM_NULL comm\n");
+
status = MPI_Gather(sb, count, MPI_INT, rb, count, MPI_INT, root, MPI_COMM_WORLD);
if (rank == root) {
printf("]\n");
if (status != MPI_SUCCESS) {
- printf("allgather returned %d\n", status);
+ printf("gather returned %d\n", status);
fflush(stdout);
}
}
! timeout 30
p Test all to all
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-gather --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-gather --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error
> [rank 0] -> Tremblay
> [rank 1] -> Tremblay
> [rank 2] -> Tremblay
/* Copyright (c) 2013-2019. The SimGrid Team.
- * All rights reserved. */
+ * All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
int main( int argc, char **argv )
{
- int err = 0;
- int toterr;
- int size;
- int rank;
- int i;
- MPI_Comm comm;
-
- MPI_Init( &argc, &argv );
- comm = MPI_COMM_WORLD;
-
- MPI_Comm_size( comm, &size );
- MPI_Comm_rank( comm, &rank );
- int* sendbuf = (int *) malloc( size * sizeof(int) );
- for (i=0; i<size; i++)
- sendbuf[i] = rank + i;
- int* recvcounts = (int*) malloc (size * sizeof(int));
- int* recvbuf = (int*) malloc (size * sizeof(int));
- for (i=0; i<size; i++)
- recvcounts[i] = 1;
- MPI_Reduce_scatter( sendbuf, recvbuf, recvcounts, MPI_INT, MPI_SUM, comm );
- int sumval = size * rank + ((size - 1) * size)/2;
- /* recvbuf should be size * (rank + i) */
- if (recvbuf[0] != sumval) {
- err++;
- fprintf( stdout, "Did not get expected value for reduce scatter\n" );
- fprintf( stdout, "[%d] Got %d expected %d\n", rank, recvbuf[0], sumval );
- }
-
- MPI_Allreduce( &err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
- if (rank == 0 && toterr == 0) {
- printf( " No Errors\n" );
- }
- free(sendbuf);
- free(recvcounts);
- free(recvbuf);
-
- MPI_Finalize();
-
- return toterr;
+ int err = 0;
+ int toterr;
+ int size;
+ int rank;
+ int i;
+ MPI_Comm comm;
+
+ MPI_Init( &argc, &argv );
+ comm = MPI_COMM_WORLD;
+
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+ int* sendbuf = (int *) malloc( size * sizeof(int) );
+ for (i=0; i<size; i++)
+ sendbuf[i] = rank + i;
+ int* recvcounts = (int*) malloc (size * sizeof(int));
+ int* recvbuf = (int*) malloc (size * sizeof(int));
+ for (i=0; i<size; i++)
+ recvcounts[i] = 1;
+ int retval;
+
+ retval = MPI_Reduce_scatter(NULL, recvbuf, recvcounts, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
+ if(retval!=MPI_ERR_BUFFER)
+ printf("MPI_Reduce_scatter did not return MPI_ERR_BUFFER for empty sendbuf\n");
+ retval = MPI_Reduce_scatter(sendbuf, NULL, recvcounts, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
+ if(retval!=MPI_ERR_BUFFER)
+ printf("MPI_Reduce_scatter did not return MPI_ERR_BUFFER for empty recvbuf\n");
+ retval = MPI_Reduce_scatter(sendbuf, recvbuf, NULL, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
+ if(retval!=MPI_ERR_ARG)
+ printf("MPI_Reduce_scatter did not return MPI_ERR_ARG for NULL recvcounts\n");
+ retval = MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, MPI_DATATYPE_NULL, MPI_SUM, MPI_COMM_WORLD);
+ if(retval!=MPI_ERR_TYPE)
+ printf("MPI_Reduce_scatter did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL type\n");
+ retval = MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, MPI_DOUBLE, MPI_OP_NULL, MPI_COMM_WORLD);
+ if(retval!=MPI_ERR_OP)
+ printf("MPI_Reduce_scatter did not return MPI_ERR_OP for MPI_OP_NULL op\n");
+ retval = MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, MPI_DOUBLE, MPI_SUM, MPI_COMM_NULL);
+ if(retval!=MPI_ERR_COMM)
+ printf("MPI_Reduce_scatter did not return MPI_ERR_COMM for MPI_COMM_NULL comm\n");
+
+ MPI_Reduce_scatter( sendbuf, recvbuf, recvcounts, MPI_INT, MPI_SUM, comm );
+ int sumval = size * rank + ((size - 1) * size)/2;
+ /* recvbuf should be size * (rank + i) */
+ if (recvbuf[0] != sumval) {
+ err++;
+ fprintf( stdout, "Did not get expected value for reduce scatter\n" );
+ fprintf( stdout, "[%d] Got %d expected %d\n", rank, recvbuf[0], sumval );
+ }
+
+ MPI_Allreduce( &err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
+ if (rank == 0 && toterr == 0) {
+ printf( " No Errors\n" );
+ }
+ free(sendbuf);
+ free(recvcounts);
+ free(recvbuf);
+
+ MPI_Finalize();
+
+ return toterr;
}
! output sort
p Test reduce_scatter
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-reduce-scatter --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-reduce-scatter --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error
> No Errors
> [rank 0] -> Tremblay
> [rank 10] -> Fafard
for (i = 0; i < size; i++)
printf("%llu ", sb[i]);
printf("]\n");
-
int root=0;
+
+ status = MPI_Reduce(NULL, rb, size, MPI_UNSIGNED_LONG_LONG, MPI_SUM, root, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_BUFFER)
+ printf("MPI_Reduce did not return MPI_ERR_BUFFER for empty sendbuf\n");
+ status = MPI_Reduce(sb, rb, -1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, root, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_COUNT)
+ printf("MPI_Reduce did not return MPI_ERR_COUNT for -1 count\n");
+ status = MPI_Reduce(sb, rb, size, MPI_DATATYPE_NULL, MPI_SUM, root, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_TYPE)
+ printf("MPI_Reduce did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL type\n");
+ status = MPI_Reduce(sb, rb, size, MPI_UNSIGNED_LONG_LONG, MPI_OP_NULL, root, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_OP)
+    printf("MPI_Reduce did not return MPI_ERR_OP for MPI_OP_NULL op\n");
+ status = MPI_Reduce(sb, rb, size, MPI_UNSIGNED_LONG_LONG, MPI_SUM, -1, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_ROOT)
+ printf("MPI_Reduce did not return MPI_ERR_ROOT for root -1\n");
+ status = MPI_Reduce(sb, rb, size, MPI_UNSIGNED_LONG_LONG, MPI_SUM, size+1, MPI_COMM_WORLD);
+ if(status!=MPI_ERR_ROOT)
+ printf("MPI_Reduce did not return MPI_ERR_ROOT for root > size\n");
+ status = MPI_Reduce(sb, rb, size, MPI_UNSIGNED_LONG_LONG, MPI_SUM, root, MPI_COMM_NULL);
+ if(status!=MPI_ERR_COMM)
+ printf("MPI_Reduce did not return MPI_ERR_COMM for MPI_COMM_NULL comm\n");
+
status = MPI_Reduce(sb, rb, size, MPI_UNSIGNED_LONG_LONG, MPI_SUM, root, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
! output sort
p Test allreduce
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-reduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-reduce --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error
> [0] rcvbuf=[1920 1936 1952 1968 1984 2000 2016 2032 2048 2064 2080 2096 2112 2128 2144 2160 ]
> [0] second sndbuf=[0 ]
> [0] sndbuf=[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 ]
int main(int argc, char **argv)
{
- int size;
- int rank;
- int success = 1;
- int retval;
- int sendcount = 1; // one double to each process
- int recvcount = 1;
- double *sndbuf = NULL;
- double rcvd;
- int root = 0; // arbitrary choice
-
- MPI_Init(&argc, &argv);
- MPI_Comm_size(MPI_COMM_WORLD, &size);
- MPI_Comm_rank(MPI_COMM_WORLD, &rank);
-
- // on root, initialize sendbuf
- if (root == rank) {
- sndbuf = malloc(size * sizeof(double));
- for (int i = 0; i < size; i++) {
- sndbuf[i] = (double) i;
- }
- }
-
- retval = MPI_Scatter(sndbuf, sendcount, MPI_DOUBLE, &rcvd, recvcount, MPI_DOUBLE, root, MPI_COMM_WORLD);
- if (root == rank) {
- free(sndbuf);
- }
- if (retval != MPI_SUCCESS) {
- fprintf(stderr, "(%s:%d) MPI_Scatter() returned retval=%d\n", __FILE__, __LINE__, retval);
- return 0;
- }
- // verification
- if ((double) rank != rcvd) {
- fprintf(stderr, "[%d] has %f instead of %d\n", rank, rcvd, rank);
- success = 0;
- }
+ int size;
+ int rank;
+ int success = 1;
+ int retval;
+ int sendcount = 1; // one double to each process
+ int recvcount = 1;
+ double *sndbuf = NULL;
+ double rcvd;
+ int root = 0; // arbitrary choice
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(MPI_COMM_WORLD, &size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+ // on root, initialize sendbuf
+ if (root == rank) {
+ sndbuf = malloc(size * sizeof(double));
+ for (int i = 0; i < size; i++) {
+ sndbuf[i] = (double) i;
+ }
+ }
+
+ retval = MPI_Scatter(sndbuf, sendcount, MPI_DOUBLE, NULL, recvcount, MPI_DOUBLE, root, MPI_COMM_WORLD);
+ if(retval!=MPI_ERR_BUFFER)
+ printf("MPI_Scatter did not return MPI_ERR_BUFFER for empty recvbuf\n");
+ retval = MPI_Scatter(sndbuf, sendcount, MPI_DOUBLE, &rcvd, -1, MPI_DOUBLE, root, MPI_COMM_WORLD);
+ if(retval!=MPI_ERR_COUNT)
+ printf("MPI_Scatter did not return MPI_ERR_COUNT for -1 recvcount\n");
+ retval = MPI_Scatter(sndbuf, sendcount, MPI_DOUBLE, &rcvd, recvcount, MPI_DATATYPE_NULL, root, MPI_COMM_WORLD);
+ if(retval!=MPI_ERR_TYPE)
+      printf("MPI_Scatter did not return MPI_ERR_TYPE for MPI_DATATYPE_NULL recvtype\n");
+ retval = MPI_Scatter(sndbuf, sendcount, MPI_DOUBLE, &rcvd, recvcount, MPI_DOUBLE, -1, MPI_COMM_WORLD);
+ if(retval!=MPI_ERR_ROOT)
+ printf("MPI_Scatter did not return MPI_ERR_ROOT for root -1\n");
+ retval = MPI_Scatter(sndbuf, sendcount, MPI_DOUBLE, &rcvd, recvcount, MPI_DOUBLE, size+1, MPI_COMM_WORLD);
+ if(retval!=MPI_ERR_ROOT)
+ printf("MPI_Scatter did not return MPI_ERR_ROOT for root > size\n");
+ retval = MPI_Scatter(sndbuf, sendcount, MPI_DOUBLE, &rcvd, recvcount, MPI_DOUBLE, root, MPI_COMM_NULL);
+ if(retval!=MPI_ERR_COMM)
+ printf("MPI_Scatter did not return MPI_ERR_COMM for MPI_COMM_NULL comm\n");
+
+ retval = MPI_Scatter(sndbuf, sendcount, MPI_DOUBLE, &rcvd, recvcount, MPI_DOUBLE, root, MPI_COMM_WORLD);
+ if (root == rank) {
+ free(sndbuf);
+ }
+ if (retval != MPI_SUCCESS) {
+ fprintf(stderr, "(%s:%d) MPI_Scatter() returned retval=%d\n", __FILE__, __LINE__, retval);
+ return 0;
+ }
+ // verification
+ if ((double) rank != rcvd) {
+ fprintf(stderr, "[%d] has %f instead of %d\n", rank, rcvd, rank);
+ success = 0;
+ }
/* test 1 */
if (0 == rank)
printf("** Small Test Result: ...\n");
! output sort
p Test scatter
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-scatter --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_coll -platform ../../../examples/platforms/small_platform.xml -np 16 --log=xbt_cfg.thres:critical ${bindir:=.}/coll-scatter --log=smpi_kernel.thres:warning --log=smpi_coll.thres:error --log=smpi_mpi.thres:error
> [0] ok.
> [10] ok.
> [11] ok.