rextent = rtype->get_extent();
sextent = stype->get_extent();
- // irregular case use default MPI functions
if (scount * sextent != rcount * rextent) {
- XBT_WARN("MPI_allgather_NTSLR_NB use default MPI_allgather.");
+ XBT_INFO("MPI_allgather_NTSLR_NB: irregular case, use default MPI_allgather.");
allgather__default(sbuf, scount, stype, rbuf, rcount, rtype, comm);
return MPI_SUCCESS;
}
rextent = rtype->get_extent();
sextent = stype->get_extent();
- // irregular case use default MPI functions
if (scount * sextent != rcount * rextent) {
- XBT_WARN("MPI_allgather_NTSLR use default MPI_allgather.");
+ XBT_INFO("MPI_allgather_NTSLR: irregular case, use default MPI_allgather.");
allgather__default(sbuf, scount, stype, rbuf, rcount, rtype, comm);
return MPI_SUCCESS;
}
/* for too small number of processes, use default implementation */
if (comm_size <= num_core) {
- XBT_WARN("MPI_allgather_SMP_NTS use default MPI_allgather.");
+ XBT_INFO("MPI_allgather_SMP_NTS: comm_size <= num_core, use default MPI_allgather.");
allgather__default(sbuf, scount, stype, rbuf, rcount, rtype, comm);
return MPI_SUCCESS;
}
rank = comm->rank();
if (size % 2) {
- XBT_DEBUG(
- "coll:tuned:allgather_intra_neighborexchange WARNING: odd size %d, switching to ring algorithm",
+ XBT_INFO(
+ "coll:tuned:allgather_intra_neighborexchange: odd size %d, switching to ring algorithm",
size);
return allgather__ring(sbuf, scount, sdtype,
rbuf, rcount, rdtype,
recv_chunk = r_extent * recv_count;
if (send_chunk != recv_chunk) {
- XBT_WARN("MPI_allgather_rhv use default MPI_allgather.");
+ XBT_INFO("MPI_allgather_rhv: send_chunk != recv_chunk, use default MPI_allgather.");
allgather__default(sbuf, send_count, send_type, rbuf, recv_count,
recv_type, comm);
return MPI_SUCCESS;
rank = comm->rank();
if (size % 2) {
- XBT_DEBUG("allgatherv__ompi_neighborexchange WARNING: odd size %d, switching to ring algorithm",
+ XBT_INFO("allgatherv__ompi_neighborexchange: odd size %d, switching to ring algorithm",
size);
return allgatherv__ring(sbuf, scount, sdtype,
rbuf, rcounts,
return MPI_SUCCESS;
err_hndl:
- XBT_DEBUG( "%s:%4d\tError occurred %d, rank %2d",
+ XBT_WARN( "%s:%4d\tError occurred %d, rank %2d",
__FILE__, line, err, rank);
return err;
}
MPI_Aint extent;
extent = dtype->get_extent();
- /* when communication size is smaller than number of process (not support) */
if (rcount < size) {
- XBT_WARN("MPI_allreduce_lr use default MPI_allreduce.");
+ XBT_INFO("MPI_allreduce_lr: communication size smaller than number of process, use default MPI_allreduce.");
allreduce__default(sbuf, rbuf, rcount, dtype, op, comm);
return MPI_SUCCESS;
}
delete[] recv_status_array;
} /* end pipeline */
- /* when count is not divisible by block size, use default BCAST for the remainder */
if ((remainder != 0) && (count > segment)) {
- XBT_WARN("MPI_bcast_NTSB use default MPI_bcast.");
+ XBT_INFO("MPI_bcast_NTSB: count is not divisible by block size, use default MPI_bcast for remainder.");
colls::bcast((char*)buf + (pipe_length * increment), remainder, datatype, root, comm);
}
delete[] recv_status_array;
} /* end pipeline */
- /* when count is not divisible by block size, use default BCAST for the remainder */
if ((remainder != 0) && (count > segment)) {
- XBT_WARN("MPI_bcast_NTSL_Isend_nb use default MPI_bcast.");
+ XBT_INFO("MPI_bcast_NTSL_Isend_nb: count is not divisible by block size, use default MPI_bcast for remainder.");
colls::bcast((char*)buf + (pipe_length * increment), remainder, datatype, root, comm);
}
delete[] recv_status_array;
} /* end pipeline */
- /* when count is not divisible by block size, use default BCAST for the remainder */
if ((remainder != 0) && (count > segment)) {
- XBT_WARN("MPI_bcast_arrival_NTSL use default MPI_bcast.");
+ XBT_INFO("MPI_bcast_arrival_NTSL: count is not divisible by block size, use default MPI_bcast for remainder.");
colls::bcast((char*)buf + (pipe_length * increment), remainder, datatype, root, comm);
}
delete[] status_array;
}
- // when count is not divisible by block size, use default BCAST for the remainder
if ((remainder != 0) && (count > segment)) {
- XBT_WARN("MPI_bcast_SMP_binary use default MPI_bcast.");
+ XBT_INFO("MPI_bcast_SMP_binary: count is not divisible by block size, use default MPI_bcast for remainer.");
colls::bcast((char*)buf + (pipe_length * increment), remainder, datatype, root, comm);
}
// call native when MPI communication size is too small
if (size <= num_core) {
- XBT_WARN("MPI_bcast_SMP_linear use default MPI_bcast.");
+ XBT_INFO("size <= num_core : MPI_bcast_SMP_linear use default MPI_bcast.");
bcast__default(buf, count, datatype, root, comm);
return MPI_SUCCESS;
}
delete[] status_array;
}
- // when count is not divisible by block size, use default BCAST for the remainder
if ((remainder != 0) && (count > segment)) {
- XBT_WARN("MPI_bcast_SMP_linear use default MPI_bcast.");
+ XBT_INFO("MPI_bcast_SMP_linear: count is not divisible by block size, use default MPI_bcast for remainder.");
colls::bcast((char*)buf + (pipe_length * increment), remainder, datatype, root, comm);
}
delete[] recv_status_array;
/* end pipeline */
- /* when count is not divisible by block size, use default BCAST for the remainder */
if ((remainder != 0) && (count > segment)) {
- XBT_WARN("MPI_bcast_arrival_pattern_aware_wait use default MPI_bcast.");
+ XBT_INFO("MPI_bcast_arrival_pattern_aware_wait: count is not divisible by block size, use default MPI_bcast for remainder.");
colls::bcast((char*)buf + (pipe_length * increment), remainder, datatype, root, comm);
}
delete[] recv_status_array;
} /* end pipeline */
- /* when count is not divisible by block size, use default BCAST for the remainder */
if ((remainder != 0) && (count > segment)) {
- XBT_WARN("MPI_bcast_arrival_pattern_aware use default MPI_bcast.");
+ XBT_INFO("MPI_bcast_arrival_pattern_aware: count is not divisible by block size, use default MPI_bcast for remainder.");
colls::bcast((char*)buf + (pipe_length * increment), remainder, datatype, root, comm);
}
/* message too small */
if (count < size) {
- XBT_WARN("MPI_bcast_arrival_scatter use default MPI_bcast.");
+ XBT_INFO("MPI_bcast_arrival_scatter: count < size, use default MPI_bcast.");
colls::bcast(buf, count, datatype, root, comm);
return MPI_SUCCESS;
}
int pipe_length = count / segment;
int increment = segment * extent;
if (pipe_length==0) {
- XBT_WARN("MPI_bcast_flattree_pipeline use default MPI_bcast_flattree.");
+ XBT_INFO("MPI_bcast_flattree_pipeline: pipe_length=0, use default MPI_bcast_flattree.");
return bcast__flattree(buff, count, data_type, root, comm);
}
rank = comm->rank();
delete[] recv_status_array;
} /* end pipeline */
- /* when count is not divisible by block size, use default BCAST for the remainder */
if ((remainder != 0) && (count > segment)) {
- XBT_WARN("MPI_reduce_NTSL use default MPI_reduce.");
+ XBT_INFO("MPI_reduce_NTSL: count is not divisible by block size, use default MPI_reduce for remainder.");
reduce__default((char *)buf + (pipe_length * increment),
(char *)rbuf + (pipe_length * increment), remainder, datatype, op, root,
comm);