UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_set_view,(MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, char *datarep, MPI_Info info), (fh, disp, etype, filetype, datarep, info))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_view,(MPI_File fh, MPI_Offset *disp, MPI_Datatype *etype, MPI_Datatype *filetype, char *datarep), (fh, disp, etype, filetype, datarep))
WRAPPED_PMPI_CALL(int, MPI_File_read_at,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, offset, buf, count, datatype, status))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_at_all,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, offset, buf, count, datatype, status))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_at_all,(MPI_File fh, MPI_Offset offset, void *buf,int count, MPI_Datatype datatype, MPI_Status *status), (fh, offset, buf, count, datatype, status))
+WRAPPED_PMPI_CALL(int, MPI_File_read_at_all,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, offset, buf, count, datatype, status))
WRAPPED_PMPI_CALL(int, MPI_File_write_at,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, offset, buf, count, datatype, status))
+WRAPPED_PMPI_CALL(int, MPI_File_write_at_all,(MPI_File fh, MPI_Offset offset, void *buf,int count, MPI_Datatype datatype, MPI_Status *status), (fh, offset, buf, count, datatype, status))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iread_at,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, offset, buf, count, datatype, request))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iwrite_at,(MPI_File fh, MPI_Offset offset, void *buf,int count, MPI_Datatype datatype, MPI_Request *request), (fh, offset, buf, count, datatype, request))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iread_at_all,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, offset, buf, count, datatype, request))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iwrite_at_all,(MPI_File fh, MPI_Offset offset, void *buf,int count, MPI_Datatype datatype, MPI_Request *request), (fh, offset, buf, count, datatype, request))
WRAPPED_PMPI_CALL(int, MPI_File_read,(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_all,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_all,(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status))
+WRAPPED_PMPI_CALL(int, MPI_File_read_all,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status))
WRAPPED_PMPI_CALL(int, MPI_File_write,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status))
+WRAPPED_PMPI_CALL(int, MPI_File_write_all,(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iread,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, buf, count, datatype, request))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iwrite,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, buf, count, datatype, request))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iread_all,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, buf, count, datatype, request))
}
}

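+// Collective (blocking) variants of the MPI file read/write calls. Each
+// wrapper validates its arguments through the CHECK_* macros and then
+// delegates to the templated File::op_all() helper defined in smpi_file.hpp,
+// which implements a two-phase redistribution scheme in the spirit of ROMIO's
+// collective I/O. Illustrative usage (not part of this patch):
+//   MPI_File fh;
+//   MPI_File_open(MPI_COMM_WORLD, "data.bin", MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
+//   MPI_File_read_all(fh, buf, count, MPI_INT, &status);  // called by every rank
+//   MPI_File_close(&fh);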
+int PMPI_File_read_all(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){
+ CHECK_FILE(fh);
+ CHECK_BUFFER(buf, count);
+ CHECK_COUNT(count);
+ CHECK_DATATYPE(datatype, count);
+ CHECK_STATUS(status);
+ CHECK_FLAGS(fh);
+ else {
+   smpi_bench_end();
+   int rank_traced = simgrid::s4u::this_actor::get_pid();
+   TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - read_all", static_cast<double>(count*datatype->size())));
+   int ret = fh->op_all<simgrid::smpi::File::read>(buf, count, datatype, status);
+   TRACE_smpi_comm_out(rank_traced);
+   smpi_bench_begin();
+   return ret;
+ }
+}
+
+int PMPI_File_write_all(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){
+ CHECK_FILE(fh);
+ CHECK_BUFFER(buf, count);
+ CHECK_COUNT(count);
+ CHECK_DATATYPE(datatype, count);
+ CHECK_STATUS(status);
+ CHECK_FLAGS(fh);
+ else {
+   smpi_bench_end();
+   int rank_traced = simgrid::s4u::this_actor::get_pid();
+   TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - write_all", static_cast<double>(count*datatype->size())));
+   int ret = fh->op_all<simgrid::smpi::File::write>(buf, count, datatype, status);
+   TRACE_smpi_comm_out(rank_traced);
+   smpi_bench_begin();
+   return ret;
+ }
+}
+
int PMPI_File_read_at(MPI_File fh, MPI_Offset offset, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){
CHECK_FILE(fh);
CHECK_BUFFER(buf, count);
}
}

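+// The _at_all variants are explicit-offset collectives: they seek to the
+// requested offset and then reuse the same op_all machinery. The error path
+// after the seek closes the trace event and restarts benchmarking before
+// returning, so a failed seek cannot leave the simulator with benchmarking
+// suspended.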
+int PMPI_File_read_at_all(MPI_File fh, MPI_Offset offset, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){
+ CHECK_FILE(fh);
+ CHECK_BUFFER(buf, count);
+ CHECK_OFFSET(offset);
+ CHECK_COUNT(count);
+ CHECK_DATATYPE(datatype, count);
+ CHECK_STATUS(status);
+ CHECK_FLAGS(fh);
+ else {
+   smpi_bench_end();
+   int rank_traced = simgrid::s4u::this_actor::get_pid();
+   TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - read_at_all", static_cast<double>(count*datatype->size())));
+   int ret = fh->seek(offset,SEEK_SET);
+   if(ret!=MPI_SUCCESS){
+     TRACE_smpi_comm_out(rank_traced);
+     smpi_bench_begin();
+     return ret;
+   }
+   ret = fh->op_all<simgrid::smpi::File::read>(buf, count, datatype, status);
+   TRACE_smpi_comm_out(rank_traced);
+   smpi_bench_begin();
+   return ret;
+ }
+}

int PMPI_File_write_at(MPI_File fh, MPI_Offset offset, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){
CHECK_FILE(fh);
}
}

+int PMPI_File_write_at_all(MPI_File fh, MPI_Offset offset, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){
+ CHECK_FILE(fh);
+ CHECK_BUFFER(buf, count);
+ CHECK_OFFSET(offset);
+ CHECK_COUNT(count);
+ CHECK_DATATYPE(datatype, count);
+ CHECK_STATUS(status);
+ CHECK_FLAGS(fh);
+ else {
+   smpi_bench_end();
+   int rank_traced = simgrid::s4u::this_actor::get_pid();
+   TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - write_at_all", static_cast<double>(count*datatype->size())));
+   int ret = fh->seek(offset,SEEK_SET);
+   if(ret!=MPI_SUCCESS){
+     TRACE_smpi_comm_out(rank_traced);
+     smpi_bench_begin();
+     return ret;
+   }
+   ret = fh->op_all<simgrid::smpi::File::write>(buf, count, datatype, status);
+   TRACE_smpi_comm_out(rank_traced);
+   smpi_bench_begin();
+   return ret;
+ }
+}
+
int PMPI_File_delete(char *filename, MPI_Info info){
  if (filename == nullptr) {
    return MPI_ERR_FILE;
#ifndef SMPI_FILE_HPP_INCLUDED
#define SMPI_FILE_HPP_INCLUDED
#include "simgrid/plugins/file_system.h"
+#include "smpi_comm.hpp"
+#include "smpi_coll.hpp"
+#include "smpi_datatype.hpp"
+#include "smpi_info.hpp"
+#include <algorithm>
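+// These includes provide what op_all() below relies on: the communicator
+// accessors from smpi_comm.hpp, the Colls::allgather/alltoall/alltoallv
+// collectives from smpi_coll.hpp, datatype->size() and flags() from
+// smpi_datatype.hpp, and std::sort/std::min/std::max from <algorithm>.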


namespace simgrid{
int seek(MPI_Offset offset, int whence);
static int read(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);
static int write(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);
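+ // op_all turns the static per-rank read/write passed as template parameter T
+ // into a collective operation over the file's communicator.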
+ template <int (*T)(MPI_File, void *, int, MPI_Datatype, MPI_Status *)> int op_all(void *buf, int count,MPI_Datatype datatype, MPI_Status *status);
static int close(MPI_File *fh);
static int del(char *filename, MPI_Info info);
};
+
+ template <int (*T)(MPI_File, void *, int, MPI_Datatype, MPI_Status *)>
+ int File::op_all(void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
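+   // Two-phase collective I/O, in short:
+   //  1. allgather every rank's requested byte range [offset, offset+count*size);
+   //  2. if the union of the ranges is contiguous and the datatype is too,
+   //     each rank simply performs its own independent operation;
+   //  3. otherwise split the global [min,max) range into equal per-rank
+   //     chunks, have each rank perform the file operation on its chunk, and
+   //     redistribute the bytes with an alltoallv.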
+   //get min and max offsets from everyone.
+   int size = comm_->size();
+   int rank = comm_->rank();
+   MPI_Offset min_offset = file_->tell();
+   MPI_Offset max_offset = min_offset + count * datatype->size();//cheating: we only care about data volume, not exact location, so the extent can be skipped
+   MPI_Offset* min_offsets = xbt_new(MPI_Offset, size);
+   MPI_Offset* max_offsets = xbt_new(MPI_Offset, size);
+   simgrid::smpi::Colls::allgather(&min_offset, 1, MPI_OFFSET, min_offsets, 1, MPI_OFFSET, comm_);
+   simgrid::smpi::Colls::allgather(&max_offset, 1, MPI_OFFSET, max_offsets, 1, MPI_OFFSET, comm_);
+   MPI_Offset min=min_offset;
+   MPI_Offset max=max_offset;
+   MPI_Offset tot= 0;
+   for(int i=0;i<size;i++){
+     tot+=(max_offsets[i]-min_offsets[i]);
+     if(min_offsets[i]<min)
+       min=min_offsets[i];
+     if(max_offsets[i]>max)
+       max=max_offsets[i];
+   }
+   MPI_Offset total = max-min;
+   if(total==tot && (datatype->flags() & DT_FLAG_CONTIGUOUS)){
+     //contiguous. Just have each proc perform its read
+     xbt_free(min_offsets);
+     xbt_free(max_offsets);
+     return T(this,buf,count,datatype, status);
+   }
+
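+   // Example (assuming min==0 for simplicity): with max==4000 and 4 ranks,
+   // rank r is responsible for bytes [r*1000, (r+1)*1000); it will read that
+   // slice from the file and ship each rank i the part that overlaps
+   // [min_offsets[i], max_offsets[i]).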
+   //Interleaved case : How much do I need to read, and whom to send it ?
+   MPI_Offset my_chunk_start=(max-min)/size*rank;
+   MPI_Offset my_chunk_end=((max-min)/size*(rank+1))-1;
+   int* send_sizes = xbt_new0(int, size);
+   int* recv_sizes = xbt_new(int, size);
+   int* send_disps = xbt_new(int, size);
+   int* recv_disps = xbt_new(int, size);
+   int total_sent=0;
+   for(int i=0;i<size;i++){
+     if((my_chunk_start>=min_offsets[i] && my_chunk_start < max_offsets[i])||
+        ((my_chunk_end<=max_offsets[i]) && my_chunk_end> min_offsets[i])){
+       send_sizes[i]=(std::min(max_offsets[i], my_chunk_end+1)-std::max(min_offsets[i], my_chunk_start));
+       //store min and max offset to actually read
+       min_offset=std::max(min_offsets[i], my_chunk_start);
+       max_offset=std::min(max_offsets[i], my_chunk_end+1);
+       send_disps[i]=0;//send_sizes[i]; cheat to avoid issues when send>recv as we use the recv buffer
+       total_sent+=send_sizes[i];
+     }
+   }
+
+   //merge the ranges of every process
+   std::vector<std::pair<MPI_Offset, MPI_Offset>> ranges;
+   for(int i=0; i<size; ++i)
+     ranges.push_back(std::make_pair(min_offsets[i],max_offsets[i]));
+   std::sort(ranges.begin(), ranges.end());
+   std::vector<std::pair<MPI_Offset, MPI_Offset>> chunks;
+   chunks.push_back(ranges[0]);
+
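+   // Merge walk, illustrative example: sorted ranges (0,100) (50,150) (200,300)
+   // collapse into chunks (0,150) (200,300). A range fully inside the current
+   // chunk is skipped, an overlapping one extends it, and a disjoint one opens
+   // a new chunk.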
+   unsigned int nchunks=0;
+   unsigned int i=1;
+   while(i < ranges.size()){
+     if(ranges[i].second>chunks[nchunks].second){
+       if(ranges[i].first>chunks[nchunks].second){
+         //new disjoint range
+         chunks.push_back(ranges[i]);
+         nchunks++;
+       } else {
+         //overlapping range - merge it into the current chunk
+         chunks[nchunks].second=ranges[i].second;
+       }
+     } // else the range is included in the current chunk - ignore it
+     i++;
+   }
+   //what do I need to read ?
+   MPI_Offset totreads=0;
+   for(i=0; i<chunks.size();i++){
+     if(chunks[i].second < my_chunk_start)
+       continue;
+     else if (chunks[i].first > my_chunk_end)
+       continue;
+     else
+       totreads += (std::min(chunks[i].second, my_chunk_end+1)-std::max(chunks[i].first, my_chunk_start));
+   }
+   char* sendbuf= static_cast<char *>(smpi_get_tmp_sendbuffer(totreads));
+
+   if(totreads>0){
+     seek(min_offset, MPI_SEEK_SET);
+     T(this,sendbuf,totreads/datatype->size(),datatype, status);
+   }
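+   // Redistribution phase: exchange the per-pair byte counts with an alltoall,
+   // then move the actual data with an alltoallv.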
+   simgrid::smpi::Colls::alltoall(send_sizes, 1, MPI_INT, recv_sizes, 1, MPI_INT, comm_);
+   int total_recv=0;
+   for(int i=0;i<size;i++){
+     recv_disps[i]=total_recv;
+     total_recv+=recv_sizes[i];
+   }
+   //receive straight into the user buffer: the payload is simulated anyway, so an extra copy would be wasted work
+   simgrid::smpi::Colls::alltoallv(sendbuf, send_sizes, send_disps, MPI_BYTE,
+                                   buf, recv_sizes, recv_disps, MPI_BYTE, comm_);
+   smpi_free_tmp_buffer(sendbuf);
+   xbt_free(send_sizes);
+   xbt_free(recv_sizes);
+   xbt_free(send_disps);
+   xbt_free(recv_disps);
+   xbt_free(min_offsets);
+   xbt_free(max_offsets);
+   return MPI_SUCCESS;
+ }
}
}
+
#endif