X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/39c935d6d5ee86d153f6f7e6a10d723ae7c57f6f..5ed37babb2fa9097abe82df299c0aa259ed84d5a:/src/smpi/include/smpi_file.hpp

diff --git a/src/smpi/include/smpi_file.hpp b/src/smpi/include/smpi_file.hpp
index c89c1fef3f..41c20d9c3d 100644
--- a/src/smpi/include/smpi_file.hpp
+++ b/src/smpi/include/smpi_file.hpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2021. The SimGrid Team.
+/* Copyright (c) 2010-2023. The SimGrid Team.
  * All rights reserved.                                                     */
 
 /* This program is free software; you can redistribute it and/or modify it
@@ -14,10 +14,9 @@
 #include "smpi_info.hpp"
 #include <string>
 
-XBT_LOG_EXTERNAL_CATEGORY(smpi_pmpi);
+XBT_LOG_EXTERNAL_CATEGORY(smpi_io);
 
-namespace simgrid{
-namespace smpi{
+namespace simgrid::smpi {
 class File : public F2C{
   MPI_Comm comm_;
   int flags_;
@@ -31,6 +30,8 @@ class File : public F2C{
   MPI_Datatype etype_;
   MPI_Datatype filetype_;
   std::string datarep_;
+  MPI_Offset disp_;
+  bool atomicity_;
 
 public:
   File(MPI_Comm comm, const char *filename, int amode, MPI_Info info);
@@ -41,7 +42,10 @@ class File : public F2C{
   int get_position(MPI_Offset* offset) const;
   int get_position_shared(MPI_Offset* offset) const;
   int flags() const;
+  MPI_Datatype etype() const;
   MPI_Comm comm() const;
+  std::string name() const override { return file_ ? "MPI_File: " + std::string(file_->get_path()) : "MPI_File"; }
+
   int sync();
   int seek(MPI_Offset offset, int whence);
   int seek_shared(MPI_Offset offset, int whence);
@@ -49,6 +53,7 @@ class File : public F2C{
   int get_view(MPI_Offset* disp, MPI_Datatype* etype, MPI_Datatype* filetype, char* datarep) const;
   MPI_Info info();
   void set_info( MPI_Info info);
+  void set_size(int size);
   static int read(MPI_File fh, void* buf, int count, const Datatype* datatype, MPI_Status* status);
   static int read_shared(MPI_File fh, void* buf, int count, const Datatype* datatype, MPI_Status* status);
   static int read_ordered(MPI_File fh, void* buf, int count, const Datatype* datatype, MPI_Status* status);
@@ -61,6 +66,8 @@ class File : public F2C{
   static int del(const char* filename, const Info* info);
   MPI_Errhandler errhandler();
   void set_errhandler( MPI_Errhandler errhandler);
+  void set_atomicity(bool a);
+  bool get_atomicity() const;
   static File* f2c(int id);
 };
 
@@ -100,25 +107,27 @@ int File::op_all(void* buf, int count, const Datatype* datatype, MPI_Status* sta
       max = max_offsets[i];
   }
 
-  XBT_CDEBUG(smpi_pmpi, "my offsets to read : %lld:%lld, global min and max %lld:%lld", min_offset, max_offset, min,
+  XBT_CDEBUG(smpi_io, "my offsets to read : %lld:%lld, global min and max %lld:%lld", min_offset, max_offset, min,
              max);
   if (empty == 1) {
     if (status != MPI_STATUS_IGNORE)
       status->count = 0;
     return MPI_SUCCESS;
   }
-  MPI_Offset total = max - min;
-  if (total == tot && (datatype->flags() & DT_FLAG_CONTIGUOUS)) {
+  XBT_CDEBUG(smpi_io, "min:max : %lld:%lld, tot %lld contig %u", min, max, tot, (datatype->flags() & DT_FLAG_CONTIGUOUS));
+  if ( size==1 || (max - min == tot && (datatype->flags() & DT_FLAG_CONTIGUOUS))) {
     // contiguous. Just have each proc perform its read
     if (status != MPI_STATUS_IGNORE)
       status->count = count * datatype->size();
-    return T(this, buf, count, datatype, status);
+    int ret = T(this, buf, count, datatype, status);
+    seek(max_offset, MPI_SEEK_SET);
+    return ret;
   }
 
   // Interleaved case : How much do I need to read, and whom to send it ?
-  MPI_Offset my_chunk_start = (max - min + 1) / size * rank;
-  MPI_Offset my_chunk_end = ((max - min + 1) / size * (rank + 1));
-  XBT_CDEBUG(smpi_pmpi, "my chunks to read : %lld:%lld", my_chunk_start, my_chunk_end);
+  MPI_Offset my_chunk_start = min + (max - min + 1) / size * rank;
+  MPI_Offset my_chunk_end = min + ((max - min + 1) / size * (rank + 1)) +1;
+  XBT_CDEBUG(smpi_io, "my chunks to read : %lld:%lld", my_chunk_start, my_chunk_end);
   std::vector<int> send_sizes(size);
   std::vector<int> recv_sizes(size);
   std::vector<int> send_disps(size);
@@ -129,11 +138,14 @@ int File::op_all(void* buf, int count, const Datatype* datatype, MPI_Status* sta
     send_disps[i] = 0;
     // cheat to avoid issues when send>recv as we use recv buffer
     if ((my_chunk_start >= min_offsets[i] && my_chunk_start < max_offsets[i]) ||
         ((my_chunk_end <= max_offsets[i]) && my_chunk_end > min_offsets[i])) {
-      send_sizes[i] = (std::min(max_offsets[i] - 1, my_chunk_end - 1) - std::max(min_offsets[i], my_chunk_start));
+      send_sizes[i] = (std::min(max_offsets[i], my_chunk_end) - std::max(min_offsets[i], my_chunk_start));
+      //we want to send only useful data, so let's pretend we pack it
+      send_sizes[i]=send_sizes[i]/datatype->get_extent()*datatype->size();
       // store min and max offset to actually read
+      min_offset = std::min(min_offset, min_offsets[i]);
       total_sent += send_sizes[i];
-      XBT_CDEBUG(smpi_pmpi, "will have to send %d bytes to %d", send_sizes[i], i);
+      XBT_CDEBUG(smpi_io, "will have to send %d bytes to %d", send_sizes[i], i);
     }
   }
   min_offset = std::max(min_offset, my_chunk_start);
@@ -147,8 +159,7 @@ int File::op_all(void* buf, int count, const Datatype* datatype, MPI_Status* sta
   chunks.push_back(ranges[0]);
 
   unsigned int nchunks = 0;
-  unsigned int i = 1;
-  while (i < ranges.size()) {
+  for (unsigned i = 1; i < ranges.size(); i++) {
     if (ranges[i].second > chunks[nchunks].second) {
       // else range included - ignore
       if (ranges[i].first > chunks[nchunks].second) {
@@ -160,25 +171,25 @@ int File::op_all(void* buf, int count, const Datatype* datatype, MPI_Status* sta
         chunks[nchunks].second = ranges[i].second;
       }
     }
-    i++;
   }
   // what do I need to read ?
   MPI_Offset totreads = 0;
-  for (i = 0; i < chunks.size(); i++) {
-    if (chunks[i].second < my_chunk_start)
+  for (auto const& [chunk_start, chunk_end] : chunks) {
+    if (chunk_end < my_chunk_start)
       continue;
-    else if (chunks[i].first > my_chunk_end)
+    else if (chunk_start > my_chunk_end)
       continue;
     else
-      totreads += (std::min(chunks[i].second, my_chunk_end - 1) - std::max(chunks[i].first, my_chunk_start));
+      totreads += (std::min(chunk_end, my_chunk_end) - std::max(chunk_start, my_chunk_start));
   }
-  XBT_CDEBUG(smpi_pmpi, "will have to access %lld from my chunk", totreads);
+  XBT_CDEBUG(smpi_io, "will have to access %lld from my chunk", totreads);
 
   unsigned char* sendbuf = smpi_get_tmp_sendbuffer(total_sent);
 
   if (totreads > 0) {
     seek(min_offset, MPI_SEEK_SET);
-    T(this, sendbuf, totreads / datatype->size(), datatype, status);
+    T(this, sendbuf, totreads / datatype->get_extent(), datatype, status);
+    seek(max_offset, MPI_SEEK_SET);
   }
   simgrid::smpi::colls::alltoall(send_sizes.data(), 1, MPI_INT, recv_sizes.data(), 1, MPI_INT, comm_);
   int total_recv = 0;
@@ -194,7 +205,6 @@ int File::op_all(void* buf, int count, const Datatype* datatype, MPI_Status* sta
   smpi_free_tmp_buffer(sendbuf);
   return MPI_SUCCESS;
 }
-}
-}
+} // namespace simgrid::smpi
 
 #endif