/* Copyright (c) 2010-2020. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#ifndef SMPI_FILE_HPP_INCLUDED
#define SMPI_FILE_HPP_INCLUDED
#include "simgrid/plugins/file_system.h"
#include "smpi_comm.hpp"
#include "smpi_coll.hpp"
#include "smpi_datatype.hpp"
#include "smpi_errhandler.hpp"
#include "smpi_info.hpp"
// standard headers needed by op_all below (std::sort, std::min/max, std::pair, std::vector)
#include <algorithm>
#include <utility>
#include <vector>

XBT_LOG_EXTERNAL_CATEGORY(smpi_pmpi);
namespace simgrid{
namespace smpi{

class File : public F2C{
  MPI_Comm comm_;
  simgrid::s4u::File* file_;
  MPI_Info info_;
  MPI_Offset* shared_file_pointer_;
  s4u::MutexPtr shared_mutex_;
  MPI_Errhandler errhandler_;
  MPI_Datatype etype_;
  MPI_Datatype filetype_;

public:
  File(MPI_Comm comm, const char* filename, int amode, MPI_Info info);
  File(const File&) = delete;
  File& operator=(const File&) = delete;
  ~File();
  int get_position(MPI_Offset* offset) const;
  int get_position_shared(MPI_Offset* offset) const;
  MPI_Comm comm() const;
  int seek(MPI_Offset offset, int whence);
  int seek_shared(MPI_Offset offset, int whence);
  int set_view(MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, const char* datarep, const Info* info);
  int get_view(MPI_Offset* disp, MPI_Datatype* etype, MPI_Datatype* filetype, char* datarep) const;
  void set_info(MPI_Info info);
  static int read(MPI_File fh, void* buf, int count, const Datatype* datatype, MPI_Status* status);
  static int read_shared(MPI_File fh, void* buf, int count, const Datatype* datatype, MPI_Status* status);
  static int read_ordered(MPI_File fh, void* buf, int count, const Datatype* datatype, MPI_Status* status);
  static int write(MPI_File fh, void* buf, int count, const Datatype* datatype, MPI_Status* status);
  static int write_shared(MPI_File fh, const void* buf, int count, const Datatype* datatype, MPI_Status* status);
  static int write_ordered(MPI_File fh, const void* buf, int count, const Datatype* datatype, MPI_Status* status);
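  // op_all below factors the logic shared by the collective *_all variants;
  // the template parameter T is meant to be one of the static read/write
  // helpers above, which all share the
  // (MPI_File, void*, int, const Datatype*, MPI_Status*) signature.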
  template <int (*T)(MPI_File, void*, int, const Datatype*, MPI_Status*)>
  int op_all(void* buf, int count, const Datatype* datatype, MPI_Status* status);
  static int close(MPI_File* fh);
  static int del(const char* filename, const Info* info);
  MPI_Errhandler errhandler();
  void set_errhandler(MPI_Errhandler errhandler);
  static File* f2c(int id);
};
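/* Usage sketch (hypothetical caller, not part of this header): the PMPI layer
 * can implement the MPI_File_read_all-style calls by instantiating op_all with
 * the matching independent operation, along the lines of:
 *   fh->op_all<simgrid::smpi::File::read>(buf, count, datatype, status);
 */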
/* Read_all, Write_all : loosely based on */
/* @article{Thakur:1996:ETM:245875.245879,*/
/* author = {Thakur, Rajeev and Choudhary, Alok},*/
/* title = {An Extended Two-phase Method for Accessing Sections of Out-of-core Arrays},*/
/* journal = {Sci. Program.},*/
/* issue_date = {Winter 1996},*/
/* pages = {301--317},*/
/* }*/
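/* Overview of the two-phase scheme implemented below: ranks first agree, via
 * two allgathers, on the global byte range [min, max] touched by the
 * collective call; each rank then performs one contiguous access covering its
 * even share of that range (phase 1), and an alltoallv redistributes the
 * bytes to the ranks that actually requested them (phase 2). */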
template <int (*T)(MPI_File, void*, int, const Datatype*, MPI_Status*)>
int File::op_all(void* buf, int count, const Datatype* datatype, MPI_Status* status)
{
  // get min and max offsets from everyone.
  int size              = comm_->size();
  int rank              = comm_->rank();
  MPI_Offset min_offset = file_->tell();
  MPI_Offset max_offset =
      min_offset +
      count * datatype->get_extent(); // cheating, as we don't care about exact data location, we can skip extent
  std::vector<MPI_Offset> min_offsets(size);
  std::vector<MPI_Offset> max_offsets(size);
  simgrid::smpi::colls::allgather(&min_offset, 1, MPI_OFFSET, min_offsets.data(), 1, MPI_OFFSET, comm_);
  simgrid::smpi::colls::allgather(&max_offset, 1, MPI_OFFSET, max_offsets.data(), 1, MPI_OFFSET, comm_);
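  // after these two allgathers, every rank knows the [min_offset, max_offset)
  // byte range that each peer wants to access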
  MPI_Offset min = min_offset;
  MPI_Offset max = max_offset;
  MPI_Offset tot = 0;
  int empty      = 1;
  for (int i = 0; i < size; i++) {
    if (min_offsets[i] != max_offsets[i])
      empty = 0;
    tot += (max_offsets[i] - min_offsets[i]);
    if (min_offsets[i] < min)
      min = min_offsets[i];
    if (max_offsets[i] > max)
      max = max_offsets[i];
  }
  XBT_CDEBUG(smpi_pmpi, "my offsets to read : %lld:%lld, global min and max %lld:%lld", min_offset, max_offset, min,
             max);
  if (empty == 1) {
    // no rank accesses anything: report an empty transfer
    if (status != MPI_STATUS_IGNORE)
      status->count = 0;
    return MPI_SUCCESS;
  }
  MPI_Offset total = max - min;
  if (total == tot && (datatype->flags() & DT_FLAG_CONTIGUOUS)) {
    // contiguous. Just have each proc perform its read
    if (status != MPI_STATUS_IGNORE)
      status->count = count * datatype->size();
    return T(this, buf, count, datatype, status);
  }

  // Interleaved case : How much do I need to read, and whom to send it ?
  MPI_Offset my_chunk_start = (max - min + 1) / size * rank;
  MPI_Offset my_chunk_end   = ((max - min + 1) / size * (rank + 1));
  XBT_CDEBUG(smpi_pmpi, "my chunks to read : %lld:%lld", my_chunk_start, my_chunk_end);
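  // the global range is split evenly: rank r takes the r-th slice of size
  // (max - min + 1) / size, independently of which bytes it actually requested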
  std::vector<int> send_sizes(size);
  std::vector<int> recv_sizes(size);
  std::vector<int> send_disps(size);
  std::vector<int> recv_disps(size);
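  // phase-2 bookkeeping: for every peer whose requested range overlaps my
  // slice, record how many bytes I will have to forward to it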
  int total_sent = 0;
  for (int i = 0; i < size; i++) {
    send_sizes[i] = 0;
    send_disps[i] = 0; // cheat to avoid issues when send>recv as we use recv buffer
    if ((my_chunk_start >= min_offsets[i] && my_chunk_start < max_offsets[i]) ||
        ((my_chunk_end <= max_offsets[i]) && my_chunk_end > min_offsets[i])) {
      send_sizes[i] = (std::min(max_offsets[i] - 1, my_chunk_end - 1) - std::max(min_offsets[i], my_chunk_start));
      // store min and max offset to actually read
      min_offset = std::min(min_offset, min_offsets[i]);
      total_sent += send_sizes[i];
      XBT_CDEBUG(smpi_pmpi, "will have to send %d bytes to %d", send_sizes[i], i);
    }
  }
  min_offset = std::max(min_offset, my_chunk_start);

  // merge the ranges of every process
  std::vector<std::pair<MPI_Offset, MPI_Offset>> ranges;
  for (int i = 0; i < size; ++i)
    ranges.emplace_back(min_offsets[i], max_offsets[i]);
  std::sort(ranges.begin(), ranges.end());
  std::vector<std::pair<MPI_Offset, MPI_Offset>> chunks;
  chunks.push_back(ranges[0]);
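  // sweep the sorted ranges, extending the current chunk when ranges overlap
  // and opening a new one when they are disjoint, to get the set of byte
  // ranges that are actually requested by someone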
  unsigned int nchunks = 0;
  unsigned int i       = 1;
  while (i < ranges.size()) {
    if (ranges[i].second > chunks[nchunks].second) {
      // else range included - ignore
      if (ranges[i].first > chunks[nchunks].second) {
        // new disjoint range
        chunks.push_back(ranges[i]);
        nchunks++;
      } else {
        // merge ranges
        chunks[nchunks].second = ranges[i].second;
      }
    }
    i++;
  }
  // what do I need to read ?
  MPI_Offset totreads = 0;
  for (i = 0; i < chunks.size(); i++) {
    if (chunks[i].second < my_chunk_start)
      continue;
    else if (chunks[i].first > my_chunk_end)
      continue;
    else
      totreads += (std::min(chunks[i].second, my_chunk_end - 1) - std::max(chunks[i].first, my_chunk_start));
  }
  XBT_CDEBUG(smpi_pmpi, "will have to access %lld from my chunk", totreads);
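  // phase 1: perform the actual contiguous access for my slice into a
  // temporary buffer, starting at the smallest overlapping offset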
  unsigned char* sendbuf = smpi_get_tmp_sendbuffer(total_sent);

  if (totreads > 0) {
    seek(min_offset, MPI_SEEK_SET);
    T(this, sendbuf, totreads / datatype->size(), datatype, status);
  }
  simgrid::smpi::colls::alltoall(send_sizes.data(), 1, MPI_INT, recv_sizes.data(), 1, MPI_INT, comm_);
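  // each peer announced how many bytes it will send me; derive my receive
  // displacements from these sizes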
  int total_recv = 0;
  for (int i = 0; i < size; i++) {
    recv_disps[i] = total_recv;
    total_recv += recv_sizes[i];
  }
  // Set buf value to avoid copying dumb data
  simgrid::smpi::colls::alltoallv(sendbuf, send_sizes.data(), send_disps.data(), MPI_BYTE, buf, recv_sizes.data(),
                                  recv_disps.data(), MPI_BYTE, comm_);
  if (status != MPI_STATUS_IGNORE)
    status->count = count * datatype->size();
  smpi_free_tmp_buffer(sendbuf);
  return MPI_SUCCESS;
}

} // namespace smpi
} // namespace simgrid

#endif