/* Copyright (c) 2010-2019. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#ifndef SMPI_FILE_HPP_INCLUDED
#define SMPI_FILE_HPP_INCLUDED
#include "simgrid/plugins/file_system.h"
#include "smpi_comm.hpp"
#include "smpi_coll.hpp"
#include "smpi_datatype.hpp"
#include "smpi_info.hpp"
#include <algorithm>
#include <utility>
#include <vector>
namespace simgrid{
namespace smpi{

class File{
  MPI_Comm comm_;                    // communicator the file was opened on
  simgrid::s4u::File* file_;         // underlying simulated (S4U) file
  MPI_Offset* shared_file_pointer_;  // MPI shared file pointer, common to all ranks
  s4u::MutexPtr shared_mutex_;       // protects accesses to the shared file pointer
  public:
  File(MPI_Comm comm, char *filename, int amode, MPI_Info info);
  int get_position(MPI_Offset* offset);
  int get_position_shared(MPI_Offset* offset);
  int seek(MPI_Offset offset, int whence);
  int seek_shared(MPI_Offset offset, int whence);
  void set_info(MPI_Info info);
  static int read(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status);
  static int read_shared(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status);
  static int read_ordered(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status);
  static int write(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status);
  static int write_shared(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status);
  static int write_ordered(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status);
  template <int (*T)(MPI_File, void *, int, MPI_Datatype, MPI_Status *)>
  int op_all(void *buf, int count, MPI_Datatype datatype, MPI_Status *status);
  static int close(MPI_File *fh);
  static int del(char *filename, MPI_Info info);
};
/* Read_all, Write_all : loosely based on */
/* @article{Thakur:1996:ETM:245875.245879,*/
/*  author = {Thakur, Rajeev and Choudhary, Alok},*/
/*  title = {An Extended Two-phase Method for Accessing Sections of Out-of-core Arrays},*/
/*  journal = {Sci. Program.},*/
/*  issue_date = {Winter 1996},*/
/*  pages = {301--317},*/
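/* The routine below implements a generalized two-phase collective I/O scheme: ranks first */
/* exchange the byte ranges they intend to access. If those ranges exactly tile the global */
/* [min, max] interval and the datatype is contiguous, each rank simply performs its own   */
/* operation. Otherwise [min, max] is split into one chunk per rank, each rank accesses    */
/* only its own chunk, and the data is redistributed between ranks with an alltoallv.      */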
template <int (*T)(MPI_File, void *, int, MPI_Datatype, MPI_Status *)>
int File::op_all(void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
  //get min and max offsets from everyone.
  int size = comm_->size();
  int rank = comm_->rank();
  MPI_Offset min_offset = file_->tell();
  MPI_Offset max_offset = (min_offset + count * datatype->size());//cheating, as we don't care about exact data location, we can skip extent
  MPI_Offset* min_offsets = xbt_new(MPI_Offset, size);
  MPI_Offset* max_offsets = xbt_new(MPI_Offset, size);
  simgrid::smpi::Colls::allgather(&min_offset, 1, MPI_OFFSET, min_offsets, 1, MPI_OFFSET, comm_);
  simgrid::smpi::Colls::allgather(&max_offset, 1, MPI_OFFSET, max_offsets, 1, MPI_OFFSET, comm_);
  MPI_Offset min = min_offset;
  MPI_Offset max = max_offset;
  MPI_Offset tot = 0; // total number of bytes requested, summed over all ranks
  for(int i = 0; i < size; i++){
    if(min_offsets[i] != max_offsets[i])
      tot += (max_offsets[i] - min_offsets[i]);
    if(min_offsets[i] < min)
      min = min_offsets[i];
    if(max_offsets[i] > max)
      max = max_offsets[i];
  }
  XBT_DEBUG("my offsets to read : %lld:%lld, global min and max %lld:%lld", min_offset, max_offset, min, max);
  MPI_Offset total = max - min;
  if(total == tot && (datatype->flags() & DT_FLAG_CONTIGUOUS)){
    //contiguous. Just have each proc perform its read
    status->count = count * datatype->size();
    return T(this, buf, count, datatype, status);
  }
  //Interleaved case : How much do I need to read, and whom to send it ?
  MPI_Offset my_chunk_start = (max - min + 1) / size * rank;
  MPI_Offset my_chunk_end = (max - min + 1) / size * (rank + 1);
  XBT_DEBUG("my chunks to read : %lld:%lld", my_chunk_start, my_chunk_end);
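  // e.g. with min=0, max=399 and 4 ranks, each chunk covers 100 bytes and rank 1 is
  // responsible for offsets [100, 200).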
  int* send_sizes = xbt_new0(int, size);
  int* recv_sizes = xbt_new(int, size);
  int* send_disps = xbt_new(int, size);
  int* recv_disps = xbt_new(int, size);
  int total_sent = 0;
  for(int i = 0; i < size; i++){
    if((my_chunk_start >= min_offsets[i] && my_chunk_start < max_offsets[i]) ||
       (my_chunk_end <= max_offsets[i] && my_chunk_end > min_offsets[i])){
      send_sizes[i] = (std::min(max_offsets[i] - 1, my_chunk_end - 1) - std::max(min_offsets[i], my_chunk_start));
      //store min and max offset to actually read
      min_offset = std::min(min_offset, min_offsets[i]);
      send_disps[i] = 0;//send_sizes[i]; cheat to avoid issues when send>recv as we use recv buffer
      total_sent += send_sizes[i];
      XBT_DEBUG("will have to send %d bytes to %d", send_sizes[i], i);
    }
  }
  min_offset = std::max(min_offset, my_chunk_start);
  //merge the ranges of every process
  std::vector<std::pair<MPI_Offset, MPI_Offset>> ranges;
  for(int i = 0; i < size; ++i)
    ranges.push_back(std::make_pair(min_offsets[i], max_offsets[i]));
  std::sort(ranges.begin(), ranges.end());
  std::vector<std::pair<MPI_Offset, MPI_Offset>> chunks;
  chunks.push_back(ranges[0]);
  unsigned int nchunks = 0;
  unsigned int i = 1;
  while(i < ranges.size()){
    if(ranges[i].second > chunks[nchunks].second){
      // else range included - ignore
      if(ranges[i].first > chunks[nchunks].second){
        //disjoint range : start a new chunk
        chunks.push_back(ranges[i]);
        nchunks++;
      }else{
        //overlapping range : extend the current chunk
        chunks[nchunks].second = ranges[i].second;
      }
    }
    i++;
  }
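  // e.g. sorted ranges (0,100),(50,150),(200,300) merge into the chunks (0,150) and (200,300).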
  //what do I need to read ?
  MPI_Offset totreads = 0;
  for(i = 0; i < chunks.size(); i++){
    if(chunks[i].second < my_chunk_start)
      continue;
    else if(chunks[i].first > my_chunk_end)
      continue;
    else
      totreads += (std::min(chunks[i].second, my_chunk_end - 1) - std::max(chunks[i].first, my_chunk_start));
  }
  XBT_DEBUG("will have to access %lld from my chunk", totreads);
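  // Phase 1: each rank performs the actual file access for its own chunk, using a temporary
  // buffer sized by the amount it owes to the other ranks; phase 2 (the alltoallv below)
  // redistributes the corresponding bytes.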
  char* sendbuf = static_cast<char *>(smpi_get_tmp_sendbuffer(total_sent));

  if(totreads > 0){
    seek(min_offset, MPI_SEEK_SET);
    T(this, sendbuf, totreads / datatype->size(), datatype, status);
  }
  simgrid::smpi::Colls::alltoall(send_sizes, 1, MPI_INT, recv_sizes, 1, MPI_INT, comm_);
  int total_recv = 0;
  for(int i = 0; i < size; i++){
    recv_disps[i] = total_recv;
    total_recv += recv_sizes[i];
  }
  //Set buf value to avoid copying dumb data
  simgrid::smpi::Colls::alltoallv(sendbuf, send_sizes, send_disps, MPI_BYTE,
                                  buf, recv_sizes, recv_disps, MPI_BYTE, comm_);
  status->count = count * datatype->size();
  smpi_free_tmp_buffer(sendbuf);
  xbt_free(send_sizes);
  xbt_free(recv_sizes);
  xbt_free(send_disps);
  xbt_free(recv_disps);
  xbt_free(min_offsets);
  xbt_free(max_offsets);
  return MPI_SUCCESS;
}

} // namespace smpi
} // namespace simgrid

#endif