/* Copyright (c) 2010-2019. The SimGrid Team.
 * All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */

#ifndef SMPI_FILE_HPP_INCLUDED
#define SMPI_FILE_HPP_INCLUDED

#include "simgrid/plugins/file_system.h"
#include "smpi_comm.hpp"
#include "smpi_coll.hpp"
#include "smpi_datatype.hpp"
#include "smpi_info.hpp"
#include <algorithm>
#include <utility>
#include <vector>

namespace simgrid{
namespace smpi{

class File{
  MPI_Comm comm_;                  // communicator the file was opened on
  simgrid::s4u::File* file_;       // underlying simulated (s4u) file
  MPI_Offset shared_file_pointer_; // shared file pointer, common to all ranks

  public:
  File(MPI_Comm comm, char *filename, int amode, MPI_Info info);
  int get_position(MPI_Offset* offset);
  int get_position_shared(MPI_Offset* offset);
  int seek(MPI_Offset offset, int whence);
  int seek_shared(MPI_Offset offset, int whence);
  void set_info(MPI_Info info);
  static int read(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status);
  static int read_shared(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status);
  static int read_ordered(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status);
  static int write(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status);
  static int write_shared(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status);
  static int write_ordered(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status);
  template <int (*T)(MPI_File, void *, int, MPI_Datatype, MPI_Status *)>
  int op_all(void *buf, int count, MPI_Datatype datatype, MPI_Status *status);
  static int close(MPI_File *fh);
  static int del(char *filename, MPI_Info info);
};
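
/* op_all() factors out the collective I/O logic shared by the *_all operations.
 * The template parameter T is one of the static per-rank routines declared above
 * (File::read or File::write). An illustrative call site (hypothetical) would be:
 *   fh->op_all<simgrid::smpi::File::read>(buf, count, datatype, status);
 */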
template <int (*T)(MPI_File, void *, int, MPI_Datatype, MPI_Status *)>
int File::op_all(void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
  // Gather the min and max offsets accessed by every rank.
  int size = comm_->size();
  int rank = comm_->rank();
  MPI_Offset min_offset = file_->tell();
  MPI_Offset max_offset = min_offset + count * datatype->size(); // cheating: we only care about the accessed byte range, not the exact data layout, so the datatype extent can be skipped
  MPI_Offset* min_offsets = xbt_new(MPI_Offset, size);
  MPI_Offset* max_offsets = xbt_new(MPI_Offset, size);
  simgrid::smpi::Colls::allgather(&min_offset, 1, MPI_OFFSET, min_offsets, 1, MPI_OFFSET, comm_);
  simgrid::smpi::Colls::allgather(&max_offset, 1, MPI_OFFSET, max_offsets, 1, MPI_OFFSET, comm_);
  MPI_Offset min = min_offset;
  MPI_Offset max = max_offset;
  MPI_Offset tot = 0;
  for (int i = 0; i < size; i++) {
    tot += (max_offsets[i] - min_offsets[i]);
    if (min_offsets[i] < min)
      min = min_offsets[i];
    if (max_offsets[i] > max)
      max = max_offsets[i];
  }
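
  // The gathered ranges together span [min, max), and tot is the sum of their individual
  // sizes. When tot == max - min, the per-rank ranges tile that region exactly, with no
  // holes and no overlaps, so each rank can simply perform its own access.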
  MPI_Offset total = max - min;
  if (total == tot && (datatype->flags() & DT_FLAG_CONTIGUOUS)) {
    // Contiguous case: just have each process perform its own read or write.
    return T(this, buf, count, datatype, status);
  }

  // Interleaved case: how much do I need to read, and to whom do I send it?
  MPI_Offset my_chunk_start = (max - min) / size * rank;
  MPI_Offset my_chunk_end = ((max - min) / size * (rank + 1)) - 1;
  int* send_sizes = xbt_new0(int, size);
  int* recv_sizes = xbt_new(int, size);
  int* send_disps = xbt_new(int, size);
  int* recv_disps = xbt_new(int, size);
  int total_sent = 0;
  for (int i = 0; i < size; i++) {
    if ((my_chunk_start >= min_offsets[i] && my_chunk_start < max_offsets[i]) ||
        ((my_chunk_end <= max_offsets[i]) && my_chunk_end > min_offsets[i])) {
      send_sizes[i] = (std::min(max_offsets[i], my_chunk_end + 1) - std::max(min_offsets[i], my_chunk_start));
      // Store the min and max offsets to actually read.
      min_offset = std::max(min_offsets[i], my_chunk_start);
      max_offset = std::min(max_offsets[i], my_chunk_end + 1);
      send_disps[i] = 0; // (rather than send_sizes[i]) cheat to avoid issues when send > recv, as we use the receive buffer
      total_sent += send_sizes[i];
    }
  }

  // Merge the ranges of every process.
  std::vector<std::pair<MPI_Offset, MPI_Offset>> ranges;
  for (int i = 0; i < size; ++i)
    ranges.push_back(std::make_pair(min_offsets[i], max_offsets[i]));
  std::sort(ranges.begin(), ranges.end());
  std::vector<std::pair<MPI_Offset, MPI_Offset>> chunks;
  chunks.push_back(ranges[0]);

  unsigned int nchunks = 0;
  unsigned int i = 1;
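
  // Sweep the sorted ranges, extending the current chunk or opening a new one whenever a
  // gap appears, so that chunks ends up holding disjoint byte ranges.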
  while (i < ranges.size()) {
    if (ranges[i].second > chunks[nchunks].second) {
      // (a range fully included in the current chunk is simply ignored)
      if (ranges[i].first > chunks[nchunks].second) {
        // New disjoint range.
        chunks.push_back(ranges[i]);
        nchunks++;
      } else {
        // Overlapping range: extend the current chunk.
        chunks[nchunks].second = ranges[i].second;
      }
    }
    i++;
  }

  // What do I need to read?
  MPI_Offset totreads = 0;
  for (i = 0; i < chunks.size(); i++) {
    if (chunks[i].second < my_chunk_start)
      continue;
    else if (chunks[i].first > my_chunk_end)
      continue;
    else
      totreads += (std::min(chunks[i].second, my_chunk_end + 1) - std::max(chunks[i].first, my_chunk_start));
  }
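
  // totreads is the number of bytes this rank physically accesses in the file: the part of
  // every requested chunk that falls inside its own slice of the global range.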
  char* sendbuf = static_cast<char *>(smpi_get_tmp_sendbuffer(totreads));

  if (totreads > 0) {
    seek(min_offset, MPI_SEEK_SET);
    T(this, sendbuf, totreads / datatype->size(), datatype, status);
  }
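
  // Exchange phase: first tell every rank how many bytes we hold for it (alltoall on the
  // sizes), then ship the data itself with alltoallv using the displacements computed below.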
  simgrid::smpi::Colls::alltoall(send_sizes, 1, MPI_INT, recv_sizes, 1, MPI_INT, comm_);
  int total_recv = 0;
  for (int i = 0; i < size; i++) {
    recv_disps[i] = total_recv;
    total_recv += recv_sizes[i];
  }
  // Receive straight into buf to avoid copying the data a second time.
  simgrid::smpi::Colls::alltoallv(sendbuf, send_sizes, send_disps, MPI_BYTE,
                                  buf, recv_sizes, recv_disps, MPI_BYTE, comm_);
  smpi_free_tmp_buffer(sendbuf);
  xbt_free(send_sizes);
  xbt_free(recv_sizes);
  xbt_free(send_disps);
  xbt_free(recv_disps);
  xbt_free(min_offsets);
  xbt_free(max_offsets);
  return MPI_SUCCESS;
}

}
}

#endif