class Colls;
class Comm;
class Datatype;
+class File;
class Group;
class Info;
class Keyval;
typedef simgrid::smpi::Comm SMPI_Comm;
typedef simgrid::smpi::Datatype SMPI_Datatype;
+typedef simgrid::smpi::File SMPI_File;
typedef simgrid::smpi::Group SMPI_Group;
typedef simgrid::smpi::Info SMPI_Info;
typedef simgrid::smpi::Op SMPI_Op;
typedef struct SMPI_Comm SMPI_Comm;
typedef struct SMPI_Datatype SMPI_Datatype;
+typedef struct SMPI_File SMPI_File;
typedef struct SMPI_Group SMPI_Group;
typedef struct SMPI_Info SMPI_Info;
typedef struct SMPI_Op SMPI_Op;
typedef long long MPI_Offset;
typedef long long MPI_Count;
-struct s_MPI_File;
-typedef struct s_MPI_File *MPI_File;
-
-
+typedef SMPI_File *MPI_File;
typedef SMPI_Datatype *MPI_Datatype;
typedef struct {
this_actor::parallel_execute(m_host_list, flops_amount, bytes_amount);
}
- XBT_DEBUG("WRITE %s on disk '%s'. size '%llu/%llu'", get_path(), local_storage_->get_cname(), size, size_);
+ XBT_DEBUG("WRITE %s on disk '%s'. size '%llu/%llu' '%llu:%llu'", get_path(), local_storage_->get_cname(), size, size_, sg_storage_get_size_used(local_storage_), sg_storage_get_size(local_storage_));
// If the storage is full before even starting to write
- if (sg_storage_get_size_used(local_storage_) >= sg_storage_get_size(local_storage_))
- return 0;
+ // if (sg_storage_get_size_used(local_storage_) >= sg_storage_get_size(local_storage_))
+ // return 0;
/* Substract the part of the file that might disappear from the used sized on the storage element */
local_storage_->extension<FileSystemStorageExt>()->decr_used_size(size_ - current_position_);
WRAPPED_PMPI_CALL(int, MPI_Test_cancelled,(MPI_Status* status, int* flag) ,(status, flag))
WRAPPED_PMPI_CALL(int, MPI_Status_set_cancelled,(MPI_Status *status,int flag),(status,flag))
WRAPPED_PMPI_CALL(int,MPI_Status_set_elements,( MPI_Status *status, MPI_Datatype datatype, int count),( status, datatype, count))
+// MPI-IO entry points: generate the MPI_* symbols forwarding to the PMPI_* implementations.
+WRAPPED_PMPI_CALL(int, MPI_File_open,(MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh),(comm, filename, amode, info, fh))
+WRAPPED_PMPI_CALL(int, MPI_File_close,(MPI_File *fh), (fh))
+WRAPPED_PMPI_CALL(int, MPI_File_delete,(char *filename, MPI_Info info), (filename, info))
+WRAPPED_PMPI_CALL(int, MPI_File_set_info,(MPI_File fh, MPI_Info info), (fh, info))
+WRAPPED_PMPI_CALL(int, MPI_File_get_info,(MPI_File fh, MPI_Info *info_used), (fh, info_used))
+WRAPPED_PMPI_CALL(int, MPI_File_read_at,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, offset, buf, count, datatype, status))
+WRAPPED_PMPI_CALL(int, MPI_File_read_at_all,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, offset, buf, count, datatype, status))
+WRAPPED_PMPI_CALL(int, MPI_File_write_at,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, offset, buf, count, datatype, status))
+WRAPPED_PMPI_CALL(int, MPI_File_write_at_all,(MPI_File fh, MPI_Offset offset, void *buf,int count, MPI_Datatype datatype, MPI_Status *status), (fh, offset, buf, count, datatype, status))
+WRAPPED_PMPI_CALL(int, MPI_File_read,(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status))
+WRAPPED_PMPI_CALL(int, MPI_File_read_all,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status))
+WRAPPED_PMPI_CALL(int, MPI_File_write,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status))
+WRAPPED_PMPI_CALL(int, MPI_File_write_all,(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status))
+// BUGFIX: parameter was misspelled "whenace"; renamed to the conventional "whence".
+WRAPPED_PMPI_CALL(int, MPI_File_seek,(MPI_File fh, MPI_Offset offset, int whence), (fh, offset, whence))
+WRAPPED_PMPI_CALL(int, MPI_File_get_position,(MPI_File fh, MPI_Offset *offset), (fh, offset))
+WRAPPED_PMPI_CALL(int, MPI_File_read_shared,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status))
+WRAPPED_PMPI_CALL(int, MPI_File_write_shared,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status))
+WRAPPED_PMPI_CALL(int, MPI_File_read_ordered,(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status))
+WRAPPED_PMPI_CALL(int, MPI_File_write_ordered,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status))
+WRAPPED_PMPI_CALL(int, MPI_File_seek_shared,(MPI_File fh, MPI_Offset offset, int whence), (fh, offset, whence))
+WRAPPED_PMPI_CALL(int, MPI_File_get_position_shared,(MPI_File fh, MPI_Offset *offset), (fh, offset))
+
/*
Unimplemented Calls - both PMPI and MPI calls are generated.
When implementing, please move ahead, swap UNIMPLEMENTED_WRAPPED_PMPI_CALL for WRAPPED_PMPI_CALL,
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_create_errhandler,(MPI_File_errhandler_function *function, MPI_Errhandler *errhandler),(function, errhandler))
UNIMPLEMENTED_WRAPPED_PMPI_CALL_NOFAIL(int, MPI_File_set_errhandler,( MPI_File file, MPI_Errhandler errhandler), (file, errhandler))
UNIMPLEMENTED_WRAPPED_PMPI_CALL_NOFAIL(int, MPI_File_get_errhandler,( MPI_File file, MPI_Errhandler *errhandler), (file, errhandler))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_open,(MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh),(comm, filename, amode, info, fh))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_close,(MPI_File *fh), (fh))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_delete,(char *filename, MPI_Info info), (filename, info))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_set_size,(MPI_File fh, MPI_Offset size), (fh, size))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_preallocate,(MPI_File fh, MPI_Offset size), (fh, size))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_size,(MPI_File fh, MPI_Offset *size), (fh, size))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_group,(MPI_File fh, MPI_Group *group), (fh, group))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_amode,(MPI_File fh, int *amode), (fh, amode))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_set_info,(MPI_File fh, MPI_Info info), (fh, info))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_info,(MPI_File fh, MPI_Info *info_used), (fh, info_used))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_set_view,(MPI_File fh, MPI_Offset disp, MPI_Datatype etype, MPI_Datatype filetype, char *datarep, MPI_Info info), (fh, disp, etype, filetype, datarep, info))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_view,(MPI_File fh, MPI_Offset *disp, MPI_Datatype *etype, MPI_Datatype *filetype, char *datarep), (fh, disp, etype, filetype, datarep))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_at,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, offset, buf, count, datatype, status))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_at_all,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, offset, buf, count, datatype, status))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_at,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, offset, buf, count, datatype, status))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_at_all,(MPI_File fh, MPI_Offset offset, void *buf,int count, MPI_Datatype datatype, MPI_Status *status), (fh, offset, buf, count, datatype, status))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iread_at,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, offset, buf, count, datatype, request))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iwrite_at,(MPI_File fh, MPI_Offset offset, void *buf,int count, MPI_Datatype datatype, MPI_Request *request), (fh, offset, buf, count, datatype, request))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iread_at_all,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, offset, buf, count, datatype, request))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iwrite_at_all,(MPI_File fh, MPI_Offset offset, void *buf,int count, MPI_Datatype datatype, MPI_Request *request), (fh, offset, buf, count, datatype, request))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read,(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_all,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_all,(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iread,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, buf, count, datatype, request))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iwrite,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, buf, count, datatype, request))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iread_all,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, buf, count, datatype, request))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iwrite_all,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, buf, count, datatype, request))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_seek,(MPI_File fh, MPI_Offset offset, int whenace), (fh, offset, whenace))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_position,(MPI_File fh, MPI_Offset *offset), (fh, offset))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_byte_offset,(MPI_File fh, MPI_Offset offset, MPI_Offset *disp), (fh, offset, disp))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_shared,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_shared,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iread_shared,(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Request *request), (fh, buf, count, datatype, request))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_iwrite_shared,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Request *request), (fh, buf, count, datatype, request))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_ordered,(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_ordered,(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status), (fh, buf, count, datatype, status))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_seek_shared,(MPI_File fh, MPI_Offset offset, int whence), (fh, offset, whence))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_get_position_shared,(MPI_File fh, MPI_Offset *offset), (fh, offset))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_at_all_begin,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype), (fh, offset, buf, count, datatype))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_read_at_all_end,(MPI_File fh, void *buf, MPI_Status *status), (fh, buf, status))
UNIMPLEMENTED_WRAPPED_PMPI_CALL(int, MPI_File_write_at_all_begin,(MPI_File fh, MPI_Offset offset, void *buf, int count, MPI_Datatype datatype), (fh, offset, buf, count, datatype))
--- /dev/null
+/* Copyright (c) 2007-2019. The SimGrid Team. All rights reserved. */\r
+\r
+/* This program is free software; you can redistribute it and/or modify it\r
+ * under the terms of the license (GNU LGPL) which comes with this package. */\r
+\r
+#include "private.hpp"\r
+XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi);\r
+\r
+#include "smpi_file.hpp"\r
+#include "smpi_datatype.hpp"\r
+\r
+\r
+int PMPI_File_open(MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh){\r
+  // Create a simulated file handle on communicator 'comm'.\r
+  // Errors: MPI_ERR_COMM (null comm), MPI_ERR_FILE (null name), MPI_ERR_AMODE (bad mode).\r
+  if (comm == MPI_COMM_NULL)\r
+    return MPI_ERR_COMM;\r
+  if (filename == nullptr)\r
+    return MPI_ERR_FILE;\r
+  if (amode < 0)\r
+    return MPI_ERR_AMODE;\r
+  smpi_bench_end();\r
+  *fh =  new simgrid::smpi::File(comm, filename, amode, info);\r
+  smpi_bench_begin();\r
+  // An empty file requires MPI_MODE_CREATE; an existing file must not have MPI_MODE_EXCL.\r
+  // BUGFIX: "not amode & MPI_MODE_CREATE" parsed as "(!amode) & MPI_MODE_CREATE" — the\r
+  // flag test needs its own parentheses. Also delete the File object (*fh), not the\r
+  // caller-owned handle pointer 'fh', and reset the handle to MPI_FILE_NULL.\r
+  if (((*fh)->size() == 0 && not (amode & MPI_MODE_CREATE)) ||\r
+     ((*fh)->size() != 0 && (amode & MPI_MODE_EXCL))){\r
+    delete *fh;\r
+    *fh = MPI_FILE_NULL;\r
+    return MPI_ERR_AMODE;\r
+  }\r
+  if(amode & MPI_MODE_APPEND)\r
+    (*fh)->seek(0,MPI_SEEK_END);\r
+  return MPI_SUCCESS;\r
+}\r
+\r
+int PMPI_File_close(MPI_File *fh){\r
+  // Close and destroy the file handle; on return the caller's handle is MPI_FILE_NULL.\r
+  if (fh==nullptr)\r
+    return MPI_ERR_ARG;\r
+  smpi_bench_end();\r
+  int ret = simgrid::smpi::File::close(fh);\r
+  *fh = MPI_FILE_NULL;\r
+  smpi_bench_begin();\r
+  return ret;\r
+}\r
+// Argument-validation helpers shared by the PMPI_File_* entry points below.\r
+#define CHECK_FILE(fh) if(fh==MPI_FILE_NULL) return MPI_ERR_FILE;\r
+#define CHECK_BUFFER(buf, count) if (buf==nullptr && count > 0) return MPI_ERR_BUFFER;\r
+#define CHECK_COUNT(count) if (count < 0) return MPI_ERR_COUNT;\r
+#define CHECK_OFFSET(offset) if (offset < 0) return MPI_ERR_DISP;\r
+#define CHECK_DATATYPE(datatype, count) if (datatype == MPI_DATATYPE_NULL && count > 0) return MPI_ERR_TYPE;\r
+#define CHECK_STATUS(status) if (status == nullptr) return MPI_ERR_ARG;\r
+#define CHECK_FLAGS(fh) if (fh->flags() & MPI_MODE_SEQUENTIAL) return MPI_ERR_AMODE;\r
+\r
+// Zero-count transfers succeed immediately with an empty status ('status' from caller scope).\r
+#define PASS_ZEROCOUNT(count) if (count == 0) {\\r
+status->count=0;\\r
+return MPI_SUCCESS;\\r
+}\r
+\r
+int PMPI_File_seek(MPI_File fh, MPI_Offset offset, int whence){\r
+  // Move this rank's individual file pointer (whence: MPI_SEEK_SET/CUR/END).\r
+  CHECK_FILE(fh);
+  smpi_bench_end();\r
+  int ret = fh->seek(offset,whence);\r
+  smpi_bench_begin();\r
+  return ret;\r
+\r
+}\r
+\r
+int PMPI_File_seek_shared(MPI_File fh, MPI_Offset offset, int whence){\r
+  // Move the communicator-wide shared file pointer.\r
+  CHECK_FILE(fh)\r
+  smpi_bench_end();\r
+  int ret = fh->seek_shared(offset,whence);\r
+  smpi_bench_begin();\r
+  return ret;\r
+\r
+}\r
+\r
+int PMPI_File_get_position(MPI_File fh, MPI_Offset* offset){\r
+  // Return the current individual file pointer in *offset.\r
+  // BUGFIX: validate the handle before dereferencing it — this check was missing,\r
+  // unlike in PMPI_File_get_position_shared, so MPI_FILE_NULL crashed here.\r
+  CHECK_FILE(fh)\r
+  if (offset==nullptr)\r
+    return MPI_ERR_DISP;\r
+  smpi_bench_end();\r
+  int ret = fh->get_position(offset);\r
+  smpi_bench_begin();\r
+  return ret;\r
+}\r
+\r
+int PMPI_File_get_position_shared(MPI_File fh, MPI_Offset* offset){\r
+  // Return the current shared file pointer in *offset.\r
+  CHECK_FILE(fh)\r
+  if (offset==nullptr)\r
+    return MPI_ERR_DISP;\r
+  smpi_bench_end();\r
+  int ret = fh->get_position_shared(offset);\r
+  smpi_bench_begin();\r
+  return ret;\r
+}\r
+\r
+int PMPI_File_read(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
+  // Blocking read at the individual file pointer. Rejected on MPI_MODE_SEQUENTIAL\r
+  // files (CHECK_FLAGS); a zero count returns immediately (PASS_ZEROCOUNT).\r
+  CHECK_FILE(fh)
+  CHECK_BUFFER(buf, count)\r
+  CHECK_COUNT(count)\r
+  CHECK_DATATYPE(datatype, count)\r
+  CHECK_STATUS(status)\r
+  CHECK_FLAGS(fh)\r
+  PASS_ZEROCOUNT(count)\r
+  smpi_bench_end();\r
+  // Trace with the payload size in bytes (count * datatype size).\r
+  int rank_traced = simgrid::s4u::this_actor::get_pid();\r
+  TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - read", static_cast<double>(count*datatype->size())));\r
+  int ret = simgrid::smpi::File::read(fh, buf, count, datatype, status);\r
+  TRACE_smpi_comm_out(rank_traced);\r
+  smpi_bench_begin();\r
+  return ret;\r
+}\r
+\r
+int PMPI_File_read_shared(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
+  // Blocking read at the shared file pointer, delegated to File::read_shared.\r
+  CHECK_FILE(fh)\r
+  CHECK_BUFFER(buf, count)\r
+  CHECK_COUNT(count)\r
+  CHECK_DATATYPE(datatype, count)\r
+  CHECK_STATUS(status)\r
+  CHECK_FLAGS(fh)\r
+  PASS_ZEROCOUNT(count)\r
+  smpi_bench_end();\r
+  int rank_traced = simgrid::s4u::this_actor::get_pid();\r
+  TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - read_shared", static_cast<double>(count*datatype->size())));\r
+  int ret = simgrid::smpi::File::read_shared(fh, buf, count, datatype, status);\r
+  TRACE_smpi_comm_out(rank_traced);\r
+  smpi_bench_begin();\r
+  return ret;\r
+}\r
+\r
+int PMPI_File_write(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
+  // Blocking write at the individual file pointer.\r
+  CHECK_FILE(fh)
+  CHECK_BUFFER(buf, count)\r
+  CHECK_COUNT(count)\r
+  CHECK_DATATYPE(datatype, count)\r
+  CHECK_STATUS(status)\r
+  CHECK_FLAGS(fh)\r
+  PASS_ZEROCOUNT(count)\r
+  smpi_bench_end();\r
+  int rank_traced = simgrid::s4u::this_actor::get_pid();\r
+  TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - write", static_cast<double>(count*datatype->size())));\r
+  int ret = simgrid::smpi::File::write(fh, buf, count, datatype, status);\r
+  TRACE_smpi_comm_out(rank_traced);\r
+  smpi_bench_begin();\r
+  return ret;\r
+}\r
+\r
+int PMPI_File_write_shared(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
+  // Blocking write at the shared file pointer, delegated to File::write_shared.\r
+  CHECK_FILE(fh)\r
+  CHECK_BUFFER(buf, count)\r
+  CHECK_COUNT(count)\r
+  CHECK_DATATYPE(datatype, count)\r
+  CHECK_STATUS(status)\r
+  CHECK_FLAGS(fh)\r
+  PASS_ZEROCOUNT(count)\r
+  smpi_bench_end();\r
+  int rank_traced = simgrid::s4u::this_actor::get_pid();\r
+  TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - write_shared", static_cast<double>(count*datatype->size())));\r
+  int ret = simgrid::smpi::File::write_shared(fh, buf, count, datatype, status);\r
+  TRACE_smpi_comm_out(rank_traced);\r
+  smpi_bench_begin();\r
+  return ret;\r
+}\r
+\r
+int PMPI_File_read_all(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
+  // Collective read: every rank of the handle's communicator participates, through\r
+  // the two-phase op_all engine instantiated on File::read.\r
+  CHECK_FILE(fh)
+  CHECK_BUFFER(buf, count)\r
+  CHECK_COUNT(count)\r
+  CHECK_DATATYPE(datatype, count)\r
+  CHECK_STATUS(status)\r
+  CHECK_FLAGS(fh)\r
+  smpi_bench_end();\r
+  int rank_traced = simgrid::s4u::this_actor::get_pid();\r
+  TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - read_all", static_cast<double>(count*datatype->size())));\r
+  int ret = fh->op_all<simgrid::smpi::File::read>(buf, count, datatype, status);\r
+  TRACE_smpi_comm_out(rank_traced);\r
+  smpi_bench_begin();\r
+  return ret;\r
+}\r
+\r
+int PMPI_File_read_ordered(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
+  // Collective read at the shared file pointer, delegated to File::read_ordered.\r
+  CHECK_FILE(fh)\r
+  CHECK_BUFFER(buf, count)\r
+  CHECK_COUNT(count)\r
+  CHECK_DATATYPE(datatype, count)\r
+  CHECK_STATUS(status)\r
+  CHECK_FLAGS(fh)\r
+  smpi_bench_end();\r
+  int rank_traced = simgrid::s4u::this_actor::get_pid();\r
+  TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - read_ordered", static_cast<double>(count*datatype->size())));\r
+  int ret = simgrid::smpi::File::read_ordered(fh, buf, count, datatype, status);\r
+  TRACE_smpi_comm_out(rank_traced);\r
+  smpi_bench_begin();\r
+  return ret;\r
+}\r
+\r
+int PMPI_File_write_all(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
+  // Collective write through the two-phase op_all engine instantiated on File::write.\r
+  CHECK_FILE(fh)
+  CHECK_BUFFER(buf, count)\r
+  CHECK_COUNT(count)\r
+  CHECK_DATATYPE(datatype, count)\r
+  CHECK_STATUS(status)\r
+  CHECK_FLAGS(fh)\r
+  smpi_bench_end();\r
+  int rank_traced = simgrid::s4u::this_actor::get_pid();\r
+  TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - write_all", static_cast<double>(count*datatype->size())));\r
+  int ret = fh->op_all<simgrid::smpi::File::write>(buf, count, datatype, status);\r
+  TRACE_smpi_comm_out(rank_traced);\r
+  smpi_bench_begin();\r
+  return ret;\r
+}\r
+\r
+int PMPI_File_write_ordered(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
+  // Collective write at the shared file pointer, delegated to File::write_ordered.\r
+  CHECK_FILE(fh)\r
+  CHECK_BUFFER(buf, count)\r
+  CHECK_COUNT(count)\r
+  CHECK_DATATYPE(datatype, count)\r
+  CHECK_STATUS(status)\r
+  CHECK_FLAGS(fh)\r
+  smpi_bench_end();\r
+  int rank_traced = simgrid::s4u::this_actor::get_pid();\r
+  TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - write_ordered", static_cast<double>(count*datatype->size())));\r
+  int ret = simgrid::smpi::File::write_ordered(fh, buf, count, datatype, status);\r
+  TRACE_smpi_comm_out(rank_traced);\r
+  smpi_bench_begin();\r
+  return ret;\r
+}\r
+\r
+int PMPI_File_read_at(MPI_File fh, MPI_Offset offset, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
+  // Read at an explicit offset: seek, then a plain read.\r
+  CHECK_FILE(fh)
+  CHECK_BUFFER(buf, count)\r
+  CHECK_OFFSET(offset)\r
+  CHECK_COUNT(count)\r
+  CHECK_DATATYPE(datatype, count)\r
+  CHECK_STATUS(status)\r
+  CHECK_FLAGS(fh)\r
+  PASS_ZEROCOUNT(count);\r
+  smpi_bench_end();\r
+  int rank_traced = simgrid::s4u::this_actor::get_pid();\r
+  TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - read", static_cast<double>(count*datatype->size())));\r
+  int ret = fh->seek(offset,MPI_SEEK_SET);\r
+  // BUGFIX: on seek failure, fall through so TRACE_smpi_comm_out and smpi_bench_begin\r
+  // still run (the early return left trace/bench state unbalanced).\r
+  if(ret==MPI_SUCCESS)\r
+    ret = simgrid::smpi::File::read(fh, buf, count, datatype, status);\r
+  TRACE_smpi_comm_out(rank_traced);\r
+  smpi_bench_begin();\r
+  return ret;\r
+}\r
+\r
+int PMPI_File_read_at_all(MPI_File fh, MPI_Offset offset, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
+  // Collective read at an explicit offset: seek, then the op_all engine on File::read.\r
+  CHECK_FILE(fh)
+  CHECK_BUFFER(buf, count)\r
+  CHECK_OFFSET(offset)\r
+  CHECK_COUNT(count)\r
+  CHECK_DATATYPE(datatype, count)\r
+  CHECK_STATUS(status)\r
+  CHECK_FLAGS(fh)\r
+  smpi_bench_end();\r
+  int rank_traced = simgrid::s4u::this_actor::get_pid();\r
+  TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - read_at_all", static_cast<double>(count*datatype->size())));\r
+  int ret = fh->seek(offset,MPI_SEEK_SET);\r
+  // BUGFIX: keep trace and bench state balanced when the seek fails (no early return).\r
+  if(ret==MPI_SUCCESS)\r
+    ret = fh->op_all<simgrid::smpi::File::read>(buf, count, datatype, status);\r
+  TRACE_smpi_comm_out(rank_traced);\r
+  smpi_bench_begin();\r
+  return ret;\r
+}\r
+\r
+int PMPI_File_write_at(MPI_File fh, MPI_Offset offset, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
+  // Write at an explicit offset: seek, then a plain write.\r
+  CHECK_FILE(fh)
+  CHECK_BUFFER(buf, count)\r
+  CHECK_OFFSET(offset)\r
+  CHECK_COUNT(count)\r
+  CHECK_DATATYPE(datatype, count)\r
+  CHECK_STATUS(status)\r
+  CHECK_FLAGS(fh)\r
+  PASS_ZEROCOUNT(count);\r
+  smpi_bench_end();\r
+  int rank_traced = simgrid::s4u::this_actor::get_pid();\r
+  TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - write", static_cast<double>(count*datatype->size())));\r
+  int ret = fh->seek(offset,MPI_SEEK_SET);\r
+  // BUGFIX: keep trace and bench state balanced when the seek fails (no early return).\r
+  if(ret==MPI_SUCCESS)\r
+    ret = simgrid::smpi::File::write(fh, buf, count, datatype, status);\r
+  TRACE_smpi_comm_out(rank_traced);\r
+  smpi_bench_begin();\r
+  return ret;\r
+}\r
+\r
+int PMPI_File_write_at_all(MPI_File fh, MPI_Offset offset, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
+  // Collective write at an explicit offset: seek, then the op_all engine on File::write.\r
+  CHECK_FILE(fh)
+  CHECK_BUFFER(buf, count)\r
+  CHECK_OFFSET(offset)\r
+  CHECK_COUNT(count)\r
+  CHECK_DATATYPE(datatype, count)\r
+  CHECK_STATUS(status)\r
+  CHECK_FLAGS(fh)\r
+  smpi_bench_end();\r
+  int rank_traced = simgrid::s4u::this_actor::get_pid();\r
+  TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - write_at_all", static_cast<double>(count*datatype->size())));\r
+  int ret = fh->seek(offset,MPI_SEEK_SET);\r
+  // BUGFIX: keep trace and bench state balanced when the seek fails (no early return).\r
+  if(ret==MPI_SUCCESS)\r
+    ret = fh->op_all<simgrid::smpi::File::write>(buf, count, datatype, status);\r
+  TRACE_smpi_comm_out(rank_traced);\r
+  smpi_bench_begin();\r
+  return ret;\r
+}\r
+\r
+int PMPI_File_delete(char *filename, MPI_Info info){\r
+  // Remove the named file from the simulated file system (File::del).\r
+  if (filename == nullptr)\r
+    return MPI_ERR_FILE;\r
+  smpi_bench_end();\r
+  int ret = simgrid::smpi::File::del(filename, info);\r
+  smpi_bench_begin();\r
+  return ret;\r
+}\r
+\r
+int PMPI_File_get_info(MPI_File fh, MPI_Info* info)\r
+{\r
+  // Return the info object attached to the handle.\r
+  CHECK_FILE(fh)\r
+  // BUGFIX: guard against a null output pointer before writing through it.\r
+  if (info == nullptr)\r
+    return MPI_ERR_INFO;\r
+  *info = fh->info();\r
+  return MPI_SUCCESS;\r
+}\r
+\r
+int PMPI_File_set_info(MPI_File fh, MPI_Info info)\r
+{\r
+  // Replace the info object attached to the handle.\r
+  CHECK_FILE(fh)\r
+  fh->set_info(info);\r
+  return MPI_SUCCESS;\r
+}
\ No newline at end of file
--- /dev/null
+/* Copyright (c) 2010-2019. The SimGrid Team.\r
+ * All rights reserved. */\r
+\r
+/* This program is free software; you can redistribute it and/or modify it\r
+ * under the terms of the license (GNU LGPL) which comes with this package. */\r
+\r
+#ifndef SMPI_FILE_HPP_INCLUDED\r
+#define SMPI_FILE_HPP_INCLUDED\r
+#include "simgrid/plugins/file_system.h"\r
+#include "smpi_comm.hpp"\r
+#include "smpi_coll.hpp"\r
+#include "smpi_datatype.hpp"\r
+#include "smpi_info.hpp"\r
+#include <algorithm>\r
+\r
+\r
+namespace simgrid{\r
+namespace smpi{\r
+// C++ representation of an MPI file handle (MPI_File), backed by a s4u::File.\r
+class File{\r
+  MPI_Comm comm_; // communicator the file was opened on\r
+  int flags_; // amode flags given to MPI_File_open\r
+  simgrid::s4u::File* file_; // underlying simulated file\r
+  MPI_Info info_;\r
+  MPI_Offset* shared_file_pointer_; // allocated on rank 0, pointer broadcast to all ranks\r
+  s4u::MutexPtr shared_mutex_; // protects the shared file pointer\r
+  MPI_Win win_; // RMA window over list_ (exposed by rank 0 only)\r
+  char* list_; // NOTE(review): rank-0 buffer backing win_; exact use not visible here — confirm\r
+  public:\r
+  File(MPI_Comm comm, char *filename, int amode, MPI_Info info);\r
+  ~File();\r
+  int size();\r
+  int get_position(MPI_Offset* offset);\r
+  int get_position_shared(MPI_Offset* offset);\r
+  int flags();\r
+  int sync();\r
+  int seek(MPI_Offset offset, int whence);\r
+  int seek_shared(MPI_Offset offset, int whence);\r
+  MPI_Info info();\r
+  void set_info( MPI_Info info);\r
+  static int read(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);\r
+  static int read_shared(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);\r
+  static int read_ordered(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);\r
+  static int write(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);\r
+  static int write_shared(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);\r
+  static int write_ordered(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);\r
+  // Two-phase collective engine; T is one of the static read/write functions above.\r
+  template <int (*T)(MPI_File, void *, int, MPI_Datatype, MPI_Status *)> int op_all(void *buf, int count,MPI_Datatype datatype, MPI_Status *status);\r
+  static int close(MPI_File *fh);\r
+  static int del(char *filename, MPI_Info info);\r
+};\r
+\r
+ /* Read_all, Write_all : loosely based on */\r
+ /* @article{Thakur:1996:ETM:245875.245879,*/\r
+ /* author = {Thakur, Rajeev and Choudhary, Alok},*/\r
+ /* title = {An Extended Two-phase Method for Accessing Sections of Out-of-core Arrays},*/\r
+ /* journal = {Sci. Program.},*/\r
+ /* issue_date = {Winter 1996},*/\r
+ /* pages = {301--317},*/\r
+ /* }*/ \r
+  template <int (*T)(MPI_File, void *, int, MPI_Datatype, MPI_Status *)>\r
+  int File::op_all(void *buf, int count, MPI_Datatype datatype, MPI_Status *status){\r
+    // Two-phase collective I/O (extended two-phase method, see reference above):\r
+    // 1) gather every rank's access range, 2) if the global range is contiguous each\r
+    // rank just performs its own access, 3) otherwise each rank reads/writes one chunk\r
+    // of the global range and the data is redistributed with an alltoallv.\r
+    //get min and max offsets from everyone.\r
+    int size = comm_->size();\r
+    int rank = comm_-> rank();\r
+    MPI_Offset min_offset = file_->tell();\r
+    MPI_Offset max_offset = (min_offset + count * datatype->size());//cheating, as we don't care about exact data location, we can skip extent\r
+    MPI_Offset* min_offsets = xbt_new(MPI_Offset, size);\r
+    MPI_Offset* max_offsets = xbt_new(MPI_Offset, size);\r
+    simgrid::smpi::Colls::allgather(&min_offset, 1, MPI_OFFSET, min_offsets, 1, MPI_OFFSET, comm_);\r
+    simgrid::smpi::Colls::allgather(&max_offset, 1, MPI_OFFSET, max_offsets, 1, MPI_OFFSET, comm_);\r
+    MPI_Offset min=min_offset;\r
+    MPI_Offset max=max_offset;\r
+    MPI_Offset tot= 0;\r
+    int empty=1;\r
+    for(int i=0;i<size;i++){\r
+      if(min_offsets[i]!=max_offsets[i])\r
+        empty=0;\r
+      tot+=(max_offsets[i]-min_offsets[i]);\r
+      if(min_offsets[i]<min)\r
+        min=min_offsets[i];\r
+      if(max_offsets[i]>max)\r
+        max=max_offsets[i];\r
+    }\r
+\r
+    XBT_DEBUG("my offsets to read : %lld:%lld, global min and max %lld:%lld", min_offset, max_offset, min, max);\r
+    if(empty==1){\r
+      // BUGFIX: release the gathered offset arrays on this early exit (they leaked).\r
+      xbt_free(min_offsets);\r
+      xbt_free(max_offsets);\r
+      status->count=0;\r
+      return MPI_SUCCESS;\r
+    }\r
+    MPI_Offset total = max-min;\r
+    if(total==tot && (datatype->flags() & DT_FLAG_CONTIGUOUS)){\r
+      //contiguous. Just have each proc perform its read\r
+      // BUGFIX: free the offset arrays before delegating (this early return leaked them).\r
+      xbt_free(min_offsets);\r
+      xbt_free(max_offsets);\r
+      status->count=count * datatype->size();\r
+      return T(this,buf,count,datatype, status);\r
+    }\r
+\r
+    //Interleaved case : How much do I need to read, and whom to send it ?\r
+    MPI_Offset my_chunk_start=(max-min+1)/size*rank;\r
+    MPI_Offset my_chunk_end=((max-min+1)/size*(rank+1));\r
+    XBT_DEBUG("my chunks to read : %lld:%lld", my_chunk_start, my_chunk_end);\r
+    int* send_sizes = xbt_new0(int, size);\r
+    int* recv_sizes = xbt_new(int, size);\r
+    int* send_disps = xbt_new(int, size);\r
+    int* recv_disps = xbt_new(int, size);\r
+    int total_sent=0;\r
+    for(int i=0;i<size;i++){\r
+      if((my_chunk_start>=min_offsets[i] && my_chunk_start < max_offsets[i])||\r
+          ((my_chunk_end<=max_offsets[i]) && my_chunk_end> min_offsets[i])){\r
+        send_sizes[i]=(std::min(max_offsets[i]-1, my_chunk_end-1)-std::max(min_offsets[i], my_chunk_start));\r
+        //store min and max offest to actually read\r
+        min_offset=std::min(min_offset, min_offsets[i]);\r
+        send_disps[i]=0;//send_sizes[i]; cheat to avoid issues when send>recv as we use recv buffer\r
+        total_sent+=send_sizes[i];\r
+        XBT_DEBUG("will have to send %d bytes to %d", send_sizes[i], i);\r
+      }\r
+    }\r
+    min_offset=std::max(min_offset, my_chunk_start);\r
+\r
+    //merge the ranges of every process\r
+    std::vector<std::pair<MPI_Offset, MPI_Offset>> ranges;\r
+    for(int i=0; i<size; ++i)\r
+      ranges.push_back(std::make_pair(min_offsets[i],max_offsets[i]));\r
+    std::sort(ranges.begin(), ranges.end());\r
+    std::vector<std::pair<MPI_Offset, MPI_Offset>> chunks;\r
+    chunks.push_back(ranges[0]);\r
+\r
+    unsigned int nchunks=0;\r
+    unsigned int i=1;\r
+    while(i < ranges.size()){\r
+      if(ranges[i].second>chunks[nchunks].second){\r
+        // else range included - ignore\r
+        if(ranges[i].first>chunks[nchunks].second){\r
+          //new disjoint range\r
+          chunks.push_back(ranges[i]);\r
+          nchunks++;\r
+        } else {\r
+          //merge ranges\r
+          chunks[nchunks].second=ranges[i].second;\r
+        }\r
+      }\r
+      i++;\r
+    }\r
+    //what do I need to read ?\r
+    MPI_Offset totreads=0;\r
+    for(i=0; i<chunks.size();i++){\r
+      if(chunks[i].second < my_chunk_start)\r
+        continue;\r
+      else if (chunks[i].first > my_chunk_end)\r
+        continue;\r
+      else\r
+        totreads += (std::min(chunks[i].second, my_chunk_end-1)-std::max(chunks[i].first, my_chunk_start));\r
+    }\r
+    XBT_DEBUG("will have to access %lld from my chunk", totreads);\r
+\r
+    char* sendbuf= static_cast<char *>(smpi_get_tmp_sendbuffer(totreads));\r
+\r
+    if(totreads>0){\r
+      seek(min_offset, MPI_SEEK_SET);\r
+      T(this,sendbuf,totreads/datatype->size(),datatype, status);\r
+    }\r
+    simgrid::smpi::Colls::alltoall(send_sizes, 1, MPI_INT, recv_sizes, 1, MPI_INT, comm_);\r
+    int total_recv=0;\r
+    for(int i=0;i<size;i++){\r
+      recv_disps[i]=total_recv;\r
+      total_recv+=recv_sizes[i];\r
+    }\r
+    //Set buf value to avoid copying dumb data\r
+    simgrid::smpi::Colls::alltoallv(sendbuf, send_sizes, send_disps, MPI_BYTE,\r
+                                    buf, recv_sizes, recv_disps, MPI_BYTE, comm_);\r
+    status->count=count * datatype->size();\r
+    smpi_free_tmp_buffer(sendbuf);\r
+    xbt_free(send_sizes);\r
+    xbt_free(recv_sizes);\r
+    xbt_free(send_disps);\r
+    xbt_free(recv_disps);\r
+    xbt_free(min_offsets);\r
+    xbt_free(max_offsets);\r
+    return MPI_SUCCESS;\r
+  }\r
+}\r
+}\r
+\r
+#endif\r
{"win_flush", "1 0 0.3"},
{"win_flush_local", "1 0 0.8"},
{"win_flush_all", "1 0.8 0"},
- {"win_flush_local_all", "1 0 0.3"}
+ {"win_flush_local_all", "1 0 0.3"},
+
+ {"file_read", "1 1 0.3"}
};
static const char* instr_find_color(const char* c_state)
#include "mc/mc.h"
#include "simgrid/s4u/Engine.hpp"
+#include "simgrid/plugins/file_system.h"
#include "smpi_coll.hpp"
#include "smpi_f2c.hpp"
#include "smpi_host.hpp"
SIMIX_global_init(&argc, argv);
SMPI_switch_data_segment = &smpi_switch_data_segment;
-
+ sg_storage_file_system_init();
// parse the platform file: get the host list
simgrid::s4u::Engine::get_instance()->load_platform(argv[1]);
SIMIX_comm_set_copy_data_callback(smpi_comm_copy_buffer_callback);
--- /dev/null
+/* Copyright (c) 2007-2019. The SimGrid Team. All rights reserved. */\r
+\r
+/* This program is free software; you can redistribute it and/or modify it\r
+ * under the terms of the license (GNU LGPL) which comes with this package. */\r
+#include "private.hpp"\r
+\r
+#include "smpi_comm.hpp"\r
+#include "smpi_coll.hpp"\r
+#include "smpi_datatype.hpp"\r
+#include "smpi_info.hpp"\r
+#include "smpi_win.hpp"\r
+#include "smpi_request.hpp"\r
+\r
+//setup here, because we have templates in smpi_file we want to log
+/* Category description fixed: this file logs IO operations, not RMA
+ * (the previous text was copy-pasted from the RMA logging category). */
+XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_io, smpi, "Logging specific to SMPI (IO operations)");
+\r
+#include "smpi_file.hpp"\r
+#include "smpi_status.hpp"\r
+#include "simgrid/plugins/file_system.h"\r
+\r
+#define FP_SIZE sizeof(MPI_Offset)\r
+\r
+\r
+namespace simgrid{\r
+namespace smpi{\r
+\r
+  /* Collectively open `filename` on all ranks of `comm`; `amode` is a set of
+   * MPI_MODE_* flags and `info` a (possibly null) hints object.
+   * Rank 0 allocates the shared bookkeeping: a byte per rank plus room for
+   * the shared file pointer (exposed through an RMA window), the pointer
+   * itself and the mutex protecting it. The raw addresses of the pointer and
+   * mutex are then broadcast -- sharing addresses across ranks is only valid
+   * because SMPI actors live in one simulated address space. */
+  File::File(MPI_Comm comm, char *filename, int amode, MPI_Info info): comm_(comm), flags_(amode), info_(info) {
+    file_= new simgrid::s4u::File(filename, nullptr);
+    list_=nullptr;
+    if (comm_->rank() == 0) {
+      /* one flag byte per rank + space for the shared file pointer */
+      int size= comm_->size() + FP_SIZE;
+      list_ = new char[size];
+      memset(list_, 0, size);
+      shared_file_pointer_ = new MPI_Offset[1];
+      shared_mutex_ = s4u::Mutex::create();
+      *shared_file_pointer_ = 0;
+      win_=new Win(list_, size, 1, MPI_INFO_NULL, comm_);
+    }else{
+      /* non-root ranks expose an empty window; they use rank 0's buffer */
+      win_=new Win(list_, 0, 1, MPI_INFO_NULL, comm_);
+    }
+    /* publish rank 0's shared-pointer and mutex addresses to every rank */
+    simgrid::smpi::Colls::bcast(&shared_file_pointer_, 1, MPI_AINT, 0, comm);
+    simgrid::smpi::Colls::bcast(&shared_mutex_, 1, MPI_AINT, 0, comm);
+    if(comm_->rank() != 0)
+      /* take a reference so the mutex outlives rank 0's own release */
+      intrusive_ptr_add_ref(&*shared_mutex_);
+  }
+\r
+  /* Destroy the handle: release the RMA window before the buffer it exposes,
+   * then the underlying s4u file. `list_` is nullptr on ranks != 0, so the
+   * delete[] is safe everywhere; previously both win_ and list_ leaked.
+   * NOTE(review): shared_file_pointer_ is owned by rank 0 and is still
+   * leaked here -- freeing it on every rank would double-free; confirm a
+   * rank-0-only release is appropriate. */
+  File::~File(){
+    delete win_;
+    delete[] list_;
+    delete file_;
+  }
+\r
+  /* Close `*fh`: synchronize across ranks, honor MPI_MODE_DELETE_ON_CLOSE by
+   * unlinking the underlying file, then destroy the handle.
+   * NOTE(review): *fh is left dangling; MPI_File_close is expected to reset
+   * it to MPI_FILE_NULL -- confirm the caller does so. */
+  int File::close(MPI_File *fh){
+    XBT_DEBUG("Closing MPI_File %s", (*fh)->file_->get_path());
+    (*fh)->sync();
+    if((*fh)->flags() & MPI_MODE_DELETE_ON_CLOSE)
+      (*fh)->file_->unlink();
+    delete (*fh);
+    return MPI_SUCCESS;
+  }
+\r
+  /* Delete `filename` by opening it with MPI_MODE_DELETE_ON_CLOSE on
+   * MPI_COMM_SELF and closing it immediately.
+   * NOTE(review): the `info` parameter is ignored (nullptr is passed). */
+  int File::del(char *filename, MPI_Info info){
+    //get the file with MPI_MODE_DELETE_ON_CLOSE and then close it
+    File* f = new File(MPI_COMM_SELF,filename,MPI_MODE_DELETE_ON_CLOSE|MPI_MODE_RDWR, nullptr);
+    close(&f);
+    return MPI_SUCCESS;
+  }
+\r
+  /* Store this rank's individual file pointer (byte offset) into *offset. */
+  int File::get_position(MPI_Offset* offset){
+    *offset=file_->tell();
+    return MPI_SUCCESS;
+  }
+\r
+  /* Store the communicator-wide shared file pointer into *offset, reading it
+   * under the shared mutex to avoid racing a concurrent shared access. */
+  int File::get_position_shared(MPI_Offset* offset){
+    shared_mutex_->lock();
+    *offset=*shared_file_pointer_;
+    shared_mutex_->unlock();
+    return MPI_SUCCESS;
+  }
+\r
+  /* Move this rank's individual file pointer. `whence` follows MPI
+   * semantics: MPI_SEEK_SET = absolute offset, MPI_SEEK_CUR = relative to
+   * the current position, MPI_SEEK_END = relative to the end of file.
+   * Returns MPI_ERR_FILE for an unknown `whence` value. */
+  int File::seek(MPI_Offset offset, int whence){
+    switch(whence){
+      case(MPI_SEEK_SET):
+        XBT_VERB("Seeking in MPI_File %s, setting offset %lld", file_->get_path(), offset);
+        file_->seek(offset,SEEK_SET);
+        break;
+      case(MPI_SEEK_CUR):
+        XBT_VERB("Seeking in MPI_File %s, current offset + %lld", file_->get_path(), offset);
+        file_->seek(offset,SEEK_CUR);
+        break;
+      case(MPI_SEEK_END):
+        XBT_VERB("Seeking in MPI_File %s, end offset + %lld", file_->get_path(), offset);
+        file_->seek(offset,SEEK_END);
+        break;
+      default:
+        return MPI_ERR_FILE;
+    }
+    return MPI_SUCCESS;
+  }
+\r
+  /* Move the shared file pointer under the mutex. The shared pointer is set
+   * from the file's resulting absolute position (file_->tell()) rather than
+   * the raw `offset`: with MPI_SEEK_CUR or MPI_SEEK_END the offset is
+   * relative, so storing it directly would corrupt the shared pointer. */
+  int File::seek_shared(MPI_Offset offset, int whence){
+    shared_mutex_->lock();
+    seek(offset,whence);
+    *shared_file_pointer_=file_->tell();
+    shared_mutex_->unlock();
+    return MPI_SUCCESS;
+  }
+\r
+  /* Simulate reading `count` elements of `datatype` at fh's individual
+   * pointer (SMPI simulates size/timing only; no payload is copied to buf).
+   * For non-contiguous datatypes (extent != size) the file pointer is
+   * advanced by the full extent instead of the bytes read.
+   * NOTE(review): status->count reports the requested size even when the
+   * underlying read returns fewer bytes (e.g. at end of file) -- confirm. */
+  int File::read(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
+    //get position first as we may be doing non contiguous reads and it will probably be updated badly
+    MPI_Offset position = fh->file_->tell();
+    MPI_Offset movesize = datatype->get_extent()*count;
+    MPI_Offset readsize = datatype->size()*count;
+    XBT_DEBUG("Position before read in MPI_File %s : %llu",fh->file_->get_path(),fh->file_->tell());
+    MPI_Offset read = fh->file_->read(readsize);
+    XBT_VERB("Read in MPI_File %s, %lld bytes read, readsize %lld bytes, movesize %lld", fh->file_->get_path(), read, readsize, movesize);
+    if(readsize!=movesize){
+      fh->file_->seek(position+movesize, SEEK_SET);
+    }
+    XBT_VERB("Position after read in MPI_File %s : %llu",fh->file_->get_path(), fh->file_->tell());
+    status->count=count*datatype->size();
+    return MPI_SUCCESS;
+  }
+\r
+  /*Ordered and Shared Versions, with RMA-based locks : Based on the model described in :*/
+  /* @InProceedings{10.1007/11557265_15,*/
+  /* author="Latham, Robert and Ross, Robert and Thakur, Rajeev and Toonen, Brian",*/ 
+  /* title="Implementing MPI-IO Shared File Pointers Without File System Support",*/
+  /* booktitle="Recent Advances in Parallel Virtual Machine and Message Passing Interface",*/
+  /* year="2005",*/
+  /* publisher="Springer Berlin Heidelberg",*/
+  /* address="Berlin, Heidelberg",*/
+  /* pages="84--93"*/
+  /* }*/
+  /* Read at the shared file pointer: take the mutex, move the individual
+   * pointer to the shared offset, read, then publish the updated offset. */
+  int File::read_shared(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
+    fh->shared_mutex_->lock();
+    fh->seek(*(fh->shared_file_pointer_),MPI_SEEK_SET);
+    read(fh, buf, count, datatype, status);
+    *(fh->shared_file_pointer_)=fh->file_->tell();
+    fh->shared_mutex_->unlock();
+    return MPI_SUCCESS;
+  }
+\r
+  /* Collective read in rank order starting from the shared file pointer.
+   * Rank offsets are computed with an inclusive scan seeded with the shared
+   * pointer on rank 0; the last rank publishes the final position, and the
+   * trailing 1-byte bcast acts as a barrier so no rank returns before the
+   * shared pointer is updated.
+   * NOTE(review): the scan adds each rank's *own* byte count, so the offsets
+   * are only correct when every rank passes the same `count` -- confirm
+   * whether unequal counts should use an exclusive scan instead. */
+  int File::read_ordered(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
+    //0 needs to get the shared pointer value
+    MPI_Offset val;
+    if(fh->comm_->rank()==0){
+      val=*(fh->shared_file_pointer_);
+    }else{
+      val=count*datatype->size();
+    }
+
+    MPI_Offset result;
+    simgrid::smpi::Colls::scan(&val, &result, 1, MPI_OFFSET, MPI_SUM, fh->comm_);
+    fh->seek(result, MPI_SEEK_SET);
+    int ret = fh->op_all<simgrid::smpi::File::read>(buf, count, datatype, status);
+    if(fh->comm_->rank()==fh->comm_->size()-1){
+      fh->shared_mutex_->lock();
+      *(fh->shared_file_pointer_)=fh->file_->tell();
+      fh->shared_mutex_->unlock();
+    }
+    char c;
+    simgrid::smpi::Colls::bcast(&c, 1, MPI_BYTE, fh->comm_->size()-1, fh->comm_);
+    return ret;
+  }
+\r
+  /* Simulate writing `count` elements of `datatype` at fh's individual
+   * pointer (SMPI simulates size/timing only; buf's payload is not stored).
+   * For non-contiguous datatypes the pointer advances by the full extent.
+   * NOTE(review): the "readsize" label in the log line really means the
+   * write size; it is kept as-is because tesh expected outputs match it.
+   * NOTE(review): status->count reports the requested size even on a short
+   * write -- confirm. */
+  int File::write(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
+    //get position first as we may be doing non contiguous reads and it will probably be updated badly
+    MPI_Offset position = fh->file_->tell();
+    MPI_Offset movesize = datatype->get_extent()*count;
+    MPI_Offset writesize = datatype->size()*count;
+    XBT_DEBUG("Position before write in MPI_File %s : %llu",fh->file_->get_path(),fh->file_->tell());
+    MPI_Offset write = fh->file_->write(writesize);
+    XBT_VERB("Write in MPI_File %s, %lld bytes written, readsize %lld bytes, movesize %lld", fh->file_->get_path(), write, writesize, movesize);
+    if(writesize!=movesize){
+      fh->file_->seek(position+movesize, SEEK_SET);
+    }
+    XBT_VERB("Position after write in MPI_File %s : %llu",fh->file_->get_path(), fh->file_->tell());
+    status->count=count*datatype->size();
+    return MPI_SUCCESS;
+  }
+\r
+  /* Write at the shared file pointer: take the mutex, move the individual
+   * pointer to the shared offset, write, then publish the updated offset. */
+  int File::write_shared(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
+    fh->shared_mutex_->lock();
+    fh->seek(*(fh->shared_file_pointer_),MPI_SEEK_SET);
+    write(fh, buf, count, datatype, status);
+    *(fh->shared_file_pointer_)=fh->file_->tell();
+    fh->shared_mutex_->unlock();
+    return MPI_SUCCESS;
+  }
+\r
+  /* Collective write in rank order starting from the shared file pointer;
+   * mirrors read_ordered (scan-computed offsets, last rank publishes the new
+   * shared pointer, trailing bcast used as a barrier).
+   * NOTE(review): as in read_ordered, the inclusive scan assumes every rank
+   * passes the same `count` -- confirm for unequal counts. */
+  int File::write_ordered(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
+    //0 needs to get the shared pointer value
+    MPI_Offset val;
+    if(fh->comm_->rank()==0){
+      val=*(fh->shared_file_pointer_);
+    }else{
+      val=count*datatype->size();
+    }
+    MPI_Offset result;
+    simgrid::smpi::Colls::scan(&val, &result, 1, MPI_OFFSET, MPI_SUM, fh->comm_);
+    fh->seek(result, MPI_SEEK_SET);
+    int ret = fh->op_all<simgrid::smpi::File::write>(buf, count, datatype, status);
+    if(fh->comm_->rank()==fh->comm_->size()-1){
+      fh->shared_mutex_->lock();
+      *(fh->shared_file_pointer_)=fh->file_->tell();
+      fh->shared_mutex_->unlock();
+    }
+    char c;
+    simgrid::smpi::Colls::bcast(&c, 1, MPI_BYTE, fh->comm_->size()-1, fh->comm_);
+    return ret;
+  }
+\r
+  /* Current file size in bytes.
+   * NOTE(review): the int return truncates sizes above INT_MAX while the
+   * underlying s4u size is wider -- confirm whether the interface can be
+   * widened to MPI_Offset. */
+  int File::size(){
+    return file_->size();
+  }
+
+  /* MPI_MODE_* flags the file was opened with. */
+  int File::flags(){
+    return flags_;
+  }
+
+  /* Synchronize the file across ranks. Nothing is flushed in simulation, so
+   * a barrier on the communicator is the whole operation. */
+  int File::sync(){
+    //no idea
+    return simgrid::smpi::Colls::barrier(comm_);
+  }
+\r
+/* Return the info object attached to this file, creating an empty one on
+ * first use. A reference is taken for the caller (MPI_File_get_info
+ * semantics: the caller owns and must free the returned handle). */
+MPI_Info File::info(){
+  if(info_== MPI_INFO_NULL)
+    info_ = new Info();
+  info_->ref();
+  return info_;
+}
+\r
+/* Attach `info` to this file, keeping reference counts balanced: drop our
+ * reference on the previous info object and take one on the new object.
+ * (The previous code never unref'd the old info and only ref'd the new one
+ * when the old one happened to be non-null.) */
+void File::set_info(MPI_Info info){
+  if(info_!= MPI_INFO_NULL)
+    simgrid::smpi::Info::unref(info_);
+  info_=info;
+  if(info_!= MPI_INFO_NULL)
+    info_->ref();
+}
+\r
+}\r
+}\r
include_directories(BEFORE "${CMAKE_HOME_DIRECTORY}/include/smpi")
foreach(x coll-allgather coll-allgatherv coll-allreduce coll-alltoall coll-alltoallv coll-barrier coll-bcast
coll-gather coll-reduce coll-reduce-scatter coll-scatter macro-sample pt2pt-dsend pt2pt-pingpong
- type-hvector type-indexed type-struct type-vector bug-17132 timers privatization )
+ type-hvector type-indexed type-struct type-vector bug-17132 timers privatization
+ io-simple io-simple-at io-all io-shared io-ordered)
add_executable (${x} EXCLUDE_FROM_ALL ${x}/${x}.c)
target_link_libraries(${x} simgrid)
set_target_properties(${x} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${x})
foreach(x coll-allgather coll-allgatherv coll-allreduce coll-alltoall coll-alltoallv coll-barrier coll-bcast
coll-gather coll-reduce coll-reduce-scatter coll-scatter macro-sample pt2pt-dsend pt2pt-pingpong
type-hvector type-indexed type-struct type-vector bug-17132 timers privatization
- macro-shared macro-partial-shared macro-partial-shared-communication)
+ macro-shared macro-partial-shared macro-partial-shared-communication
+ io-simple io-simple-at io-all io-shared io-ordered)
set(tesh_files ${tesh_files} ${CMAKE_CURRENT_SOURCE_DIR}/${x}/${x}.tesh)
set(teshsuite_src ${teshsuite_src} ${CMAKE_CURRENT_SOURCE_DIR}/${x}/${x}.c)
endforeach()
foreach(x coll-allgather coll-allgatherv coll-allreduce coll-alltoall coll-alltoallv coll-barrier coll-bcast
coll-gather coll-reduce coll-reduce-scatter coll-scatter macro-sample pt2pt-dsend pt2pt-pingpong
- type-hvector type-indexed type-struct type-vector bug-17132 timers)
+ type-hvector type-indexed type-struct type-vector bug-17132 timers io-simple io-simple-at io-all io-shared io-ordered)
ADD_TESH_FACTORIES(tesh-smpi-${x} "thread;ucontext;raw;boost" --setenv platfdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/platforms --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/smpi/${x} --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/${x} ${x}.tesh)
endforeach()
--- /dev/null
+#include "mpi.h"\r
+#include <stdio.h>\r
+#include <stdlib.h>\r
+#include <string.h>\r
+#include <memory.h>\r
+\r
+/* Test reading and writing zero bytes (set status correctly) */\r
+\r
+/* Exercise collective MPI_File_write_all/read_all, including zero-count
+ * accesses whose status must report a zero element count. Returns the number
+ * of detected errors. */
+int main( int argc, char *argv[] )
+{
+    int errs = 0;
+    int size, rank, i, *buf, count;
+    MPI_File fh;
+    MPI_Comm comm;
+    MPI_Status status;
+
+    MPI_Init( &argc, &argv );
+
+    comm = MPI_COMM_WORLD;
+    MPI_File_open( comm, (char*)"/scratch/lib/libsimgrid.so.3.6.2", MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_DELETE_ON_CLOSE, MPI_INFO_NULL, &fh );
+    MPI_Comm_size( comm, &size );
+    MPI_Comm_rank( comm, &rank );
+    /* The largest access below is 10 ints; allocate (10+size) ints.
+       (The original `10+ size * sizeof(int)` allocated bytes and was too
+       small for the 10-int accesses -- heap overflow.) */
+    buf = (int *)malloc( (10 + size) * sizeof(int) );
+    buf[0] = rank;
+
+    /* Write to file */
+    MPI_File_seek( fh, sizeof(int)*rank, MPI_SEEK_SET );
+    MPI_File_write_all( fh, buf, 1, MPI_INT, &status );
+    MPI_Get_count( &status, MPI_INT, &count );
+    if (count != 1) {
+        errs++;
+        fprintf( stderr, "Wrong count (%d) on write\n", count );fflush(stderr);
+    }
+    /* Write to file, overlapping */
+    MPI_File_seek( fh, sizeof(int)*rank, MPI_SEEK_SET );
+    MPI_File_write_all( fh, buf, 10, MPI_INT, &status );
+    MPI_Get_count( &status, MPI_INT, &count );
+    if (count != 10) {
+        errs++;
+        fprintf( stderr, "Wrong count (%d) on write\n", count );fflush(stderr);
+    }
+    /* Read nothing (check status) */
+    memset( &status, 0xff, sizeof(MPI_Status) );
+    MPI_File_read_all( fh, buf, 0, MPI_INT, &status );
+    MPI_Get_count( &status, MPI_INT, &count );
+    if (count != 0) {
+        errs++;
+        fprintf( stderr, "Count not zero (%d) on read\n", count );fflush(stderr);
+    }
+
+    /* Write nothing (check status) */
+    memset( &status, 0xff, sizeof(MPI_Status) );
+    MPI_File_write_all( fh, buf, 0, MPI_INT, &status );
+    /* Query the status of THIS write: the original reused the stale count
+       from the previous read, making the check vacuous. */
+    MPI_Get_count( &status, MPI_INT, &count );
+    if (count != 0) {
+        errs++;
+        fprintf( stderr, "Count not zero (%d) on write\n", count );fflush(stderr);
+    }
+
+    MPI_Barrier( comm );
+
+    MPI_File_seek( fh, sizeof(int)*rank, MPI_SEEK_SET );
+    for (i=0; i<size; i++) buf[i] = -1;
+    MPI_File_read_all( fh, buf, 10, MPI_INT, &status );
+    // if (buf[0] != rank) {
+    //     errs++;
+    //     fprintf( stderr, "%d: buf = %d\n", rank, buf[0] );fflush(stderr);
+    // }
+
+    free( buf );
+    MPI_File_close( &fh );
+
+    MPI_Finalize();
+    return errs;
+}
\ No newline at end of file
--- /dev/null
+# Test for collective MPI_File_read_all and MPI_File_write_all
+! output sort\r
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_io -platform ../../../examples/platforms/storage/remote_io.xml -np 4 --log=xbt_cfg.thres:critical --log=smpi_kernel.thres:warning --log=smpi_mpi.thres:error --log=smpi_io.thres:verbose "--log=root.fmt:[%10.6r]%e(%P@%h)%e%m%n" --cfg=smpi/simulate-computation:0 ${bindir:=.}/io-all\r
+> You requested to use 4 ranks, but there is only 2 processes in your hostfile...\r
+> [rank 0] -> bob\r
+> [rank 1] -> carl\r
+> [rank 2] -> bob\r
+> [rank 3] -> carl\r
+> [ 0.000282] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.000358] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8\r
+> [ 0.000534] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 12\r
+> [ 0.000534] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4\r
+> [ 0.000737] (3@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
+> [ 0.000737] (3@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 16\r
+> [ 0.000737] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 12\r
+> [ 0.000737] (1@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
+> [ 0.000737] (1@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 8\r
+> [ 0.000737] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4\r
+> [ 0.000838] (2@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
+> [ 0.000838] (2@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12\r
+> [ 0.000838] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8\r
+> [ 0.000838] (0@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
+> [ 0.000838] (0@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4\r
+> [ 0.000838] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.001040] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 39\r
+> [ 0.001040] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 26\r
+> [ 0.001040] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.001040] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 13\r
+> [ 0.001041] (3@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes written, readsize 12 bytes, movesize 12\r
+> [ 0.001041] (3@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 51\r
+> [ 0.001041] (1@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes written, readsize 12 bytes, movesize 12\r
+> [ 0.001041] (1@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 25\r
+> [ 0.001141] (0@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes written, readsize 12 bytes, movesize 12\r
+> [ 0.001141] (0@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12\r
+> [ 0.001141] (2@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes written, readsize 12 bytes, movesize 12\r
+> [ 0.001141] (2@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 38\r
+> [ 0.001799] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.001814] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8\r
+> [ 0.001849] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 12\r
+> [ 0.001849] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4\r
+> [ 0.002052] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.002052] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 26\r
+> [ 0.002052] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 39\r
+> [ 0.002052] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 13\r
+> [ 0.002052] (3@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes read, readsize 12 bytes, movesize 12\r
+> [ 0.002052] (3@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 51\r
+> [ 0.002052] (1@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes read, readsize 12 bytes, movesize 12\r
+> [ 0.002052] (1@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 25\r
+> [ 0.002153] (2@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes read, readsize 12 bytes, movesize 12\r
+> [ 0.002153] (2@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 38\r
+> [ 0.002153] (0@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes read, readsize 12 bytes, movesize 12\r
+> [ 0.002153] (0@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12\r
--- /dev/null
+#include "mpi.h"\r
+#include <stdio.h>\r
+#include <stdlib.h>\r
+#include <string.h>\r
+#include <memory.h>\r
+\r
+/* Test reading and writing zero bytes (set status correctly) */\r
+\r
+/* Exercise MPI_File_write_ordered/read_ordered with a shared file pointer.
+ * Returns the number of detected errors. */
+int main( int argc, char *argv[] )
+{
+    int errs = 0;
+    int size, rank, i, *buf, count;
+    MPI_File fh;
+    MPI_Comm comm;
+    MPI_Status status;
+
+    MPI_Init( &argc, &argv );
+
+    comm = MPI_COMM_WORLD;
+    MPI_File_open( comm, (char*)"/scratch/lib/libsimgrid.so.3.6.2", MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_DELETE_ON_CLOSE, MPI_INFO_NULL, &fh );
+    MPI_Comm_size( comm, &size );
+    MPI_Comm_rank( comm, &rank );
+    /* Accesses below touch 10 ints; allocate (10+size) ints.
+       (The original `10+size * sizeof(int)` allocated bytes and was too
+       small for the 40-byte accesses.) */
+    buf = (int *)malloc( (10 + size) * sizeof(int) );
+    buf[0] = rank;
+
+    /* Write to file, in rank order through the shared file pointer */
+    MPI_File_write_ordered( fh, buf, 10, MPI_INT, &status );
+    MPI_Get_count( &status, MPI_INT, &count );
+    if (count != 10) {
+        errs++;
+        fprintf( stderr, "Wrong count (%d) on write-ordered\n", count );fflush(stderr);
+    }
+    MPI_Barrier( comm );
+    MPI_File_seek_shared( fh, 0, MPI_SEEK_SET );
+    for (i=0; i<size; i++) buf[i] = -1;
+    MPI_File_read_ordered( fh, buf, 10, MPI_INT, &status );
+
+    MPI_Barrier(comm);
+    MPI_File_seek_shared( fh, 0, MPI_SEEK_SET );
+
+    free( buf );
+    MPI_File_close( &fh );
+
+    MPI_Finalize();
+    return errs;
+}
\ No newline at end of file
--- /dev/null
+# Test for MPI_File_read_ordered and MPI_File_write_ordered (shared file pointer, rank order)
+! output sort\r
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_io -platform ../../../examples/platforms/storage/remote_io.xml -np 4 --log=xbt_cfg.thres:critical --log=smpi_kernel.thres:warning --log=smpi_mpi.thres:error --log=smpi_io.thres:verbose "--log=root.fmt:[%10.6r]%e(%P@%h)%e%m%n" --cfg=smpi/simulate-computation:0 ${bindir:=.}/io-ordered\r
+> You requested to use 4 ranks, but there is only 2 processes in your hostfile...\r
+> [rank 0] -> bob\r
+> [rank 1] -> carl\r
+> [rank 2] -> bob\r
+> [rank 3] -> carl\r
+> [ 0.000282] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.000635] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 80\r
+> [ 0.000635] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 40\r
+> [ 0.000635] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 120\r
+> [ 0.000839] (3@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes written, readsize 40 bytes, movesize 40\r
+> [ 0.000839] (3@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 160\r
+> [ 0.000839] (1@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes written, readsize 40 bytes, movesize 40\r
+> [ 0.000839] (1@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 80\r
+> [ 0.000940] (2@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes written, readsize 40 bytes, movesize 40\r
+> [ 0.000940] (2@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 120\r
+> [ 0.000940] (0@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes written, readsize 40 bytes, movesize 40\r
+> [ 0.000940] (0@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 40\r
+> [ 0.001091] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.001091] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.001106] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.001141] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.001141] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.001242] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 80\r
+> [ 0.001242] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 40\r
+> [ 0.001242] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 120\r
+> [ 0.001446] (3@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40\r
+> [ 0.001446] (3@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 160\r
+> [ 0.001446] (1@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40\r
+> [ 0.001446] (1@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 80\r
+> [ 0.001548] (2@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40\r
+> [ 0.001548] (2@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 120\r
+> [ 0.001548] (0@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40\r
+> [ 0.001548] (0@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 40\r
+> [ 0.001699] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.001714] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.001749] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.001749] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
--- /dev/null
+#include "mpi.h"\r
+#include <stdio.h>\r
+#include <stdlib.h>\r
+#include <string.h>\r
+#include <memory.h>\r
+\r
+/* Test reading and writing zero bytes (set status correctly) */\r
+\r
+/* Exercise MPI_File_read_shared/write_shared (and read_ordered) through the
+ * shared file pointer. Returns the number of detected errors. */
+int main( int argc, char *argv[] )
+{
+    int errs = 0;
+    int size, rank, i, *buf, count;
+    MPI_File fh;
+    MPI_Comm comm;
+    MPI_Status status;
+
+    MPI_Init( &argc, &argv );
+
+    comm = MPI_COMM_WORLD;
+    MPI_File_open( comm, (char*)"/scratch/lib/libsimgrid.so.3.6.2", MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_DELETE_ON_CLOSE, MPI_INFO_NULL, &fh );
+    MPI_Comm_size( comm, &size );
+    MPI_Comm_rank( comm, &rank );
+    /* Largest access below is 10 ints; allocate (10+size) ints rather than
+       `10+size * sizeof(int)` bytes, which was too small for the reads. */
+    buf = (int *)malloc( (10 + size) * sizeof(int) );
+    buf[0] = rank;
+
+    /* Write nothing (check status) */
+    memset( &status, 0xff, sizeof(MPI_Status) );
+    MPI_File_write( fh, buf, 0, MPI_INT, &status );
+    MPI_Get_count( &status, MPI_INT, &count );
+    if (count != 0) {
+        errs++;
+        fprintf( stderr, "Count not zero (%d) on write\n", count );fflush(stderr);
+    }
+
+    MPI_Barrier( comm );
+    MPI_File_seek_shared( fh, 0, MPI_SEEK_SET );
+    for (i=0; i<size; i++) buf[i] = -1;
+    MPI_File_read_ordered( fh, buf, 10, MPI_INT, &status );
+
+    MPI_Barrier(comm);
+    MPI_File_seek_shared( fh, 0, MPI_SEEK_SET );
+
+    /* Each rank reads one int through the shared pointer */
+    memset( &status, 0xff, sizeof(MPI_Status) );
+    MPI_File_read_shared( fh, buf, 1, MPI_INT, &status );
+    MPI_Get_count( &status, MPI_INT, &count );
+    if (count != 1) {
+        errs++;
+        fprintf( stderr, "Wrong count (%d) on read shared\n", count );fflush(stderr);
+    }
+
+    MPI_Barrier(comm);
+    MPI_File_seek_shared( fh, 0, MPI_SEEK_SET );
+
+    /* Each rank writes one int through the shared pointer */
+    memset( &status, 0xff, sizeof(MPI_Status) );
+    MPI_File_write_shared( fh, buf, 1, MPI_INT, &status );
+    /* Query the status of THIS write: the original reused the previous
+       count, making the check vacuous. */
+    MPI_Get_count( &status, MPI_INT, &count );
+    if (count != 1) {
+        errs++;
+        fprintf( stderr, "Wrong count (%d) on write shared\n", count );fflush(stderr);
+    }
+
+    free( buf );
+    MPI_File_close( &fh );
+
+    MPI_Finalize();
+    return errs;
+}
\ No newline at end of file
--- /dev/null
+# Test for MPI_File_read_shared and MPI_File_write_shared (shared file pointer)
+! output sort\r
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_io -platform ../../../examples/platforms/storage/remote_io.xml -np 4 --log=xbt_cfg.thres:critical --log=smpi_kernel.thres:warning --log=smpi_mpi.thres:error --log=smpi_io.thres:verbose "--log=root.fmt:[%10.6r]%e(%P@%h)%e%m%n" --cfg=smpi/simulate-computation:0 ${bindir:=.}/io-shared\r
+> You requested to use 4 ranks, but there is only 2 processes in your hostfile...\r
+> [rank 0] -> bob\r
+> [rank 1] -> carl\r
+> [rank 2] -> bob\r
+> [rank 3] -> carl\r
+> [ 0.000584] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.000584] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.000599] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.000634] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.000634] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.000735] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 80\r
+> [ 0.000735] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 40\r
+> [ 0.000735] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 120\r
+> [ 0.000939] (3@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40\r
+> [ 0.000939] (3@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 160\r
+> [ 0.000939] (1@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40\r
+> [ 0.000939] (1@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 80\r
+> [ 0.001041] (2@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40\r
+> [ 0.001041] (2@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 120\r
+> [ 0.001041] (0@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40\r
+> [ 0.001041] (0@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 40\r
+> [ 0.001192] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.001192] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.001292] (0@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
+> [ 0.001292] (0@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4\r
+> [ 0.001292] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.001292] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.001292] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.001292] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.001393] (2@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
+> [ 0.001393] (2@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4\r
+> [ 0.001393] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4\r
+> [ 0.001393] (3@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
+> [ 0.001393] (3@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 8\r
+> [ 0.001393] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8\r
+> [ 0.001393] (1@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
+> [ 0.001393] (1@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12\r
+> [ 0.001443] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.001443] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.001544] (0@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
+> [ 0.001544] (0@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4\r
+> [ 0.001544] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.001544] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.001544] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.001544] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.001645] (2@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
+> [ 0.001645] (2@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4\r
+> [ 0.001645] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4\r
+> [ 0.001645] (3@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
+> [ 0.001645] (3@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 8\r
+> [ 0.001645] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8\r
+> [ 0.001645] (1@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
+> [ 0.001645] (1@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12\r
--- /dev/null
+#include "mpi.h"\r
+#include <stdio.h>\r
+#include <stdlib.h>\r
+#include <string.h>\r
+#include <memory.h>\r
+\r
+/* Test reading and writing zero bytes (set status correctly) */\r
+\r
+int main( int argc, char *argv[] )\r
+{\r
+ int errs = 0;\r
+ int size, rank, i, *buf, count;\r
+ MPI_File fh;\r
+ MPI_Comm comm;\r
+ MPI_Status status;\r
+ \r
+ MPI_Init( &argc, &argv );\r
+ \r
+ comm = MPI_COMM_WORLD;\r
+ MPI_File_open( comm, (char*)"/scratch/lib/libsimgrid.so.3.6.2", MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_DELETE_ON_CLOSE, MPI_INFO_NULL, &fh );\r
+ MPI_Comm_size( comm, &size );\r
+ MPI_Comm_rank( comm, &rank );\r
+ buf = (int *)malloc( size * sizeof(int) );\r
+ buf[0] = rank;\r
+ \r
+ /* Write to file */\r
+ MPI_File_write_at( fh, sizeof(int)*rank, buf, 1, MPI_INT, &status );\r
+ MPI_Get_count( &status, MPI_INT, &count );\r
+ if (count != 1) {\r
+ errs++;\r
+ fprintf( stderr, "Wrong count (%d) on write_at\n", count );fflush(stderr);\r
+ }\r
+\r
+ /* Read nothing (check status) */\r
+ memset( &status, 0xff, sizeof(MPI_Status) );\r
+ MPI_File_read_at( fh, sizeof(int)*rank, buf, 0, MPI_INT, &status );\r
+ MPI_Get_count( &status, MPI_INT, &count );\r
+ if (count != 0) {\r
+ errs++;\r
+ fprintf( stderr, "Count not zero (%d) on read\n", count );fflush(stderr);\r
+ }\r
+\r
+ /* Write nothing (check status) */\r
+ memset( &status, 0xff, sizeof(MPI_Status) );\r
+    MPI_File_write_at( fh, sizeof(int)*rank, buf, 0, MPI_INT, &status );\r
+    MPI_Get_count( &status, MPI_INT, &count );\r
+    if (count != 0) {\r
+ errs++;\r
+ fprintf( stderr, "Count not zero (%d) on write\n", count );fflush(stderr);\r
+ }\r
+\r
+ MPI_Barrier( comm );\r
+\r
+ for (i=0; i<size; i++) buf[i] = -1;\r
+ MPI_File_read_at( fh, sizeof(int)*rank, buf, 1, MPI_INT, &status );\r
+ // if (buf[0] != rank) {\r
+ // errs++;\r
+ // fprintf( stderr, "%d: buf = %d\n", rank, buf[0] );fflush(stderr);\r
+ // }\r
+ \r
+ free( buf );\r
+ MPI_File_close( &fh );\r
+ \r
+ MPI_Finalize();\r
+ return errs;\r
+}
\ No newline at end of file
--- /dev/null
+# Test for MPI_File_read and MPI_File_write\r
+! output sort\r
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_io -platform ../../../examples/platforms/storage/remote_io.xml -np 4 --log=xbt_cfg.thres:critical --log=smpi_kernel.thres:warning --log=smpi_mpi.thres:error --log=smpi_io.thres:verbose "--log=root.fmt:[%10.6r]%e(%P@%h)%e%m%n" --cfg=smpi/simulate-computation:0 ${bindir:=.}/io-simple-at\r
+> You requested to use 4 ranks, but there is only 2 processes in your hostfile...\r
+> [rank 0] -> bob\r
+> [rank 1] -> carl\r
+> [rank 2] -> bob\r
+> [rank 3] -> carl\r
+> [ 0.000282] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.000358] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8\r
+> [ 0.000383] (0@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
+> [ 0.000383] (0@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4\r
+> [ 0.000459] (2@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
+> [ 0.000459] (2@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12\r
+> [ 0.000534] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 12\r
+> [ 0.000534] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4\r
+> [ 0.000534] (3@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
+> [ 0.000534] (3@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 16\r
+> [ 0.000534] (1@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
+> [ 0.000534] (1@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 8\r
+> [ 0.000584] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.000599] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8\r
+> [ 0.000634] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 12\r
+> [ 0.000634] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4\r
+> [ 0.000634] (3@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
+> [ 0.000634] (3@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 16\r
+> [ 0.000634] (1@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
+> [ 0.000634] (1@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 8\r
+> [ 0.000685] (0@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
+> [ 0.000685] (0@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4\r
+> [ 0.000700] (2@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
+> [ 0.000700] (2@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12\r
--- /dev/null
+#include "mpi.h"\r
+#include <stdio.h>\r
+#include <stdlib.h>\r
+#include <string.h>\r
+#include <memory.h>\r
+\r
+/* Test reading and writing zero bytes (set status correctly) */\r
+\r
+int main( int argc, char *argv[] )\r
+{\r
+ int errs = 0;\r
+ int size, rank, i, *buf, count;\r
+ MPI_File fh;\r
+ MPI_Comm comm;\r
+ MPI_Status status;\r
+ \r
+ MPI_Init( &argc, &argv );\r
+ \r
+ comm = MPI_COMM_WORLD;\r
+ MPI_File_open( comm, (char*)"/scratch/lib/libsimgrid.so.3.6.2", MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_DELETE_ON_CLOSE, MPI_INFO_NULL, &fh );\r
+ MPI_Comm_size( comm, &size );\r
+ MPI_Comm_rank( comm, &rank );\r
+ buf = (int *)malloc( size * sizeof(int) );\r
+ buf[0] = rank;\r
+ \r
+ /* Write to file */\r
+ MPI_File_seek( fh, sizeof(int)*rank, MPI_SEEK_SET ); \r
+ MPI_File_write( fh, buf, 1, MPI_INT, &status );\r
+ MPI_Get_count( &status, MPI_INT, &count );\r
+ if (count != 1) {\r
+ errs++;\r
+ fprintf( stderr, "Wrong count (%d) on write\n", count );fflush(stderr);\r
+ }\r
+\r
+ /* Read nothing (check status) */\r
+ memset( &status, 0xff, sizeof(MPI_Status) );\r
+ MPI_File_read( fh, buf, 0, MPI_INT, &status );\r
+ MPI_Get_count( &status, MPI_INT, &count );\r
+ if (count != 0) {\r
+ errs++;\r
+ fprintf( stderr, "Count not zero (%d) on read\n", count );fflush(stderr);\r
+ }\r
+\r
+ /* Write nothing (check status) */\r
+ memset( &status, 0xff, sizeof(MPI_Status) );\r
+    MPI_File_write( fh, buf, 0, MPI_INT, &status );\r
+    MPI_Get_count( &status, MPI_INT, &count );\r
+    if (count != 0) {\r
+ errs++;\r
+ fprintf( stderr, "Count not zero (%d) on write\n", count );fflush(stderr);\r
+ }\r
+\r
+ MPI_Barrier( comm );\r
+\r
+ MPI_File_seek( fh, sizeof(int)*rank, MPI_SEEK_SET );\r
+ for (i=0; i<size; i++) buf[i] = -1;\r
+ MPI_File_read( fh, buf, 1, MPI_INT, &status );\r
+ // if (buf[0] != rank) {\r
+ // errs++;\r
+ // fprintf( stderr, "%d: buf = %d\n", rank, buf[0] );fflush(stderr);\r
+ // }\r
+ \r
+ free( buf );\r
+ MPI_File_close( &fh );\r
+ \r
+ MPI_Finalize();\r
+ return errs;\r
+}
\ No newline at end of file
--- /dev/null
+# Test for MPI_File_read and MPI_File_write\r
+! output sort\r
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_io -platform ../../../examples/platforms/storage/remote_io.xml -np 4 --log=xbt_cfg.thres:critical --log=smpi_kernel.thres:warning --log=smpi_mpi.thres:error --log=smpi_io.thres:verbose "--log=root.fmt:[%10.6r]%e(%P@%h)%e%m%n" --cfg=smpi/simulate-computation:0 ${bindir:=.}/io-simple\r
+> You requested to use 4 ranks, but there is only 2 processes in your hostfile...\r
+> [rank 0] -> bob\r
+> [rank 1] -> carl\r
+> [rank 2] -> bob\r
+> [rank 3] -> carl\r
+> [ 0.000282] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.000358] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8\r
+> [ 0.000383] (0@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
+> [ 0.000383] (0@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4\r
+> [ 0.000459] (2@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
+> [ 0.000459] (2@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12\r
+> [ 0.000534] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 12\r
+> [ 0.000534] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4\r
+> [ 0.000534] (3@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
+> [ 0.000534] (3@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 16\r
+> [ 0.000534] (1@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
+> [ 0.000534] (1@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 8\r
+> [ 0.000584] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+> [ 0.000599] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8\r
+> [ 0.000634] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 12\r
+> [ 0.000634] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4\r
+> [ 0.000634] (3@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
+> [ 0.000634] (3@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 16\r
+> [ 0.000634] (1@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
+> [ 0.000634] (1@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 8\r
+> [ 0.000685] (0@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
+> [ 0.000685] (0@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4\r
+> [ 0.000700] (2@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
+> [ 0.000700] (2@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12\r
src/smpi/bindings/smpi_pmpi.cpp
src/smpi/bindings/smpi_pmpi_coll.cpp
src/smpi/bindings/smpi_pmpi_comm.cpp
+ src/smpi/bindings/smpi_pmpi_file.cpp
src/smpi/bindings/smpi_pmpi_group.cpp
src/smpi/bindings/smpi_pmpi_info.cpp
src/smpi/bindings/smpi_pmpi_op.cpp
src/smpi/mpi/smpi_datatype.cpp
src/smpi/mpi/smpi_datatype_derived.cpp
src/smpi/mpi/smpi_f2c.cpp
+ src/smpi/mpi/smpi_file.cpp
src/smpi/mpi/smpi_group.cpp
src/smpi/mpi/smpi_info.cpp
src/smpi/mpi/smpi_keyvals.cpp
src/smpi/include/smpi_datatype_derived.hpp
src/smpi/include/smpi_datatype.hpp
src/smpi/include/smpi_f2c.hpp
+ src/smpi/include/smpi_file.hpp
src/smpi/include/smpi_group.hpp
src/smpi/include/smpi_host.hpp
src/smpi/include/smpi_info.hpp