Fix my Windows line-endings mess (convert CRLF to LF).
-/* Copyright (c) 2007-2019. The SimGrid Team. All rights reserved. */\r
-\r
-/* This program is free software; you can redistribute it and/or modify it\r
- * under the terms of the license (GNU LGPL) which comes with this package. */\r
-\r
-#include "private.hpp"\r
-XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi);\r
-\r
-#include "smpi_file.hpp"\r
-#include "smpi_datatype.hpp"\r
-\r
-\r
-int PMPI_File_open(MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh){\r
- if (comm == MPI_COMM_NULL)\r
- return MPI_ERR_COMM;\r
- if (filename == nullptr)\r
- return MPI_ERR_FILE;\r
- if (amode < 0)\r
- return MPI_ERR_AMODE;\r
- smpi_bench_end();\r
- *fh = new simgrid::smpi::File(comm, filename, amode, info);\r
- smpi_bench_begin();\r
- if (((*fh)->size() == 0 && not (amode & MPI_MODE_CREATE)) ||\r
- ((*fh)->size() != 0 && (amode & MPI_MODE_EXCL))){\r
- delete fh;\r
- return MPI_ERR_AMODE;\r
- }\r
- if(amode & MPI_MODE_APPEND)\r
- (*fh)->seek(0,MPI_SEEK_END);\r
- return MPI_SUCCESS;\r
-}\r
-\r
-int PMPI_File_close(MPI_File *fh){\r
- if (fh==nullptr)\r
- return MPI_ERR_ARG;\r
- smpi_bench_end();\r
- int ret = simgrid::smpi::File::close(fh);\r
- *fh = MPI_FILE_NULL;\r
- smpi_bench_begin();\r
- return ret;\r
-}\r
-#define CHECK_FILE(fh) if(fh==MPI_FILE_NULL) return MPI_ERR_FILE;\r
-#define CHECK_BUFFER(buf, count) if (buf==nullptr && count > 0) return MPI_ERR_BUFFER;\r
-#define CHECK_COUNT(count) if (count < 0) return MPI_ERR_COUNT;\r
-#define CHECK_OFFSET(offset) if (offset < 0) return MPI_ERR_DISP;\r
-#define CHECK_DATATYPE(datatype, count) if (datatype == MPI_DATATYPE_NULL && count > 0) return MPI_ERR_TYPE;\r
-#define CHECK_STATUS(status) if (status == nullptr) return MPI_ERR_ARG;\r
-#define CHECK_FLAGS(fh) if (fh->flags() & MPI_MODE_SEQUENTIAL) return MPI_ERR_AMODE;\r
-\r
-#define PASS_ZEROCOUNT(count) if (count == 0) {\\r
-status->count=0;\\r
-return MPI_SUCCESS;\\r
-}\r
-\r
-int PMPI_File_seek(MPI_File fh, MPI_Offset offset, int whence){\r
+/* Copyright (c) 2007-2019. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#include "private.hpp"
+XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(smpi_pmpi);
+
+#include "smpi_file.hpp"
+#include "smpi_datatype.hpp"
+
+
+int PMPI_File_open(MPI_Comm comm, char *filename, int amode, MPI_Info info, MPI_File *fh){
+ if (comm == MPI_COMM_NULL)
+ return MPI_ERR_COMM;
+ if (filename == nullptr)
+ return MPI_ERR_FILE;
+ if (amode < 0)
+ return MPI_ERR_AMODE;
+ smpi_bench_end();
+ *fh = new simgrid::smpi::File(comm, filename, amode, info);
+ smpi_bench_begin();
+ if (((*fh)->size() == 0 && not (amode & MPI_MODE_CREATE)) ||
+ ((*fh)->size() != 0 && (amode & MPI_MODE_EXCL))){
+ delete fh;
+ return MPI_ERR_AMODE;
+ }
+ if(amode & MPI_MODE_APPEND)
+ (*fh)->seek(0,MPI_SEEK_END);
+ return MPI_SUCCESS;
+}
+
+int PMPI_File_close(MPI_File *fh){
+ if (fh==nullptr)
+ return MPI_ERR_ARG;
+ smpi_bench_end();
+ int ret = simgrid::smpi::File::close(fh);
+ *fh = MPI_FILE_NULL;
+ smpi_bench_begin();
+ return ret;
+}
+#define CHECK_FILE(fh) if(fh==MPI_FILE_NULL) return MPI_ERR_FILE;
+#define CHECK_BUFFER(buf, count) if (buf==nullptr && count > 0) return MPI_ERR_BUFFER;
+#define CHECK_COUNT(count) if (count < 0) return MPI_ERR_COUNT;
+#define CHECK_OFFSET(offset) if (offset < 0) return MPI_ERR_DISP;
+#define CHECK_DATATYPE(datatype, count) if (datatype == MPI_DATATYPE_NULL && count > 0) return MPI_ERR_TYPE;
+#define CHECK_STATUS(status) if (status == nullptr) return MPI_ERR_ARG;
+#define CHECK_FLAGS(fh) if (fh->flags() & MPI_MODE_SEQUENTIAL) return MPI_ERR_AMODE;
+
+#define PASS_ZEROCOUNT(count) if (count == 0) {\
+status->count=0;\
+return MPI_SUCCESS;\
+}
+
+int PMPI_File_seek(MPI_File fh, MPI_Offset offset, int whence){
CHECK_FILE(fh);
- smpi_bench_end();\r
- int ret = fh->seek(offset,whence);\r
- smpi_bench_begin();\r
- return ret;\r
-\r
-}\r
-\r
-int PMPI_File_seek_shared(MPI_File fh, MPI_Offset offset, int whence){\r
- CHECK_FILE(fh)\r
- smpi_bench_end();\r
- int ret = fh->seek_shared(offset,whence);\r
- smpi_bench_begin();\r
- return ret;\r
-\r
-}\r
-\r
-int PMPI_File_get_position(MPI_File fh, MPI_Offset* offset){\r
- if (offset==nullptr)\r
- return MPI_ERR_DISP;\r
- smpi_bench_end();\r
- int ret = fh->get_position(offset);\r
- smpi_bench_begin();\r
- return ret;\r
-}\r
-\r
-int PMPI_File_get_position_shared(MPI_File fh, MPI_Offset* offset){\r
- CHECK_FILE(fh)\r
- if (offset==nullptr)\r
- return MPI_ERR_DISP;\r
- smpi_bench_end();\r
- int ret = fh->get_position_shared(offset);\r
- smpi_bench_begin();\r
- return ret;\r
-}\r
-\r
-int PMPI_File_read(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
+ smpi_bench_end();
+ int ret = fh->seek(offset,whence);
+ smpi_bench_begin();
+ return ret;
+
+}
+
+int PMPI_File_seek_shared(MPI_File fh, MPI_Offset offset, int whence){
CHECK_FILE(fh)
- CHECK_BUFFER(buf, count)\r
- CHECK_COUNT(count)\r
- CHECK_DATATYPE(datatype, count)\r
- CHECK_STATUS(status)\r
- CHECK_FLAGS(fh)\r
- PASS_ZEROCOUNT(count)\r
- smpi_bench_end();\r
- int rank_traced = simgrid::s4u::this_actor::get_pid();\r
- TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - read", static_cast<double>(count*datatype->size())));\r
- int ret = simgrid::smpi::File::read(fh, buf, count, datatype, status);\r
- TRACE_smpi_comm_out(rank_traced);\r
- smpi_bench_begin();\r
- return ret;\r
-}\r
-\r
-int PMPI_File_read_shared(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
- CHECK_FILE(fh)\r
- CHECK_BUFFER(buf, count)\r
- CHECK_COUNT(count)\r
- CHECK_DATATYPE(datatype, count)\r
- CHECK_STATUS(status)\r
- CHECK_FLAGS(fh)\r
- PASS_ZEROCOUNT(count)\r
- smpi_bench_end();\r
- int rank_traced = simgrid::s4u::this_actor::get_pid();\r
- TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - read_shared", static_cast<double>(count*datatype->size())));\r
- int ret = simgrid::smpi::File::read_shared(fh, buf, count, datatype, status);\r
- TRACE_smpi_comm_out(rank_traced);\r
- smpi_bench_begin();\r
- return ret;\r
-}\r
-\r
-int PMPI_File_write(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
+ smpi_bench_end();
+ int ret = fh->seek_shared(offset,whence);
+ smpi_bench_begin();
+ return ret;
+
+}
+
+int PMPI_File_get_position(MPI_File fh, MPI_Offset* offset){
+ if (offset==nullptr)
+ return MPI_ERR_DISP;
+ smpi_bench_end();
+ int ret = fh->get_position(offset);
+ smpi_bench_begin();
+ return ret;
+}
+
+int PMPI_File_get_position_shared(MPI_File fh, MPI_Offset* offset){
CHECK_FILE(fh)
- CHECK_BUFFER(buf, count)\r
- CHECK_COUNT(count)\r
- CHECK_DATATYPE(datatype, count)\r
- CHECK_STATUS(status)\r
- CHECK_FLAGS(fh)\r
- PASS_ZEROCOUNT(count)\r
- smpi_bench_end();\r
- int rank_traced = simgrid::s4u::this_actor::get_pid();\r
- TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - write", static_cast<double>(count*datatype->size())));\r
- int ret = simgrid::smpi::File::write(fh, buf, count, datatype, status);\r
- TRACE_smpi_comm_out(rank_traced);\r
- smpi_bench_begin();\r
- return ret;\r
-}\r
-\r
-int PMPI_File_write_shared(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
- CHECK_FILE(fh)\r
- CHECK_BUFFER(buf, count)\r
- CHECK_COUNT(count)\r
- CHECK_DATATYPE(datatype, count)\r
- CHECK_STATUS(status)\r
- CHECK_FLAGS(fh)\r
- PASS_ZEROCOUNT(count)\r
- smpi_bench_end();\r
- int rank_traced = simgrid::s4u::this_actor::get_pid();\r
- TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - write_shared", static_cast<double>(count*datatype->size())));\r
- int ret = simgrid::smpi::File::write_shared(fh, buf, count, datatype, status);\r
- TRACE_smpi_comm_out(rank_traced);\r
- smpi_bench_begin();\r
- return ret;\r
-}\r
-\r
-int PMPI_File_read_all(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
+ if (offset==nullptr)
+ return MPI_ERR_DISP;
+ smpi_bench_end();
+ int ret = fh->get_position_shared(offset);
+ smpi_bench_begin();
+ return ret;
+}
+
+int PMPI_File_read(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){
CHECK_FILE(fh)
- CHECK_BUFFER(buf, count)\r
- CHECK_COUNT(count)\r
- CHECK_DATATYPE(datatype, count)\r
- CHECK_STATUS(status)\r
- CHECK_FLAGS(fh)\r
- smpi_bench_end();\r
- int rank_traced = simgrid::s4u::this_actor::get_pid();\r
- TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - read_all", static_cast<double>(count*datatype->size())));\r
- int ret = fh->op_all<simgrid::smpi::File::read>(buf, count, datatype, status);\r
- TRACE_smpi_comm_out(rank_traced);\r
- smpi_bench_begin();\r
- return ret;\r
-}\r
-\r
-int PMPI_File_read_ordered(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
- CHECK_FILE(fh)\r
- CHECK_BUFFER(buf, count)\r
- CHECK_COUNT(count)\r
- CHECK_DATATYPE(datatype, count)\r
- CHECK_STATUS(status)\r
- CHECK_FLAGS(fh)\r
- smpi_bench_end();\r
- int rank_traced = simgrid::s4u::this_actor::get_pid();\r
- TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - read_ordered", static_cast<double>(count*datatype->size())));\r
- int ret = simgrid::smpi::File::read_ordered(fh, buf, count, datatype, status);\r
- TRACE_smpi_comm_out(rank_traced);\r
- smpi_bench_begin();\r
- return ret;\r
-}\r
-\r
-int PMPI_File_write_all(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
+ CHECK_BUFFER(buf, count)
+ CHECK_COUNT(count)
+ CHECK_DATATYPE(datatype, count)
+ CHECK_STATUS(status)
+ CHECK_FLAGS(fh)
+ PASS_ZEROCOUNT(count)
+ smpi_bench_end();
+ int rank_traced = simgrid::s4u::this_actor::get_pid();
+ TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - read", static_cast<double>(count*datatype->size())));
+ int ret = simgrid::smpi::File::read(fh, buf, count, datatype, status);
+ TRACE_smpi_comm_out(rank_traced);
+ smpi_bench_begin();
+ return ret;
+}
+
+int PMPI_File_read_shared(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){
CHECK_FILE(fh)
- CHECK_BUFFER(buf, count)\r
- CHECK_COUNT(count)\r
- CHECK_DATATYPE(datatype, count)\r
- CHECK_STATUS(status)\r
- CHECK_FLAGS(fh)\r
- smpi_bench_end();\r
- int rank_traced = simgrid::s4u::this_actor::get_pid();\r
- TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - write_all", static_cast<double>(count*datatype->size())));\r
- int ret = fh->op_all<simgrid::smpi::File::write>(buf, count, datatype, status);\r
- TRACE_smpi_comm_out(rank_traced);\r
- smpi_bench_begin();\r
- return ret;\r
-}\r
-\r
-int PMPI_File_write_ordered(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
- CHECK_FILE(fh)\r
- CHECK_BUFFER(buf, count)\r
- CHECK_COUNT(count)\r
- CHECK_DATATYPE(datatype, count)\r
- CHECK_STATUS(status)\r
- CHECK_FLAGS(fh)\r
- smpi_bench_end();\r
- int rank_traced = simgrid::s4u::this_actor::get_pid();\r
- TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - write_ordered", static_cast<double>(count*datatype->size())));\r
- int ret = simgrid::smpi::File::write_ordered(fh, buf, count, datatype, status);\r
- TRACE_smpi_comm_out(rank_traced);\r
- smpi_bench_begin();\r
- return ret;\r
-}\r
-\r
-int PMPI_File_read_at(MPI_File fh, MPI_Offset offset, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
+ CHECK_BUFFER(buf, count)
+ CHECK_COUNT(count)
+ CHECK_DATATYPE(datatype, count)
+ CHECK_STATUS(status)
+ CHECK_FLAGS(fh)
+ PASS_ZEROCOUNT(count)
+ smpi_bench_end();
+ int rank_traced = simgrid::s4u::this_actor::get_pid();
+ TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - read_shared", static_cast<double>(count*datatype->size())));
+ int ret = simgrid::smpi::File::read_shared(fh, buf, count, datatype, status);
+ TRACE_smpi_comm_out(rank_traced);
+ smpi_bench_begin();
+ return ret;
+}
+
+int PMPI_File_write(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){
CHECK_FILE(fh)
- CHECK_BUFFER(buf, count)\r
- CHECK_OFFSET(offset)\r
- CHECK_COUNT(count)\r
- CHECK_DATATYPE(datatype, count)\r
- CHECK_STATUS(status)\r
- CHECK_FLAGS(fh)\r
- PASS_ZEROCOUNT(count);\r
- smpi_bench_end();\r
- int rank_traced = simgrid::s4u::this_actor::get_pid();\r
- TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - read", static_cast<double>(count*datatype->size())));\r
- int ret = fh->seek(offset,MPI_SEEK_SET);\r
- if(ret!=MPI_SUCCESS)\r
- return ret;\r
- ret = simgrid::smpi::File::read(fh, buf, count, datatype, status);\r
- TRACE_smpi_comm_out(rank_traced);\r
- smpi_bench_begin();\r
- return ret;\r
-}\r
-\r
-int PMPI_File_read_at_all(MPI_File fh, MPI_Offset offset, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
+ CHECK_BUFFER(buf, count)
+ CHECK_COUNT(count)
+ CHECK_DATATYPE(datatype, count)
+ CHECK_STATUS(status)
+ CHECK_FLAGS(fh)
+ PASS_ZEROCOUNT(count)
+ smpi_bench_end();
+ int rank_traced = simgrid::s4u::this_actor::get_pid();
+ TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - write", static_cast<double>(count*datatype->size())));
+ int ret = simgrid::smpi::File::write(fh, buf, count, datatype, status);
+ TRACE_smpi_comm_out(rank_traced);
+ smpi_bench_begin();
+ return ret;
+}
+
+int PMPI_File_write_shared(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){
CHECK_FILE(fh)
- CHECK_BUFFER(buf, count)\r
- CHECK_OFFSET(offset)\r
- CHECK_COUNT(count)\r
- CHECK_DATATYPE(datatype, count)\r
- CHECK_STATUS(status)\r
- CHECK_FLAGS(fh)\r
- smpi_bench_end();\r
- int rank_traced = simgrid::s4u::this_actor::get_pid();\r
- TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - read_at_all", static_cast<double>(count*datatype->size())));\r
- int ret = fh->seek(offset,MPI_SEEK_SET);\r
- if(ret!=MPI_SUCCESS)\r
- return ret;\r
- ret = fh->op_all<simgrid::smpi::File::read>(buf, count, datatype, status);\r
- TRACE_smpi_comm_out(rank_traced);\r
- smpi_bench_begin();\r
- return ret;\r
-}\r
-\r
-int PMPI_File_write_at(MPI_File fh, MPI_Offset offset, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
+ CHECK_BUFFER(buf, count)
+ CHECK_COUNT(count)
+ CHECK_DATATYPE(datatype, count)
+ CHECK_STATUS(status)
+ CHECK_FLAGS(fh)
+ PASS_ZEROCOUNT(count)
+ smpi_bench_end();
+ int rank_traced = simgrid::s4u::this_actor::get_pid();
+ TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - write_shared", static_cast<double>(count*datatype->size())));
+ int ret = simgrid::smpi::File::write_shared(fh, buf, count, datatype, status);
+ TRACE_smpi_comm_out(rank_traced);
+ smpi_bench_begin();
+ return ret;
+}
+
+int PMPI_File_read_all(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){
CHECK_FILE(fh)
- CHECK_BUFFER(buf, count)\r
- CHECK_OFFSET(offset)\r
- CHECK_COUNT(count)\r
- CHECK_DATATYPE(datatype, count)\r
- CHECK_STATUS(status)\r
- CHECK_FLAGS(fh)\r
- PASS_ZEROCOUNT(count);\r
- smpi_bench_end();\r
- int rank_traced = simgrid::s4u::this_actor::get_pid();\r
- TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - write", static_cast<double>(count*datatype->size())));\r
- int ret = fh->seek(offset,MPI_SEEK_SET);\r
- if(ret!=MPI_SUCCESS)\r
- return ret;\r
- ret = simgrid::smpi::File::write(fh, buf, count, datatype, status);\r
- TRACE_smpi_comm_out(rank_traced);\r
- smpi_bench_begin();\r
- return ret;\r
-}\r
-\r
-int PMPI_File_write_at_all(MPI_File fh, MPI_Offset offset, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){\r
+ CHECK_BUFFER(buf, count)
+ CHECK_COUNT(count)
+ CHECK_DATATYPE(datatype, count)
+ CHECK_STATUS(status)
+ CHECK_FLAGS(fh)
+ smpi_bench_end();
+ int rank_traced = simgrid::s4u::this_actor::get_pid();
+ TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - read_all", static_cast<double>(count*datatype->size())));
+ int ret = fh->op_all<simgrid::smpi::File::read>(buf, count, datatype, status);
+ TRACE_smpi_comm_out(rank_traced);
+ smpi_bench_begin();
+ return ret;
+}
+
+int PMPI_File_read_ordered(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){
CHECK_FILE(fh)
- CHECK_BUFFER(buf, count)\r
- CHECK_OFFSET(offset)\r
- CHECK_COUNT(count)\r
- CHECK_DATATYPE(datatype, count)\r
- CHECK_STATUS(status)\r
- CHECK_FLAGS(fh)\r
- smpi_bench_end();\r
- int rank_traced = simgrid::s4u::this_actor::get_pid();\r
- TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - write_at_all", static_cast<double>(count*datatype->size())));\r
- int ret = fh->seek(offset,MPI_SEEK_SET);\r
- if(ret!=MPI_SUCCESS)\r
- return ret;\r
- ret = fh->op_all<simgrid::smpi::File::write>(buf, count, datatype, status);\r
- TRACE_smpi_comm_out(rank_traced);\r
- smpi_bench_begin();\r
- return ret;\r
-}\r
-\r
-int PMPI_File_delete(char *filename, MPI_Info info){\r
- if (filename == nullptr)\r
- return MPI_ERR_FILE;\r
- smpi_bench_end();\r
- int ret = simgrid::smpi::File::del(filename, info);\r
- smpi_bench_begin();\r
- return ret;\r
-}\r
-\r
-int PMPI_File_get_info(MPI_File fh, MPI_Info* info)\r
-{\r
- CHECK_FILE(fh)\r
- *info = fh->info();\r
- return MPI_SUCCESS;\r
-}\r
-\r
-int PMPI_File_set_info(MPI_File fh, MPI_Info info)\r
-{\r
- CHECK_FILE(fh)\r
- fh->set_info(info);\r
- return MPI_SUCCESS;\r
+ CHECK_BUFFER(buf, count)
+ CHECK_COUNT(count)
+ CHECK_DATATYPE(datatype, count)
+ CHECK_STATUS(status)
+ CHECK_FLAGS(fh)
+ smpi_bench_end();
+ int rank_traced = simgrid::s4u::this_actor::get_pid();
+ TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - read_ordered", static_cast<double>(count*datatype->size())));
+ int ret = simgrid::smpi::File::read_ordered(fh, buf, count, datatype, status);
+ TRACE_smpi_comm_out(rank_traced);
+ smpi_bench_begin();
+ return ret;
+}
+
+int PMPI_File_write_all(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){
+ CHECK_FILE(fh)
+ CHECK_BUFFER(buf, count)
+ CHECK_COUNT(count)
+ CHECK_DATATYPE(datatype, count)
+ CHECK_STATUS(status)
+ CHECK_FLAGS(fh)
+ smpi_bench_end();
+ int rank_traced = simgrid::s4u::this_actor::get_pid();
+ TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - write_all", static_cast<double>(count*datatype->size())));
+ int ret = fh->op_all<simgrid::smpi::File::write>(buf, count, datatype, status);
+ TRACE_smpi_comm_out(rank_traced);
+ smpi_bench_begin();
+ return ret;
+}
+
+int PMPI_File_write_ordered(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){
+ CHECK_FILE(fh)
+ CHECK_BUFFER(buf, count)
+ CHECK_COUNT(count)
+ CHECK_DATATYPE(datatype, count)
+ CHECK_STATUS(status)
+ CHECK_FLAGS(fh)
+ smpi_bench_end();
+ int rank_traced = simgrid::s4u::this_actor::get_pid();
+ TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - write_ordered", static_cast<double>(count*datatype->size())));
+ int ret = simgrid::smpi::File::write_ordered(fh, buf, count, datatype, status);
+ TRACE_smpi_comm_out(rank_traced);
+ smpi_bench_begin();
+ return ret;
+}
+
+int PMPI_File_read_at(MPI_File fh, MPI_Offset offset, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){
+ CHECK_FILE(fh)
+ CHECK_BUFFER(buf, count)
+ CHECK_OFFSET(offset)
+ CHECK_COUNT(count)
+ CHECK_DATATYPE(datatype, count)
+ CHECK_STATUS(status)
+ CHECK_FLAGS(fh)
+ PASS_ZEROCOUNT(count);
+ smpi_bench_end();
+ int rank_traced = simgrid::s4u::this_actor::get_pid();
+ TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - read", static_cast<double>(count*datatype->size())));
+ int ret = fh->seek(offset,MPI_SEEK_SET);
+ if(ret!=MPI_SUCCESS)
+ return ret;
+ ret = simgrid::smpi::File::read(fh, buf, count, datatype, status);
+ TRACE_smpi_comm_out(rank_traced);
+ smpi_bench_begin();
+ return ret;
+}
+
+int PMPI_File_read_at_all(MPI_File fh, MPI_Offset offset, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){
+ CHECK_FILE(fh)
+ CHECK_BUFFER(buf, count)
+ CHECK_OFFSET(offset)
+ CHECK_COUNT(count)
+ CHECK_DATATYPE(datatype, count)
+ CHECK_STATUS(status)
+ CHECK_FLAGS(fh)
+ smpi_bench_end();
+ int rank_traced = simgrid::s4u::this_actor::get_pid();
+ TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - read_at_all", static_cast<double>(count*datatype->size())));
+ int ret = fh->seek(offset,MPI_SEEK_SET);
+ if(ret!=MPI_SUCCESS)
+ return ret;
+ ret = fh->op_all<simgrid::smpi::File::read>(buf, count, datatype, status);
+ TRACE_smpi_comm_out(rank_traced);
+ smpi_bench_begin();
+ return ret;
+}
+
+int PMPI_File_write_at(MPI_File fh, MPI_Offset offset, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){
+ CHECK_FILE(fh)
+ CHECK_BUFFER(buf, count)
+ CHECK_OFFSET(offset)
+ CHECK_COUNT(count)
+ CHECK_DATATYPE(datatype, count)
+ CHECK_STATUS(status)
+ CHECK_FLAGS(fh)
+ PASS_ZEROCOUNT(count);
+ smpi_bench_end();
+ int rank_traced = simgrid::s4u::this_actor::get_pid();
+ TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - write", static_cast<double>(count*datatype->size())));
+ int ret = fh->seek(offset,MPI_SEEK_SET);
+ if(ret!=MPI_SUCCESS)
+ return ret;
+ ret = simgrid::smpi::File::write(fh, buf, count, datatype, status);
+ TRACE_smpi_comm_out(rank_traced);
+ smpi_bench_begin();
+ return ret;
+}
+
+int PMPI_File_write_at_all(MPI_File fh, MPI_Offset offset, void *buf, int count,MPI_Datatype datatype, MPI_Status *status){
+ CHECK_FILE(fh)
+ CHECK_BUFFER(buf, count)
+ CHECK_OFFSET(offset)
+ CHECK_COUNT(count)
+ CHECK_DATATYPE(datatype, count)
+ CHECK_STATUS(status)
+ CHECK_FLAGS(fh)
+ smpi_bench_end();
+ int rank_traced = simgrid::s4u::this_actor::get_pid();
+ TRACE_smpi_comm_in(rank_traced, __func__, new simgrid::instr::CpuTIData("IO - write_at_all", static_cast<double>(count*datatype->size())));
+ int ret = fh->seek(offset,MPI_SEEK_SET);
+ if(ret!=MPI_SUCCESS)
+ return ret;
+ ret = fh->op_all<simgrid::smpi::File::write>(buf, count, datatype, status);
+ TRACE_smpi_comm_out(rank_traced);
+ smpi_bench_begin();
+ return ret;
+}
+
+int PMPI_File_delete(char *filename, MPI_Info info){
+ if (filename == nullptr)
+ return MPI_ERR_FILE;
+ smpi_bench_end();
+ int ret = simgrid::smpi::File::del(filename, info);
+ smpi_bench_begin();
+ return ret;
+}
+
+int PMPI_File_get_info(MPI_File fh, MPI_Info* info)
+{
+ CHECK_FILE(fh)
+ *info = fh->info();
+ return MPI_SUCCESS;
+}
+
+int PMPI_File_set_info(MPI_File fh, MPI_Info info)
+{
+ CHECK_FILE(fh)
+ fh->set_info(info);
+ return MPI_SUCCESS;
}
\ No newline at end of file
-/* Copyright (c) 2010-2019. The SimGrid Team.\r
- * All rights reserved. */\r
-\r
-/* This program is free software; you can redistribute it and/or modify it\r
- * under the terms of the license (GNU LGPL) which comes with this package. */\r
-\r
-#ifndef SMPI_FILE_HPP_INCLUDED\r
-#define SMPI_FILE_HPP_INCLUDED\r
-#include "simgrid/plugins/file_system.h"\r
-#include "smpi_comm.hpp"\r
-#include "smpi_coll.hpp"\r
-#include "smpi_datatype.hpp"\r
-#include "smpi_info.hpp"\r
-#include <algorithm>\r
-\r
-\r
-namespace simgrid{\r
-namespace smpi{\r
-class File{\r
- MPI_Comm comm_;\r
- int flags_;\r
- simgrid::s4u::File* file_;\r
- MPI_Info info_;\r
- MPI_Offset* shared_file_pointer_;\r
- s4u::MutexPtr shared_mutex_;\r
- MPI_Win win_;\r
- char* list_;\r
- public:\r
- File(MPI_Comm comm, char *filename, int amode, MPI_Info info);\r
- ~File();\r
- int size();\r
- int get_position(MPI_Offset* offset);\r
- int get_position_shared(MPI_Offset* offset);\r
- int flags();\r
- int sync();\r
- int seek(MPI_Offset offset, int whence);\r
- int seek_shared(MPI_Offset offset, int whence);\r
- MPI_Info info();\r
- void set_info( MPI_Info info);\r
- static int read(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);\r
- static int read_shared(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);\r
- static int read_ordered(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);\r
- static int write(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);\r
- static int write_shared(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);\r
- static int write_ordered(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);\r
- template <int (*T)(MPI_File, void *, int, MPI_Datatype, MPI_Status *)> int op_all(void *buf, int count,MPI_Datatype datatype, MPI_Status *status);\r
- static int close(MPI_File *fh);\r
- static int del(char *filename, MPI_Info info);\r
-};\r
-\r
- /* Read_all, Write_all : loosely based on */\r
- /* @article{Thakur:1996:ETM:245875.245879,*/\r
- /* author = {Thakur, Rajeev and Choudhary, Alok},*/\r
- /* title = {An Extended Two-phase Method for Accessing Sections of Out-of-core Arrays},*/\r
- /* journal = {Sci. Program.},*/\r
- /* issue_date = {Winter 1996},*/\r
- /* pages = {301--317},*/\r
- /* }*/ \r
- template <int (*T)(MPI_File, void *, int, MPI_Datatype, MPI_Status *)>\r
- int File::op_all(void *buf, int count, MPI_Datatype datatype, MPI_Status *status){\r
- //get min and max offsets from everyone.\r
- int size = comm_->size();\r
- int rank = comm_-> rank();\r
- MPI_Offset min_offset = file_->tell();\r
- MPI_Offset max_offset = (min_offset + count * datatype->size());//cheating, as we don't care about exact data location, we can skip extent\r
- MPI_Offset* min_offsets = xbt_new(MPI_Offset, size);\r
- MPI_Offset* max_offsets = xbt_new(MPI_Offset, size);\r
- simgrid::smpi::Colls::allgather(&min_offset, 1, MPI_OFFSET, min_offsets, 1, MPI_OFFSET, comm_);\r
- simgrid::smpi::Colls::allgather(&max_offset, 1, MPI_OFFSET, max_offsets, 1, MPI_OFFSET, comm_);\r
- MPI_Offset min=min_offset;\r
- MPI_Offset max=max_offset;\r
- MPI_Offset tot= 0;\r
- int empty=1;\r
- for(int i=0;i<size;i++){\r
- if(min_offsets[i]!=max_offsets[i])\r
- empty=0;\r
- tot+=(max_offsets[i]-min_offsets[i]);\r
- if(min_offsets[i]<min)\r
- min=min_offsets[i];\r
- if(max_offsets[i]>max)\r
- max=max_offsets[i];\r
- }\r
- \r
- XBT_DEBUG("my offsets to read : %lld:%lld, global min and max %lld:%lld", min_offset, max_offset, min, max);\r
- if(empty==1){\r
- status->count=0;\r
- return MPI_SUCCESS;\r
- }\r
- MPI_Offset total = max-min;\r
- if(total==tot && (datatype->flags() & DT_FLAG_CONTIGUOUS)){\r
- //contiguous. Just have each proc perform its read\r
- status->count=count * datatype->size();\r
- return T(this,buf,count,datatype, status);\r
- }\r
-\r
- //Interleaved case : How much do I need to read, and whom to send it ?\r
- MPI_Offset my_chunk_start=(max-min+1)/size*rank;\r
- MPI_Offset my_chunk_end=((max-min+1)/size*(rank+1));\r
- XBT_DEBUG("my chunks to read : %lld:%lld", my_chunk_start, my_chunk_end);\r
- int* send_sizes = xbt_new0(int, size);\r
- int* recv_sizes = xbt_new(int, size);\r
- int* send_disps = xbt_new(int, size);\r
- int* recv_disps = xbt_new(int, size);\r
- int total_sent=0;\r
- for(int i=0;i<size;i++){\r
- if((my_chunk_start>=min_offsets[i] && my_chunk_start < max_offsets[i])||\r
- ((my_chunk_end<=max_offsets[i]) && my_chunk_end> min_offsets[i])){\r
- send_sizes[i]=(std::min(max_offsets[i]-1, my_chunk_end-1)-std::max(min_offsets[i], my_chunk_start));\r
- //store min and max offest to actually read\r
- min_offset=std::min(min_offset, min_offsets[i]);\r
- send_disps[i]=0;//send_sizes[i]; cheat to avoid issues when send>recv as we use recv buffer\r
- total_sent+=send_sizes[i];\r
- XBT_DEBUG("will have to send %d bytes to %d", send_sizes[i], i);\r
- }\r
- }\r
- min_offset=std::max(min_offset, my_chunk_start);\r
-\r
- //merge the ranges of every process\r
- std::vector<std::pair<MPI_Offset, MPI_Offset>> ranges;\r
- for(int i=0; i<size; ++i)\r
- ranges.push_back(std::make_pair(min_offsets[i],max_offsets[i]));\r
- std::sort(ranges.begin(), ranges.end());\r
- std::vector<std::pair<MPI_Offset, MPI_Offset>> chunks;\r
- chunks.push_back(ranges[0]);\r
-\r
- unsigned int nchunks=0;\r
- unsigned int i=1;\r
- while(i < ranges.size()){\r
- if(ranges[i].second>chunks[nchunks].second){\r
- // else range included - ignore\r
- if(ranges[i].first>chunks[nchunks].second){\r
- //new disjoint range\r
- chunks.push_back(ranges[i]);\r
- nchunks++;\r
- } else {\r
- //merge ranges\r
- chunks[nchunks].second=ranges[i].second;\r
- }\r
- }\r
- i++;\r
- }\r
- //what do I need to read ?\r
- MPI_Offset totreads=0;\r
- for(i=0; i<chunks.size();i++){\r
- if(chunks[i].second < my_chunk_start)\r
- continue;\r
- else if (chunks[i].first > my_chunk_end)\r
- continue;\r
- else\r
- totreads += (std::min(chunks[i].second, my_chunk_end-1)-std::max(chunks[i].first, my_chunk_start));\r
- }\r
- XBT_DEBUG("will have to access %lld from my chunk", totreads);\r
-\r
- char* sendbuf= static_cast<char *>(smpi_get_tmp_sendbuffer(total_sent));\r
-\r
- if(totreads>0){\r
- seek(min_offset, MPI_SEEK_SET);\r
- T(this,sendbuf,totreads/datatype->size(),datatype, status);\r
- }\r
- simgrid::smpi::Colls::alltoall(send_sizes, 1, MPI_INT, recv_sizes, 1, MPI_INT, comm_);\r
- int total_recv=0;\r
- for(int i=0;i<size;i++){\r
- recv_disps[i]=total_recv;\r
- total_recv+=recv_sizes[i];\r
- }\r
- //Set buf value to avoid copying dumb data\r
- simgrid::smpi::Colls::alltoallv(sendbuf, send_sizes, send_disps, MPI_BYTE,\r
- buf, recv_sizes, recv_disps, MPI_BYTE, comm_);\r
- status->count=count * datatype->size();\r
- smpi_free_tmp_buffer(sendbuf);\r
- xbt_free(send_sizes);\r
- xbt_free(recv_sizes);\r
- xbt_free(send_disps);\r
- xbt_free(recv_disps);\r
- xbt_free(min_offsets);\r
- xbt_free(max_offsets);\r
- return MPI_SUCCESS;\r
- }\r
-}\r
-}\r
-\r
-#endif\r
+/* Copyright (c) 2010-2019. The SimGrid Team.
+ * All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+
+#ifndef SMPI_FILE_HPP_INCLUDED
+#define SMPI_FILE_HPP_INCLUDED
+#include "simgrid/plugins/file_system.h"
+#include "smpi_comm.hpp"
+#include "smpi_coll.hpp"
+#include "smpi_datatype.hpp"
+#include "smpi_info.hpp"
+#include <algorithm>
+
+
+namespace simgrid{
+namespace smpi{
+class File{
+ MPI_Comm comm_;
+ int flags_;
+ simgrid::s4u::File* file_;
+ MPI_Info info_;
+ MPI_Offset* shared_file_pointer_;
+ s4u::MutexPtr shared_mutex_;
+ MPI_Win win_;
+ char* list_;
+ public:
+ File(MPI_Comm comm, char *filename, int amode, MPI_Info info);
+ ~File();
+ int size();
+ int get_position(MPI_Offset* offset);
+ int get_position_shared(MPI_Offset* offset);
+ int flags();
+ int sync();
+ int seek(MPI_Offset offset, int whence);
+ int seek_shared(MPI_Offset offset, int whence);
+ MPI_Info info();
+ void set_info( MPI_Info info);
+ static int read(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);
+ static int read_shared(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);
+ static int read_ordered(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);
+ static int write(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);
+ static int write_shared(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);
+ static int write_ordered(MPI_File fh, void *buf, int count,MPI_Datatype datatype, MPI_Status *status);
+ template <int (*T)(MPI_File, void *, int, MPI_Datatype, MPI_Status *)> int op_all(void *buf, int count,MPI_Datatype datatype, MPI_Status *status);
+ static int close(MPI_File *fh);
+ static int del(char *filename, MPI_Info info);
+};
+
+ /* Read_all, Write_all : loosely based on */
+ /* @article{Thakur:1996:ETM:245875.245879,*/
+ /* author = {Thakur, Rajeev and Choudhary, Alok},*/
+ /* title = {An Extended Two-phase Method for Accessing Sections of Out-of-core Arrays},*/
+ /* journal = {Sci. Program.},*/
+ /* issue_date = {Winter 1996},*/
+ /* pages = {301--317},*/
+ /* }*/
+ template <int (*T)(MPI_File, void *, int, MPI_Datatype, MPI_Status *)>
+ int File::op_all(void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
+ //get min and max offsets from everyone.
+ int size = comm_->size();
+ int rank = comm_-> rank();
+ MPI_Offset min_offset = file_->tell();
+ MPI_Offset max_offset = (min_offset + count * datatype->size());//cheating, as we don't care about exact data location, we can skip extent
+ MPI_Offset* min_offsets = xbt_new(MPI_Offset, size);
+ MPI_Offset* max_offsets = xbt_new(MPI_Offset, size);
+ simgrid::smpi::Colls::allgather(&min_offset, 1, MPI_OFFSET, min_offsets, 1, MPI_OFFSET, comm_);
+ simgrid::smpi::Colls::allgather(&max_offset, 1, MPI_OFFSET, max_offsets, 1, MPI_OFFSET, comm_);
+ MPI_Offset min=min_offset;
+ MPI_Offset max=max_offset;
+ MPI_Offset tot= 0;
+ int empty=1;
+ for(int i=0;i<size;i++){
+ if(min_offsets[i]!=max_offsets[i])
+ empty=0;
+ tot+=(max_offsets[i]-min_offsets[i]);
+ if(min_offsets[i]<min)
+ min=min_offsets[i];
+ if(max_offsets[i]>max)
+ max=max_offsets[i];
+ }
+
+ XBT_DEBUG("my offsets to read : %lld:%lld, global min and max %lld:%lld", min_offset, max_offset, min, max);
+ if(empty==1){
+ status->count=0;
+ return MPI_SUCCESS;
+ }
+ MPI_Offset total = max-min;
+ if(total==tot && (datatype->flags() & DT_FLAG_CONTIGUOUS)){
+ //contiguous. Just have each proc perform its read
+ status->count=count * datatype->size();
+ return T(this,buf,count,datatype, status);
+ }
+
+ //Interleaved case : How much do I need to read, and whom to send it ?
+ MPI_Offset my_chunk_start=(max-min+1)/size*rank;
+ MPI_Offset my_chunk_end=((max-min+1)/size*(rank+1));
+ XBT_DEBUG("my chunks to read : %lld:%lld", my_chunk_start, my_chunk_end);
+ int* send_sizes = xbt_new0(int, size);
+ int* recv_sizes = xbt_new(int, size);
+ int* send_disps = xbt_new(int, size);
+ int* recv_disps = xbt_new(int, size);
+ int total_sent=0;
+ for(int i=0;i<size;i++){
+ if((my_chunk_start>=min_offsets[i] && my_chunk_start < max_offsets[i])||
+ ((my_chunk_end<=max_offsets[i]) && my_chunk_end> min_offsets[i])){
+ send_sizes[i]=(std::min(max_offsets[i]-1, my_chunk_end-1)-std::max(min_offsets[i], my_chunk_start));
+ //store min and max offset to actually read
+ min_offset=std::min(min_offset, min_offsets[i]);
+ send_disps[i]=0;//send_sizes[i]; cheat to avoid issues when send>recv as we use recv buffer
+ total_sent+=send_sizes[i];
+ XBT_DEBUG("will have to send %d bytes to %d", send_sizes[i], i);
+ }
+ }
+ min_offset=std::max(min_offset, my_chunk_start);
+
+ //merge the ranges of every process
+ std::vector<std::pair<MPI_Offset, MPI_Offset>> ranges;
+ for(int i=0; i<size; ++i)
+ ranges.push_back(std::make_pair(min_offsets[i],max_offsets[i]));
+ std::sort(ranges.begin(), ranges.end());
+ std::vector<std::pair<MPI_Offset, MPI_Offset>> chunks;
+ chunks.push_back(ranges[0]);
+
+ unsigned int nchunks=0;
+ unsigned int i=1;
+ while(i < ranges.size()){
+ if(ranges[i].second>chunks[nchunks].second){
+ // else range included - ignore
+ if(ranges[i].first>chunks[nchunks].second){
+ //new disjoint range
+ chunks.push_back(ranges[i]);
+ nchunks++;
+ } else {
+ //merge ranges
+ chunks[nchunks].second=ranges[i].second;
+ }
+ }
+ i++;
+ }
+ //what do I need to read ?
+ MPI_Offset totreads=0;
+ for(i=0; i<chunks.size();i++){
+ if(chunks[i].second < my_chunk_start)
+ continue;
+ else if (chunks[i].first > my_chunk_end)
+ continue;
+ else
+ totreads += (std::min(chunks[i].second, my_chunk_end-1)-std::max(chunks[i].first, my_chunk_start));
+ }
+ XBT_DEBUG("will have to access %lld from my chunk", totreads);
+
+ char* sendbuf= static_cast<char *>(smpi_get_tmp_sendbuffer(total_sent));
+
+ if(totreads>0){
+ seek(min_offset, MPI_SEEK_SET);
+ T(this,sendbuf,totreads/datatype->size(),datatype, status);
+ }
+ simgrid::smpi::Colls::alltoall(send_sizes, 1, MPI_INT, recv_sizes, 1, MPI_INT, comm_);
+ int total_recv=0;
+ for(int i=0;i<size;i++){
+ recv_disps[i]=total_recv;
+ total_recv+=recv_sizes[i];
+ }
+ //Set buf value to avoid copying dumb data
+ simgrid::smpi::Colls::alltoallv(sendbuf, send_sizes, send_disps, MPI_BYTE,
+ buf, recv_sizes, recv_disps, MPI_BYTE, comm_);
+ status->count=count * datatype->size();
+ smpi_free_tmp_buffer(sendbuf);
+ xbt_free(send_sizes);
+ xbt_free(recv_sizes);
+ xbt_free(send_disps);
+ xbt_free(recv_disps);
+ xbt_free(min_offsets);
+ xbt_free(max_offsets);
+ return MPI_SUCCESS;
+ }
+}
+}
+
+#endif
-/* Copyright (c) 2007-2019. The SimGrid Team. All rights reserved. */\r
-\r
-/* This program is free software; you can redistribute it and/or modify it\r
- * under the terms of the license (GNU LGPL) which comes with this package. */\r
-#include "private.hpp"\r
-\r
-#include "smpi_comm.hpp"\r
-#include "smpi_coll.hpp"\r
-#include "smpi_datatype.hpp"\r
-#include "smpi_info.hpp"\r
-#include "smpi_win.hpp"\r
-#include "smpi_request.hpp"\r
-\r
-//setup here, because we have templates in smpi_file we want to log\r
-XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_io, smpi, "Logging specific to SMPI (RMA operations)");\r
-\r
-#include "smpi_file.hpp"\r
-#include "smpi_status.hpp"\r
-#include "simgrid/plugins/file_system.h"\r
-\r
-#define FP_SIZE sizeof(MPI_Offset)\r
-\r
-\r
-namespace simgrid{\r
-namespace smpi{\r
-\r
- File::File(MPI_Comm comm, char *filename, int amode, MPI_Info info): comm_(comm), flags_(amode), info_(info) {\r
- file_= new simgrid::s4u::File(filename, nullptr);\r
- list_=nullptr;\r
- if (comm_->rank() == 0) {\r
- int size= comm_->size() + FP_SIZE;\r
- list_ = new char[size];\r
- memset(list_, 0, size);\r
- shared_file_pointer_ = new MPI_Offset[1];\r
- shared_mutex_ = s4u::Mutex::create();\r
- *shared_file_pointer_ = 0;\r
- win_=new Win(list_, size, 1, MPI_INFO_NULL, comm_);\r
- }else{\r
- win_=new Win(list_, 0, 1, MPI_INFO_NULL, comm_);\r
- }\r
- simgrid::smpi::Colls::bcast(&shared_file_pointer_, 1, MPI_AINT, 0, comm);\r
- simgrid::smpi::Colls::bcast(&shared_mutex_, 1, MPI_AINT, 0, comm);\r
- if(comm_->rank() != 0)\r
- intrusive_ptr_add_ref(&*shared_mutex_);\r
- }\r
-\r
- File::~File(){\r
- delete file_;\r
- }\r
-\r
- int File::close(MPI_File *fh){\r
- XBT_DEBUG("Closing MPI_File %s", (*fh)->file_->get_path());\r
- (*fh)->sync();\r
- if((*fh)->flags() & MPI_MODE_DELETE_ON_CLOSE)\r
- (*fh)->file_->unlink();\r
- delete (*fh);\r
- return MPI_SUCCESS;\r
- }\r
-\r
- int File::del(char *filename, MPI_Info info){\r
- //get the file with MPI_MODE_DELETE_ON_CLOSE and then close it\r
- File* f = new File(MPI_COMM_SELF,filename,MPI_MODE_DELETE_ON_CLOSE|MPI_MODE_RDWR, nullptr);\r
- close(&f);\r
- return MPI_SUCCESS;\r
- }\r
-\r
- int File::get_position(MPI_Offset* offset){\r
- *offset=file_->tell();\r
- return MPI_SUCCESS;\r
- }\r
-\r
- int File::get_position_shared(MPI_Offset* offset){\r
- shared_mutex_->lock();\r
- *offset=*shared_file_pointer_;\r
- shared_mutex_->unlock();\r
- return MPI_SUCCESS;\r
- }\r
-\r
- int File::seek(MPI_Offset offset, int whence){\r
- switch(whence){\r
- case(MPI_SEEK_SET):\r
- XBT_VERB("Seeking in MPI_File %s, setting offset %lld", file_->get_path(), offset);\r
- file_->seek(offset,SEEK_SET);\r
- break;\r
- case(MPI_SEEK_CUR):\r
- XBT_VERB("Seeking in MPI_File %s, current offset + %lld", file_->get_path(), offset);\r
- file_->seek(offset,SEEK_CUR);\r
- break;\r
- case(MPI_SEEK_END):\r
- XBT_VERB("Seeking in MPI_File %s, end offset + %lld", file_->get_path(), offset);\r
- file_->seek(offset,SEEK_END);\r
- break;\r
- default:\r
- return MPI_ERR_FILE;\r
- }\r
- return MPI_SUCCESS;\r
- }\r
-\r
- int File::seek_shared(MPI_Offset offset, int whence){\r
- shared_mutex_->lock();\r
- seek(offset,whence);\r
- *shared_file_pointer_=offset;\r
- shared_mutex_->unlock();\r
- return MPI_SUCCESS;\r
- }\r
-\r
- int File::read(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){\r
- //get position first as we may be doing non contiguous reads and it will probably be updated badly\r
- MPI_Offset position = fh->file_->tell();\r
- MPI_Offset movesize = datatype->get_extent()*count;\r
- MPI_Offset readsize = datatype->size()*count;\r
- XBT_DEBUG("Position before read in MPI_File %s : %llu",fh->file_->get_path(),fh->file_->tell());\r
- MPI_Offset read = fh->file_->read(readsize);\r
- XBT_VERB("Read in MPI_File %s, %lld bytes read, readsize %lld bytes, movesize %lld", fh->file_->get_path(), read, readsize, movesize);\r
- if(readsize!=movesize){\r
- fh->file_->seek(position+movesize, SEEK_SET);\r
- }\r
- XBT_VERB("Position after read in MPI_File %s : %llu",fh->file_->get_path(), fh->file_->tell());\r
- status->count=count*datatype->size();\r
- return MPI_SUCCESS;\r
- }\r
-\r
- /*Ordered and Shared Versions, with RMA-based locks : Based on the model described in :*/\r
- /* @InProceedings{10.1007/11557265_15,*/\r
- /* author="Latham, Robert and Ross, Robert and Thakur, Rajeev and Toonen, Brian",*/ \r
- /* title="Implementing MPI-IO Shared File Pointers Without File System Support",*/\r
- /* booktitle="Recent Advances in Parallel Virtual Machine and Message Passing Interface",*/\r
- /* year="2005",*/\r
- /* publisher="Springer Berlin Heidelberg",*/\r
- /* address="Berlin, Heidelberg",*/\r
- /* pages="84--93"*/\r
- /* }*/\r
- int File::read_shared(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){\r
- fh->shared_mutex_->lock();\r
- fh->seek(*(fh->shared_file_pointer_),MPI_SEEK_SET);\r
- read(fh, buf, count, datatype, status);\r
- *(fh->shared_file_pointer_)=fh->file_->tell();\r
- fh->shared_mutex_->unlock();\r
- return MPI_SUCCESS;\r
- }\r
-\r
- int File::read_ordered(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){\r
- //0 needs to get the shared pointer value\r
- MPI_Offset val;\r
- if(fh->comm_->rank()==0){\r
- val=*(fh->shared_file_pointer_);\r
- }else{\r
- val=count*datatype->size();\r
- }\r
-\r
- MPI_Offset result;\r
- simgrid::smpi::Colls::scan(&val, &result, 1, MPI_OFFSET, MPI_SUM, fh->comm_);\r
- fh->seek(result, MPI_SEEK_SET);\r
- int ret = fh->op_all<simgrid::smpi::File::read>(buf, count, datatype, status);\r
- if(fh->comm_->rank()==fh->comm_->size()-1){\r
- fh->shared_mutex_->lock();\r
- *(fh->shared_file_pointer_)=fh->file_->tell();\r
- fh->shared_mutex_->unlock();\r
- }\r
- char c;\r
- simgrid::smpi::Colls::bcast(&c, 1, MPI_BYTE, fh->comm_->size()-1, fh->comm_);\r
- return ret;\r
- }\r
-\r
- int File::write(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){\r
- //get position first as we may be doing non contiguous reads and it will probably be updated badly\r
- MPI_Offset position = fh->file_->tell();\r
- MPI_Offset movesize = datatype->get_extent()*count;\r
- MPI_Offset writesize = datatype->size()*count;\r
- XBT_DEBUG("Position before write in MPI_File %s : %llu",fh->file_->get_path(),fh->file_->tell());\r
- MPI_Offset write = fh->file_->write(writesize);\r
- XBT_VERB("Write in MPI_File %s, %lld bytes written, readsize %lld bytes, movesize %lld", fh->file_->get_path(), write, writesize, movesize);\r
- if(writesize!=movesize){\r
- fh->file_->seek(position+movesize, SEEK_SET);\r
- }\r
- XBT_VERB("Position after write in MPI_File %s : %llu",fh->file_->get_path(), fh->file_->tell());\r
- status->count=count*datatype->size();\r
- return MPI_SUCCESS;\r
- }\r
-\r
- int File::write_shared(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){\r
- fh->shared_mutex_->lock();\r
- fh->seek(*(fh->shared_file_pointer_),MPI_SEEK_SET);\r
- write(fh, buf, count, datatype, status);\r
- *(fh->shared_file_pointer_)=fh->file_->tell();\r
- fh->shared_mutex_->unlock();\r
- return MPI_SUCCESS;\r
- }\r
-\r
- int File::write_ordered(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){\r
- //0 needs to get the shared pointer value\r
- MPI_Offset val;\r
- if(fh->comm_->rank()==0){\r
- val=*(fh->shared_file_pointer_);\r
- }else{\r
- val=count*datatype->size();\r
- }\r
- MPI_Offset result;\r
- simgrid::smpi::Colls::scan(&val, &result, 1, MPI_OFFSET, MPI_SUM, fh->comm_);\r
- fh->seek(result, MPI_SEEK_SET);\r
- int ret = fh->op_all<simgrid::smpi::File::write>(buf, count, datatype, status);\r
- if(fh->comm_->rank()==fh->comm_->size()-1){\r
- fh->shared_mutex_->lock();\r
- *(fh->shared_file_pointer_)=fh->file_->tell();\r
- fh->shared_mutex_->unlock();\r
- }\r
- char c;\r
- simgrid::smpi::Colls::bcast(&c, 1, MPI_BYTE, fh->comm_->size()-1, fh->comm_);\r
- return ret;\r
- }\r
-\r
- int File::size(){\r
- return file_->size();\r
- }\r
-\r
- int File::flags(){\r
- return flags_;\r
- }\r
-\r
- int File::sync(){\r
- //no idea\r
- return simgrid::smpi::Colls::barrier(comm_);\r
- }\r
-\r
-MPI_Info File::info(){\r
- if(info_== MPI_INFO_NULL)\r
- info_ = new Info();\r
- info_->ref();\r
- return info_;\r
-}\r
-\r
-void File::set_info(MPI_Info info){\r
- if(info_!= MPI_INFO_NULL)\r
- info->ref();\r
- info_=info;\r
-}\r
-\r
-}\r
-}\r
+/* Copyright (c) 2007-2019. The SimGrid Team. All rights reserved. */
+
+/* This program is free software; you can redistribute it and/or modify it
+ * under the terms of the license (GNU LGPL) which comes with this package. */
+#include "private.hpp"
+
+#include "smpi_comm.hpp"
+#include "smpi_coll.hpp"
+#include "smpi_datatype.hpp"
+#include "smpi_info.hpp"
+#include "smpi_win.hpp"
+#include "smpi_request.hpp"
+
+//setup here, because we have templates in smpi_file we want to log
+XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_io, smpi, "Logging specific to SMPI (RMA operations)");
+
+#include "smpi_file.hpp"
+#include "smpi_status.hpp"
+#include "simgrid/plugins/file_system.h"
+
+#define FP_SIZE sizeof(MPI_Offset)
+
+
+namespace simgrid{
+namespace smpi{
+
+ File::File(MPI_Comm comm, char *filename, int amode, MPI_Info info): comm_(comm), flags_(amode), info_(info) {
+ file_= new simgrid::s4u::File(filename, nullptr);
+ list_=nullptr;
+ if (comm_->rank() == 0) {
+ int size= comm_->size() + FP_SIZE;
+ list_ = new char[size];
+ memset(list_, 0, size);
+ shared_file_pointer_ = new MPI_Offset[1];
+ shared_mutex_ = s4u::Mutex::create();
+ *shared_file_pointer_ = 0;
+ win_=new Win(list_, size, 1, MPI_INFO_NULL, comm_);
+ }else{
+ win_=new Win(list_, 0, 1, MPI_INFO_NULL, comm_);
+ }
+ simgrid::smpi::Colls::bcast(&shared_file_pointer_, 1, MPI_AINT, 0, comm);
+ simgrid::smpi::Colls::bcast(&shared_mutex_, 1, MPI_AINT, 0, comm);
+ if(comm_->rank() != 0)
+ intrusive_ptr_add_ref(&*shared_mutex_);
+ }
+
+ File::~File(){
+ delete file_;
+ }
+
+ int File::close(MPI_File *fh){
+ XBT_DEBUG("Closing MPI_File %s", (*fh)->file_->get_path());
+ (*fh)->sync();
+ if((*fh)->flags() & MPI_MODE_DELETE_ON_CLOSE)
+ (*fh)->file_->unlink();
+ delete (*fh);
+ return MPI_SUCCESS;
+ }
+
+ int File::del(char *filename, MPI_Info info){
+ //get the file with MPI_MODE_DELETE_ON_CLOSE and then close it
+ File* f = new File(MPI_COMM_SELF,filename,MPI_MODE_DELETE_ON_CLOSE|MPI_MODE_RDWR, nullptr);
+ close(&f);
+ return MPI_SUCCESS;
+ }
+
+ int File::get_position(MPI_Offset* offset){
+ *offset=file_->tell();
+ return MPI_SUCCESS;
+ }
+
+ int File::get_position_shared(MPI_Offset* offset){
+ shared_mutex_->lock();
+ *offset=*shared_file_pointer_;
+ shared_mutex_->unlock();
+ return MPI_SUCCESS;
+ }
+
+ int File::seek(MPI_Offset offset, int whence){
+ switch(whence){
+ case(MPI_SEEK_SET):
+ XBT_VERB("Seeking in MPI_File %s, setting offset %lld", file_->get_path(), offset);
+ file_->seek(offset,SEEK_SET);
+ break;
+ case(MPI_SEEK_CUR):
+ XBT_VERB("Seeking in MPI_File %s, current offset + %lld", file_->get_path(), offset);
+ file_->seek(offset,SEEK_CUR);
+ break;
+ case(MPI_SEEK_END):
+ XBT_VERB("Seeking in MPI_File %s, end offset + %lld", file_->get_path(), offset);
+ file_->seek(offset,SEEK_END);
+ break;
+ default:
+ return MPI_ERR_FILE;
+ }
+ return MPI_SUCCESS;
+ }
+
+ int File::seek_shared(MPI_Offset offset, int whence){
+ shared_mutex_->lock();
+ seek(offset,whence);
+ *shared_file_pointer_=offset;
+ shared_mutex_->unlock();
+ return MPI_SUCCESS;
+ }
+
+ int File::read(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
+ //get position first as we may be doing non contiguous reads and it will probably be updated badly
+ MPI_Offset position = fh->file_->tell();
+ MPI_Offset movesize = datatype->get_extent()*count;
+ MPI_Offset readsize = datatype->size()*count;
+ XBT_DEBUG("Position before read in MPI_File %s : %llu",fh->file_->get_path(),fh->file_->tell());
+ MPI_Offset read = fh->file_->read(readsize);
+ XBT_VERB("Read in MPI_File %s, %lld bytes read, readsize %lld bytes, movesize %lld", fh->file_->get_path(), read, readsize, movesize);
+ if(readsize!=movesize){
+ fh->file_->seek(position+movesize, SEEK_SET);
+ }
+ XBT_VERB("Position after read in MPI_File %s : %llu",fh->file_->get_path(), fh->file_->tell());
+ status->count=count*datatype->size();
+ return MPI_SUCCESS;
+ }
+
+ /*Ordered and Shared Versions, with RMA-based locks : Based on the model described in :*/
+ /* @InProceedings{10.1007/11557265_15,*/
+ /* author="Latham, Robert and Ross, Robert and Thakur, Rajeev and Toonen, Brian",*/
+ /* title="Implementing MPI-IO Shared File Pointers Without File System Support",*/
+ /* booktitle="Recent Advances in Parallel Virtual Machine and Message Passing Interface",*/
+ /* year="2005",*/
+ /* publisher="Springer Berlin Heidelberg",*/
+ /* address="Berlin, Heidelberg",*/
+ /* pages="84--93"*/
+ /* }*/
+ int File::read_shared(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
+ fh->shared_mutex_->lock();
+ fh->seek(*(fh->shared_file_pointer_),MPI_SEEK_SET);
+ read(fh, buf, count, datatype, status);
+ *(fh->shared_file_pointer_)=fh->file_->tell();
+ fh->shared_mutex_->unlock();
+ return MPI_SUCCESS;
+ }
+
+ int File::read_ordered(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
+ //0 needs to get the shared pointer value
+ MPI_Offset val;
+ if(fh->comm_->rank()==0){
+ val=*(fh->shared_file_pointer_);
+ }else{
+ val=count*datatype->size();
+ }
+
+ MPI_Offset result;
+ simgrid::smpi::Colls::scan(&val, &result, 1, MPI_OFFSET, MPI_SUM, fh->comm_);
+ fh->seek(result, MPI_SEEK_SET);
+ int ret = fh->op_all<simgrid::smpi::File::read>(buf, count, datatype, status);
+ if(fh->comm_->rank()==fh->comm_->size()-1){
+ fh->shared_mutex_->lock();
+ *(fh->shared_file_pointer_)=fh->file_->tell();
+ fh->shared_mutex_->unlock();
+ }
+ char c;
+ simgrid::smpi::Colls::bcast(&c, 1, MPI_BYTE, fh->comm_->size()-1, fh->comm_);
+ return ret;
+ }
+
+ int File::write(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
+ //get position first as we may be doing non contiguous writes and it will probably be updated badly
+ MPI_Offset position = fh->file_->tell();
+ MPI_Offset movesize = datatype->get_extent()*count;
+ MPI_Offset writesize = datatype->size()*count;
+ XBT_DEBUG("Position before write in MPI_File %s : %llu",fh->file_->get_path(),fh->file_->tell());
+ MPI_Offset write = fh->file_->write(writesize);
+ XBT_VERB("Write in MPI_File %s, %lld bytes written, readsize %lld bytes, movesize %lld", fh->file_->get_path(), write, writesize, movesize);
+ if(writesize!=movesize){
+ fh->file_->seek(position+movesize, SEEK_SET);
+ }
+ XBT_VERB("Position after write in MPI_File %s : %llu",fh->file_->get_path(), fh->file_->tell());
+ status->count=count*datatype->size();
+ return MPI_SUCCESS;
+ }
+
+ int File::write_shared(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
+ fh->shared_mutex_->lock();
+ fh->seek(*(fh->shared_file_pointer_),MPI_SEEK_SET);
+ write(fh, buf, count, datatype, status);
+ *(fh->shared_file_pointer_)=fh->file_->tell();
+ fh->shared_mutex_->unlock();
+ return MPI_SUCCESS;
+ }
+
+ int File::write_ordered(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
+ //0 needs to get the shared pointer value
+ MPI_Offset val;
+ if(fh->comm_->rank()==0){
+ val=*(fh->shared_file_pointer_);
+ }else{
+ val=count*datatype->size();
+ }
+ MPI_Offset result;
+ simgrid::smpi::Colls::scan(&val, &result, 1, MPI_OFFSET, MPI_SUM, fh->comm_);
+ fh->seek(result, MPI_SEEK_SET);
+ int ret = fh->op_all<simgrid::smpi::File::write>(buf, count, datatype, status);
+ if(fh->comm_->rank()==fh->comm_->size()-1){
+ fh->shared_mutex_->lock();
+ *(fh->shared_file_pointer_)=fh->file_->tell();
+ fh->shared_mutex_->unlock();
+ }
+ char c;
+ simgrid::smpi::Colls::bcast(&c, 1, MPI_BYTE, fh->comm_->size()-1, fh->comm_);
+ return ret;
+ }
+
+ int File::size(){
+ return file_->size();
+ }
+
+ int File::flags(){
+ return flags_;
+ }
+
+ int File::sync(){
+ //no idea
+ return simgrid::smpi::Colls::barrier(comm_);
+ }
+
+MPI_Info File::info(){
+ if(info_== MPI_INFO_NULL)
+ info_ = new Info();
+ info_->ref();
+ return info_;
+}
+
+void File::set_info(MPI_Info info){
+ if(info_!= MPI_INFO_NULL)
+ info->ref();
+ info_=info;
+}
+
+}
+}
-#include "mpi.h"\r
-#include <stdio.h>\r
-#include <stdlib.h>\r
-#include <string.h>\r
-#include <memory.h>\r
-\r
-/* Test reading and writing zero bytes (set status correctly) */\r
-\r
-int main( int argc, char *argv[] )\r
-{\r
- int errs = 0;\r
- int size, rank, i, *buf, count;\r
- MPI_File fh;\r
- MPI_Comm comm;\r
- MPI_Status status;\r
- \r
- MPI_Init( &argc, &argv );\r
- \r
- comm = MPI_COMM_WORLD;\r
- MPI_File_open( comm, (char*)"/scratch/lib/libsimgrid.so.3.6.2", MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_DELETE_ON_CLOSE, MPI_INFO_NULL, &fh );\r
- MPI_Comm_size( comm, &size );\r
- MPI_Comm_rank( comm, &rank );\r
- buf = (int *)malloc( 10 * sizeof(int) );\r
- buf[0] = rank;\r
- \r
- /* Write to file */\r
- MPI_File_seek( fh, sizeof(int)*rank, MPI_SEEK_SET ); \r
- MPI_File_write_all( fh, buf, 1, MPI_INT, &status );\r
- MPI_Get_count( &status, MPI_INT, &count );\r
- if (count != 1) {\r
- errs++;\r
- fprintf( stderr, "Wrong count (%d) on write\n", count );fflush(stderr);\r
- }\r
- /* Write to file, overlapping */\r
- MPI_File_seek( fh, sizeof(int)*rank, MPI_SEEK_SET ); \r
- MPI_File_write_all( fh, buf, 10, MPI_INT, &status );\r
- MPI_Get_count( &status, MPI_INT, &count );\r
- if (count != 10) {\r
- errs++;\r
- fprintf( stderr, "Wrong count (%d) on write\n", count );fflush(stderr);\r
- }\r
- /* Read nothing (check status) */\r
- memset( &status, 0xff, sizeof(MPI_Status) );\r
- MPI_File_read_all( fh, buf, 0, MPI_INT, &status );\r
- MPI_Get_count( &status, MPI_INT, &count );\r
- if (count != 0) {\r
- errs++;\r
- fprintf( stderr, "Count not zero (%d) on read\n", count );fflush(stderr);\r
- }\r
-\r
- /* Write nothing (check status) */\r
- memset( &status, 0xff, sizeof(MPI_Status) );\r
- MPI_File_write_all( fh, buf, 0, MPI_INT, &status );\r
- if (count != 0) {\r
- errs++;\r
- fprintf( stderr, "Count not zero (%d) on write\n", count );fflush(stderr);\r
- }\r
-\r
- MPI_Barrier( comm );\r
-\r
- MPI_File_seek( fh, sizeof(int)*rank, MPI_SEEK_SET );\r
- for (i=0; i<size; i++) buf[i] = -1;\r
- MPI_File_read_all( fh, buf, 10, MPI_INT, &status );\r
- // if (buf[0] != rank) {\r
- // errs++;\r
- // fprintf( stderr, "%d: buf = %d\n", rank, buf[0] );fflush(stderr);\r
- // }\r
- \r
- free( buf );\r
- MPI_File_close( &fh );\r
- \r
- MPI_Finalize();\r
- return errs;\r
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <memory.h>
+
+/* Test reading and writing zero bytes (set status correctly) */
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int size, rank, i, *buf, count;
+ MPI_File fh;
+ MPI_Comm comm;
+ MPI_Status status;
+
+ MPI_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+ MPI_File_open( comm, (char*)"/scratch/lib/libsimgrid.so.3.6.2", MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_DELETE_ON_CLOSE, MPI_INFO_NULL, &fh );
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+ buf = (int *)malloc( 10 * sizeof(int) );
+ buf[0] = rank;
+
+ /* Write to file */
+ MPI_File_seek( fh, sizeof(int)*rank, MPI_SEEK_SET );
+ MPI_File_write_all( fh, buf, 1, MPI_INT, &status );
+ MPI_Get_count( &status, MPI_INT, &count );
+ if (count != 1) {
+ errs++;
+ fprintf( stderr, "Wrong count (%d) on write\n", count );fflush(stderr);
+ }
+ /* Write to file, overlapping */
+ MPI_File_seek( fh, sizeof(int)*rank, MPI_SEEK_SET );
+ MPI_File_write_all( fh, buf, 10, MPI_INT, &status );
+ MPI_Get_count( &status, MPI_INT, &count );
+ if (count != 10) {
+ errs++;
+ fprintf( stderr, "Wrong count (%d) on write\n", count );fflush(stderr);
+ }
+ /* Read nothing (check status) */
+ memset( &status, 0xff, sizeof(MPI_Status) );
+ MPI_File_read_all( fh, buf, 0, MPI_INT, &status );
+ MPI_Get_count( &status, MPI_INT, &count );
+ if (count != 0) {
+ errs++;
+ fprintf( stderr, "Count not zero (%d) on read\n", count );fflush(stderr);
+ }
+
+ /* Write nothing (check status) */
+ memset( &status, 0xff, sizeof(MPI_Status) );
+ MPI_File_write_all( fh, buf, 0, MPI_INT, &status );
+ if (count != 0) {
+ errs++;
+ fprintf( stderr, "Count not zero (%d) on write\n", count );fflush(stderr);
+ }
+
+ MPI_Barrier( comm );
+
+ MPI_File_seek( fh, sizeof(int)*rank, MPI_SEEK_SET );
+ for (i=0; i<size; i++) buf[i] = -1;
+ MPI_File_read_all( fh, buf, 10, MPI_INT, &status );
+ // if (buf[0] != rank) {
+ // errs++;
+ // fprintf( stderr, "%d: buf = %d\n", rank, buf[0] );fflush(stderr);
+ // }
+
+ free( buf );
+ MPI_File_close( &fh );
+
+ MPI_Finalize();
+ return errs;
}
\ No newline at end of file
-# Test for MPI_File_read and MPI_File_write\r
-! output sort\r
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_io -platform ../../../examples/platforms/storage/remote_io.xml -np 4 --log=xbt_cfg.thres:critical --log=smpi_kernel.thres:warning --log=smpi_mpi.thres:error --log=smpi_io.thres:verbose "--log=root.fmt:[%10.6r]%e(%P@%h)%e%m%n" --cfg=smpi/simulate-computation:0 ${bindir:=.}/io-all\r
-> You requested to use 4 ranks, but there is only 2 processes in your hostfile...\r
-> [rank 0] -> bob\r
-> [rank 1] -> carl\r
-> [rank 2] -> bob\r
-> [rank 3] -> carl\r
-> [ 0.000282] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.000358] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8\r
-> [ 0.000534] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 12\r
-> [ 0.000534] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4\r
-> [ 0.000737] (3@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
-> [ 0.000737] (3@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 16\r
-> [ 0.000737] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 12\r
-> [ 0.000737] (1@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
-> [ 0.000737] (1@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 8\r
-> [ 0.000737] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4\r
-> [ 0.000838] (2@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
-> [ 0.000838] (2@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12\r
-> [ 0.000838] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8\r
-> [ 0.000838] (0@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
-> [ 0.000838] (0@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4\r
-> [ 0.000838] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.001040] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 39\r
-> [ 0.001040] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 26\r
-> [ 0.001040] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.001040] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 13\r
-> [ 0.001041] (3@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes written, readsize 12 bytes, movesize 12\r
-> [ 0.001041] (3@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 51\r
-> [ 0.001041] (1@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes written, readsize 12 bytes, movesize 12\r
-> [ 0.001041] (1@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 25\r
-> [ 0.001141] (0@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes written, readsize 12 bytes, movesize 12\r
-> [ 0.001141] (0@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12\r
-> [ 0.001141] (2@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes written, readsize 12 bytes, movesize 12\r
-> [ 0.001141] (2@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 38\r
-> [ 0.001799] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.001814] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8\r
-> [ 0.001849] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 12\r
-> [ 0.001849] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4\r
-> [ 0.002052] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.002052] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 26\r
-> [ 0.002052] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 39\r
-> [ 0.002052] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 13\r
-> [ 0.002052] (3@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes read, readsize 12 bytes, movesize 12\r
-> [ 0.002052] (3@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 51\r
-> [ 0.002052] (1@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes read, readsize 12 bytes, movesize 12\r
-> [ 0.002052] (1@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 25\r
-> [ 0.002153] (2@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes read, readsize 12 bytes, movesize 12\r
-> [ 0.002153] (2@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 38\r
-> [ 0.002153] (0@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes read, readsize 12 bytes, movesize 12\r
-> [ 0.002153] (0@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12\r
+# Test for MPI_File_read and MPI_File_write
+! output sort
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_io -platform ../../../examples/platforms/storage/remote_io.xml -np 4 --log=xbt_cfg.thres:critical --log=smpi_kernel.thres:warning --log=smpi_mpi.thres:error --log=smpi_io.thres:verbose "--log=root.fmt:(%P@%h)%e%m%n" --cfg=smpi/simulate-computation:0 ${bindir:=.}/io-all
+> You requested to use 4 ranks, but there is only 2 processes in your hostfile...
+> [rank 0] -> bob
+> [rank 1] -> carl
+> [rank 2] -> bob
+> [rank 3] -> carl
+> (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8
+> (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 12
+> (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4
+> (3@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4
+> (3@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 16
+> (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 12
+> (1@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4
+> (1@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 8
+> (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4
+> (2@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4
+> (2@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12
+> (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8
+> (0@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4
+> (0@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4
+> (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 39
+> (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 26
+> (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 13
+> (3@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes written, readsize 12 bytes, movesize 12
+> (3@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 51
+> (1@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes written, readsize 12 bytes, movesize 12
+> (1@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 25
+> (0@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes written, readsize 12 bytes, movesize 12
+> (0@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12
+> (2@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes written, readsize 12 bytes, movesize 12
+> (2@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 38
+> (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8
+> (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 12
+> (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4
+> (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 26
+> (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 39
+> (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 13
+> (3@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes read, readsize 12 bytes, movesize 12
+> (3@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 51
+> (1@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes read, readsize 12 bytes, movesize 12
+> (1@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 25
+> (2@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes read, readsize 12 bytes, movesize 12
+> (2@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 38
+> (0@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 12 bytes read, readsize 12 bytes, movesize 12
+> (0@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12
-#include "mpi.h"\r
-#include <stdio.h>\r
-#include <stdlib.h>\r
-#include <string.h>\r
-#include <memory.h>\r
-\r
-/* Test reading and writing zero bytes (set status correctly) */\r
-\r
-int main( int argc, char *argv[] )\r
-{\r
- int errs = 0;\r
- int size, rank, i, *buf, count;\r
- MPI_File fh;\r
- MPI_Comm comm;\r
- MPI_Status status;\r
- \r
- MPI_Init( &argc, &argv );\r
- \r
- comm = MPI_COMM_WORLD;\r
- MPI_File_open( comm, (char*)"/scratch/lib/libsimgrid.so.3.6.2", MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_DELETE_ON_CLOSE, MPI_INFO_NULL, &fh );\r
- MPI_Comm_size( comm, &size );\r
- MPI_Comm_rank( comm, &rank );\r
- buf = (int *)malloc( 10* sizeof(int) );\r
- buf[0] = rank;\r
- \r
- /* Write to file */\r
- MPI_File_write_ordered( fh, buf, 10, MPI_INT, &status );\r
- MPI_Get_count( &status, MPI_INT, &count );\r
- if (count != 10) {\r
- errs++;\r
- fprintf( stderr, "Wrong count (%d) on write-ordered\n", count );fflush(stderr);\r
- }\r
- MPI_Barrier( comm );\r
- MPI_File_seek_shared( fh, 0, MPI_SEEK_SET );\r
- for (i=0; i<size; i++) buf[i] = -1;\r
- MPI_File_read_ordered( fh, buf, 10, MPI_INT, &status );\r
-\r
- MPI_Barrier(comm);\r
- MPI_File_seek_shared( fh, 0, MPI_SEEK_SET );\r
-\r
- free( buf );\r
- MPI_File_close( &fh );\r
- \r
- MPI_Finalize();\r
- return errs;\r
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <memory.h>
+
+/* Test reading and writing zero bytes (set status correctly) */
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int size, rank, i, *buf, count;
+ MPI_File fh;
+ MPI_Comm comm;
+ MPI_Status status;
+
+ MPI_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+ MPI_File_open( comm, (char*)"/scratch/lib/libsimgrid.so.3.6.2", MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_DELETE_ON_CLOSE, MPI_INFO_NULL, &fh );
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+ buf = (int *)malloc( 10* sizeof(int) );
+ buf[0] = rank;
+
+ /* Write to file */
+ MPI_File_write_ordered( fh, buf, 10, MPI_INT, &status );
+ MPI_Get_count( &status, MPI_INT, &count );
+ if (count != 10) {
+ errs++;
+ fprintf( stderr, "Wrong count (%d) on write-ordered\n", count );fflush(stderr);
+ }
+ MPI_Barrier( comm );
+ MPI_File_seek_shared( fh, 0, MPI_SEEK_SET );
+ for (i=0; i<size; i++) buf[i] = -1;
+ MPI_File_read_ordered( fh, buf, 10, MPI_INT, &status );
+
+ MPI_Barrier(comm);
+ MPI_File_seek_shared( fh, 0, MPI_SEEK_SET );
+
+ free( buf );
+ MPI_File_close( &fh );
+
+ MPI_Finalize();
+ return errs;
}
\ No newline at end of file
-# Test for MPI_File_read and MPI_File_write\r
-! output sort\r
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_io -platform ../../../examples/platforms/storage/remote_io.xml -np 4 --log=xbt_cfg.thres:critical --log=smpi_kernel.thres:warning --log=smpi_mpi.thres:error --log=smpi_io.thres:verbose "--log=root.fmt:[%10.6r]%e(%P@%h)%e%m%n" --cfg=smpi/simulate-computation:0 ${bindir:=.}/io-ordered\r
-> You requested to use 4 ranks, but there is only 2 processes in your hostfile...\r
-> [rank 0] -> bob\r
-> [rank 1] -> carl\r
-> [rank 2] -> bob\r
-> [rank 3] -> carl\r
-> [ 0.000282] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.000635] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 80\r
-> [ 0.000635] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 40\r
-> [ 0.000635] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 120\r
-> [ 0.000839] (3@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes written, readsize 40 bytes, movesize 40\r
-> [ 0.000839] (3@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 160\r
-> [ 0.000839] (1@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes written, readsize 40 bytes, movesize 40\r
-> [ 0.000839] (1@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 80\r
-> [ 0.000940] (2@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes written, readsize 40 bytes, movesize 40\r
-> [ 0.000940] (2@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 120\r
-> [ 0.000940] (0@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes written, readsize 40 bytes, movesize 40\r
-> [ 0.000940] (0@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 40\r
-> [ 0.001091] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.001091] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.001106] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.001141] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.001141] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.001242] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 80\r
-> [ 0.001242] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 40\r
-> [ 0.001242] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 120\r
-> [ 0.001446] (3@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40\r
-> [ 0.001446] (3@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 160\r
-> [ 0.001446] (1@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40\r
-> [ 0.001446] (1@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 80\r
-> [ 0.001548] (2@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40\r
-> [ 0.001548] (2@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 120\r
-> [ 0.001548] (0@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40\r
-> [ 0.001548] (0@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 40\r
-> [ 0.001699] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.001714] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.001749] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.001749] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
+# Test for MPI_File_read and MPI_File_write
+! output sort
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_io -platform ../../../examples/platforms/storage/remote_io.xml -np 4 --log=xbt_cfg.thres:critical --log=smpi_kernel.thres:warning --log=smpi_mpi.thres:error --log=smpi_io.thres:verbose "--log=root.fmt:(%P@%h)%e%m%n" --cfg=smpi/simulate-computation:0 ${bindir:=.}/io-ordered
+> You requested to use 4 ranks, but there is only 2 processes in your hostfile...
+> [rank 0] -> bob
+> [rank 1] -> carl
+> [rank 2] -> bob
+> [rank 3] -> carl
+> (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 80
+> (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 40
+> (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 120
+> (3@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes written, readsize 40 bytes, movesize 40
+> (3@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 160
+> (1@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes written, readsize 40 bytes, movesize 40
+> (1@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 80
+> (2@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes written, readsize 40 bytes, movesize 40
+> (2@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 120
+> (0@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes written, readsize 40 bytes, movesize 40
+> (0@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 40
+> (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 80
+> (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 40
+> (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 120
+> (3@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40
+> (3@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 160
+> (1@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40
+> (1@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 80
+> (2@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40
+> (2@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 120
+> (0@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40
+> (0@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 40
+> (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
-#include "mpi.h"\r
-#include <stdio.h>\r
-#include <stdlib.h>\r
-#include <string.h>\r
-#include <memory.h>\r
-\r
-/* Test reading and writing zero bytes (set status correctly) */\r
-\r
-int main( int argc, char *argv[] )\r
-{\r
- int errs = 0;\r
- int size, rank, i, *buf, count;\r
- MPI_File fh;\r
- MPI_Comm comm;\r
- MPI_Status status;\r
- \r
- MPI_Init( &argc, &argv );\r
- \r
- comm = MPI_COMM_WORLD;\r
- MPI_File_open( comm, (char*)"/scratch/lib/libsimgrid.so.3.6.2", MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_DELETE_ON_CLOSE, MPI_INFO_NULL, &fh );\r
- MPI_Comm_size( comm, &size );\r
- MPI_Comm_rank( comm, &rank );\r
- buf = (int *)malloc( 10+size * sizeof(int) );\r
- buf[0] = rank;\r
- \r
- /* Write nothing (check status) */\r
- memset( &status, 0xff, sizeof(MPI_Status) );\r
- MPI_File_write( fh, buf, 0, MPI_INT, &status );\r
- MPI_Get_count( &status, MPI_INT, &count );\r
- if (count != 0) {\r
- errs++;\r
- fprintf( stderr, "Count not zero (%d) on write\n", count );fflush(stderr);\r
- }\r
- \r
- MPI_Barrier( comm );\r
- MPI_File_seek_shared( fh, 0, MPI_SEEK_SET );\r
- for (i=0; i<size; i++) buf[i] = -1;\r
- MPI_File_read_ordered( fh, buf, 10, MPI_INT, &status );\r
-\r
- MPI_Barrier(comm);\r
- MPI_File_seek_shared( fh, 0, MPI_SEEK_SET );\r
-\r
- memset( &status, 0xff, sizeof(MPI_Status) );\r
- MPI_File_read_shared( fh, buf, 1, MPI_INT, &status );\r
- MPI_Get_count( &status, MPI_INT, &count );\r
- if (count != 1) {\r
- errs++;\r
- fprintf( stderr, "Count not zero (%d) on read shared\n", count );fflush(stderr);\r
- }\r
- \r
- MPI_Barrier(comm);\r
- MPI_File_seek_shared( fh, 0, MPI_SEEK_SET );\r
-\r
- memset( &status, 0xff, sizeof(MPI_Status) );\r
- MPI_File_write_shared( fh, buf, 1, MPI_INT, &status );\r
- if (count != 1) {\r
- errs++;\r
- fprintf( stderr, "Count not zero (%d) on write\n", count );fflush(stderr);\r
- }\r
-\r
- free( buf );\r
- MPI_File_close( &fh );\r
- \r
- MPI_Finalize();\r
- return errs;\r
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <memory.h>
+
+/* Test reading and writing zero bytes (set status correctly) */
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int size, rank, i, *buf, count;
+ MPI_File fh;
+ MPI_Comm comm;
+ MPI_Status status;
+
+ MPI_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+ MPI_File_open( comm, (char*)"/scratch/lib/libsimgrid.so.3.6.2", MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_DELETE_ON_CLOSE, MPI_INFO_NULL, &fh );
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+ buf = (int *)malloc( 10+size * sizeof(int) );
+ buf[0] = rank;
+
+ /* Write nothing (check status) */
+ memset( &status, 0xff, sizeof(MPI_Status) );
+ MPI_File_write( fh, buf, 0, MPI_INT, &status );
+ MPI_Get_count( &status, MPI_INT, &count );
+ if (count != 0) {
+ errs++;
+ fprintf( stderr, "Count not zero (%d) on write\n", count );fflush(stderr);
+ }
+
+ MPI_Barrier( comm );
+ MPI_File_seek_shared( fh, 0, MPI_SEEK_SET );
+ for (i=0; i<size; i++) buf[i] = -1;
+ MPI_File_read_ordered( fh, buf, 10, MPI_INT, &status );
+
+ MPI_Barrier(comm);
+ MPI_File_seek_shared( fh, 0, MPI_SEEK_SET );
+
+ memset( &status, 0xff, sizeof(MPI_Status) );
+ MPI_File_read_shared( fh, buf, 1, MPI_INT, &status );
+ MPI_Get_count( &status, MPI_INT, &count );
+ if (count != 1) {
+ errs++;
+ fprintf( stderr, "Count not zero (%d) on read shared\n", count );fflush(stderr);
+ }
+
+ MPI_Barrier(comm);
+ MPI_File_seek_shared( fh, 0, MPI_SEEK_SET );
+
+ memset( &status, 0xff, sizeof(MPI_Status) );
+ MPI_File_write_shared( fh, buf, 1, MPI_INT, &status );
+ if (count != 1) {
+ errs++;
+ fprintf( stderr, "Count not zero (%d) on write\n", count );fflush(stderr);
+ }
+
+ free( buf );
+ MPI_File_close( &fh );
+
+ MPI_Finalize();
+ return errs;
}
\ No newline at end of file
-# Test for MPI_File_read and MPI_File_write\r
-! output sort\r
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_io -platform ../../../examples/platforms/storage/remote_io.xml -np 4 --log=xbt_cfg.thres:critical --log=smpi_kernel.thres:warning --log=smpi_mpi.thres:error --log=smpi_io.thres:verbose "--log=root.fmt:[%10.6r]%e(%P@%h)%e%m%n" --cfg=smpi/simulate-computation:0 ${bindir:=.}/io-shared\r
-> You requested to use 4 ranks, but there is only 2 processes in your hostfile...\r
-> [rank 0] -> bob\r
-> [rank 1] -> carl\r
-> [rank 2] -> bob\r
-> [rank 3] -> carl\r
-> [ 0.000584] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.000584] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.000599] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.000634] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.000634] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.000735] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 80\r
-> [ 0.000735] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 40\r
-> [ 0.000735] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 120\r
-> [ 0.000939] (3@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40\r
-> [ 0.000939] (3@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 160\r
-> [ 0.000939] (1@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40\r
-> [ 0.000939] (1@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 80\r
-> [ 0.001041] (2@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40\r
-> [ 0.001041] (2@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 120\r
-> [ 0.001041] (0@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40\r
-> [ 0.001041] (0@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 40\r
-> [ 0.001192] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.001192] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.001292] (0@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
-> [ 0.001292] (0@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4\r
-> [ 0.001292] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.001292] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.001292] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.001292] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.001393] (2@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
-> [ 0.001393] (2@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4\r
-> [ 0.001393] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4\r
-> [ 0.001393] (3@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
-> [ 0.001393] (3@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 8\r
-> [ 0.001393] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8\r
-> [ 0.001393] (1@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
-> [ 0.001393] (1@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12\r
-> [ 0.001443] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.001443] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.001544] (0@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
-> [ 0.001544] (0@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4\r
-> [ 0.001544] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.001544] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.001544] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.001544] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.001645] (2@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
-> [ 0.001645] (2@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4\r
-> [ 0.001645] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4\r
-> [ 0.001645] (3@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
-> [ 0.001645] (3@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 8\r
-> [ 0.001645] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8\r
-> [ 0.001645] (1@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
-> [ 0.001645] (1@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12\r
+# Test for MPI_File_read and MPI_File_write
+! output sort
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_io -platform ../../../examples/platforms/storage/remote_io.xml -np 4 --log=xbt_cfg.thres:critical --log=smpi_kernel.thres:warning --log=smpi_mpi.thres:error --log=smpi_io.thres:verbose "--log=root.fmt:(%P@%h)%e%m%n" --cfg=smpi/simulate-computation:0 ${bindir:=.}/io-shared
+> You requested to use 4 ranks, but there is only 2 processes in your hostfile...
+> [rank 0] -> bob
+> [rank 1] -> carl
+> [rank 2] -> bob
+> [rank 3] -> carl
+> (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 80
+> (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 40
+> (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 120
+> (3@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40
+> (3@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 160
+> (1@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40
+> (1@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 80
+> (2@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40
+> (2@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 120
+> (0@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 40 bytes read, readsize 40 bytes, movesize 40
+> (0@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 40
+> (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (0@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4
+> (0@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4
+> (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (2@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4
+> (2@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4
+> (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4
+> (3@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4
+> (3@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 8
+> (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8
+> (1@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4
+> (1@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12
+> (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (0@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4
+> (0@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4
+> (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (2@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4
+> (2@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4
+> (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4
+> (3@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4
+> (3@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 8
+> (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8
+> (1@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4
+> (1@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12
-#include "mpi.h"\r
-#include <stdio.h>\r
-#include <stdlib.h>\r
-#include <string.h>\r
-#include <memory.h>\r
-\r
-/* Test reading and writing zero bytes (set status correctly) */\r
-\r
-int main( int argc, char *argv[] )\r
-{\r
- int errs = 0;\r
- int size, rank, i, *buf, count;\r
- MPI_File fh;\r
- MPI_Comm comm;\r
- MPI_Status status;\r
- \r
- MPI_Init( &argc, &argv );\r
- \r
- comm = MPI_COMM_WORLD;\r
- MPI_File_open( comm, (char*)"/scratch/lib/libsimgrid.so.3.6.2", MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_DELETE_ON_CLOSE, MPI_INFO_NULL, &fh );\r
- MPI_Comm_size( comm, &size );\r
- MPI_Comm_rank( comm, &rank );\r
- buf = (int *)malloc( size * sizeof(int) );\r
- buf[0] = rank;\r
- \r
- /* Write to file */\r
- MPI_File_write_at( fh, sizeof(int)*rank, buf, 1, MPI_INT, &status );\r
- MPI_Get_count( &status, MPI_INT, &count );\r
- if (count != 1) {\r
- errs++;\r
- fprintf( stderr, "Wrong count (%d) on write_at\n", count );fflush(stderr);\r
- }\r
-\r
- /* Read nothing (check status) */\r
- memset( &status, 0xff, sizeof(MPI_Status) );\r
- MPI_File_read_at( fh, sizeof(int)*rank, buf, 0, MPI_INT, &status );\r
- MPI_Get_count( &status, MPI_INT, &count );\r
- if (count != 0) {\r
- errs++;\r
- fprintf( stderr, "Count not zero (%d) on read\n", count );fflush(stderr);\r
- }\r
-\r
- /* Write nothing (check status) */\r
- memset( &status, 0xff, sizeof(MPI_Status) );\r
- MPI_File_write_at( fh, sizeof(int)*rank, buf, 0, MPI_INT, &status );\r
- if (count != 0) {\r
- errs++;\r
- fprintf( stderr, "Count not zero (%d) on write\n", count );fflush(stderr);\r
- }\r
-\r
- MPI_Barrier( comm );\r
-\r
- for (i=0; i<size; i++) buf[i] = -1;\r
- MPI_File_read_at( fh, sizeof(int)*rank, buf, 1, MPI_INT, &status );\r
- // if (buf[0] != rank) {\r
- // errs++;\r
- // fprintf( stderr, "%d: buf = %d\n", rank, buf[0] );fflush(stderr);\r
- // }\r
- \r
- free( buf );\r
- MPI_File_close( &fh );\r
- \r
- MPI_Finalize();\r
- return errs;\r
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <memory.h>
+
+/* Test reading and writing zero bytes (set status correctly) */
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int size, rank, i, *buf, count;
+ MPI_File fh;
+ MPI_Comm comm;
+ MPI_Status status;
+
+ MPI_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+ MPI_File_open( comm, (char*)"/scratch/lib/libsimgrid.so.3.6.2", MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_DELETE_ON_CLOSE, MPI_INFO_NULL, &fh );
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+ buf = (int *)malloc( size * sizeof(int) );
+ buf[0] = rank;
+
+ /* Write to file */
+ MPI_File_write_at( fh, sizeof(int)*rank, buf, 1, MPI_INT, &status );
+ MPI_Get_count( &status, MPI_INT, &count );
+ if (count != 1) {
+ errs++;
+ fprintf( stderr, "Wrong count (%d) on write_at\n", count );fflush(stderr);
+ }
+
+ /* Read nothing (check status) */
+ memset( &status, 0xff, sizeof(MPI_Status) );
+ MPI_File_read_at( fh, sizeof(int)*rank, buf, 0, MPI_INT, &status );
+ MPI_Get_count( &status, MPI_INT, &count );
+ if (count != 0) {
+ errs++;
+ fprintf( stderr, "Count not zero (%d) on read\n", count );fflush(stderr);
+ }
+
+ /* Write nothing (check status) */
+ memset( &status, 0xff, sizeof(MPI_Status) );
+ MPI_File_write_at( fh, sizeof(int)*rank, buf, 0, MPI_INT, &status );
+ if (count != 0) {
+ errs++;
+ fprintf( stderr, "Count not zero (%d) on write\n", count );fflush(stderr);
+ }
+
+ MPI_Barrier( comm );
+
+ for (i=0; i<size; i++) buf[i] = -1;
+ MPI_File_read_at( fh, sizeof(int)*rank, buf, 1, MPI_INT, &status );
+ // if (buf[0] != rank) {
+ // errs++;
+ // fprintf( stderr, "%d: buf = %d\n", rank, buf[0] );fflush(stderr);
+ // }
+
+ free( buf );
+ MPI_File_close( &fh );
+
+ MPI_Finalize();
+ return errs;
}
\ No newline at end of file
-# Test for MPI_File_read and MPI_File_write\r
-! output sort\r
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_io -platform ../../../examples/platforms/storage/remote_io.xml -np 4 --log=xbt_cfg.thres:critical --log=smpi_kernel.thres:warning --log=smpi_mpi.thres:error --log=smpi_io.thres:verbose "--log=root.fmt:[%10.6r]%e(%P@%h)%e%m%n" --cfg=smpi/simulate-computation:0 ${bindir:=.}/io-simple-at\r
-> You requested to use 4 ranks, but there is only 2 processes in your hostfile...\r
-> [rank 0] -> bob\r
-> [rank 1] -> carl\r
-> [rank 2] -> bob\r
-> [rank 3] -> carl\r
-> [ 0.000282] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.000358] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8\r
-> [ 0.000383] (0@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
-> [ 0.000383] (0@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4\r
-> [ 0.000459] (2@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
-> [ 0.000459] (2@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12\r
-> [ 0.000534] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 12\r
-> [ 0.000534] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4\r
-> [ 0.000534] (3@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
-> [ 0.000534] (3@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 16\r
-> [ 0.000534] (1@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
-> [ 0.000534] (1@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 8\r
-> [ 0.000584] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.000599] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8\r
-> [ 0.000634] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 12\r
-> [ 0.000634] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4\r
-> [ 0.000634] (3@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
-> [ 0.000634] (3@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 16\r
-> [ 0.000634] (1@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
-> [ 0.000634] (1@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 8\r
-> [ 0.000685] (0@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
-> [ 0.000685] (0@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4\r
-> [ 0.000700] (2@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
-> [ 0.000700] (2@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12\r
+# Test for MPI_File_read and MPI_File_write
+! output sort
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_io -platform ../../../examples/platforms/storage/remote_io.xml -np 4 --log=xbt_cfg.thres:critical --log=smpi_kernel.thres:warning --log=smpi_mpi.thres:error --log=smpi_io.thres:verbose "--log=root.fmt:(%P@%h)%e%m%n" --cfg=smpi/simulate-computation:0 ${bindir:=.}/io-simple-at
+> You requested to use 4 ranks, but there is only 2 processes in your hostfile...
+> [rank 0] -> bob
+> [rank 1] -> carl
+> [rank 2] -> bob
+> [rank 3] -> carl
+> (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8
+> (0@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4
+> (0@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4
+> (2@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4
+> (2@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12
+> (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 12
+> (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4
+> (3@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4
+> (3@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 16
+> (1@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4
+> (1@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 8
+> (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8
+> (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 12
+> (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4
+> (3@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4
+> (3@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 16
+> (1@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4
+> (1@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 8
+> (0@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4
+> (0@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4
+> (2@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4
+> (2@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12
-#include "mpi.h"\r
-#include <stdio.h>\r
-#include <stdlib.h>\r
-#include <string.h>\r
-#include <memory.h>\r
-\r
-/* Test reading and writing zero bytes (set status correctly) */\r
-\r
-int main( int argc, char *argv[] )\r
-{\r
- int errs = 0;\r
- int size, rank, i, *buf, count;\r
- MPI_File fh;\r
- MPI_Comm comm;\r
- MPI_Status status;\r
- \r
- MPI_Init( &argc, &argv );\r
- \r
- comm = MPI_COMM_WORLD;\r
- MPI_File_open( comm, (char*)"/scratch/lib/libsimgrid.so.3.6.2", MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_DELETE_ON_CLOSE, MPI_INFO_NULL, &fh );\r
- MPI_Comm_size( comm, &size );\r
- MPI_Comm_rank( comm, &rank );\r
- buf = (int *)malloc( size * sizeof(int) );\r
- buf[0] = rank;\r
- \r
- /* Write to file */\r
- MPI_File_seek( fh, sizeof(int)*rank, MPI_SEEK_SET ); \r
- MPI_File_write( fh, buf, 1, MPI_INT, &status );\r
- MPI_Get_count( &status, MPI_INT, &count );\r
- if (count != 1) {\r
- errs++;\r
- fprintf( stderr, "Wrong count (%d) on write\n", count );fflush(stderr);\r
- }\r
-\r
- /* Read nothing (check status) */\r
- memset( &status, 0xff, sizeof(MPI_Status) );\r
- MPI_File_read( fh, buf, 0, MPI_INT, &status );\r
- MPI_Get_count( &status, MPI_INT, &count );\r
- if (count != 0) {\r
- errs++;\r
- fprintf( stderr, "Count not zero (%d) on read\n", count );fflush(stderr);\r
- }\r
-\r
- /* Write nothing (check status) */\r
- memset( &status, 0xff, sizeof(MPI_Status) );\r
- MPI_File_write( fh, buf, 0, MPI_INT, &status );\r
- if (count != 0) {\r
- errs++;\r
- fprintf( stderr, "Count not zero (%d) on write\n", count );fflush(stderr);\r
- }\r
-\r
- MPI_Barrier( comm );\r
-\r
- MPI_File_seek( fh, sizeof(int)*rank, MPI_SEEK_SET );\r
- for (i=0; i<size; i++) buf[i] = -1;\r
- MPI_File_read( fh, buf, 1, MPI_INT, &status );\r
- // if (buf[0] != rank) {\r
- // errs++;\r
- // fprintf( stderr, "%d: buf = %d\n", rank, buf[0] );fflush(stderr);\r
- // }\r
- \r
- free( buf );\r
- MPI_File_close( &fh );\r
- \r
- MPI_Finalize();\r
- return errs;\r
+#include "mpi.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <memory.h>
+
+/* Test reading and writing zero bytes (set status correctly) */
+
+int main( int argc, char *argv[] )
+{
+ int errs = 0;
+ int size, rank, i, *buf, count;
+ MPI_File fh;
+ MPI_Comm comm;
+ MPI_Status status;
+
+ MPI_Init( &argc, &argv );
+
+ comm = MPI_COMM_WORLD;
+ MPI_File_open( comm, (char*)"/scratch/lib/libsimgrid.so.3.6.2", MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_DELETE_ON_CLOSE, MPI_INFO_NULL, &fh );
+ MPI_Comm_size( comm, &size );
+ MPI_Comm_rank( comm, &rank );
+ buf = (int *)malloc( size * sizeof(int) );
+ buf[0] = rank;
+
+ /* Write to file */
+ MPI_File_seek( fh, sizeof(int)*rank, MPI_SEEK_SET );
+ MPI_File_write( fh, buf, 1, MPI_INT, &status );
+ MPI_Get_count( &status, MPI_INT, &count );
+ if (count != 1) {
+ errs++;
+ fprintf( stderr, "Wrong count (%d) on write\n", count );fflush(stderr);
+ }
+
+ /* Read nothing (check status) */
+ memset( &status, 0xff, sizeof(MPI_Status) );
+ MPI_File_read( fh, buf, 0, MPI_INT, &status );
+ MPI_Get_count( &status, MPI_INT, &count );
+ if (count != 0) {
+ errs++;
+ fprintf( stderr, "Count not zero (%d) on read\n", count );fflush(stderr);
+ }
+
+ /* Write nothing (check status) */
+ memset( &status, 0xff, sizeof(MPI_Status) );
+ MPI_File_write( fh, buf, 0, MPI_INT, &status );
+ if (count != 0) {
+ errs++;
+ fprintf( stderr, "Count not zero (%d) on write\n", count );fflush(stderr);
+ }
+
+ MPI_Barrier( comm );
+
+ MPI_File_seek( fh, sizeof(int)*rank, MPI_SEEK_SET );
+ for (i=0; i<size; i++) buf[i] = -1;
+ MPI_File_read( fh, buf, 1, MPI_INT, &status );
+ // if (buf[0] != rank) {
+ // errs++;
+ // fprintf( stderr, "%d: buf = %d\n", rank, buf[0] );fflush(stderr);
+ // }
+
+ free( buf );
+ MPI_File_close( &fh );
+
+ MPI_Finalize();
+ return errs;
}
\ No newline at end of file
-# Test for MPI_File_read and MPI_File_write\r
-! output sort\r
-$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_io -platform ../../../examples/platforms/storage/remote_io.xml -np 4 --log=xbt_cfg.thres:critical --log=smpi_kernel.thres:warning --log=smpi_mpi.thres:error --log=smpi_io.thres:verbose "--log=root.fmt:[%10.6r]%e(%P@%h)%e%m%n" --cfg=smpi/simulate-computation:0 ${bindir:=.}/io-simple\r
-> You requested to use 4 ranks, but there is only 2 processes in your hostfile...\r
-> [rank 0] -> bob\r
-> [rank 1] -> carl\r
-> [rank 2] -> bob\r
-> [rank 3] -> carl\r
-> [ 0.000282] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.000358] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8\r
-> [ 0.000383] (0@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
-> [ 0.000383] (0@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4\r
-> [ 0.000459] (2@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
-> [ 0.000459] (2@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12\r
-> [ 0.000534] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 12\r
-> [ 0.000534] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4\r
-> [ 0.000534] (3@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
-> [ 0.000534] (3@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 16\r
-> [ 0.000534] (1@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4\r
-> [ 0.000534] (1@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 8\r
-> [ 0.000584] (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0\r
-> [ 0.000599] (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8\r
-> [ 0.000634] (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 12\r
-> [ 0.000634] (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4\r
-> [ 0.000634] (3@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
-> [ 0.000634] (3@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 16\r
-> [ 0.000634] (1@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
-> [ 0.000634] (1@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 8\r
-> [ 0.000685] (0@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
-> [ 0.000685] (0@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4\r
-> [ 0.000700] (2@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4\r
-> [ 0.000700] (2@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12\r
+# Test for MPI_File_read and MPI_File_write
+! output sort
+$ ${bindir:=.}/../../../smpi_script/bin/smpirun -map -hostfile ../hostfile_io -platform ../../../examples/platforms/storage/remote_io.xml -np 4 --log=xbt_cfg.thres:critical --log=smpi_kernel.thres:warning --log=smpi_mpi.thres:error --log=smpi_io.thres:verbose "--log=root.fmt:(%P@%h)%e%m%n" --cfg=smpi/simulate-computation:0 ${bindir:=.}/io-simple
+> You requested to use 4 ranks, but there is only 2 processes in your hostfile...
+> [rank 0] -> bob
+> [rank 1] -> carl
+> [rank 2] -> bob
+> [rank 3] -> carl
+> (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8
+> (0@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4
+> (0@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4
+> (2@bob) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4
+> (2@bob) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12
+> (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 12
+> (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4
+> (3@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4
+> (3@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 16
+> (1@carl) Write in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes written, readsize 4 bytes, movesize 4
+> (1@carl) Position after write in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 8
+> (0@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 0
+> (2@bob) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 8
+> (3@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 12
+> (1@carl) Seeking in MPI_File /scratch/lib/libsimgrid.so.3.6.2, setting offset 4
+> (3@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4
+> (3@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 16
+> (1@carl) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4
+> (1@carl) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 8
+> (0@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4
+> (0@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 4
+> (2@bob) Read in MPI_File /scratch/lib/libsimgrid.so.3.6.2, 4 bytes read, readsize 4 bytes, movesize 4
+> (2@bob) Position after read in MPI_File /scratch/lib/libsimgrid.so.3.6.2 : 12