/* PMPI User level calls */
-int PMPI_Init(int *argc, char ***argv)
+int PMPI_Init(int*, char***)
{
xbt_assert(simgrid::s4u::Engine::is_initialized(),
"Your MPI program was not properly initialized. The easiest is to use smpirun to start it.");
return smpi_process()->info_env();
}
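
// The recurring pattern in this patch: parameters that the MPI signature
// requires but the simulated implementation never reads are left unnamed, or
// keep their name in a /*comment*/ for documentation, so -Wunused-parameter
// stays quiet. A minimal standalone sketch of the equivalent idioms (the
// function names here are illustrative):
static int unnamed(int, char**) { return 0; }                      // name omitted entirely
static int commented(int /*argc*/, char** /*argv*/) { return 0; }  // name kept as a comment
static int casted(int argc, char** argv) { (void)argc; (void)argv; return 0; } // C-style void cast
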
-void smpi_process_init(int *argc, char ***argv){
+void smpi_process_init(int*, char***)
+{
simgrid::smpi::ActorExt::init();
}
{
smpi_fortran_entry_point_type entry_point_fortran = (smpi_fortran_entry_point_type)dlsym(handle, "user_main_");
if (entry_point_fortran != nullptr) {
- return [entry_point_fortran](int argc, char** argv) {
+ return [entry_point_fortran](int, char**) {
entry_point_fortran();
return 0;
};
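
// A minimal sketch of the adapter above: dlsym() hands back a no-argument
// Fortran entry point, and the lambda wraps it into the int(int, char**)
// shape expected of a main function (wrap_fortran_main is illustrative, not
// an SMPI symbol):
#include <functional>
using fortran_entry_t = void (*)();
std::function<int(int, char**)> wrap_fortran_main(fortran_entry_t f)
{
  return [f](int, char**) {
    f();      // the Fortran main takes no arguments
    return 0; // so the adapter supplies the return code
  };
}
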
info_=info;
}
-MPI_Comm Comm::split_type(int type, int key, MPI_Info info)
+MPI_Comm Comm::split_type(int type, int /*key*/, MPI_Info)
{
if(type != MPI_COMM_TYPE_SHARED){
return MPI_COMM_NULL;
name_ = xbt_strdup(name);
}
-int Datatype::pack(void* inbuf, int incount, void* outbuf, int outcount, int* position,MPI_Comm comm){
+int Datatype::pack(void* inbuf, int incount, void* outbuf, int outcount, int* position, MPI_Comm)
+{
  if (outcount - *position < incount * static_cast<int>(size_))
return MPI_ERR_OTHER;
Datatype::copy(inbuf, incount, this, static_cast<char*>(outbuf) + *position, outcount, MPI_CHAR);
return MPI_SUCCESS;
}
-int Datatype::unpack(void* inbuf, int insize, int* position, void* outbuf, int outcount,MPI_Comm comm){
+int Datatype::unpack(void* inbuf, int insize, int* position, void* outbuf, int outcount, MPI_Comm)
+{
  if (outcount * static_cast<int>(size_) > insize)
return MPI_ERR_OTHER;
Datatype::copy(static_cast<char*>(inbuf) + *position, insize, MPI_CHAR, outbuf, outcount, this);
return MPI_SUCCESS;
}
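
// Caller-side sketch of the position bookkeeping implemented above, through
// the standard MPI_Pack/MPI_Unpack entry points that forward to these methods:
#include <mpi.h>
void pack_roundtrip(MPI_Comm comm)
{
  int value = 42, copy = 0, pos = 0;
  char buffer[64];
  MPI_Pack(&value, 1, MPI_INT, buffer, 64, &pos, comm); // pos advances by the packed size
  pos = 0;
  MPI_Unpack(buffer, 64, &pos, &copy, 1, MPI_INT, comm);
}
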
- int File::del(char *filename, MPI_Info info){
+ int File::del(char* filename, MPI_Info)
+ {
  // open the file with MPI_MODE_DELETE_ON_CLOSE and then close it
  File* f = new File(MPI_COMM_SELF, filename, MPI_MODE_DELETE_ON_CLOSE | MPI_MODE_RDWR, nullptr);
close(&f);
return MPI_SUCCESS;
}
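
// The comment above is the whole trick: deletion is an open with
// MPI_MODE_DELETE_ON_CLOSE followed by a close. Caller-side sketch, standard
// MPI API (the file name is illustrative):
#include <mpi.h>
int remove_checkpoint()
{
  char name[] = "checkpoint.dat"; // illustrative file name
  return MPI_File_delete(name, MPI_INFO_NULL);
}
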
- int File::read(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
+ int File::read(MPI_File fh, void* /*buf*/, int count, MPI_Datatype datatype, MPI_Status* status)
+ {
  // get the position first, as we may be doing non-contiguous reads and the position would otherwise be updated incorrectly
MPI_Offset position = fh->file_->tell();
MPI_Offset movesize = datatype->get_extent()*count;
return ret;
}
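
// Caller-side sketch (standard MPI API): the simulated read advances the file
// pointer and fills the status, which is presumably why buf goes unused above.
#include <mpi.h>
void read_ints(MPI_File fh, int* buf, int n)
{
  MPI_Status status;
  MPI_File_read(fh, buf, n, MPI_INT, &status);
}
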
- int File::write(MPI_File fh, void *buf, int count, MPI_Datatype datatype, MPI_Status *status){
+ int File::write(MPI_File fh, void* /*buf*/, int count, MPI_Datatype datatype, MPI_Status* status)
+ {
  // get the position first, as we may be doing non-contiguous writes and the position would otherwise be updated incorrectly
MPI_Offset position = fh->file_->tell();
MPI_Offset movesize = datatype->get_extent()*count;
return &attributes_;
};
-
-template <> int Keyval::call_deleter<Comm>(Comm* obj, smpi_key_elem elem, int keyval, void * value, int* flag){
+template <> int Keyval::call_deleter<Comm>(Comm* obj, smpi_key_elem elem, int keyval, void* value, int* /*flag*/)
+{
int ret = MPI_SUCCESS;
if(elem->delete_fn.comm_delete_fn!=MPI_NULL_DELETE_FN)
ret = elem->delete_fn.comm_delete_fn(obj, keyval, value, elem->extra_state);
return ret;
}
-template <> int Keyval::call_deleter<Win>(Win* obj, smpi_key_elem elem, int keyval, void * value, int* flag){
+template <> int Keyval::call_deleter<Win>(Win* obj, smpi_key_elem elem, int keyval, void* value, int* /*flag*/)
+{
int ret = MPI_SUCCESS;
if(elem->delete_fn.win_delete_fn!=MPI_NULL_DELETE_FN)
ret = elem->delete_fn.win_delete_fn(obj, keyval, value, elem->extra_state);
return ret;
}
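
// How these deleters are reached, sketched with the standard attribute API
// (free_attr and register_keyval are illustrative, not SMPI symbols):
#include <mpi.h>
#include <cstdlib>
static int free_attr(MPI_Comm, int /*keyval*/, void* value, void* /*extra_state*/)
{
  std::free(value); // callback ends up stored in elem->delete_fn.comm_delete_fn
  return MPI_SUCCESS;
}
static int register_keyval()
{
  int keyval;
  MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, free_attr, &keyval, nullptr);
  return keyval; // deleting the attribute or freeing the comm triggers free_attr
}
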
-template <> int Keyval::call_deleter<Datatype>(Datatype* obj, smpi_key_elem elem, int keyval, void * value, int* flag){
+template <>
+int Keyval::call_deleter<Datatype>(Datatype* obj, smpi_key_elem elem, int keyval, void* value, int* /*flag*/)
+{
int ret = MPI_SUCCESS;
if(elem->delete_fn.type_delete_fn!=MPI_NULL_DELETE_FN)
ret = elem->delete_fn.type_delete_fn(obj, keyval, value, elem->extra_state);
/* reorder is ignored: we do not know what the consequences of a naive reordering would be, nor do we see the
 * point of reordering here. */
-Topo_Cart::Topo_Cart(MPI_Comm comm_old, int ndims, int dims[], int periods[], int reorder, MPI_Comm *comm_cart) : Topo_Cart(ndims) {
+Topo_Cart::Topo_Cart(MPI_Comm comm_old, int ndims, int dims[], int periods[], int /*reorder*/, MPI_Comm* comm_cart)
+ : Topo_Cart(ndims)
+{
MPI_Group newGroup;
MPI_Group oldGroup;
return res;
}
-int Topo_Cart::coords(int rank, int maxdims, int coords[]) {
+int Topo_Cart::coords(int rank, int /*maxdims*/, int coords[])
+{
int nnodes = nnodes_;
  for (int i = 0; i < ndims_; i++) {
    nnodes = nnodes / dims_[i];
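
// Standalone sketch of the decomposition above (assuming row-major rank
// ordering, as in MPI_Cart_coords): peel off one dimension at a time.
#include <vector>
std::vector<int> cart_coords(int rank, const std::vector<int>& dims)
{
  int nnodes = 1;
  for (int d : dims)
    nnodes *= d;                 // total number of grid nodes
  std::vector<int> coords(dims.size());
  for (size_t i = 0; i < dims.size(); i++) {
    nnodes /= dims[i];           // size of the remaining sub-grid
    coords[i] = rank / nnodes;   // coordinate along dimension i
    rank = rank % nnodes;        // the remainder addresses that sub-grid
  }
  return coords;
}
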
cleanup_attr<Win>();
}
-int Win::attach (void *base, MPI_Aint size){
+int Win::attach(void* /*base*/, MPI_Aint size)
+{
if (not(base_ == MPI_BOTTOM || base_ == 0))
return MPI_ERR_ARG;
  base_ = 0; // the actual address will be given in the RMA calls, as the displacement.
return MPI_SUCCESS;
}
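
// Caller-side sketch of the dynamic-window path that reaches Win::attach and
// Win::detach (standard MPI API): memory is attached after window creation,
// and target addresses travel as displacements in the RMA calls, exactly as
// the comment above says.
#include <mpi.h>
void dynamic_window_demo(MPI_Comm comm, void* buf, MPI_Aint len)
{
  MPI_Win win;
  MPI_Win_create_dynamic(MPI_INFO_NULL, comm, &win);
  MPI_Win_attach(win, buf, len);
  MPI_Win_detach(win, buf);
  MPI_Win_free(&win);
}
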
-int Win::detach (void *base){
+int Win::detach(void* /*base*/)
+{
base_=MPI_BOTTOM;
size_=-1;
return MPI_SUCCESS;
return MPI_SUCCESS;
}
-int Win::get_accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, void *result_addr,
- int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp, int target_count,
- MPI_Datatype target_datatype, MPI_Op op, MPI_Request* request){
-
+int Win::get_accumulate(void* origin_addr, int origin_count, MPI_Datatype origin_datatype, void* result_addr,
+ int result_count, MPI_Datatype result_datatype, int target_rank, MPI_Aint target_disp,
+ int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Request*)
+{
//get sender pointer
MPI_Win send_win = connected_wins_[target_rank];
return MPI_SUCCESS;
}
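
// Caller-side sketch (standard MPI API) of the operation implemented above,
// used here as a fetch-and-add: read the target value and accumulate ours in
// one atomic step (the passive-target lock supplies the access epoch).
#include <mpi.h>
void fetch_and_add(MPI_Win win, int target_rank, int* mine, int* previous)
{
  MPI_Win_lock(MPI_LOCK_SHARED, target_rank, 0, win);
  MPI_Get_accumulate(mine, 1, MPI_INT, previous, 1, MPI_INT,
                     target_rank, 0 /*disp*/, 1, MPI_INT, MPI_SUM, win);
  MPI_Win_unlock(target_rank, win);
}
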
-int Win::start(MPI_Group group, int assert){
+int Win::start(MPI_Group group, int /*assert*/)
+{
  /* From MPI Forum advice:
The call to MPI_WIN_COMPLETE does not return until the put call has completed at the origin; and the target window
will be accessed by the put operation only after the call to MPI_WIN_START has matched a call to MPI_WIN_POST by
return MPI_SUCCESS;
}
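
// Sketch of the handshake the comment above describes (standard MPI
// generalized active-target synchronization, two-rank job; the rank split is
// illustrative):
#include <mpi.h>
void one_sided_put(MPI_Win win, MPI_Group peer, int* value, int rank)
{
  if (rank == 0) {                // origin
    MPI_Win_start(peer, 0, win);  // matched by the target's MPI_Win_post
    MPI_Put(value, 1, MPI_INT, 1, 0, 1, MPI_INT, win);
    MPI_Win_complete(win);        // returns once the put completed at the origin
  } else {                        // target
    MPI_Win_post(peer, 0, win);   // exposes the window
    MPI_Win_wait(win);            // the window contents are now valid
  }
}
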
-int Win::post(MPI_Group group, int assert){
+int Win::post(MPI_Group group, int /*assert*/)
+{
//let's make a synchronous send here
int i = 0;
int j = 0;
return MPI_SUCCESS;
}
-int Win::lock(int lock_type, int rank, int assert){
+int Win::lock(int lock_type, int rank, int /*assert*/)
+{
MPI_Win target_win = connected_wins_[rank];
  if ((lock_type == MPI_LOCK_EXCLUSIVE && target_win->mode_ != MPI_LOCK_SHARED) || target_win->mode_ == MPI_LOCK_EXCLUSIVE) {