Algorithmique Numérique Distribuée
Public GIT Repository
projects
/
simgrid.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Merge branch 'xbt_random' into 'master'
[simgrid.git]
/
src
/
smpi
/
colls
/
bcast
/
bcast-SMP-linear.cpp
diff --git a/src/smpi/colls/bcast/bcast-SMP-linear.cpp b/src/smpi/colls/bcast/bcast-SMP-linear.cpp
index a63e905..ea576fe 100644 (file)
--- a/src/smpi/colls/bcast/bcast-SMP-linear.cpp
+++ b/src/smpi/colls/bcast/bcast-SMP-linear.cpp
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017. The SimGrid Team.
+/* Copyright (c) 2013-2019. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
@@ -9,15 +9,13 @@
int bcast_SMP_linear_segment_byte = 8192;
namespace simgrid{
namespace smpi{
int bcast_SMP_linear_segment_byte = 8192;
namespace simgrid{
namespace smpi{
-int Coll_bcast_SMP_linear::bcast(void *buf, int count,
-                                 MPI_Datatype datatype, int root,
-                                 MPI_Comm comm)
+int bcast__SMP_linear(void *buf, int count,
+                      MPI_Datatype datatype, int root,
+                      MPI_Comm comm)
{
int tag = COLL_TAG_BCAST;
MPI_Status status;
MPI_Request request;
{
int tag = COLL_TAG_BCAST;
MPI_Status status;
MPI_Request request;
- MPI_Request *request_array;
- MPI_Status *status_array;
int rank, size;
int i;
MPI_Aint extent;
int rank, size;
int i;
MPI_Aint extent;
@@ -33,8 +31,7 @@ int Coll_bcast_SMP_linear::bcast(void *buf, int count,
num_core = comm->get_intra_comm()->size();
}else{
//implementation buggy in this case
num_core = comm->get_intra_comm()->size();
}else{
//implementation buggy in this case
- return Coll_bcast_mpich::bcast( buf , count, datatype,
- root, comm);
+ return bcast__mpich(buf, count, datatype, root, comm);
}
int segment = bcast_SMP_linear_segment_byte / extent;
}
int segment = bcast_SMP_linear_segment_byte / extent;
@@ -54,7 +51,7 @@ int Coll_bcast_SMP_linear::bcast(void *buf, int count,
// call native when MPI communication size is too small
if (size <= num_core) {
XBT_WARN("MPI_bcast_SMP_linear use default MPI_bcast.");
// call native when MPI communication size is too small
if (size <= num_core) {
XBT_WARN("MPI_bcast_SMP_linear use default MPI_bcast.");
-    Coll_bcast_default::bcast(buf, count, datatype, root, comm);
+    bcast__default(buf, count, datatype, root, comm);
return MPI_SUCCESS;
}
// if root is not zero send to rank zero first
return MPI_SUCCESS;
}
// if root is not zero send to rank zero first
@@ -99,10 +96,8 @@ int Coll_bcast_SMP_linear::bcast(void *buf, int count,
}
// pipeline bcast
else {
}
// pipeline bcast
else {
- request_array =
- (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
- status_array =
- (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
+ MPI_Request* request_array = new MPI_Request[size + pipe_length];
+ MPI_Status* status_array = new MPI_Status[size + pipe_length];
// case ROOT of each SMP
if (rank % num_core == 0) {
// case ROOT of each SMP
if (rank % num_core == 0) {
@@ -164,8 +159,8 @@ int Coll_bcast_SMP_linear::bcast(void *buf, int count,
}
}
}
}
}
}
-    free(request_array);
-    free(status_array);
+    delete[] request_array;
+    delete[] status_array;
}
// when count is not divisible by block size, use default BCAST for the remainder
}
// when count is not divisible by block size, use default BCAST for the remainder