Algorithmique Numérique Distribuée
Public GIT Repository
reduce the use of simcall_process_sleep() -> this_actor::sleep_for()
[simgrid.git] / src / smpi / colls / reduce / reduce-arrival-pattern-aware.cpp
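The headline change in this commit replaces the internal simcall_process_sleep() helper with the public S4U call simgrid::s4u::this_actor::sleep_for() inside the arrival-pattern polling loop. As context for readers unfamiliar with the S4U API, here is a minimal sketch of an actor sleeping in simulated time; the file name, the host name "Tremblay", and the platform file are illustrative assumptions, not part of this commit:

// sleep_for_demo.cpp -- illustrative sketch only, not SimGrid source code.
// Assumes a SimGrid installation and a platform file such as the
// small_platform.xml example shipped with SimGrid.
#include <simgrid/s4u.hpp>

static void poller()
{
  for (int i = 0; i < 5; i++) {
    // Yield 100 microseconds of simulated time between probe attempts,
    // as the patched collective below now does via the S4U API.
    simgrid::s4u::this_actor::sleep_for(0.0001);
  }
}

int main(int argc, char* argv[])
{
  simgrid::s4u::Engine e(&argc, argv);
  e.load_platform(argv[1]); // e.g. small_platform.xml (assumed)
  simgrid::s4u::Actor::create("poller", simgrid::s4u::Host::by_name("Tremblay"), poller);
  e.run();
  return 0;
}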
diff --git a/src/smpi/colls/reduce/reduce-arrival-pattern-aware.cpp b/src/smpi/colls/reduce/reduce-arrival-pattern-aware.cpp
index 277034f..1ff8bd4 100644
--- a/src/smpi/colls/reduce/reduce-arrival-pattern-aware.cpp
+++ b/src/smpi/colls/reduce/reduce-arrival-pattern-aware.cpp
@@ -1,10 +1,10 @@
-/* Copyright (c) 2013-2014. The SimGrid Team.
+/* Copyright (c) 2013-2019. The SimGrid Team.
  * All rights reserved. */

 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */

-#include "../colls_private.h"
+#include "../colls_private.hpp"

 //#include <star-reduction.c>
 int reduce_arrival_pattern_aware_segment_size_in_byte = 8192;
@@ -16,9 +16,10 @@ int reduce_arrival_pattern_aware_segment_size_in_byte = 8192;
 #ifndef MAX_NODE
 #define MAX_NODE 1024
 #endif
-
+namespace simgrid{
+namespace smpi{
 /* Non-topology-specific pipelined linear-reduce function */
-int Coll_reduce_arrival_pattern_aware::reduce(void *buf, void *rbuf,
+int Coll_reduce_arrival_pattern_aware::reduce(const void *buf, void *rbuf,
                                               int count,
                                               MPI_Datatype datatype,
                                               MPI_Op op, int root,
@@ -28,10 +29,6 @@ int Coll_reduce_arrival_pattern_aware::reduce(void *buf, void *rbuf,
   int tag = -COLL_TAG_REDUCE;
   MPI_Status status;
   MPI_Request request;
-  MPI_Request *send_request_array;
-  MPI_Request *recv_request_array;
-  MPI_Status *send_status_array;
-  MPI_Status *recv_status_array;

   MPI_Status temp_status_array[MAX_NODE];
@@ -61,7 +58,7 @@ int Coll_reduce_arrival_pattern_aware::reduce(void *buf, void *rbuf,
   /* use for buffer offset for sending and receiving data = segment size in byte */
   int increment = segment * extent;
-  /* if the input size is not divisible by segment size =>
+  /* if the input size is not divisible by segment size =>
      the small remainder will be done with native implementation */
   int remainder = count % segment;
@@ -71,8 +68,7 @@ int Coll_reduce_arrival_pattern_aware::reduce(void *buf, void *rbuf,
     already_received[i] = 0;
   }
-  char *tmp_buf;
-  tmp_buf = (char *) smpi_get_tmp_sendbuffer(count * extent);
+  unsigned char* tmp_buf = smpi_get_tmp_sendbuffer(count * extent);
   Request::sendrecv(buf, count, datatype, rank, tag, rbuf, count, datatype, rank,
                     tag, comm, &status);
@@ -89,11 +85,10 @@ int Coll_reduce_arrival_pattern_aware::reduce(void *buf, void *rbuf,
     for (i = 1; i < size; i++) {
       if (already_received[i] == 0) {
-        Request::iprobe(i, MPI_ANY_TAG, comm, &flag_array[i],
-                        MPI_STATUSES_IGNORE);
-        simcall_process_sleep(0.0001);
+        Request::iprobe(i, MPI_ANY_TAG, comm, &flag_array[i], MPI_STATUSES_IGNORE);
+        simgrid::s4u::this_actor::sleep_for(0.0001);
+      }
     }
   }
-  }

   header_index = 0;
   /* recv 1-byte message */
@@ -189,14 +184,10 @@ int Coll_reduce_arrival_pattern_aware::reduce(void *buf, void *rbuf,
   else {
     //    printf("node %d start\n",rank);
-    send_request_array =
-        (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
-    recv_request_array =
-        (MPI_Request *) xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
-    send_status_array =
-        (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
-    recv_status_array =
-        (MPI_Status *) xbt_malloc((size + pipe_length) * sizeof(MPI_Status));
+    MPI_Request* send_request_array = new MPI_Request[size + pipe_length];
+    MPI_Request* recv_request_array = new MPI_Request[size + pipe_length];
+    MPI_Status* send_status_array = new MPI_Status[size + pipe_length];
+    MPI_Status* recv_status_array = new MPI_Status[size + pipe_length];

     if (rank == 0) {
       sent_count = 0;
@@ -318,13 +309,10 @@ int Coll_reduce_arrival_pattern_aware::reduce(void *buf, void *rbuf,
     }
   } /* non-root */
-
-
-
-    free(send_request_array);
-    free(recv_request_array);
-    free(send_status_array);
-    free(recv_status_array);
+    delete[] send_request_array;
+    delete[] recv_request_array;
+    delete[] send_status_array;
+    delete[] recv_status_array;

     //printf("node %d done\n",rank);
   } /* end pipeline */
@@ -344,12 +332,13 @@ int Coll_reduce_arrival_pattern_aware::reduce(void *buf, void *rbuf,
   /* when count is not divisible by block size, use default BCAST for the remainder */
   if ((remainder != 0) && (count > segment)) {
-    Coll_reduce_default::reduce((char *)buf + (pipe_length * increment),
-                                (char *)rbuf + (pipe_length * increment), remainder, datatype, op, root,
-                                comm);
+    Coll_reduce_default::reduce((char*)buf + (pipe_length * increment), (char*)rbuf + (pipe_length * increment),
+                                remainder, datatype, op, root, comm);
   }
   smpi_free_tmp_buffer(tmp_buf);
   return MPI_SUCCESS;
 }
+}
+}
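Beyond the sleep call, the diff also modernizes the function in passing: the input buffer becomes const-correct (const void *buf), tmp_buf is typed as unsigned char* straight from smpi_get_tmp_sendbuffer(), the code moves into the simgrid::smpi namespace, and the four request/status arrays switch from xbt_malloc()/free() to new[]/delete[]. Below is a hedged before/after sketch of that allocation pattern using plain MPI types outside of SimGrid; the function and variable names are illustrative, not taken from the patch:

// allocation_demo.cpp -- illustrative sketch only, not SimGrid source code.
#include <mpi.h>

void allocate_and_release(int size, int pipe_length)
{
  // Before (removed by this diff): C-style cast plus xbt_malloc()/free():
  //   MPI_Request* reqs = (MPI_Request*)xbt_malloc((size + pipe_length) * sizeof(MPI_Request));
  //   ...
  //   free(reqs);

  // After (introduced by this diff): typed C++ array new, matching delete[].
  MPI_Request* send_request_array = new MPI_Request[size + pipe_length];
  MPI_Status*  send_status_array  = new MPI_Status[size + pipe_length];

  // ... post non-blocking operations and wait on them here ...

  delete[] send_request_array;
  delete[] send_status_array;
}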