* under the terms of the license (GNU LGPL) which comes with this package. */
#include "simgrid/msg.h"
-#include "xbt/fifo.h"
+#include "xbt/dynar.h"
#include <math.h>
+
XBT_LOG_NEW_DEFAULT_CATEGORY(msg_pastry, "Messages specific for this msg example");
/* TODO: *
int routing_table[LEVELS_COUNT][LEVEL_SIZE];
int ready;
msg_comm_t comm_receive; // current communication to receive
- xbt_fifo_t pending_tasks;
+ xbt_dynar_t pending_tasks;
} s_node_t, *node_t;
typedef struct s_state {
typedef struct s_task_data {
e_task_type_t type; // type of task
- int sender_id; // id paramater (used by some types of tasks)
+ int sender_id; // id parameter (used by some types of tasks)
//int request_finger; // finger parameter (used by some types of tasks)
int answer_id; // answer (used by some types of tasks)
char answer_to[MAILBOX_NAME_SIZE]; // mailbox to send an answer to (if any)
-/* Frees the memory used by a task and destroy it */
+/* Frees the memory used by a task and destroys it */
static void task_free(void* task)
{
- // TODO add a parameter data_free_function to MSG_task_create?
if(task != NULL){
s_task_data_t* data = (s_task_data_t*)MSG_task_get_data(task);
xbt_free(data->state);
//rare case
int dist = abs(node->id - dest);
- int i;
- for (i=l; i<LEVELS_COUNT; i++) {
+ for (int i=l; i<LEVELS_COUNT; i++) {
for (int j=0; j<LEVEL_SIZE; j++) {
res = node->routing_table[i][j];
if (res!=-1 && abs(res - dest)<dist)
}
}
- for (i=0; i<NEIGHBORHOOD_SIZE; i++) {
+ for (int i=0; i<NEIGHBORHOOD_SIZE; i++) {
res = node->neighborhood_set[i];
if (res!=-1 && shl(res, dest)>=l && abs(res - dest)<dist)
return res;
}
- for (i=0; i<NAMESPACE_SIZE; i++) {
+ for (int i=0; i<NAMESPACE_SIZE; i++) {
res = node->namespace_set[i];
if (res!=-1 && shl(res, dest)>=l && abs(res - dest)<dist)
return res;
/* Get the corresponding state of a node */
static state_t node_get_state(node_t node) {
- int i;
state_t state = xbt_new0(s_state_t,1);
state->id = node->id;
- for (i=0; i<NEIGHBORHOOD_SIZE; i++)
+ for (int i=0; i<NEIGHBORHOOD_SIZE; i++)
state->neighborhood_set[i] = node->neighborhood_set[i];
- for (i=0; i<LEVELS_COUNT; i++)
+ for (int i=0; i<LEVELS_COUNT; i++)
for (int j=0; j<LEVEL_SIZE; j++)
state->routing_table[i][j] = node->routing_table[i][j];
- for (i=0; i<NAMESPACE_SIZE; i++)
+ for (int i=0; i<NAMESPACE_SIZE; i++)
state->namespace_set[i] = node->namespace_set[i];
return state;
}
-/* Print the node id */
static void print_node_id(node_t node) {
XBT_INFO(" Id: %i '%08x' ", node->id, node->id);
}
-/* * Print the node neighborhood set */
static void print_node_neighborood_set(node_t node) {
XBT_INFO(" Neighborhood:");
for (int i=0; i<NEIGHBORHOOD_SIZE; i++)
XBT_INFO(" %08x", node->neighborhood_set[i]);
}
-/* Print the routing table */
static void print_node_routing_table(node_t node) {
XBT_INFO(" Routing table:");
for (int i=0; i<LEVELS_COUNT; i++){
XBT_INFO(" %08x ", node->routing_table[i][j]);
}
}
-
/* Print the node namespace set */
static void print_node_namespace_set(node_t node) {
XBT_INFO(" Namespace:");
-  // If the node is not ready, keep the task for later
+  // If the node is not ready, keep the task for later
if (node->ready != 0 && !(type==TASK_JOIN_LAST_REPLY || type==TASK_JOIN_REPLY)) {
XBT_DEBUG("Task pending %i", type);
- xbt_fifo_push(node->pending_tasks, task);
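+    // xbt_dynar_push() copies the element it is given, hence the &task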
+ xbt_dynar_push(node->pending_tasks, &task);
return;
}
switch (type) {
-  // if the node is ready, do all the pending tasks and send update to known nodes
+  // if the node is ready, handle all the pending tasks and send updates to known nodes
if (node->ready==0) {
XBT_DEBUG("Node %i is ready!!!", node->id);
-
- while(xbt_fifo_size(node->pending_tasks))
- handle_task(node, xbt_fifo_pop(node->pending_tasks));
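+  // xbt_dynar_shift() removes from the front, so pending tasks are handled in arrival order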
+  while(xbt_dynar_length(node->pending_tasks)){
+    msg_task_t pending; // do not shadow the enclosing 'task', which is freed below
+    xbt_dynar_shift(node->pending_tasks, &pending);
+    handle_task(node, pending);
+  }
for (i=0; i<NAMESPACE_SIZE; i++) {
j = node->namespace_set[i];
task_free(task);
}
-/** \brief Initializes the current node as the first one of the system.
- * \param node the current node
- */
-static void create(node_t node){
- node->ready = 0;
- XBT_DEBUG("Create a new Pastry ring...");
-}
-
/* Join the ring */
static int join(node_t node){
task_data_t req_data = xbt_new0(s_task_data_t,1);
node.id = xbt_str_parse_int(argv[1], "Invalid ID: %s");
node.known_id = -1;
node.ready = -1;
- node.pending_tasks = xbt_fifo_new();
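+  // dynar of msg_task_t handles; no element free function, since queued tasks are freed once handled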
+ node.pending_tasks = xbt_dynar_new(sizeof(msg_task_t), NULL);
get_mailbox(node.id, node.mailbox);
XBT_DEBUG("New node with id %s (%08x)", node.mailbox, node.id);
-
- int i;
- for (i=0; i<LEVELS_COUNT; i++){
+
+ for (int i=0; i<LEVELS_COUNT; i++){
int d = domain(node.id, i);
for (int j=0; j<LEVEL_SIZE; j++)
node.routing_table[i][j] = (d==j) ? node.id : -1;
}
- for (i=0; i<NEIGHBORHOOD_SIZE; i++)
+ for (int i=0; i<NEIGHBORHOOD_SIZE; i++)
node.neighborhood_set[i] = -1;
- for (i=0; i<NAMESPACE_SIZE; i++)
+ for (int i=0; i<NAMESPACE_SIZE; i++)
node.namespace_set[i] = -1;
if (argc == 3) { // first ring
XBT_DEBUG("Hey! Let's create the system.");
deadline = xbt_str_parse_double(argv[2], "Invalid deadline: %s");
- create(&node);
+ node.ready = 0;
+ XBT_DEBUG("Create a new Pastry ring...");
join_success = 1;
- }
- else {
+ } else {
node.known_id = xbt_str_parse_int(argv[2], "Invalid known ID: %s");
double sleep_time = xbt_str_parse_double(argv[3], "Invalid sleep time: %s");
deadline = xbt_str_parse_double(argv[4], "Invalid deadline: %s");
XBT_DEBUG("Failed to receive a task. Nevermind.");
MSG_comm_destroy(node.comm_receive);
node.comm_receive = NULL;
- }
- else {
+ } else {
// the task was successfully received
MSG_comm_destroy(node.comm_receive);
node.comm_receive = NULL;
}
}
- xbt_free(node.pending_tasks);
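+  // without an element free function, xbt_dynar_free() only releases the dynar's own storage (and resets the pointer)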
+ xbt_dynar_free(&node.pending_tasks);
return 1;
}
* under the terms of the license (GNU LGPL) which comes with this package. */
#include "private.h"
-#include <stdio.h>
#include <xbt.h>
#include <xbt/replay.h>
#include <unordered_map>
//allocate a single buffer for all recv
void* smpi_get_tmp_recvbuffer(int size){
if (!smpi_process_get_replaying())
- return xbt_malloc(size);
+ return xbt_malloc(size);
if (recvbuffer_size<size){
recvbuffer=static_cast<char*>(xbt_realloc(recvbuffer,size));
recvbuffer_size=size;
/* Helper function */
static double parse_double(const char *string)
{
- double value;
char *endptr;
- value = strtod(string, &endptr);
+ double value = strtod(string, &endptr);
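+  // strtod() sets endptr just past the parsed number; any leftover character means a malformed field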
if (*endptr != '\0')
THROWF(unknown_error, 0, "%s is not a double", string);
return value;
static MPI_Datatype decode_datatype(const char *const action)
{
-// Declared datatypes,
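+  // map the datatype id found in the trace to the corresponding MPI datatype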
switch(atoi(action)) {
case 0:
MPI_CURRENT_TYPE=MPI_DOUBLE;
return MPI_CURRENT_TYPE;
}
-
const char* encode_datatype(MPI_Datatype datatype, int* known)
{
//default type for output is set to MPI_BYTE
// MPI_DEFAULT_TYPE is not set for output, use directly MPI_BYTE
if(known!=nullptr)
*known=1;
- if (datatype==MPI_BYTE){
+ if (datatype==MPI_BYTE)
return "";
- }
if(datatype==MPI_DOUBLE)
return "0";
if(datatype==MPI_INT)
static void action_comm_size(const char *const *action)
{
- double clock = smpi_process_simulated_elapsed();
-
communicator_size = parse_double(action[2]);
- log_timed_action (action, clock);
+ log_timed_action (action, smpi_process_simulated_elapsed());
}
static void action_comm_split(const char *const *action)
{
- double clock = smpi_process_simulated_elapsed();
-
- log_timed_action (action, clock);
+ log_timed_action (action, smpi_process_simulated_elapsed());
}
static void action_comm_dup(const char *const *action)
{
- double clock = smpi_process_simulated_elapsed();
-
- log_timed_action (action, clock);
+ log_timed_action (action, smpi_process_simulated_elapsed());
}
static void action_compute(const char *const *action)
extra->dst = dst_traced;
extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE, nullptr);
TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra);
- if (!TRACE_smpi_view_internals()) {
+ if (!TRACE_smpi_view_internals())
TRACE_smpi_send(rank, rank, dst_traced, 0, size*smpi_datatype_size(MPI_CURRENT_TYPE));
- }
smpi_mpi_send(nullptr, size, MPI_CURRENT_TYPE, to , 0, MPI_COMM_WORLD);
int to = atoi(action[2]);
double size=parse_double(action[3]);
double clock = smpi_process_simulated_elapsed();
- MPI_Request request;
if(action[4])
MPI_CURRENT_TYPE=decode_datatype(action[4]);
extra->dst = dst_traced;
extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE, nullptr);
TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra);
- if (!TRACE_smpi_view_internals()) {
+ if (!TRACE_smpi_view_internals())
TRACE_smpi_send(rank, rank, dst_traced, 0, size*smpi_datatype_size(MPI_CURRENT_TYPE));
- }
- request = smpi_mpi_isend(nullptr, size, MPI_CURRENT_TYPE, to, 0,MPI_COMM_WORLD);
+ MPI_Request request = smpi_mpi_isend(nullptr, size, MPI_CURRENT_TYPE, to, 0,MPI_COMM_WORLD);
TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
request->send = 1;
int from = atoi(action[2]);
double size=parse_double(action[3]);
double clock = smpi_process_simulated_elapsed();
- MPI_Request request;
if(action[4])
MPI_CURRENT_TYPE=decode_datatype(action[4]);
size=status.count;
}
- request = smpi_mpi_irecv(nullptr, size, MPI_CURRENT_TYPE, from, 0, MPI_COMM_WORLD);
+ MPI_Request request = smpi_mpi_irecv(nullptr, size, MPI_CURRENT_TYPE, from, 0, MPI_COMM_WORLD);
TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
request->recv = 1;
CHECK_ACTION_PARAMS(action, 0, 0)
double clock = smpi_process_simulated_elapsed();
MPI_Status status;
- int flag = true;
MPI_Request request = get_reqq_self()->back();
get_reqq_self()->pop_back();
extra->type=TRACING_TEST;
TRACE_smpi_testing_in(rank, extra);
- flag = smpi_mpi_test(&request, &status);
+ int flag = smpi_mpi_test(&request, &status);
XBT_DEBUG("MPI_Test result: %d", flag);
-  /* push back request in vector to be caught by a subsequent wait. if the test did succeed, the request is now nullptr.*/
+  /* Push the request back in the vector to be caught by a subsequent wait. If the test succeeded, the request is now nullptr. */
extra->type = TRACING_WAITALL;
extra->send_size=count_requests;
TRACE_smpi_ptp_in(rank_traced, -1, -1, __FUNCTION__,extra);
- int* recvs_snd= xbt_new0(int,count_requests);
- int* recvs_rcv= xbt_new0(int,count_requests);
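+  // C99-style VLAs (a GNU extension in C++) replace the heap buffers, so the xbt_free calls below go away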
+ int recvs_snd[count_requests];
+ int recvs_rcv[count_requests];
unsigned int i=0;
for (auto req : *(get_reqq_self())){
if (req && req->recv){
if (recvs_snd[i]!=-100)
TRACE_smpi_recv(rank_traced, recvs_snd[i], recvs_rcv[i],0);
}
- xbt_free(recvs_rcv);
- xbt_free(recvs_snd);
TRACE_smpi_ptp_out(rank_traced, -1, -1, __FUNCTION__);
}
log_timed_action (action, clock);
int comm_size = smpi_comm_size(MPI_COMM_WORLD);
int send_size = parse_double(action[2]);
int recv_size = parse_double(action[3]);
- MPI_Datatype MPI_CURRENT_TYPE2;
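+  // default the receive datatype up front so the else branches below only have to set MPI_CURRENT_TYPE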
+ MPI_Datatype MPI_CURRENT_TYPE2 = MPI_DEFAULT_TYPE;
if(action[4] && action[5]) {
MPI_CURRENT_TYPE=decode_datatype(action[4]);
}
else{
MPI_CURRENT_TYPE=MPI_DEFAULT_TYPE;
- MPI_CURRENT_TYPE2=MPI_DEFAULT_TYPE;
}
void *send = smpi_get_tmp_sendbuffer(send_size*comm_size* smpi_datatype_size(MPI_CURRENT_TYPE));
static void action_gather(const char *const *action) {
-  /* The structure of the gather action for the rank 0 (total 4 processes) is the following:
+  /* The structure of the gather action for rank 0 (4 processes total) is the following:
- 0 gather 68 68 0 0 0
-
- where:
- 1) 68 is the sendcounts
- 2) 68 is the recvcounts
- 3) 0 is the root node
- 4) 0 is the send datatype id, see decode_datatype()
- 5) 0 is the recv datatype id, see decode_datatype()
+ 0 gather 68 68 0 0 0
+ where:
+ 1) 68 is the sendcounts
+ 2) 68 is the recvcounts
+ 3) 0 is the root node
+ 4) 0 is the send datatype id, see decode_datatype()
+ 5) 0 is the recv datatype id, see decode_datatype()
*/
CHECK_ACTION_PARAMS(action, 2, 3)
double clock = smpi_process_simulated_elapsed();
int comm_size = smpi_comm_size(MPI_COMM_WORLD);
int send_size = parse_double(action[2]);
int recv_size = parse_double(action[3]);
- MPI_Datatype MPI_CURRENT_TYPE2;
+ MPI_Datatype MPI_CURRENT_TYPE2 = MPI_DEFAULT_TYPE;
if(action[4] && action[5]) {
MPI_CURRENT_TYPE=decode_datatype(action[5]);
MPI_CURRENT_TYPE2=decode_datatype(action[6]);
} else {
MPI_CURRENT_TYPE=MPI_DEFAULT_TYPE;
- MPI_CURRENT_TYPE2=MPI_DEFAULT_TYPE;
}
void *send = smpi_get_tmp_sendbuffer(send_size* smpi_datatype_size(MPI_CURRENT_TYPE));
void *recv = nullptr;
static void action_gatherv(const char *const *action) {
-  /* The structure of the gatherv action for the rank 0 (total 4 processes) is the following:
+  /* The structure of the gatherv action for rank 0 (4 processes total) is the following:
- 0 gather 68 68 10 10 10 0 0 0
-
- where:
- 1) 68 is the sendcount
- 2) 68 10 10 10 is the recvcounts
- 3) 0 is the root node
- 4) 0 is the send datatype id, see decode_datatype()
- 5) 0 is the recv datatype id, see decode_datatype()
+ 0 gather 68 68 10 10 10 0 0 0
+ where:
+ 1) 68 is the sendcount
+ 2) 68 10 10 10 is the recvcounts
+ 3) 0 is the root node
+ 4) 0 is the send datatype id, see decode_datatype()
+ 5) 0 is the recv datatype id, see decode_datatype()
*/
double clock = smpi_process_simulated_elapsed();
int comm_size = smpi_comm_size(MPI_COMM_WORLD);
CHECK_ACTION_PARAMS(action, comm_size+1, 2)
int send_size = parse_double(action[2]);
-  int *disps = xbt_new0(int, comm_size);
-  int *recvcounts = xbt_new0(int, comm_size);
+  int disps[comm_size];
+  int recvcounts[comm_size];
+  memset(disps, 0, comm_size*sizeof(int)); // a VLA cannot take an initializer
int i=0,recv_sum=0;
- MPI_Datatype MPI_CURRENT_TYPE2;
+ MPI_Datatype MPI_CURRENT_TYPE2 = MPI_DEFAULT_TYPE;
if(action[4+comm_size] && action[5+comm_size]) {
MPI_CURRENT_TYPE=decode_datatype(action[4+comm_size]);
MPI_CURRENT_TYPE2=decode_datatype(action[5+comm_size]);
} else {
MPI_CURRENT_TYPE=MPI_DEFAULT_TYPE;
- MPI_CURRENT_TYPE2=MPI_DEFAULT_TYPE;
}
void *send = smpi_get_tmp_sendbuffer(send_size* smpi_datatype_size(MPI_CURRENT_TYPE));
void *recv = nullptr;
for(i=0;i<comm_size;i++) {
recvcounts[i] = atoi(action[i+3]);
recv_sum=recv_sum+recvcounts[i];
- disps[i] = 0;
}
int root=atoi(action[3+comm_size]);
TRACE_smpi_collective_out(smpi_process_index(), -1, __FUNCTION__);
log_timed_action (action, clock);
- xbt_free(recvcounts);
- xbt_free(disps);
}
static void action_reducescatter(const char *const *action) {
-  /* The structure of the reducescatter action for the rank 0 (total 4 processes) is the following:
+  /* The structure of the reducescatter action for rank 0 (4 processes total) is the following:
-0 reduceScatter 275427 275427 275427 204020 11346849 0
-
- where:
- 1) The first four values after the name of the action declare the recvcounts array
- 2) The value 11346849 is the amount of instructions
- 3) The last value corresponds to the datatype, see decode_datatype().
-
- We analyze a MPI_Reduce_scatter call to one MPI_Reduce and one MPI_Scatterv. */
+ 0 reduceScatter 275427 275427 275427 204020 11346849 0
+ where:
+ 1) The first four values after the name of the action declare the recvcounts array
+ 2) The value 11346849 is the number of instructions to compute
+ 3) The last value corresponds to the datatype, see decode_datatype().
+ */
double clock = smpi_process_simulated_elapsed();
int comm_size = smpi_comm_size(MPI_COMM_WORLD);
CHECK_ACTION_PARAMS(action, comm_size+1, 1)
int comp_size = parse_double(action[2+comm_size]);
- int *recvcounts = xbt_new0(int, comm_size);
- int *disps = xbt_new0(int, comm_size);
- int i=0;
+ int recvcounts[comm_size];
int rank = smpi_process_index();
int size = 0;
if(action[3+comm_size])
else
MPI_CURRENT_TYPE= MPI_DEFAULT_TYPE;
- for(i=0;i<comm_size;i++) {
+ for(int i=0;i<comm_size;i++) {
recvcounts[i] = atoi(action[i+2]);
- disps[i] = 0;
size+=recvcounts[i];
}
extra->type = TRACING_REDUCE_SCATTER;
extra->send_size = 0;
extra->recvcounts= xbt_new(int, comm_size);
- for(i=0; i< comm_size; i++)//copy data to avoid bad free
+ for(int i=0; i< comm_size; i++)//copy data to avoid bad free
extra->recvcounts[i] = recvcounts[i];
extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE, nullptr);
extra->comp_size = comp_size;
void *sendbuf = smpi_get_tmp_sendbuffer(size* smpi_datatype_size(MPI_CURRENT_TYPE));
void *recvbuf = smpi_get_tmp_recvbuffer(size* smpi_datatype_size(MPI_CURRENT_TYPE));
-
- mpi_coll_reduce_scatter_fun(sendbuf, recvbuf, recvcounts, MPI_CURRENT_TYPE, MPI_OP_NULL, MPI_COMM_WORLD);
- smpi_execute_flops(comp_size);
+
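+  // perform the collective itself, then simulate the computation time recorded in the trace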
+ mpi_coll_reduce_scatter_fun(sendbuf, recvbuf, recvcounts, MPI_CURRENT_TYPE, MPI_OP_NULL, MPI_COMM_WORLD);
+ smpi_execute_flops(comp_size);
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
- xbt_free(recvcounts);
- xbt_free(disps);
log_timed_action (action, clock);
}
static void action_allgather(const char *const *action) {
-  /* The structure of the allgather action for the rank 0 (total 4 processes) is the following:
+  /* The structure of the allgather action for rank 0 (4 processes total) is the following:
- 0 allGather 275427 275427
-
- where:
- 1) 275427 is the sendcount
- 2) 275427 is the recvcount
- 3) No more values mean that the datatype for sent and receive buffer is the default one, see decode_datatype(). */
+ 0 allGather 275427 275427
+ where:
+ 1) 275427 is the sendcount
+ 2) 275427 is the recvcount
+ 3) No more values mean that the datatype for sent and receive buffer is the default one, see decode_datatype().
+ */
double clock = smpi_process_simulated_elapsed();
CHECK_ACTION_PARAMS(action, 2, 2)
int sendcount=atoi(action[2]);
int recvcount=atoi(action[3]);
- MPI_Datatype MPI_CURRENT_TYPE2;
+ MPI_Datatype MPI_CURRENT_TYPE2 = MPI_DEFAULT_TYPE;
if(action[4] && action[5]) {
MPI_CURRENT_TYPE = decode_datatype(action[4]);
MPI_CURRENT_TYPE2 = decode_datatype(action[5]);
} else {
MPI_CURRENT_TYPE = MPI_DEFAULT_TYPE;
- MPI_CURRENT_TYPE2 = MPI_DEFAULT_TYPE;
}
void *sendbuf = smpi_get_tmp_sendbuffer(sendcount* smpi_datatype_size(MPI_CURRENT_TYPE));
void *recvbuf = smpi_get_tmp_recvbuffer(recvcount* smpi_datatype_size(MPI_CURRENT_TYPE2));
static void action_allgatherv(const char *const *action) {
-  /* The structure of the allgatherv action for the rank 0 (total 4 processes) is the following:
+  /* The structure of the allgatherv action for rank 0 (4 processes total) is the following:
-0 allGatherV 275427 275427 275427 275427 204020
-
- where:
- 1) 275427 is the sendcount
- 2) The next four elements declare the recvcounts array
- 3) No more values mean that the datatype for sent and receive buffer
- is the default one, see decode_datatype(). */
+ 0 allGatherV 275427 275427 275427 275427 204020
+ where:
+ 1) 275427 is the sendcount
+ 2) The next four elements declare the recvcounts array
+ 3) No more values mean that the datatype for sent and receive buffer is the default one, see decode_datatype().
+ */
double clock = smpi_process_simulated_elapsed();
int comm_size = smpi_comm_size(MPI_COMM_WORLD);
CHECK_ACTION_PARAMS(action, comm_size+1, 2)
- int i=0;
int sendcount=atoi(action[2]);
- int *recvcounts = xbt_new0(int, comm_size);
- int *disps = xbt_new0(int, comm_size);
- int recv_sum=0;
- MPI_Datatype MPI_CURRENT_TYPE2;
+ int recvcounts[comm_size];
+  int disps[comm_size];
+  memset(disps, 0, comm_size*sizeof(int)); // VLAs cannot take initializers
+ int recv_sum=0;
+ MPI_Datatype MPI_CURRENT_TYPE2 = MPI_DEFAULT_TYPE;
if(action[3+comm_size] && action[4+comm_size]) {
MPI_CURRENT_TYPE = decode_datatype(action[3+comm_size]);
MPI_CURRENT_TYPE2 = decode_datatype(action[4+comm_size]);
} else {
MPI_CURRENT_TYPE = MPI_DEFAULT_TYPE;
- MPI_CURRENT_TYPE2 = MPI_DEFAULT_TYPE;
}
void *sendbuf = smpi_get_tmp_sendbuffer(sendcount* smpi_datatype_size(MPI_CURRENT_TYPE));
- for(i=0;i<comm_size;i++) {
+ for(int i=0;i<comm_size;i++) {
recvcounts[i] = atoi(action[i+3]);
recv_sum=recv_sum+recvcounts[i];
}
extra->type = TRACING_ALLGATHERV;
extra->send_size = sendcount;
extra->recvcounts= xbt_new(int, comm_size);
- for(i=0; i< comm_size; i++)//copy data to avoid bad free
+ for(int i=0; i< comm_size; i++)//copy data to avoid bad free
extra->recvcounts[i] = recvcounts[i];
extra->datatype1 = encode_datatype(MPI_CURRENT_TYPE, nullptr);
extra->datatype2 = encode_datatype(MPI_CURRENT_TYPE2, nullptr);
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
log_timed_action (action, clock);
- xbt_free(recvcounts);
- xbt_free(disps);
}
static void action_allToAllv(const char *const *action) {
-  /* The structure of the allToAllV action for the rank 0 (total 4 processes) is the following:
+  /* The structure of the allToAllV action for rank 0 (4 processes total) is the following:
- 0 allToAllV 100 1 7 10 12 100 1 70 10 5
-
- where:
- 1) 100 is the size of the send buffer *sizeof(int),
- 2) 1 7 10 12 is the sendcounts array
- 3) 100*sizeof(int) is the size of the receiver buffer
- 4) 1 70 10 5 is the recvcounts array */
+ 0 allToAllV 100 1 7 10 12 100 1 70 10 5
+ where:
+ 1) 100 is the size of the send buffer, in multiples of sizeof(int),
+ 2) 1 7 10 12 is the sendcounts array
+ 3) 100 is the size of the receive buffer, in multiples of sizeof(int)
+ 4) 1 70 10 5 is the recvcounts array
+ */
double clock = smpi_process_simulated_elapsed();
int comm_size = smpi_comm_size(MPI_COMM_WORLD);
CHECK_ACTION_PARAMS(action, 2*comm_size+2, 2)
- int *sendcounts = xbt_new0(int, comm_size);
- int *recvcounts = xbt_new0(int, comm_size);
- int *senddisps = xbt_new0(int, comm_size);
- int *recvdisps = xbt_new0(int, comm_size);
+ int sendcounts[comm_size];
+ int recvcounts[comm_size];
+  int senddisps[comm_size];
+  int recvdisps[comm_size];
+  memset(senddisps, 0, comm_size*sizeof(int)); // VLAs cannot take initializers
+  memset(recvdisps, 0, comm_size*sizeof(int));
- MPI_Datatype MPI_CURRENT_TYPE2;
+ MPI_Datatype MPI_CURRENT_TYPE2 = MPI_DEFAULT_TYPE;
int send_buf_size=parse_double(action[2]);
int recv_buf_size=parse_double(action[3+comm_size]);
}
else{
MPI_CURRENT_TYPE=MPI_DEFAULT_TYPE;
- MPI_CURRENT_TYPE2=MPI_DEFAULT_TYPE;
}
void *sendbuf = smpi_get_tmp_sendbuffer(send_buf_size* smpi_datatype_size(MPI_CURRENT_TYPE));
TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
log_timed_action (action, clock);
- xbt_free(sendcounts);
- xbt_free(recvcounts);
- xbt_free(senddisps);
- xbt_free(recvdisps);
}
void smpi_replay_run(int *argc, char***argv){
xbt_replay_action_runner(*argc, *argv);
/* and now, finalize everything */
- double sim_time= 1.;
/* One active process will stop. Decrease the counter*/
XBT_DEBUG("There are %zu elements in reqq[*]", get_reqq_self()->size());
if (!get_reqq_self()->empty()){
i++;
}
smpi_mpi_waitall(count_requests, requests, status);
- active_processes--;
- } else {
- active_processes--;
}
+ active_processes--;
if(active_processes==0){
- /* Last process alive speaking */
- /* end the simulated timer */
- sim_time = smpi_process_simulated_elapsed();
- XBT_INFO("Simulation time %f", sim_time);
+ /* Last process alive speaking: end the simulated timer */
+ XBT_INFO("Simulation time %f", smpi_process_simulated_elapsed());
_xbt_replay_action_exit();
xbt_free(sendbuffer);
xbt_free(recvbuffer);