{
tracker_answer_t a = (tracker_answer_t)data;
xbt_dynar_free(&a->peers);
- free(a);
+ xbt_free(a);
}
static int is_in_list(const_xbt_dynar_t peers, int id)
/* Tear down a broadcaster: release its owned buffers, then the struct itself.
 * NOTE(review): this span is a diff hunk — the -/+ lines replace plain free()
 * with xbt_free() so deallocation matches the xbt allocation API. */
static void broadcaster_destroy(broadcaster_t bc)
{
- free(bc->pending_sends);
- free(bc->mailboxes);
- free(bc);
+ xbt_free(bc->pending_sends);
+ xbt_free(bc->mailboxes);
+ xbt_free(bc);
}
/** Emitter function */
p->total_pieces = msg->num_pieces;
XBT_DEBUG("Peer %s got a 'BUILD_CHAIN' message (prev: %s / next: %s)", sg_mailbox_get_name(p->me),
p->prev ? sg_mailbox_get_name(p->prev) : NULL, p->next ? sg_mailbox_get_name(p->next) : NULL);
- free(msg);
+ xbt_free(msg);
}
static void peer_forward_file(peer_t p)
/* Release all memory owned by a peer: its pending receive/send containers
 * and finally the peer structure itself.
 * NOTE(review): diff hunk — free() is being swapped for xbt_free() to stay
 * consistent with the xbt allocation API used elsewhere in the file. */
static void peer_delete(peer_t p)
{
- free(p->pending_recvs);
- free(p->pending_sends);
+ xbt_free(p->pending_recvs);
+ xbt_free(p->pending_sends);
- free(p);
+ xbt_free(p);
}
void peer(int argc, char* argv[])
double* payload = (double*)sg_mailbox_get(mailbox);
if (*payload == FINALIZE) {
- free(payload); /* - Exit if 'finalize' is received */
+ xbt_free(payload); /* - Exit if 'finalize' is received */
break;
}
sg_actor_execute(*payload); /* - Otherwise, process the received number of flops*/
- free(payload);
+ xbt_free(payload);
}
XBT_INFO("I'm done. See you!");
}
XBT_INFO("%s received from mailbox(%s)", pr_name, mbox_name);
if (*payload == FINALIZE) {
- free(payload);
+ xbt_free(payload);
break;
}
sg_actor_execute(*payload);
XBT_INFO("%s executed", pr_name);
- free(payload);
+ xbt_free(payload);
}
}
}
XBT_INFO("# Goodbye now!");
- free(vms);
+ xbt_free(vms);
}
int main(int argc, char* argv[])
for (int i = 0; i < 2; i++)
worker_pms[i] = pms[i + 1];
- free(pms);
+ xbt_free(pms);
sg_actor_t actor = sg_actor_init("master", master_pm);
sg_actor_set_data(actor, worker_pms);
simgrid_run();
XBT_INFO("Bye (simulation time %g)", simgrid_get_clock());
- free(worker_pms);
+ xbt_free(worker_pms);
return 0;
}
XBT_INFO("%s:%s to %s:%s => %g sec", sg_host_get_name(priv->tx_host), sg_actor_get_name(priv->tx_proc), host_name,
pr_name, clock_end - priv->clock_sta);
- free(priv);
+ xbt_free(priv);
}
static void launch_communication_worker(sg_host_t tx_host, sg_host_t rx_host)
/* Now that all message exchanges were initiated, wait for their completion in one single call */
sg_comm_wait_all(pending_comms, pending_comms_count);
- free(pending_comms);
- free(mboxes);
+ xbt_free(pending_comms);
+ xbt_free(mboxes);
XBT_INFO("Goodbye now!");
}
changed_pos);
}
- free(pending_comms);
- free(mboxes);
+ xbt_free(pending_comms);
+ xbt_free(mboxes);
XBT_INFO("Goodbye now!");
}
const kademlia_message_t msg = (kademlia_message_t)message;
if (msg)
answer_free(msg->answer);
- free(msg);
+ xbt_free(msg);
}
} else {
handle_find_node(node, msg);
}
- answer_free(msg->answer);
- free(msg);
+ free_message(msg);
} else {
sg_actor_sleep_for(1);
}
timeout += simgrid_get_clock() - time_beginreceive;
time_beginreceive = simgrid_get_clock();
}
- answer_free(msg->answer);
- free(msg);
+ free_message(msg);
} else {
sg_actor_sleep_for(1);
}
XBT_INFO("%d activities remain pending", pending_execs_count);
}
- free(pending_execs);
+ xbt_free(pending_execs);
}
int main(int argc, char* argv[])
break;
case SG_ERROR_NETWORK:
XBT_INFO("Mmh. The communication with '%s' failed. Nevermind. Let's keep going!", mailbox_name);
- free(payload);
+ xbt_free(payload);
break;
case SG_ERROR_TIMEOUT:
XBT_INFO("Mmh. Got timeouted while speaking to '%s'. Nevermind. Let's keep going!", mailbox_name);
- free(payload);
+ xbt_free(payload);
break;
default:
xbt_die("Unexpected behavior");
switch (sg_comm_wait_for(comm, 1.0)) {
case SG_ERROR_NETWORK:
XBT_INFO("Mmh. Can't reach '%s'! Nevermind. Let's keep going!", mailbox_name);
- free(payload);
+ xbt_free(payload);
break;
case SG_ERROR_TIMEOUT:
XBT_INFO("Mmh. Got timeouted while speaking to '%s'. Nevermind. Let's keep going!", mailbox_name);
- free(payload);
+ xbt_free(payload);
break;
case SG_OK:
/* nothing */
sg_error_t retcode = sg_comm_wait(comm);
if (retcode == SG_OK) {
if (*payload == FINALIZE) {
- free(payload);
+ xbt_free(payload);
break;
} else {
double comp_size = *payload;
- free(payload);
+ xbt_free(payload);
XBT_INFO("Start execution...");
sg_actor_execute(comp_size);
XBT_INFO("Execution complete.");
e.load_platform(argv[1]);
/* - Display Host properties */
- for (auto h : e.get_all_hosts()) {
+ for (auto const* h : e.get_all_hosts()) {
XBT_INFO("*** %s properties ****", h->get_cname());
for (auto const& [key, value] : *h->get_properties())
XBT_INFO(" %s -> %s", key.c_str(), value.c_str());
std::vector<simgrid::kernel::routing::ClusterZone*> clusters =
e.get_filtered_netzones<simgrid::kernel::routing::ClusterZone>();
- for (auto c : clusters) {
+ for (auto const* c : clusters) {
XBT_INFO("%s", c->get_cname());
std::vector<sg4::Host*> hosts = c->get_all_hosts();
- for (auto h : hosts)
+ for (auto const* h : hosts)
XBT_INFO(" %s", h->get_cname());
}
std::vector<simgrid::kernel::routing::DragonflyZone*> dragonfly_clusters =
e.get_filtered_netzones<simgrid::kernel::routing::DragonflyZone>();
- for (auto d : dragonfly_clusters) {
+ for (auto const* d : dragonfly_clusters) {
XBT_INFO("%s' dragonfly topology:", d->get_cname());
for (size_t i = 0; i < d->get_host_count(); i++) {
const simgrid::kernel::routing::DragonflyZone::Coords coords = d->rankId_to_coords(i);
"Check if there is a communication ready to be consumed from a mailbox.")
.def(
"put",
- [](Mailbox* self, py::object data, int size, double timeout) {
+ [](Mailbox* self, py::object data, uint64_t size, double timeout) {
data.inc_ref();
self->put(data.ptr(), size, timeout);
},
py::call_guard<py::gil_scoped_release>(), "Blocking data transmission with a timeout")
.def(
"put",
- [](Mailbox* self, py::object data, int size) {
+ [](Mailbox* self, py::object data, uint64_t size) {
data.inc_ref();
self->put(data.ptr(), size);
},
py::call_guard<py::gil_scoped_release>(), "Blocking data transmission")
.def(
"put_async",
- [](Mailbox* self, py::object data, int size) {
+ [](Mailbox* self, py::object data, uint64_t size) {
data.inc_ref();
return self->put_async(data.ptr(), size);
},
py::call_guard<py::gil_scoped_release>(), "Non-blocking data transmission")
.def(
"put_init",
- [](Mailbox* self, py::object data, int size) {
+ [](Mailbox* self, py::object data, uint64_t size) {
data.inc_ref();
return self->put_init(data.ptr(), size);
},
FILE* in_file = fopen(filename.c_str(), "r");
xbt_assert(in_file != nullptr, "Failed to open file: %s", filename.c_str());
- Agraph_t* dag_dot = agread(in_file, NIL(Agdisc_t*));
+ Agraph_t* dag_dot = agread(in_file, nullptr);
std::unordered_map<std::string, ActivityPtr> activities;
std::vector<ActivityPtr> dag;
xbt_assert(models_prio_.find(model_name) == models_prio_.end(),
"Model %s already exists, use model.set_name() to change its name", model_name.c_str());
- for (const auto dep : dependencies) {
+ for (const auto* dep : dependencies) {
xbt_assert(models_prio_.find(dep->get_name()) != models_prio_.end(),
"Model %s doesn't exists. Impossible to use it as dependency.", dep->get_name().c_str());
}
src->get_cname(), gw_src->get_cname()));
const auto* netzone_src = get_netzone_recursive(src);
- if (not netzone_src->is_component_recursive(gw_src))
+ if (not netzone_src || not netzone_src->is_component_recursive(gw_src))
throw std::invalid_argument(xbt::string_printf(
"Invalid NetzoneRoute from %s@%s to %s: gw_src %s belongs to %s, not to %s.", src_name, gw_src->get_cname(),
dst_name, gw_src->get_cname(), gw_src->get_englobing_zone()->get_cname(), src_name));
dst->get_cname(), gw_dst->get_cname()));
const auto* netzone_dst = get_netzone_recursive(dst);
- if (not netzone_dst->is_component_recursive(gw_dst))
+ if (not netzone_dst || not netzone_dst->is_component_recursive(gw_dst))
throw std::invalid_argument(xbt::string_printf(
"Invalid NetzoneRoute from %s@%s to %s: gw_dst %s belongs to %s, not to %s.", dst_name, gw_dst->get_cname(),
src_name, gw_dst->get_cname(), gw_dst->get_englobing_zone()->get_cname(), dst_name));
// For each enabled transition in the property automaton, push a
// (application_state, automaton_state) pair to the exploration stack:
for (int i = xbt_dynar_length(current_pair->automaton_state->out) - 1; i >= 0; i--) {
- auto transition_succ_label = Api::get().get_automaton_transition_label(current_pair->automaton_state->out, i);
- auto transition_succ_dst = Api::get().get_automaton_transition_dst(current_pair->automaton_state->out, i);
+ const auto* transition_succ_label =
+ Api::get().get_automaton_transition_label(current_pair->automaton_state->out, i);
+ auto* transition_succ_dst = Api::get().get_automaton_transition_dst(current_pair->automaton_state->out, i);
if (evaluate_label(transition_succ_label, *prop_values))
exploration_stack_.push_back(this->create_pair(current_pair.get(), transition_succ_dst, prop_values));
}
// in case no fullpath is provided ... just pick the first mountpoint.
if (size_t found = fullname.find('/'); found == std::string::npos || fullname.rfind("./", 1) != std::string::npos) {
- auto disk = simgrid::s4u::Host::current()->get_disks().front();
+ const auto* disk = simgrid::s4u::Host::current()->get_disks().front();
std::string mount;
if (disk->get_host() != simgrid::s4u::Host::current())
mount = disk->extension<simgrid::s4u::FileSystemDiskExt>()->get_mount_point(disk->get_host());
double angle = 0;
auto nb_stations = static_cast<double>(wifizone->get_all_hosts().size() - 1);
double step = 2 * M_PI / nb_stations;
- for (auto station_host : wifizone->get_all_hosts()) {
+ for (const auto* station_host : wifizone->get_all_hosts()) {
station_netpoint_ns3 = station_host->get_netpoint()->extension<NetPointNs3>();
if (station_netpoint_ns3 == access_point_netpoint_ns3)
continue;
action.set_latency(0.0);
}
if ((action.get_latency() <= 0.0) && (action.is_suspended() == 0)) {
- action.updateBound();
+ action.update_bound();
get_maxmin_system()->update_variable_penalty(action.get_variable(), 1.0);
action.set_last_update();
}
L07Action::L07Action(Model* model, const std::vector<s4u::Host*>& host_list, const double* flops_amount,
const double* bytes_amount, double rate)
- : CpuAction(model, 1.0, false), computationAmount_(flops_amount), communicationAmount_(bytes_amount), rate_(rate)
+ : CpuAction(model, 1.0, false)
+ , host_list_(host_list)
+ , computation_amount_(flops_amount)
+ , communication_amount_(bytes_amount)
+ , rate_(rate)
{
- size_t link_nb = 0;
- size_t used_host_nb = 0; /* Only the hosts with something to compute (>0 flops) are counted) */
- double latency = 0.0;
+ size_t link_nb = 0;
+ const size_t host_nb = host_list_.size();
+ size_t used_host_nb = 0; /* Only the hosts with something to compute (>0 flops) are counted) */
+ double latency = 0.0;
this->set_last_update();
- hostList_.insert(hostList_.end(), host_list.begin(), host_list.end());
-
if (flops_amount != nullptr)
- used_host_nb += std::count_if(flops_amount, flops_amount + host_list.size(), [](double x) { return x > 0.0; });
+ used_host_nb += std::count_if(flops_amount, flops_amount + host_nb, [](double x) { return x > 0.0; });
/* Compute the number of affected resources... */
if (bytes_amount != nullptr) {
std::unordered_set<const char*> affected_links;
- for (size_t k = 0; k < host_list.size() * host_list.size(); k++) {
+ for (size_t k = 0; k < host_nb * host_nb; k++) {
if (bytes_amount[k] <= 0)
continue;
double lat = 0.0;
std::vector<StandardLinkImpl*> route;
- hostList_[k / host_list.size()]->route_to(hostList_[k % host_list.size()], route, &lat);
+ host_list_[k / host_nb]->route_to(host_list_[k % host_nb], route, &lat);
latency = std::max(latency, lat);
for (auto const& link : route)
link_nb = affected_links.size();
}
- XBT_DEBUG("Creating a parallel task (%p) with %zu hosts and %zu unique links.", this, host_list.size(), link_nb);
+ XBT_DEBUG("Creating a parallel task (%p) with %zu hosts and %zu unique links.", this, host_nb, link_nb);
latency_ = latency;
- set_variable(
- model->get_maxmin_system()->variable_new(this, 1.0, (rate > 0 ? rate : -1.0), host_list.size() + link_nb));
+ set_variable(model->get_maxmin_system()->variable_new(this, 1.0, (rate > 0 ? rate : -1.0), host_nb + link_nb));
if (latency_ > 0)
model->get_maxmin_system()->update_variable_penalty(get_variable(), 0.0);
/* Expand it for the CPUs even if there is nothing to compute, to make sure that it gets expended even if there is no
* communication either */
- for (size_t i = 0; i < host_list.size(); i++) {
+ for (size_t i = 0; i < host_nb; i++) {
model->get_maxmin_system()->expand(host_list[i]->get_cpu()->get_constraint(), get_variable(),
(flops_amount == nullptr ? 0.0 : flops_amount[i]));
}
if (bytes_amount != nullptr) {
- for (size_t k = 0; k < host_list.size() * host_list.size(); k++) {
+ for (size_t k = 0; k < host_nb * host_nb; k++) {
if (bytes_amount[k] <= 0.0)
continue;
std::vector<StandardLinkImpl*> route;
- hostList_[k / host_list.size()]->route_to(hostList_[k % host_list.size()], route, nullptr);
+ host_list_[k / host_nb]->route_to(host_list_[k % host_nb], route, nullptr);
for (auto const& link : route)
model->get_maxmin_system()->expand(link->get_constraint(), this->get_variable(), bytes_amount[k]);
this->set_remains(0.0);
}
/* finally calculate the initial bound value */
- updateBound();
+ update_bound();
}
Action* NetworkL07Model::communicate(s4u::Host* src, s4u::Host* dst, double size, double rate)
get_model()->get_maxmin_system()->update_constraint_bound(get_constraint(), get_core_count() * speed_.peak * speed_.scale);
while (const auto* var = get_constraint()->get_variable(&elem)) {
- auto* action = static_cast<L07Action*>(var->get_id());
- action->updateBound();
+ const auto* action = static_cast<L07Action*>(var->get_id());
+ action->update_bound();
}
CpuImpl::on_speed_change();
latency_.peak = value;
while (const auto* var = get_constraint()->get_variable(&elem)) {
- auto* action = static_cast<L07Action*>(var->get_id());
- action->updateBound();
+ const auto* action = static_cast<L07Action*>(var->get_id());
+ action->update_bound();
}
}
LinkL07::~LinkL07() = default;
/* Destructor. The flops/bytes arrays are only owned by the action when
 * free_arrays_ is set (sequential exec and regular comms); otherwise the
 * caller keeps ownership and they must not be deleted here.
 * NOTE(review): diff hunk — members renamed from camelCase to snake_case. */
L07Action::~L07Action()
{
if (free_arrays_) {
- delete[] computationAmount_;
- delete[] communicationAmount_;
+ delete[] computation_amount_;
+ delete[] communication_amount_;
}
}
/* Compute the network-imposed bound for this parallel task: scan every
 * host pair with a positive communication amount, take the largest
 * (latency * bytes) product, and derive the bound from it. Returns
 * max double when the ptask has no communication at all.
 * NOTE(review): diff hunk — method renamed to snake_case and marked const;
 * the statement that folds lat_current into lat_bound appears to be elided
 * from this hunk's context (lat_current seems otherwise unused) — confirm
 * against the full file. */
-double L07Action::calculateNetworkBound()
+double L07Action::calculate_network_bound() const
{
double lat_current = 0.0;
double lat_bound = std::numeric_limits<double>::max();
- size_t host_count = hostList_.size();
+ size_t host_count = host_list_.size();
- if (communicationAmount_ == nullptr) {
+ if (communication_amount_ == nullptr) {
return lat_bound;
}
for (size_t i = 0; i < host_count; i++) {
for (size_t j = 0; j < host_count; j++) {
- if (communicationAmount_[i * host_count + j] > 0) {
+ if (communication_amount_[i * host_count + j] > 0) {
double lat = 0.0;
std::vector<StandardLinkImpl*> route;
- hostList_.at(i)->route_to(hostList_.at(j), route, &lat);
+ host_list_.at(i)->route_to(host_list_.at(j), route, &lat);
- lat_current = std::max(lat_current, lat * communicationAmount_[i * host_count + j]);
+ lat_current = std::max(lat_current, lat * communication_amount_[i * host_count + j]);
}
}
}
return lat_bound;
}
/* Compute the CPU-imposed bound for this parallel task: the slowest host
 * actually computing (flops > 0) limits the rate, using each CPU's current
 * speed and speed ratio. Returns max double when the ptask has no
 * computation part.
 * NOTE(review): diff hunk — method renamed to snake_case and marked const;
 * members renamed accordingly. */
-double L07Action::calculateCpuBound()
+double L07Action::calculate_cpu_bound() const
{
double cpu_bound = std::numeric_limits<double>::max();
- if (computationAmount_ == nullptr) {
+ if (computation_amount_ == nullptr) {
return cpu_bound;
}
- for (size_t i = 0; i < hostList_.size(); i++) {
- if (computationAmount_[i] > 0) {
- cpu_bound = std::min(cpu_bound, hostList_[i]->get_cpu()->get_speed(1.0) *
- hostList_[i]->get_cpu()->get_speed_ratio() / computationAmount_[i]);
+ for (size_t i = 0; i < host_list_.size(); i++) {
+ if (computation_amount_[i] > 0) {
+ cpu_bound = std::min(cpu_bound, host_list_[i]->get_cpu()->get_speed(1.0) *
+ host_list_[i]->get_cpu()->get_speed_ratio() / computation_amount_[i]);
}
}
return cpu_bound;
}
-void L07Action::updateBound()
+void L07Action::update_bound() const
{
- double bound = std::min(calculateNetworkBound(), calculateCpuBound());
+ double bound = std::min(calculate_network_bound(), calculate_cpu_bound());
XBT_DEBUG("action (%p) : bound = %g", this, bound);
* Action *
**********/
class L07Action : public CpuAction {
- std::vector<s4u::Host*> hostList_;
- bool free_arrays_ = false; // By default, computationAmount_ and friends are freed by caller. But not for sequential
+ const std::vector<s4u::Host*> host_list_;
+ bool free_arrays_ = false; // By default, computation_amount_ and friends are freed by caller. But not for sequential
// exec and regular comms
- const double* computationAmount_; /* pointer to the data that lives in s4u action -- do not free unless if
- * free_arrays */
- const double* communicationAmount_; /* pointer to the data that lives in s4u action -- do not free unless if
- * free_arrays */
+ const double* computation_amount_; /* pointer to the data that lives in s4u action -- do not free unless if
+ * free_arrays */
+ const double* communication_amount_; /* pointer to the data that lives in s4u action -- do not free unless if
+ * free_arrays */
double latency_;
double rate_;
* The task is bounded by the slowest CPU running the ptask, considering the current pstate of each CPU.
* Return MAX_DOUBLE if ptask has no computation.
*/
- double calculateCpuBound();
+ double calculate_cpu_bound() const;
/**
* @brief Calculate the network bound for the parallel task
* The network bound depends on the largest latency between the communication in the ptask.
* Return MAX_DOUBLE if latency is 0 (or ptask doesn't have any communication)
*/
- double calculateNetworkBound();
+ double calculate_network_bound() const;
public:
L07Action() = delete;
L07Action& operator=(const L07Action&) = delete;
~L07Action() override;
- void updateBound();
+ void update_bound() const;
double get_latency() const { return latency_; }
void set_latency(double latency) { latency_ = latency; }
void update_latency(double delta, double precision) { double_update(&latency_, delta, precision); }
std::sort(hosts.begin(), hosts.end(),
[](const sg4::Host* a, const sg4::Host* b) { return a->get_name() < b->get_name(); });
- for (auto h : hosts) {
+ for (auto const* h : hosts) {
std::printf(" <host id=\"%s\" speed=\"%.0f\"", h->get_cname(), h->get_speed());
const std::unordered_map<std::string, std::string>* props = h->get_properties();
if (h->get_core_count() > 1) {
std::sort(links.begin(), links.end(),
[](const sg4::Link* a, const sg4::Link* b) { return a->get_name() < b->get_name(); });
- for (auto link : links) {
+ for (auto const* link : links) {
std::printf(" <link id=\"");
std::printf("%s\" bandwidth=\"%.0f\" latency=\"%.9f\"", link->get_cname(), link->get_bandwidth(),
return a->get_name() < b->get_name();
});
- for (auto src_host : hosts) { // Routes from host
+ for (auto const* src_host : hosts) { // Routes from host
const simgrid::kernel::routing::NetPoint* src = src_host->get_netpoint();
- for (auto dst_host : hosts) { // Routes to host
+ for (auto const* dst_host : hosts) { // Routes to host
std::vector<simgrid::kernel::resource::StandardLinkImpl*> route;
const simgrid::kernel::routing::NetPoint* dst = dst_host->get_netpoint();
simgrid::kernel::routing::NetZoneImpl::get_global_route(src, dst, route, nullptr);
std::printf("<link_ctn id=\"%s\"/>", link->get_cname());
std::printf("\n </route>\n");
}
- for (auto dst_host : hosts) { // Routes to host
+ for (auto const* dst_host : hosts) { // Routes to host
std::printf(" <route src=\"%s\" dst=\"%s\">\n ", value1->get_cname(), dst_host->get_cname());
std::vector<simgrid::kernel::resource::StandardLinkImpl*> route;
const simgrid::kernel::routing::NetPoint* netcardDst = dst_host->get_netpoint();
h1->route_to(h2, route, &latency);
XBT_INFO("Route size %zu", route.size());
- for (auto link: route) {
+ for (auto const* link : route) {
double bandwidth = link->get_bandwidth();
XBT_INFO(" Link %s: latency = %f, bandwidth = %f", link->get_cname(), link->get_latency(), bandwidth);
if (bandwidth < min_bandwidth || min_bandwidth < 0.0)
h1->route_to(h2, route, &latency);
XBT_INFO(" Route size %zu", route.size());
- for (auto link: route) {
+ for (auto const* link : route) {
double bandwidth = link->get_bandwidth();
XBT_INFO(" Link %s: latency = %f, bandwidth = %f", link->get_cname(), link->get_latency(), bandwidth);
if (bandwidth < min_bandwidth || min_bandwidth < 0.0)
batchinfo=batchinfo)
# Remove artifacts produced by a test run: generated executables and
# temporary smpi files / core dumps.
# NOTE(review): diff hunk — the `| xargs rm -f` pipeline is replaced by
# `find ... -exec rm -f {} +`, which handles filenames containing spaces
# or newlines safely (xargs splits on whitespace by default).
def teardown(self):
- subprocess.run("find -type f -a -executable | xargs rm -f", shell=True, check=True) # Remove generated cruft (binary files)
+ subprocess.run("find -type f -a -executable -exec rm -f {} +", shell=True, check=True) # Remove generated cruft (binary files)
subprocess.run("rm -f smpitmp-* core", shell=True, check=True)
def parse(self, cachefile):
printf("Alltoallv TEST COMPLETE.\n");
fflush(stdout);
}
- free(sdispls);
- free(rdispls);
- free(recvcounts);
- free(sendcounts);
- free(rbuf);
- free(sbuf);
+ xbt_free(sdispls);
+ xbt_free(rdispls);
+ xbt_free(recvcounts);
+ xbt_free(sendcounts);
+ xbt_free(rbuf);
+ xbt_free(sbuf);
MPI_Finalize();
return 0;
fflush(stdout);
}
}
- free(sb);
- free(rb);
+ xbt_free(sb);
+ xbt_free(rb);
MPI_Finalize();
return EXIT_SUCCESS;
}
the_global_rank);
SMPI_thread_create();
t->f(t->param);
- free(t);
+ xbt_free(t);
}
static void mpi_thread_create(const char* name, void* (*f)(void*), void* param)
MPI_Error_string(ret, err_string, &length);
XBT_INFO("%d request done, return %s", rank, err_string);
XBT_INFO("%d still has MPI rank %d and global variable %d", param->rank, rank, global_rank);
- free(param);
+ xbt_free(param);
return NULL;
}
static void setup_recvbuf(int nprocs, int** recvbuf, int** displs, int** counts, int** rcounts)
{
- *recvbuf = malloc(BUFSIZE * nprocs * sizeof(int));
+ *recvbuf = xbt_malloc(BUFSIZE * nprocs * sizeof(int));
for (int i = 0; i < BUFSIZE * nprocs; i++)
(*recvbuf)[i] = i;
- *displs = malloc(nprocs * sizeof(int));
- *counts = malloc(nprocs * sizeof(int));
- *rcounts = malloc(nprocs * sizeof(int));
+ *displs = xbt_malloc(nprocs * sizeof(int));
+ *counts = xbt_malloc(nprocs * sizeof(int));
+ *rcounts = xbt_malloc(nprocs * sizeof(int));
for (int i = 0; i < nprocs; i++) {
(*displs)[i] = i * BUFSIZE;
(*counts)[i] = BOUNDED(i);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
- int* sendbuf = malloc(BUFSIZE * nprocs * sizeof(int));
+ int* sendbuf = xbt_malloc(BUFSIZE * nprocs * sizeof(int));
for (int i = 0; i < BUFSIZE * nprocs; i++)
sendbuf[i] = rank;
- int* alltoallvcounts = malloc(nprocs * sizeof(int));
+ int* alltoallvcounts = xbt_malloc(nprocs * sizeof(int));
for (int i = 0; i < nprocs; i++)
alltoallvcounts[i] = BOUNDED(i + rank);
- int* dummy_buffer = malloc(sizeof(int));
+ int* dummy_buffer = xbt_malloc(sizeof(int));
// initialize buffers with an invalid value (we want to trigger a valgrind error if they are used)
int* recvbuf = dummy_buffer + 1;
int* displs = dummy_buffer + 1;
MPI_Scatterv(recvbuf, counts, displs, MPI_INT, sendbuf, BOUNDED(rank), MPI_INT, 0, MPI_COMM_WORLD);
MPI_Reduce(sendbuf, recvbuf, BUFSIZE, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);
- free(dummy_buffer);
+ xbt_free(dummy_buffer);
if (rank != 0)
setup_recvbuf(nprocs, &recvbuf, &displs, &counts, &rcounts);
MPI_Exscan(sendbuf, recvbuf, BUFSIZE, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
- free(alltoallvcounts);
- free(sendbuf);
- free(recvbuf);
- free(displs);
- free(counts);
- free(rcounts);
+ xbt_free(alltoallvcounts);
+ xbt_free(sendbuf);
+ xbt_free(recvbuf);
+ xbt_free(displs);
+ xbt_free(counts);
+ xbt_free(rcounts);
MPI_Finalize();
return 0;