#if SIMGRID_HAVE_MC
-#include <stdexcept>
+#include <algorithm>
#include <cstddef>
#include <cstring>
#include <iterator>
+#include <stdexcept>
#include <xbt/sysdep.h>
~string()
{
if (string_data::data != &NUL)
- std::free(string_data::data);
+ delete[] string_data::data;
}
// Ctors
string_data::data = const_cast<char*>(&NUL);
} else {
string_data::len = size;
- string_data::data = static_cast<char*>(std::malloc(string_data::len + 1));
- memcpy(string_data::data, s, string_data::len);
+ string_data::data = new char[string_data::len + 1];
+ std::copy_n(s, string_data::len, string_data::data);
string_data::data[string_data::len] = '\0';
}
}
void assign(const char* s, size_t size)
{
if (string_data::data != &NUL) {
- std::free(string_data::data);
+ delete[] string_data::data;
string_data::data = nullptr;
string_data::len = 0;
}
if (size != 0) {
string_data::len = size;
- string_data::data = (char*) std::malloc(string_data::len + 1);
- std::memcpy(string_data::data, s, string_data::len);
+ string_data::data = new char[string_data::len + 1];
+ std::copy_n(s, string_data::len, string_data::data);
string_data::data[string_data::len] = '\0';
}
}
s_xbt_dynar_t dynar;
process->read_bytes(&dynar, sizeof(dynar), remote_dynar);
- smx_actor_t* data = (smx_actor_t*)malloc(dynar.elmsize * dynar.used);
+ smx_actor_t* data = static_cast<smx_actor_t*>(::operator new(dynar.elmsize * dynar.used));
process->read_bytes(data, dynar.elmsize * dynar.used, dynar.data);
// Load each element of the vector from the MCed process:
process->read_bytes(&info.copy, sizeof(info.copy), remote(data[i]));
target.push_back(std::move(info));
}
- free(data);
+ ::operator delete(data);
}
namespace simgrid {
namespace mc {
bool stack_alloc = size < 64;
const bool region1_need_buffer = region1==nullptr || region1->storage_type()==simgrid::mc::StorageType::Flat;
const bool region2_need_buffer = region2==nullptr || region2->storage_type()==simgrid::mc::StorageType::Flat;
- void* buffer1a = region1_need_buffer ? nullptr : stack_alloc ? alloca(size) : malloc(size);
- void* buffer2a = region2_need_buffer ? nullptr : stack_alloc ? alloca(size) : malloc(size);
+ void* buffer1a = region1_need_buffer ? nullptr : stack_alloc ? alloca(size) : ::operator new(size);
+ void* buffer2a = region2_need_buffer ? nullptr : stack_alloc ? alloca(size) : ::operator new(size);
const void* buffer1 = MC_region_read(region1, buffer1a, addr1, size);
const void* buffer2 = MC_region_read(region2, buffer2a, addr2, size);
int res;
else
res = memcmp(buffer1, buffer2, size);
if (not stack_alloc) {
- free(buffer1a);
- free(buffer2a);
+ ::operator delete(buffer1a);
+ ::operator delete(buffer2a);
}
return res;
}
xbt_dict_t MSG_storage_get_content(msg_storage_t storage)
{
std::map<std::string, sg_size_t>* content = storage->getContent();
- xbt_dict_t content_as_dict = xbt_dict_new_homogeneous(xbt_free_f);
+ // Note: ::operator delete is ok here (no destructor called) since the dict elements are of POD type sg_size_t.
+ xbt_dict_t content_as_dict = xbt_dict_new_homogeneous(::operator delete);
for (auto const& entry : *content) {
- sg_size_t* psize = static_cast<sg_size_t*>(malloc(sizeof(sg_size_t)));
+ sg_size_t* psize = new sg_size_t;
*psize = entry.second;
xbt_dict_set(content_as_dict, entry.first.c_str(), psize, nullptr);
}
/********************************* Task **************************************/
struct s_simdata_task_t {
- ~s_simdata_task_t()
- {
- /* parallel tasks only */
- xbt_free(this->host_list);
- }
+ ~s_simdata_task_t() { delete[] this->host_list; /* parallel tasks only */ }
void setUsed();
void setNotUsed() { this->isused = false; }
#include "msg_private.hpp"
#include "src/simix/smx_private.hpp"
+#include <algorithm>
extern "C" {
/* Simulator Data specific to parallel tasks */
simdata->host_nb = host_nb;
- simdata->host_list = xbt_new0(sg_host_t, host_nb);
+ simdata->host_list = new sg_host_t[host_nb];
simdata->flops_parallel_amount = flops_amount;
simdata->bytes_parallel_amount = bytes_amount;
- for (int i = 0; i < host_nb; i++)
- simdata->host_list[i] = host_list[i];
+ std::copy_n(host_list, host_nb, simdata->host_list);
return task;
}
{
int sendto, recvfrom, blockcount, i;
unsigned int distance;
- int *new_rcounts = NULL, *new_rdispls = NULL;
- int *new_scounts = NULL, *new_sdispls = NULL;
ptrdiff_t slb, rlb, sext, rext;
char *tmpsend = NULL, *tmprecv = NULL;
MPI_Datatype new_rdtype = MPI_DATATYPE_NULL, new_sdtype = MPI_DATATYPE_NULL;
- blockcount doubles until the last step when only the remaining data is
exchanged.
*/
- new_rcounts = (int*) calloc(4*size, sizeof(int));
- new_rdispls = new_rcounts + size;
- new_scounts = new_rdispls + size;
- new_sdispls = new_scounts + size;
+ int* new_rcounts = new int[4 * size](); // value-initialize: the replaced calloc() zeroed these arrays
+ int* new_rdispls = new_rcounts + size;
+ int* new_scounts = new_rdispls + size;
+ int* new_sdispls = new_scounts + size;
for (distance = 1; distance < size; distance<<=1) {
}
- free(new_rcounts);
+ delete[] new_rcounts;
return MPI_SUCCESS;
else {
MPI_Request* requests;
- requests = (MPI_Request*)malloc( size * sizeof(MPI_Request) );
+ requests = new MPI_Request[size];
for (i = 1; i < size; ++i) {
requests[i] = Request::irecv(NULL, 0, MPI_BYTE, MPI_ANY_SOURCE,
COLL_TAG_BARRIER, comm
);
}
Request::waitall( size-1, requests+1, MPI_STATUSES_IGNORE );
- free( requests );
+ delete[] requests;
}
/* All done */
tmpbuf = (char *) buffer;
if( tree->tree_nextsize != 0 ) {
- send_reqs = xbt_new(MPI_Request, tree->tree_nextsize );
+ send_reqs = new MPI_Request[tree->tree_nextsize];
}
/* Root code */
Request::wait( &recv_reqs[req_index], MPI_STATUS_IGNORE );
}
- if( NULL != send_reqs ) free(send_reqs);
- xbt_free(tree);
+ delete[] send_reqs;
+ ompi_coll_tuned_topo_destroy_tree(&tree);
return (MPI_SUCCESS);
}
comm, MPI_STATUS_IGNORE);
}
}
- xbt_free(tree);
+ ompi_coll_tuned_topo_destroy_tree(&tree);
return (MPI_SUCCESS);
size = comm->size();
rank = comm->rank();
- tree = (ompi_coll_tree_t*)malloc(sizeof(ompi_coll_tree_t));
+ tree = new (std::nothrow) ompi_coll_tree_t; // nothrow: keeps the following out-of-memory check meaningful (plain new throws)
if (not tree) {
XBT_DEBUG("coll:tuned:topo_build_tree PANIC::out of memory");
return NULL;
size = comm->size();
rank = comm->rank();
- tree = (ompi_coll_tree_t*)malloc(sizeof(ompi_coll_tree_t));
+ tree = new (std::nothrow) ompi_coll_tree_t; // nothrow: keeps the following out-of-memory check meaningful (plain new throws)
if (not tree) {
XBT_DEBUG("coll:tuned:topo_build_tree PANIC::out of memory");
return NULL;
ptr = *tree;
- free (ptr);
+ delete ptr;
*tree = NULL; /* mark tree as gone */
return MPI_SUCCESS;
index = rank -root;
- bmtree = (ompi_coll_tree_t*)malloc(sizeof(ompi_coll_tree_t));
+ bmtree = new (std::nothrow) ompi_coll_tree_t; // nothrow: keeps the following out-of-memory check meaningful (plain new throws)
if (not bmtree) {
XBT_DEBUG("coll:tuned:topo:build_bmtree PANIC out of memory");
return NULL;
vrank = (rank - root + size) % size;
- bmtree = (ompi_coll_tree_t*)xbt_malloc(sizeof(ompi_coll_tree_t));
+ bmtree = new (std::nothrow) ompi_coll_tree_t; // nothrow: keeps the following out-of-memory check meaningful (plain new throws)
if (not bmtree) {
XBT_DEBUG("coll:tuned:topo:build_bmtree PANIC out of memory");
return NULL;
/*
* Allocate space for topology arrays if needed
*/
- chain = (ompi_coll_tree_t*)malloc( sizeof(ompi_coll_tree_t) );
+ chain = new (std::nothrow) ompi_coll_tree_t; // nothrow: keeps the following out-of-memory check meaningful (plain new throws)
if (not chain) {
XBT_DEBUG("coll:tuned:topo:build_chain PANIC out of memory");
fflush(stdout);
/* other non-leaf nodes */
smpi_free_tmp_buffer(tempbuf);
}
- xbt_free(bmtree);
+ ompi_coll_tuned_topo_destroy_tree(&bmtree);
return MPI_SUCCESS;
err_hndl:
- Waitall for all the second segments to complete.
*/
char* ptmp;
- MPI_Request *reqs = NULL, first_segment_req;
- reqs = (MPI_Request*)calloc(size, sizeof(MPI_Request));
+ MPI_Request first_segment_req;
+ MPI_Request* reqs = new (std::nothrow) MPI_Request[size](); // value-initialize like the replaced calloc(): waitall later scans all `size` slots
if (NULL == reqs) {
ret = -1;
line = __LINE__;
ret = Request::waitall(size, reqs, MPI_STATUSES_IGNORE);
if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
- free(reqs);
+ delete[] reqs;
}
/* All done */
else {
int creq = 0;
- MPI_Request* sreq = NULL;
-
- sreq = (MPI_Request*) calloc( max_outstanding_reqs,
- sizeof(MPI_Request ) );
+ MPI_Request* sreq = new (std::nothrow) MPI_Request[max_outstanding_reqs](); // value-initialize like the replaced calloc()
if (NULL == sreq) { line = __LINE__; ret = -1; goto error_hndl; }
/* post first group of requests */
MPI_STATUSES_IGNORE );
/* free requests */
- free(sreq);
+ delete[] sreq;
}
}
- free(tree);
+ ompi_coll_tuned_topo_destroy_tree(&tree);
return MPI_SUCCESS;
error_hndl: /* error handler */
MPI_Type_extent(mpi_datatype, &typelng);
scrlng = typelng * count;
#ifdef NO_CACHE_OPTIMIZATION
- scr1buf = static_cast<char*>(xbt_malloc(scrlng));
- scr2buf = static_cast<char*>(xbt_malloc(scrlng));
- scr3buf = static_cast<char*>(xbt_malloc(scrlng));
+ scr1buf = new char[scrlng];
+ scr2buf = new char[scrlng];
+ scr3buf = new char[scrlng];
#else
# ifdef SCR_LNG_OPTIM
scrlng = SCR_LNG_OPTIM(scrlng);
# endif
- scr2buf = static_cast<char*>(xbt_malloc(3*scrlng)); /* To test cache problems. */
+ scr2buf = new char[3 * scrlng]; /* To test cache problems. */
scr1buf = scr2buf + 1*scrlng; /* scr1buf and scr3buf must not*/
scr3buf = scr2buf + 2*scrlng; /* be used for malloc because */
/* they are interchanged below.*/
}
# ifdef NO_CACHE_TESTING
- xbt_free(scr1buf); xbt_free(scr2buf); xbt_free(scr3buf);
+ delete[] scr1buf;
+ delete[] scr2buf;
+ delete[] scr3buf;
# else
- xbt_free(scr2buf); /* scr1buf and scr3buf are part of scr2buf */
+ delete[] scr2buf; /* scr1buf and scr3buf are part of scr2buf */
# endif
return(MPI_SUCCESS);
} /* new_prot */
if (NULL != tempbuf)
smpi_free_tmp_buffer(tempbuf);
// not FIXME : store the tree, as done in ompi, instead of calculating it each time ?
- xbt_free(bmtree);
+ ompi_coll_tuned_topo_destroy_tree(&bmtree);
return MPI_SUCCESS;
int range = 0;
int range_threshold = 0;
int is_commutative = 0;
- int *disps = static_cast<int*>(xbt_malloc(comm_size * sizeof (int)));
+ int* disps = new int[comm_size];
if(mv2_red_scat_thresholds_table==NULL)
init_mv2_reduce_scatter_tables_stampede();
recvcnts, datatype,
op, comm);
}
- xbt_free(disps);
+ delete[] disps;
return mpi_errno;
}
void smpi_coll_cleanup_mvapich2()
{
- int i = 0;
if (mv2_alltoall_thresholds_table)
- xbt_free(mv2_alltoall_thresholds_table[i]);
- xbt_free(mv2_alltoall_thresholds_table);
- xbt_free(mv2_size_alltoall_tuning_table);
- xbt_free(mv2_alltoall_table_ppn_conf);
+ delete[] mv2_alltoall_thresholds_table[0];
+ delete[] mv2_alltoall_thresholds_table;
+ delete[] mv2_size_alltoall_tuning_table;
+ delete[] mv2_alltoall_table_ppn_conf;
- xbt_free(mv2_gather_thresholds_table);
+ delete[] mv2_gather_thresholds_table;
if (mv2_allgather_thresholds_table)
- xbt_free(mv2_allgather_thresholds_table[0]);
- xbt_free(mv2_size_allgather_tuning_table);
- xbt_free(mv2_allgather_table_ppn_conf);
- xbt_free(mv2_allgather_thresholds_table);
-
- xbt_free(mv2_allgatherv_thresholds_table);
- xbt_free(mv2_reduce_thresholds_table);
- xbt_free(mv2_red_scat_thresholds_table);
- xbt_free(mv2_allreduce_thresholds_table);
- xbt_free(mv2_bcast_thresholds_table);
+ delete[] mv2_allgather_thresholds_table[0];
+ delete[] mv2_size_allgather_tuning_table;
+ delete[] mv2_allgather_table_ppn_conf;
+ delete[] mv2_allgather_thresholds_table;
+
+ delete[] mv2_allgatherv_thresholds_table;
+ delete[] mv2_reduce_thresholds_table;
+ delete[] mv2_red_scat_thresholds_table;
+ delete[] mv2_allreduce_thresholds_table;
+ delete[] mv2_bcast_thresholds_table;
if (mv2_scatter_thresholds_table)
- xbt_free(mv2_scatter_thresholds_table[0]);
- xbt_free(mv2_scatter_thresholds_table);
- xbt_free(mv2_size_scatter_tuning_table);
- xbt_free(mv2_scatter_table_ppn_conf);
+ delete[] mv2_scatter_thresholds_table[0];
+ delete[] mv2_scatter_thresholds_table;
+ delete[] mv2_size_scatter_tuning_table;
+ delete[] mv2_scatter_table_ppn_conf;
}
/************ Alltoall variables and initializers */
+#ifndef SMPI_MVAPICH2_SELECTOR_STAMPEDE_HPP
+#define SMPI_MVAPICH2_SELECTOR_STAMPEDE_HPP
+
+#include <algorithm>
+
#define MV2_MAX_NB_THRESHOLDS 32
XBT_PUBLIC(void) smpi_coll_cleanup_mvapich2(void);
static void init_mv2_alltoall_tables_stampede()
{
- int i;
int agg_table_sum = 0;
mv2_alltoall_tuning_table** table_ptrs = NULL;
mv2_alltoall_num_ppn_conf = 3;
if (simgrid::smpi::Colls::smpi_coll_cleanup_callback == NULL)
simgrid::smpi::Colls::smpi_coll_cleanup_callback = &smpi_coll_cleanup_mvapich2;
- mv2_alltoall_thresholds_table = static_cast<mv2_alltoall_tuning_table**>(
- xbt_malloc(sizeof(mv2_alltoall_tuning_table*) * mv2_alltoall_num_ppn_conf));
- table_ptrs = static_cast<mv2_alltoall_tuning_table**>(
- xbt_malloc(sizeof(mv2_alltoall_tuning_table*) * mv2_alltoall_num_ppn_conf));
- mv2_size_alltoall_tuning_table = static_cast<int*>(xbt_malloc(sizeof(int) * mv2_alltoall_num_ppn_conf));
- mv2_alltoall_table_ppn_conf = static_cast<int*>(xbt_malloc(mv2_alltoall_num_ppn_conf * sizeof(int)));
+ mv2_alltoall_thresholds_table = new mv2_alltoall_tuning_table*[mv2_alltoall_num_ppn_conf];
+ table_ptrs = new mv2_alltoall_tuning_table*[mv2_alltoall_num_ppn_conf];
+ mv2_size_alltoall_tuning_table = new int[mv2_alltoall_num_ppn_conf];
+ mv2_alltoall_table_ppn_conf = new int[mv2_alltoall_num_ppn_conf];
mv2_alltoall_table_ppn_conf[0] = 1;
mv2_size_alltoall_tuning_table[0] = 6;
mv2_alltoall_tuning_table mv2_tmp_alltoall_thresholds_table_1ppn[] = {
};
table_ptrs[2] = mv2_tmp_alltoall_thresholds_table_16ppn;
agg_table_sum = 0;
- for (i = 0; i < mv2_alltoall_num_ppn_conf; i++) {
+ for (int i = 0; i < mv2_alltoall_num_ppn_conf; i++) {
agg_table_sum += mv2_size_alltoall_tuning_table[i];
}
- mv2_alltoall_thresholds_table[0] =
- static_cast<mv2_alltoall_tuning_table*>(xbt_malloc(agg_table_sum * sizeof(mv2_alltoall_tuning_table)));
- memcpy(mv2_alltoall_thresholds_table[0], table_ptrs[0],
- (sizeof(mv2_alltoall_tuning_table) * mv2_size_alltoall_tuning_table[0]));
- for (i = 1; i < mv2_alltoall_num_ppn_conf; i++) {
+ mv2_alltoall_thresholds_table[0] = new mv2_alltoall_tuning_table[agg_table_sum];
+ std::copy_n(table_ptrs[0], mv2_size_alltoall_tuning_table[0], mv2_alltoall_thresholds_table[0]);
+ for (int i = 1; i < mv2_alltoall_num_ppn_conf; i++) {
mv2_alltoall_thresholds_table[i] = mv2_alltoall_thresholds_table[i - 1] + mv2_size_alltoall_tuning_table[i - 1];
- memcpy(mv2_alltoall_thresholds_table[i], table_ptrs[i],
- (sizeof(mv2_alltoall_tuning_table) * mv2_size_alltoall_tuning_table[i]));
+ std::copy_n(table_ptrs[i], mv2_size_alltoall_tuning_table[i], mv2_alltoall_thresholds_table[i]);
}
- xbt_free(table_ptrs);
+ delete[] table_ptrs;
}
/************ Allgather variables and initializers */
static void init_mv2_allgather_tables_stampede()
{
- int i;
int agg_table_sum = 0;
if (simgrid::smpi::Colls::smpi_coll_cleanup_callback == NULL)
simgrid::smpi::Colls::smpi_coll_cleanup_callback = &smpi_coll_cleanup_mvapich2;
mv2_allgather_tuning_table** table_ptrs = NULL;
mv2_allgather_num_ppn_conf = 3;
- mv2_allgather_thresholds_table = static_cast<mv2_allgather_tuning_table**>(
- xbt_malloc(sizeof(mv2_allgather_tuning_table*) * mv2_allgather_num_ppn_conf));
- table_ptrs = static_cast<mv2_allgather_tuning_table**>(
- xbt_malloc(sizeof(mv2_allgather_tuning_table*) * mv2_allgather_num_ppn_conf));
- mv2_size_allgather_tuning_table = static_cast<int*>(xbt_malloc(sizeof(int) * mv2_allgather_num_ppn_conf));
- mv2_allgather_table_ppn_conf = static_cast<int*>(xbt_malloc(mv2_allgather_num_ppn_conf * sizeof(int)));
+ mv2_allgather_thresholds_table = new mv2_allgather_tuning_table*[mv2_allgather_num_ppn_conf];
+ table_ptrs = new mv2_allgather_tuning_table*[mv2_allgather_num_ppn_conf];
+ mv2_size_allgather_tuning_table = new int[mv2_allgather_num_ppn_conf];
+ mv2_allgather_table_ppn_conf = new int[mv2_allgather_num_ppn_conf];
mv2_allgather_table_ppn_conf[0] = 1;
mv2_size_allgather_tuning_table[0] = 6;
mv2_allgather_tuning_table mv2_tmp_allgather_thresholds_table_1ppn[] = {
};
table_ptrs[2] = mv2_tmp_allgather_thresholds_table_16ppn;
agg_table_sum = 0;
- for (i = 0; i < mv2_allgather_num_ppn_conf; i++) {
+ for (int i = 0; i < mv2_allgather_num_ppn_conf; i++) {
agg_table_sum += mv2_size_allgather_tuning_table[i];
}
- mv2_allgather_thresholds_table[0] =
- static_cast<mv2_allgather_tuning_table*>(xbt_malloc(agg_table_sum * sizeof(mv2_allgather_tuning_table)));
- memcpy(mv2_allgather_thresholds_table[0], table_ptrs[0],
- (sizeof(mv2_allgather_tuning_table) * mv2_size_allgather_tuning_table[0]));
- for (i = 1; i < mv2_allgather_num_ppn_conf; i++) {
+ mv2_allgather_thresholds_table[0] = new mv2_allgather_tuning_table[agg_table_sum];
+ std::copy_n(table_ptrs[0], mv2_size_allgather_tuning_table[0], mv2_allgather_thresholds_table[0]);
+ for (int i = 1; i < mv2_allgather_num_ppn_conf; i++) {
mv2_allgather_thresholds_table[i] = mv2_allgather_thresholds_table[i - 1] + mv2_size_allgather_tuning_table[i - 1];
- memcpy(mv2_allgather_thresholds_table[i], table_ptrs[i],
- (sizeof(mv2_allgather_tuning_table) * mv2_size_allgather_tuning_table[i]));
+ std::copy_n(table_ptrs[i], mv2_size_allgather_tuning_table[i], mv2_allgather_thresholds_table[i]);
}
- xbt_free(table_ptrs);
+ delete[] table_ptrs;
}
/************ Gather variables and initializers */
if (simgrid::smpi::Colls::smpi_coll_cleanup_callback == NULL)
simgrid::smpi::Colls::smpi_coll_cleanup_callback = &smpi_coll_cleanup_mvapich2;
mv2_size_gather_tuning_table = 7;
- mv2_gather_thresholds_table =
- static_cast<mv2_gather_tuning_table*>(xbt_malloc(mv2_size_gather_tuning_table * sizeof(mv2_gather_tuning_table)));
+ mv2_gather_thresholds_table = new mv2_gather_tuning_table[mv2_size_gather_tuning_table];
mv2_gather_tuning_table mv2_tmp_gather_thresholds_table[] = {
{16,
2,
{{0, -1, &MPIR_Gather_intra}}},
};
- memcpy(mv2_gather_thresholds_table, mv2_tmp_gather_thresholds_table,
- mv2_size_gather_tuning_table * sizeof(mv2_gather_tuning_table));
+ std::copy_n(mv2_tmp_gather_thresholds_table, mv2_size_gather_tuning_table, mv2_gather_thresholds_table);
}
/************ Allgatherv variables and initializers */
if (simgrid::smpi::Colls::smpi_coll_cleanup_callback == NULL)
simgrid::smpi::Colls::smpi_coll_cleanup_callback = &smpi_coll_cleanup_mvapich2;
mv2_size_allgatherv_tuning_table = 6;
- mv2_allgatherv_thresholds_table = static_cast<mv2_allgatherv_tuning_table*>(
- xbt_malloc(mv2_size_allgatherv_tuning_table * sizeof(mv2_allgatherv_tuning_table)));
+ mv2_allgatherv_thresholds_table = new mv2_allgatherv_tuning_table[mv2_size_allgatherv_tuning_table];
mv2_allgatherv_tuning_table mv2_tmp_allgatherv_thresholds_table[] = {
{
16,
},
};
- memcpy(mv2_allgatherv_thresholds_table, mv2_tmp_allgatherv_thresholds_table,
- mv2_size_allgatherv_tuning_table * sizeof(mv2_allgatherv_tuning_table));
+ std::copy_n(mv2_tmp_allgatherv_thresholds_table, mv2_size_allgatherv_tuning_table, mv2_allgatherv_thresholds_table);
}
/************ Allreduce variables and initializers */
if (simgrid::smpi::Colls::smpi_coll_cleanup_callback == NULL)
simgrid::smpi::Colls::smpi_coll_cleanup_callback = &smpi_coll_cleanup_mvapich2;
mv2_size_allreduce_tuning_table = 8;
- mv2_allreduce_thresholds_table = static_cast<mv2_allreduce_tuning_table*>(
- xbt_malloc(mv2_size_allreduce_tuning_table * sizeof(mv2_allreduce_tuning_table)));
+ mv2_allreduce_thresholds_table = new mv2_allreduce_tuning_table[mv2_size_allreduce_tuning_table];
mv2_allreduce_tuning_table mv2_tmp_allreduce_thresholds_table[] = {
{
16,
},
};
- memcpy(mv2_allreduce_thresholds_table, mv2_tmp_allreduce_thresholds_table,
- mv2_size_allreduce_tuning_table * sizeof(mv2_allreduce_tuning_table));
+ std::copy_n(mv2_tmp_allreduce_thresholds_table, mv2_size_allreduce_tuning_table, mv2_allreduce_thresholds_table);
}
struct mv2_bcast_tuning_element {
if (simgrid::smpi::Colls::smpi_coll_cleanup_callback == NULL)
simgrid::smpi::Colls::smpi_coll_cleanup_callback = &smpi_coll_cleanup_mvapich2;
mv2_size_bcast_tuning_table = 8;
- mv2_bcast_thresholds_table =
- static_cast<mv2_bcast_tuning_table*>(xbt_malloc(mv2_size_bcast_tuning_table * sizeof(mv2_bcast_tuning_table)));
+ mv2_bcast_thresholds_table = new mv2_bcast_tuning_table[mv2_size_bcast_tuning_table];
mv2_bcast_tuning_table mv2_tmp_bcast_thresholds_table[] = {
{16,
{32768, 524288, &MPIR_Shmem_Bcast_MV2, -1},
{524288, -1, &MPIR_Shmem_Bcast_MV2, -1}}}};
- memcpy(mv2_bcast_thresholds_table, mv2_tmp_bcast_thresholds_table,
- mv2_size_bcast_tuning_table * sizeof(mv2_bcast_tuning_table));
+ std::copy_n(mv2_tmp_bcast_thresholds_table, mv2_size_bcast_tuning_table, mv2_bcast_thresholds_table);
}
/************ Reduce variables and initializers */
simgrid::smpi::Colls::smpi_coll_cleanup_callback = &smpi_coll_cleanup_mvapich2;
/*Stampede*/
mv2_size_reduce_tuning_table = 8;
- mv2_reduce_thresholds_table =
- static_cast<mv2_reduce_tuning_table*>(xbt_malloc(mv2_size_reduce_tuning_table * sizeof(mv2_reduce_tuning_table)));
+ mv2_reduce_thresholds_table = new mv2_reduce_tuning_table[mv2_size_reduce_tuning_table];
mv2_reduce_tuning_table mv2_tmp_reduce_thresholds_table[] = {
{
16,
},
};
- memcpy(mv2_reduce_thresholds_table, mv2_tmp_reduce_thresholds_table,
- mv2_size_reduce_tuning_table * sizeof(mv2_reduce_tuning_table));
+ std::copy_n(mv2_tmp_reduce_thresholds_table, mv2_size_reduce_tuning_table, mv2_reduce_thresholds_table);
}
/************ Reduce scatter variables and initializers */
if (simgrid::smpi::Colls::smpi_coll_cleanup_callback == NULL)
simgrid::smpi::Colls::smpi_coll_cleanup_callback = &smpi_coll_cleanup_mvapich2;
mv2_size_red_scat_tuning_table = 6;
- mv2_red_scat_thresholds_table = static_cast<mv2_red_scat_tuning_table*>(
- xbt_malloc(mv2_size_red_scat_tuning_table * sizeof(mv2_red_scat_tuning_table)));
+ mv2_red_scat_thresholds_table = new mv2_red_scat_tuning_table[mv2_size_red_scat_tuning_table];
mv2_red_scat_tuning_table mv2_tmp_red_scat_thresholds_table[] = {
{
16,
},
};
- memcpy(mv2_red_scat_thresholds_table, mv2_tmp_red_scat_thresholds_table,
- mv2_size_red_scat_tuning_table * sizeof(mv2_red_scat_tuning_table));
+ std::copy_n(mv2_tmp_red_scat_thresholds_table, mv2_size_red_scat_tuning_table, mv2_red_scat_thresholds_table);
}
/************ Scatter variables and initializers */
simgrid::smpi::Colls::smpi_coll_cleanup_callback = &smpi_coll_cleanup_mvapich2;
int agg_table_sum = 0;
- int i;
mv2_scatter_tuning_table** table_ptrs = NULL;
mv2_scatter_num_ppn_conf = 3;
- mv2_scatter_thresholds_table =
- static_cast<mv2_scatter_tuning_table**>(xbt_malloc(sizeof(mv2_scatter_tuning_table*) * mv2_scatter_num_ppn_conf));
- table_ptrs =
- static_cast<mv2_scatter_tuning_table**>(xbt_malloc(sizeof(mv2_scatter_tuning_table*) * mv2_scatter_num_ppn_conf));
- mv2_size_scatter_tuning_table = static_cast<int*>(xbt_malloc(sizeof(int) * mv2_scatter_num_ppn_conf));
- mv2_scatter_table_ppn_conf = static_cast<int*>(xbt_malloc(mv2_scatter_num_ppn_conf * sizeof(int)));
+ mv2_scatter_thresholds_table = new mv2_scatter_tuning_table*[mv2_scatter_num_ppn_conf];
+ table_ptrs = new mv2_scatter_tuning_table*[mv2_scatter_num_ppn_conf];
+ mv2_size_scatter_tuning_table = new int[mv2_scatter_num_ppn_conf];
+ mv2_scatter_table_ppn_conf = new int[mv2_scatter_num_ppn_conf];
mv2_scatter_table_ppn_conf[0] = 1;
mv2_size_scatter_tuning_table[0] = 6;
mv2_scatter_tuning_table mv2_tmp_scatter_thresholds_table_1ppn[] = {
};
table_ptrs[2] = mv2_tmp_scatter_thresholds_table_16ppn;
agg_table_sum = 0;
- for (i = 0; i < mv2_scatter_num_ppn_conf; i++) {
+ for (int i = 0; i < mv2_scatter_num_ppn_conf; i++) {
agg_table_sum += mv2_size_scatter_tuning_table[i];
}
- mv2_scatter_thresholds_table[0] =
- static_cast<mv2_scatter_tuning_table*>(xbt_malloc(agg_table_sum * sizeof(mv2_scatter_tuning_table)));
- memcpy(mv2_scatter_thresholds_table[0], table_ptrs[0],
- (sizeof(mv2_scatter_tuning_table) * mv2_size_scatter_tuning_table[0]));
- for (i = 1; i < mv2_scatter_num_ppn_conf; i++) {
+ mv2_scatter_thresholds_table[0] = new mv2_scatter_tuning_table[agg_table_sum];
+ std::copy_n(table_ptrs[0], mv2_size_scatter_tuning_table[0], mv2_scatter_thresholds_table[0]);
+ for (int i = 1; i < mv2_scatter_num_ppn_conf; i++) {
mv2_scatter_thresholds_table[i] = mv2_scatter_thresholds_table[i - 1] + mv2_size_scatter_tuning_table[i - 1];
- memcpy(mv2_scatter_thresholds_table[i], table_ptrs[i],
- (sizeof(mv2_scatter_tuning_table) * mv2_size_scatter_tuning_table[i]));
+ std::copy_n(table_ptrs[i], mv2_size_scatter_tuning_table[i], mv2_scatter_thresholds_table[i]);
}
- xbt_free(table_ptrs);
+ delete[] table_ptrs;
}
+
+#endif
#define SMPI_GROUP_HPP_INCLUDED
#include "smpi_f2c.hpp"
+#include <smpi/smpi.h>
namespace simgrid{
namespace smpi{
#define SMPI_KEYVALS_HPP_INCLUDED
#include "smpi/smpi.h"
-#include "xbt/ex.hpp"
#include <unordered_map>
template <typename T> int Keyval::keyval_create(smpi_copy_fn copy_fn, smpi_delete_fn delete_fn, int* keyval, void* extra_state){
- smpi_key_elem value = (smpi_key_elem)xbt_new0(s_smpi_key_elem_t, 1);
+ smpi_key_elem value = new s_smpi_key_elem_t(); // value-initialize: the replaced xbt_new0 zeroed the struct (refcount is read before being set)
value->copy_fn=copy_fn;
value->delete_fn=delete_fn;
}
if(elem->refcount==1){
T::keyvals_.erase(*keyval);
- xbt_free(elem);
+ delete elem;
}else{
elem->refcount--;
}
return;
}
- smpi_privatization_regions = static_cast<smpi_privatization_region_t>(
- xbt_malloc(smpi_process_count() * sizeof(s_smpi_privatization_region_t)));
+ smpi_privatization_regions = new s_smpi_privatization_region_t[smpi_process_count()];
for (int i=0; i< smpi_process_count(); i++){
// create SIMIX_process_count() mappings of this size with the same data inside
XBT_WARN("Unmapping of fd %d failed: %s", smpi_privatization_regions[i].file_descriptor, strerror(errno));
close(smpi_privatization_regions[i].file_descriptor);
}
- xbt_free(smpi_privatization_regions);
+ delete[] smpi_privatization_regions;
#endif
}
class smpi_source_location : public std::string {
public:
+ smpi_source_location() = default;
smpi_source_location(const char* filename, int line) : std::string(std::string(filename) + ":" + std::to_string(line))
{
}
xbt_free(huge_page_filename);
}
if(smpi_shared_malloc_bogusfile == -1) {
- char *name = xbt_strdup("/tmp/simgrid-shmalloc-XXXXXX");
+ char name[] = "/tmp/simgrid-shmalloc-XXXXXX";
smpi_shared_malloc_bogusfile = mkstemp(name);
XBT_DEBUG("bogusfile : %s\n", name);
unlink(name);
- xbt_free(name);
- char* dumb = (char*)calloc(1, smpi_shared_malloc_blocksize);
+ char* dumb = new char[smpi_shared_malloc_blocksize](); // zero initialized
ssize_t err = write(smpi_shared_malloc_bogusfile, dumb, smpi_shared_malloc_blocksize);
if(err<0)
xbt_die("Could not write bogus file for shared malloc");
- xbt_free(dumb);
+ delete[] dumb;
}
int mmap_base_flag = MAP_FIXED | MAP_SHARED | MAP_POPULATE;
shared_metadata_t newmeta;
//register metadata for memcpy avoidance
- shared_data_key_type* data = (shared_data_key_type*)xbt_malloc(sizeof(shared_data_key_type));
+ shared_data_key_type* data = new shared_data_key_type;
data->second.fd = -1;
data->second.count = 1;
newmeta.size = size;
size_t shared_block_offsets[2] = {0, size};
return smpi_shared_malloc_partial(size, shared_block_offsets, nb_shared_blocks);
}
- XBT_DEBUG("Classic malloc %zu", size);
- return xbt_malloc(size);
+ XBT_DEBUG("Classic allocation of %zu bytes", size);
+ return ::operator new(size);
}
int smpi_is_shared(void* ptr, std::vector<std::pair<size_t, size_t>> &private_blocks, size_t *offset){
if (meta != allocs_metadata.end()){
meta->second.data->second.count--;
if(meta->second.data->second.count==0)
- xbt_free(meta->second.data);
+ delete meta->second.data;
}
munmap(ptr, meta->second.size);
} else {
- XBT_DEBUG("Classic free of %p", ptr);
- xbt_free(ptr);
+ XBT_DEBUG("Classic deallocation of %p", ptr);
+ ::operator delete(ptr);
}
}
#endif
#include "smpi_datatype_derived.hpp"
#include "smpi_op.hpp"
+#include <xbt/log.h>
XBT_LOG_EXTERNAL_CATEGORY(smpi_datatype);
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
-#include "smpi_comm.hpp"
#include "smpi_group.hpp"
+#include "smpi_comm.hpp"
+#include <xbt/log.h>
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_group, smpi, "Logging specific to SMPI (group)");
int oldsize = size_;
int newsize = oldsize - n;
*newgroup = new Group(newsize);
- int* to_exclude=xbt_new0(int, size_);
+ int* to_exclude = new int[size_];
for (int i = 0; i < oldsize; i++)
to_exclude[i]=0;
for (int i = 0; i < n; i++)
j++;
}
}
- xbt_free(to_exclude);
+ delete[] to_exclude;
return MPI_SUCCESS;
}