sg_host_route_latency(h2, h1) + comm_amount21 / sg_host_route_bandwidth(h2, h1));
/* creation of the tasks and their dependencies */
- SD_task_t taskA = SD_task_create("Task A", NULL, 10.0);
- SD_task_t taskB = SD_task_create("Task B", NULL, 40.0);
- SD_task_t taskC = SD_task_create("Task C", NULL, 30.0);
- SD_task_t taskD = SD_task_create("Task D", NULL, 60.0);
+ SD_task_t taskA = SD_task_create("Task A", nullptr, 10.0);
+ SD_task_t taskB = SD_task_create("Task B", nullptr, 40.0);
+ SD_task_t taskC = SD_task_create("Task C", nullptr, 30.0);
+ SD_task_t taskD = SD_task_create("Task D", nullptr, 60.0);
/* try to attach and retrieve user data to a task */
SD_task_set_data(taskA, static_cast<void*>(&comp_amount1));
if (argc == 4) {
XBT_INFO("Hi, I'm going to join the network with id %u", node.getId());
- unsigned int known_id = static_cast<unsigned int>(strtoul(argv[2], NULL, 0));
+ unsigned int known_id = static_cast<unsigned int>(strtoul(argv[2], nullptr, 0));
join_success = node.join(known_id);
deadline = std::stod(argv[3]);
} else {
/* Specify which code should be executed by maestro on another thread, once this current thread is affected to an
* actor by the subsequent sg_actor_attach(). This must be done before the creation of the engine. */
- SIMIX_set_maestro(maestro, NULL);
+ SIMIX_set_maestro(maestro, nullptr);
simgrid::s4u::Engine e(&argc, argv);
std::string full_name = simgrid::s4u::this_actor::get_name() + ":" + file_name;
ACT_DEBUG("Entering Open: %s (filename: %s)", NAME.c_str(), file_name.c_str());
- auto* file = new simgrid::s4u::File(file_name, NULL);
+ auto* file = new simgrid::s4u::File(file_name, nullptr);
opened_files.insert({full_name, file});
args.emplace_back("java");
for (int index = 1; index < argc; index++) {
jstring jval = (jstring)env->GetObjectArrayElement(jargs, index - 1);
- const char* tmp = env->GetStringUTFChars(jval, 0);
+ const char* tmp = env->GetStringUTFChars(jval, nullptr);
args.emplace_back(tmp);
env->ReleaseStringUTFChars(jval, tmp);
}
JNIEXPORT void JNICALL Java_org_simgrid_msg_Msg_createEnvironment(JNIEnv* env, jclass, jstring jplatformFile)
{
- const char *platformFile = env->GetStringUTFChars(jplatformFile, 0);
+ const char* platformFile = env->GetStringUTFChars(jplatformFile, nullptr);
simgrid_load_platform(platformFile);
JNIEXPORT void JNICALL Java_org_simgrid_msg_Msg_debug(JNIEnv* env, jclass, jstring js)
{
- const char *s = env->GetStringUTFChars(js, 0);
+ const char* s = env->GetStringUTFChars(js, nullptr);
XBT_DEBUG("%s", s);
env->ReleaseStringUTFChars(js, s);
}
JNIEXPORT void JNICALL Java_org_simgrid_msg_Msg_verb(JNIEnv* env, jclass, jstring js)
{
- const char *s = env->GetStringUTFChars(js, 0);
+ const char* s = env->GetStringUTFChars(js, nullptr);
XBT_VERB("%s", s);
env->ReleaseStringUTFChars(js, s);
}
JNIEXPORT void JNICALL Java_org_simgrid_msg_Msg_info(JNIEnv* env, jclass, jstring js)
{
- const char *s = env->GetStringUTFChars(js, 0);
+ const char* s = env->GetStringUTFChars(js, nullptr);
XBT_INFO("%s", s);
env->ReleaseStringUTFChars(js, s);
}
JNIEXPORT void JNICALL Java_org_simgrid_msg_Msg_warn(JNIEnv* env, jclass, jstring js)
{
- const char *s = env->GetStringUTFChars(js, 0);
+ const char* s = env->GetStringUTFChars(js, nullptr);
XBT_WARN("%s", s);
env->ReleaseStringUTFChars(js, s);
}
JNIEXPORT void JNICALL Java_org_simgrid_msg_Msg_error(JNIEnv* env, jclass, jstring js)
{
- const char *s = env->GetStringUTFChars(js, 0);
+ const char* s = env->GetStringUTFChars(js, nullptr);
XBT_ERROR("%s", s);
env->ReleaseStringUTFChars(js, s);
}
JNIEXPORT void JNICALL Java_org_simgrid_msg_Msg_critical(JNIEnv* env, jclass, jstring js)
{
- const char *s = env->GetStringUTFChars(js, 0);
+ const char* s = env->GetStringUTFChars(js, nullptr);
XBT_CRITICAL("%s", s);
env->ReleaseStringUTFChars(js, s);
}
JNIEXPORT void JNICALL Java_org_simgrid_msg_Msg_deployApplication(JNIEnv* env, jclass, jstring jdeploymentFile)
{
- const char *deploymentFile = env->GetStringUTFChars(jdeploymentFile, 0);
+ const char* deploymentFile = env->GetStringUTFChars(jdeploymentFile, nullptr);
simgrid_register_default(java_main);
simgrid_load_deployment(deploymentFile);
jxbt_throw_notbound(env, "as", jas);
return nullptr;
}
- const char *name = env->GetStringUTFChars(static_cast<jstring>(jname), 0);
+ const char* name = env->GetStringUTFChars(static_cast<jstring>(jname), nullptr);
const char* property = sg_zone_get_property_value(as, name);
if (not property) {
}
JNIEXPORT void JNICALL Java_org_simgrid_msg_File_open(JNIEnv *env, jobject jfile, jobject jpath) {
- const char *path = env->GetStringUTFChars((jstring) jpath, 0);
+ const char* path = env->GetStringUTFChars((jstring)jpath, nullptr);
sg_file_t file = sg_file_open(path, nullptr);
jfile_bind(env, jfile, file);
jxbt_throw_null(env, "No host can have a null name");
return nullptr;
}
- const char *name = env->GetStringUTFChars(jname, 0);
+ const char* name = env->GetStringUTFChars(jname, nullptr);
/* get the host by name (the hosts are created during the grid resolution) */
sg_host_t host = sg_host_by_name(name);
jxbt_throw_notbound(env, "host", jhost);
return nullptr;
}
- const char *name = env->GetStringUTFChars((jstring) jname, 0);
+ const char* name = env->GetStringUTFChars((jstring)jname, nullptr);
const char* property = sg_host_get_property_value(host, name);
if (not property) {
jxbt_throw_notbound(env, "host", jhost);
return;
}
- const char *name = env->GetStringUTFChars((jstring) jname, 0);
- const char *value_java = env->GetStringUTFChars((jstring) jvalue, 0);
+ const char* name = env->GetStringUTFChars((jstring)jname, nullptr);
+ const char* value_java = env->GetStringUTFChars((jstring)jvalue, nullptr);
const char* value = xbt_strdup(value_java);
sg_host_set_property_value(host, name, value);
if (not host) {
jxbt_throw_notbound(env, "host", jhost);
- return 0;
+ return nullptr;
}
int index = 0;
if (not host) {
jxbt_throw_notbound(env, "host", jhost);
- return 0;
+ return nullptr;
}
jobjectArray jtable;
if (not host) {
jxbt_throw_notbound(env, "host", jhost);
- return 0;
+ return nullptr;
}
return (jobjectArray)sg_host_get_storage_content(host);
}
JNIEXPORT void JNICALL Java_org_simgrid_msg_Host_setAsyncMailbox(JNIEnv * env, jclass cls_arg, jobject jname)
{
- const char *name = env->GetStringUTFChars((jstring) jname, 0);
+ const char* name = env->GetStringUTFChars((jstring)jname, nullptr);
sg_mailbox_set_receiver(name);
env->ReleaseStringUTFChars((jstring) jname, name);
}
/* Actually build the MSG process */
jstring jname = (jstring)env->GetObjectField(jprocess, jprocess_field_Process_name);
- const char* name = env->GetStringUTFChars(jname, 0);
+ const char* name = env->GetStringUTFChars(jname, nullptr);
auto actor_code = [jprocess]() { simgrid::kernel::context::java_main_jprocess(jprocess); };
smx_actor_t self = SIMIX_process_self();
sg_host_t host = jhost_get_native(env, jhost);
jxbt_throw_notbound(env, "process", jprocess);
return nullptr;
}
- const char *name = env->GetStringUTFChars((jstring)jname, 0);
+ const char* name = env->GetStringUTFChars((jstring)jname, nullptr);
const char* property = actor->get_property(name);
if (not property)
jxbt_throw_null(env, "No host can have a null name");
return nullptr;
}
- const char *name = env->GetStringUTFChars(jname, 0);
+ const char* name = env->GetStringUTFChars(jname, nullptr);
storage = sg_storage_get_by_name(name);
if (not storage) { /* invalid name */
jxbt_throw_notbound(env, "storage", jstorage);
return nullptr;
}
- const char *name = env->GetStringUTFChars((jstring) jname, 0);
+ const char* name = env->GetStringUTFChars((jstring)jname, nullptr);
const char* property = sg_storage_get_property_value(storage, name);
if (not property) {
jxbt_throw_notbound(env, "storage", jstorage);
return;
}
- const char *name = env->GetStringUTFChars((jstring) jname, 0);
- const char *value_java = env->GetStringUTFChars((jstring) jvalue, 0);
+ const char* name = env->GetStringUTFChars((jstring)jname, nullptr);
+ const char* value_java = env->GetStringUTFChars((jstring)jvalue, nullptr);
storage->set_property(name, std::string(value_java));
const char *name = nullptr; /* the name of the task */
if (jname)
- name = env->GetStringUTFChars(jname, 0);
+ name = env->GetStringUTFChars(jname, nullptr);
msg_task_t task = MSG_task_create(name, jflopsAmount, jbytesAmount, jtask);
if (jname)
env->ReleaseStringUTFChars(jname, name);
{
int host_count = env->GetArrayLength(jhosts);
- jdouble* jcomputeDurations = env->GetDoubleArrayElements(jcomputeDurations_arg, 0);
+ jdouble* jcomputeDurations = env->GetDoubleArrayElements(jcomputeDurations_arg, nullptr);
msg_host_t* hosts = new msg_host_t[host_count];
double* computeDurations = new double[host_count];
for (int index = 0; index < host_count; index++) {
}
env->ReleaseDoubleArrayElements(jcomputeDurations_arg, jcomputeDurations, 0);
- jdouble* jmessageSizes = env->GetDoubleArrayElements(jmessageSizes_arg, 0);
+ jdouble* jmessageSizes = env->GetDoubleArrayElements(jmessageSizes_arg, nullptr);
double* messageSizes = new double[host_count * host_count];
for (int index = 0; index < host_count * host_count; index++) {
messageSizes[index] = jmessageSizes[index];
env->ReleaseDoubleArrayElements(jmessageSizes_arg, jmessageSizes, 0);
/* get the C string from the java string */
- const char* name = env->GetStringUTFChars(jname, 0);
+ const char* name = env->GetStringUTFChars(jname, nullptr);
msg_task_t task = MSG_parallel_task_create(name, host_count, hosts, computeDurations, messageSizes, jtask);
env->ReleaseStringUTFChars(jname, name);
jxbt_throw_notbound(env, "task", jtask);
return;
}
- const char *name = env->GetStringUTFChars((jstring) jname, 0);
+ const char* name = env->GetStringUTFChars((jstring)jname, nullptr);
env->SetObjectField(jtask, jtask_field_Task_name, jname);
MSG_task_set_name(task, name);
/* Add a global ref into the Ctask so that the receiver can use it */
MSG_task_set_data(task, (void *) env->NewGlobalRef(jtask));
- const char* alias = env->GetStringUTFChars(jalias, 0);
+ const char* alias = env->GetStringUTFChars(jalias, nullptr);
msg_error_t res = MSG_task_send_with_timeout_bounded(task, alias, jtimeout, maxrate);
env->ReleaseStringUTFChars(jalias, alias);
{
msg_task_t task = nullptr;
- const char *alias = env->GetStringUTFChars(jalias, 0);
+ const char* alias = env->GetStringUTFChars(jalias, nullptr);
msg_error_t rv;
if (not simgrid::ForcefulKillException::try_n_catch(
[&rv, &task, &alias, &jtimeout]() { rv = MSG_task_receive_with_timeout(&task, alias, (double)jtimeout); })) {
return nullptr;
}
- const char* mailbox = env->GetStringUTFChars(jmailbox, 0);
+ const char* mailbox = env->GetStringUTFChars(jmailbox, nullptr);
msg_comm_t comm = MSG_task_irecv(task, mailbox);
env->ReleaseStringUTFChars(jmailbox, mailbox);
{
msg_task_t task = nullptr;
- const char *alias = env->GetStringUTFChars(jalias, 0);
+ const char* alias = env->GetStringUTFChars(jalias, nullptr);
msg_error_t res = MSG_task_receive_with_timeout_bounded(&task, alias, jtimeout, rate);
if (env->ExceptionOccurred())
return nullptr;
return nullptr;
}
- const char* mailbox = env->GetStringUTFChars(jmailbox, 0);
+ const char* mailbox = env->GetStringUTFChars(jmailbox, nullptr);
msg_comm_t comm = MSG_task_irecv_bounded(task, mailbox, rate);
env->ReleaseStringUTFChars(jmailbox, mailbox);
return nullptr;
jobject jcomm = env->NewObject(comm_class, jtask_method_Comm_constructor);
- const char* mailbox = env->GetStringUTFChars(jmailbox, 0);
+ const char* mailbox = env->GetStringUTFChars(jmailbox, nullptr);
msg_task_t task = jtask_to_native(jtask, env);
return nullptr;
jcomm = env->NewObject(comm_class, jtask_method_Comm_constructor);
- mailbox = env->GetStringUTFChars(jmailbox, 0);
+ mailbox = env->GetStringUTFChars(jmailbox, nullptr);
task = jtask_to_native(jtask, env);
JNIEXPORT void JNICALL Java_org_simgrid_msg_Task_dsend(JNIEnv * env, jobject jtask, jstring jalias)
{
- const char *alias = env->GetStringUTFChars(jalias, 0);
+ const char* alias = env->GetStringUTFChars(jalias, nullptr);
msg_task_t task = jtask_to_native(jtask, env);
JNIEXPORT void JNICALL Java_org_simgrid_msg_Task_dsendBounded(JNIEnv * env, jobject jtask, jstring jalias,
jdouble maxrate)
{
- const char *alias = env->GetStringUTFChars(jalias, 0);
+ const char* alias = env->GetStringUTFChars(jalias, nullptr);
msg_task_t task = jtask_to_native(jtask, env);
JNIEXPORT jboolean JNICALL Java_org_simgrid_msg_Task_listen(JNIEnv * env, jclass cls, jstring jalias)
{
- const char *alias = env->GetStringUTFChars(jalias, 0);
+ const char* alias = env->GetStringUTFChars(jalias, nullptr);
int rv = MSG_task_listen(alias);
env->ReleaseStringUTFChars(jalias, alias);
JNIEXPORT jint JNICALL Java_org_simgrid_msg_Task_listenFrom(JNIEnv * env, jclass cls, jstring jalias)
{
- const char *alias = env->GetStringUTFChars(jalias, 0);
+ const char* alias = env->GetStringUTFChars(jalias, nullptr);
int rv = MSG_task_listen_from(alias);
env->ReleaseStringUTFChars(jalias, alias);
{
sg_host_t host = jhost_get_native(env, jHost);
- const char* name = env->GetStringUTFChars(jname, 0);
+ const char* name = env->GetStringUTFChars(jname, nullptr);
sg_vm_t vm = sg_vm_create_migratable(host, name, static_cast<int>(coreAmount), static_cast<int>(jramsize),
static_cast<int>(jmig_netspeed), static_cast<int>(jdp_intensity));
env->ReleaseStringUTFChars(jname, name);
jxbt_throw_null(env, "No VM can have a null name");
return nullptr;
}
- const char* name = env->GetStringUTFChars(jname, 0);
+ const char* name = env->GetStringUTFChars(jname, nullptr);
/* get the VM by name (VMs are just special hosts, unfortunately) */
auto const* host = sg_host_by_name(name);
JNIEXPORT void JNICALL Java_org_simgrid_trace_Trace_hostStateDeclare(JNIEnv * env, jclass cls, jstring js)
{
- const char *s = env->GetStringUTFChars(js, 0);
+ const char* s = env->GetStringUTFChars(js, nullptr);
TRACE_host_state_declare(s);
env->ReleaseStringUTFChars(js, s);
}
JNIEXPORT void JNICALL Java_org_simgrid_trace_Trace_hostStateDeclareValue (JNIEnv *env, jclass cls, jstring js_state,
jstring js_value, jstring js_color)
{
- const char *state = env->GetStringUTFChars(js_state, 0);
- const char *value = env->GetStringUTFChars(js_value, 0);
- const char *color = env->GetStringUTFChars(js_color, 0);
+ const char* state = env->GetStringUTFChars(js_state, nullptr);
+ const char* value = env->GetStringUTFChars(js_value, nullptr);
+ const char* color = env->GetStringUTFChars(js_color, nullptr);
TRACE_host_state_declare_value(state, value, color);
JNIEXPORT void JNICALL Java_org_simgrid_trace_Trace_hostSetState (JNIEnv *env, jclass cls, jstring js_host,
jstring js_state, jstring js_value)
{
- const char *host = env->GetStringUTFChars(js_host, 0);
- const char *state = env->GetStringUTFChars(js_state, 0);
- const char *value = env->GetStringUTFChars(js_value, 0);
+ const char* host = env->GetStringUTFChars(js_host, nullptr);
+ const char* state = env->GetStringUTFChars(js_state, nullptr);
+ const char* value = env->GetStringUTFChars(js_value, nullptr);
TRACE_host_set_state(host, state, value);
JNIEXPORT void JNICALL Java_org_simgrid_trace_Trace_hostPushState (JNIEnv *env, jclass cls, jstring js_host,
jstring js_state, jstring js_value)
{
- const char *host = env->GetStringUTFChars(js_host, 0);
- const char *state = env->GetStringUTFChars(js_state, 0);
- const char *value = env->GetStringUTFChars(js_value, 0);
+ const char* host = env->GetStringUTFChars(js_host, nullptr);
+ const char* state = env->GetStringUTFChars(js_state, nullptr);
+ const char* value = env->GetStringUTFChars(js_value, nullptr);
TRACE_host_push_state(host, state, value);
JNIEXPORT void JNICALL Java_org_simgrid_trace_Trace_hostPopState (JNIEnv *env, jclass cls, jstring js_host,
jstring js_state)
{
- const char *host = env->GetStringUTFChars(js_host, 0);
- const char *state = env->GetStringUTFChars(js_state, 0);
+ const char* host = env->GetStringUTFChars(js_host, nullptr);
+ const char* state = env->GetStringUTFChars(js_state, nullptr);
TRACE_host_pop_state(host, state);
JNIEXPORT void JNICALL Java_org_simgrid_trace_Trace_hostVariableDeclare (JNIEnv *env, jclass cls, jstring js_state)
{
- const char *state = env->GetStringUTFChars(js_state, 0);
+ const char* state = env->GetStringUTFChars(js_state, nullptr);
TRACE_host_variable_declare(state);
env->ReleaseStringUTFChars(js_state, state);
}
JNIEXPORT void JNICALL Java_org_simgrid_trace_Trace_hostVariableSet (JNIEnv *env, jclass cls, jstring js_host,
jstring js_state, jdouble value)
{
- const char *host = env->GetStringUTFChars(js_host, 0);
- const char *state = env->GetStringUTFChars(js_state, 0);
+ const char* host = env->GetStringUTFChars(js_host, nullptr);
+ const char* state = env->GetStringUTFChars(js_state, nullptr);
TRACE_host_variable_set(host, state, value);
JNIEXPORT void JNICALL Java_org_simgrid_trace_Trace_hostVariableSub (JNIEnv *env, jclass cls, jstring js_host,
jstring js_state, jdouble value)
{
- const char *host = env->GetStringUTFChars(js_host, 0);
- const char *state = env->GetStringUTFChars(js_state, 0);
+ const char* host = env->GetStringUTFChars(js_host, nullptr);
+ const char* state = env->GetStringUTFChars(js_state, nullptr);
TRACE_host_variable_sub(host, state, value);
JNIEXPORT void JNICALL Java_org_simgrid_trace_Trace_vmVariableDeclare (JNIEnv *env, jclass cls, jstring js_state)
{
- const char *state = env->GetStringUTFChars(js_state, 0);
+ const char* state = env->GetStringUTFChars(js_state, nullptr);
TRACE_vm_variable_declare(state);
JNIEXPORT void JNICALL Java_org_simgrid_trace_Trace_vmVariableSet (JNIEnv *env, jclass cls, jstring js_vm,
jstring js_state, jdouble value)
{
- const char *vm = env->GetStringUTFChars(js_vm, 0);
- const char *state = env->GetStringUTFChars(js_state, 0);
+ const char* vm = env->GetStringUTFChars(js_vm, nullptr);
+ const char* state = env->GetStringUTFChars(js_state, nullptr);
TRACE_vm_variable_set(vm, state, value);
JNIEXPORT void JNICALL Java_org_simgrid_trace_Trace_hostVariableAdd (JNIEnv *env, jclass cls, jstring js_host,
jstring js_state, jdouble value)
{
- const char *host = env->GetStringUTFChars(js_host, 0);
- const char *state = env->GetStringUTFChars(js_state, 0);
+ const char* host = env->GetStringUTFChars(js_host, nullptr);
+ const char* state = env->GetStringUTFChars(js_state, nullptr);
TRACE_host_variable_set(host, state, value);
}
JNIEXPORT void JNICALL Java_org_simgrid_trace_Trace_linkVariableDeclare (JNIEnv *env, jclass cls, jstring jvar) {
- const char *variable = env->GetStringUTFChars(jvar, 0);
+ const char* variable = env->GetStringUTFChars(jvar, nullptr);
TRACE_link_variable_declare (variable);
env->ReleaseStringUTFChars(jvar, variable);
}
JNIEXPORT void JNICALL Java_org_simgrid_trace_Trace_linkVariableDeclareWithColor (JNIEnv *env, jclass cls, jstring jvar, jstring jcolor) {
- const char *variable = env->GetStringUTFChars(jvar, 0);
- const char *color = env->GetStringUTFChars(jcolor, 0);
+ const char* variable = env->GetStringUTFChars(jvar, nullptr);
+ const char* color = env->GetStringUTFChars(jcolor, nullptr);
TRACE_link_variable_declare_with_color(variable,color);
env->ReleaseStringUTFChars(jvar, variable);
env->ReleaseStringUTFChars(jcolor, color);
}
JNIEXPORT void JNICALL Java_org_simgrid_trace_Trace_linkVariableSet (JNIEnv *env, jclass cls, jstring jlink, jstring jvar, jdouble jvalue) {
- const char *link = env->GetStringUTFChars(jlink, 0);
- const char *variable = env->GetStringUTFChars(jvar, 0);
+ const char* link = env->GetStringUTFChars(jlink, nullptr);
+ const char* variable = env->GetStringUTFChars(jvar, nullptr);
TRACE_link_variable_set(link, variable, jvalue);
env->ReleaseStringUTFChars(jlink, link);
env->ReleaseStringUTFChars(jvar, variable);
JNIEXPORT void JNICALL Java_org_simgrid_trace_Trace_linkSrcDstVariableSet
(JNIEnv *env, jclass cls, jstring jsrc, jstring jdst, jstring jvar, jdouble jval)
{
- const char *src = env->GetStringUTFChars(jsrc, 0);
- const char *dst = env->GetStringUTFChars(jdst, 0);
- const char *variable = env->GetStringUTFChars(jvar, 0);
+ const char* src = env->GetStringUTFChars(jsrc, nullptr);
+ const char* dst = env->GetStringUTFChars(jdst, nullptr);
+ const char* variable = env->GetStringUTFChars(jvar, nullptr);
TRACE_link_srcdst_variable_set(src,dst,variable, jval);
env->ReleaseStringUTFChars(jsrc, src);
env->ReleaseStringUTFChars(jdst, dst);
jmethodID id;
if (not cls)
- return 0;
+ return nullptr;
id = env->GetMethodID(cls, name, signature);
if (not id) {
jmethodID tostr_id = env->GetMethodID(cls, "getName", "()Ljava/lang/String;");
jstring jclassname = (jstring) env->CallObjectMethod(cls, tostr_id, nullptr);
- const char *classname = env->GetStringUTFChars(jclassname, 0);
+ const char* classname = env->GetStringUTFChars(jclassname, nullptr);
env->ReleaseStringUTFChars(jclassname, classname);
jxbt_throw_jni(env, std::string("Cannot find method") + name + "(" + signature + ") in " + classname);
- return 0;
+ return nullptr;
}
return id;
jmethodID id;
if (not cls)
- return 0;
+ return nullptr;
id = env->GetStaticMethodID(cls, name, signature);
if (not id) {
jmethodID tostr_id = env->GetMethodID(cls, "getName", "()Ljava/lang/String;");
jstring jclassname = (jstring) env->CallObjectMethod(cls, tostr_id, nullptr);
- const char *classname = env->GetStringUTFChars(jclassname, 0);
+ const char* classname = env->GetStringUTFChars(jclassname, nullptr);
env->ReleaseStringUTFChars(jclassname, classname);
jxbt_throw_jni(env, std::string("Cannot find static method") + name + "(" + signature + ") in " + classname);
- return 0;
+ return nullptr;
}
return id;
cls = jxbt_get_class(env, classname);
if (not cls)
- return 0;
+ return nullptr;
id = env->GetStaticMethodID(cls, name, signature);
if (not id) {
jxbt_throw_jni(env, std::string("Cannot find static method") + name + "(" + signature + ") in " + classname);
- return 0;
+ return nullptr;
}
return id;
}
cls = jxbt_get_class(env, classname);
if (not cls)
- return 0;
+ return nullptr;
id = env->GetMethodID(cls, name, signature);
if (not id) {
jxbt_throw_jni(env, std::string("Cannot find method") + name + "(" + signature + ") in " + classname);
- return 0;
+ return nullptr;
}
return id;
}
jfieldID id;
if (not cls)
- return 0;
+ return nullptr;
id = env->GetFieldID(cls, name, signature);
if (not id) {
jmethodID getname_id = env->GetMethodID(cls, "getName", "()Ljava/lang/String;");
jstring jclassname = (jstring) env->CallObjectMethod(cls, getname_id, nullptr);
- const char *classname = env->GetStringUTFChars(jclassname, 0);
+ const char* classname = env->GetStringUTFChars(jclassname, nullptr);
env->ReleaseStringUTFChars(jclassname, classname);
jxbt_throw_jni(env, std::string("Cannot find field") + signature + " " + name + " in " + classname);
- return 0;
+ return nullptr;
}
return id;
jfieldID id;
if (not cls)
- return 0;
+ return nullptr;
id = env->GetFieldID(cls, name, signature);
if (not id) {
jxbt_throw_jni(env, std::string("Cannot find field") + signature + " " + name + " in " + classname);
- return 0;
+ return nullptr;
}
return id;
return 1;
}
-static const luaL_Reg host_meta[] = {
- {"__tostring", l_host_tostring},
- {0, 0}
-};
+static const luaL_Reg host_meta[] = {{"__tostring", l_host_tostring}, {nullptr, nullptr}};
/**
* @brief Registers the host functions into the table simgrid.host.
}
if (timeout < 0.0) {
- simcall->timeout_cb_ = NULL;
+ simcall->timeout_cb_ = nullptr;
} else {
simcall->timeout_cb_ = simgrid::simix::Timer::set(SIMIX_get_clock() + timeout, [simcall]() {
simcall->timeout_cb_ = nullptr;
static bool MC_dwarf_attr_flag(Dwarf_Die* die, int attribute, bool integrate)
{
Dwarf_Attribute attr;
- if ((integrate ? dwarf_attr_integrate(die, attribute, &attr) : dwarf_attr(die, attribute, &attr)) == 0)
+ if ((integrate ? dwarf_attr_integrate(die, attribute, &attr) : dwarf_attr(die, attribute, &attr)) == nullptr)
return false;
bool result;
base_ = event_base_new();
socket_event_ = event_new(base_, get_channel().get_socket(), EV_READ | EV_PERSIST, handler, this);
- event_add(socket_event_, NULL);
+ event_add(socket_event_, nullptr);
signal_event_ = event_new(base_, SIGCHLD, EV_SIGNAL | EV_PERSIST, handler, this);
- event_add(signal_event_, NULL);
+ event_add(signal_event_, nullptr);
}
void CheckerSide::dispatch()
Engine* Engine::get_instance()
{
if (Engine::instance_ == nullptr) {
- auto e = new Engine(0, nullptr);
+ auto e = new Engine(nullptr, nullptr);
xbt_assert(Engine::instance_ == e);
}
return Engine::instance_;
*flag=1;
simgrid::smpi::Status::empty(status);
return MPI_SUCCESS;
- } else if (flag==NULL || status ==NULL){
+ } else if (flag == nullptr || status == nullptr) {
return MPI_ERR_ARG;
}
return simgrid::smpi::Request::get_status(request,flag,status);
/*When data in each socket is different*/
if (not comm->is_uniform()) {
- int *node_sizes = NULL;
- int i = 0;
+ int* node_sizes = nullptr;
+ int i = 0;
- node_sizes = comm->get_non_uniform_map();
+ node_sizes = comm->get_non_uniform_map();
- int* displs = new int[leader_comm_size];
- int* recvcnts = new int[leader_comm_size];
- recvcnts[0] = node_sizes[0] * recvcnt;
- displs[0] = 0;
-
- for (i = 1; i < leader_comm_size; i++) {
- displs[i] = displs[i - 1] + node_sizes[i - 1] * recvcnt;
- recvcnts[i] = node_sizes[i] * recvcnt;
- }
+ int* displs = new int[leader_comm_size];
+ int* recvcnts = new int[leader_comm_size];
+ recvcnts[0] = node_sizes[0] * recvcnt;
+ displs[0] = 0;
+ for (i = 1; i < leader_comm_size; i++) {
+ displs[i] = displs[i - 1] + node_sizes[i - 1] * recvcnt;
+ recvcnts[i] = node_sizes[i] * recvcnt;
+ }
void* sendbuf=((char*)recvbuf)+recvtype->get_extent()*displs[leader_comm->rank()];
int i, even_rank;
int err = 0;
ptrdiff_t slb, rlb, sext, rext;
- char *tmpsend = NULL, *tmprecv = NULL;
+ char *tmpsend = nullptr, *tmprecv = nullptr;
size = comm->size();
rank = comm->rank();
MPI_Comm comm)
{
- char * sbuf = NULL, * rbuf = NULL;
+ char *sbuf = nullptr, *rbuf = nullptr;
int soffset, roffset;
int torecv=0, tosend=0, min, rank, comm_size;
int sendnow, recvnow;
int sendto, recvfrom, blockcount, i;
unsigned int distance;
ptrdiff_t slb, rlb, sext, rext;
- char *tmpsend = NULL, *tmprecv = NULL;
+ char *tmpsend = nullptr, *tmprecv = nullptr;
MPI_Datatype new_rdtype = MPI_DATATYPE_NULL, new_sdtype = MPI_DATATYPE_NULL;
unsigned int size = comm->size();
int i, even_rank;
int err = 0;
ptrdiff_t slb, rlb, sext, rext;
- char *tmpsend = NULL, *tmprecv = NULL;
-
+ char *tmpsend = nullptr, *tmprecv = nullptr;
size = comm->size();
rank = comm->rank();
int local_rank = -1, local_size = 0;
//if not set (use of the algo directly, without mvapich2 selector)
- if(MV2_Allreduce_intra_function==NULL)
+ if (MV2_Allreduce_intra_function == nullptr)
MV2_Allreduce_intra_function = allreduce__mpich;
- if(MV2_Allreducection==NULL)
+ if (MV2_Allreducection == nullptr)
MV2_Allreducection = allreduce__rdb;
if(comm->get_leaders_comm()==MPI_COMM_NULL){
int block_count;
unsigned int inbi;
size_t typelng;
- char *tmpsend = NULL, *tmprecv = NULL;
+ char *tmpsend = nullptr, *tmprecv = nullptr;
unsigned char* inbuf[2] = {nullptr, nullptr};
ptrdiff_t true_extent, extent;
ptrdiff_t block_offset, max_real_segsize;
- MPI_Request reqs[2] = {NULL, NULL};
+ MPI_Request reqs[2] = {nullptr, nullptr};
const size_t segsize = 1 << 20; /* 1 MB */
int size = comm->size();
int rank = comm->rank();
/* Allocate and initialize temporary buffers */
inbuf[0] = smpi_get_tmp_sendbuffer(max_real_segsize);
- if (NULL == inbuf[0]) { ret = -1; line = __LINE__; goto error_hndl; }
+ if (nullptr == inbuf[0]) {
+ ret = -1;
+ line = __LINE__;
+ goto error_hndl;
+ }
if (size > 2) {
inbuf[1] = smpi_get_tmp_recvbuffer(max_real_segsize);
if (nullptr == inbuf[1]) {
send_chunk = send_type->get_extent();
recv_chunk = recv_type->get_extent();
- win=new Win(recv_buff, num_procs * recv_chunk * send_count, recv_chunk, 0,
- comm);
+ win = new Win(recv_buff, num_procs * recv_chunk * send_count, recv_chunk, nullptr, comm);
send_chunk *= send_count;
recv_chunk *= recv_count;
shmem_comm = comm->get_intra_comm();
local_rank = shmem_comm->rank();
/* do the intranode barrier on all nodes */
- if (shmem_comm != NULL) {
- mpi_errno = barrier__mpich(shmem_comm);
- if (mpi_errno) {
- mpi_errno_ret+=mpi_errno;
- }
+ if (shmem_comm != nullptr) {
+ mpi_errno = barrier__mpich(shmem_comm);
+ if (mpi_errno) {
+ mpi_errno_ret += mpi_errno;
+ }
}
leader_comm = comm->get_leaders_comm();
/* do the barrier across roots of all nodes */
- if (leader_comm != NULL && local_rank == 0) {
- mpi_errno = barrier__mpich(leader_comm);
- if (mpi_errno) {
- mpi_errno_ret+=mpi_errno;
- }
+ if (leader_comm != nullptr && local_rank == 0) {
+ mpi_errno = barrier__mpich(leader_comm);
+ if (mpi_errno) {
+ mpi_errno_ret += mpi_errno;
+ }
}
/* release the local processes on each node with a 1-byte
* broadcast (0-byte broadcast just returns without doing
* anything) */
- if (shmem_comm != NULL) {
- int i = 0;
- mpi_errno = bcast__mpich(&i, 1, MPI_BYTE, 0, shmem_comm);
- if (mpi_errno) {
- mpi_errno_ret+=mpi_errno;
- }
+ if (shmem_comm != nullptr) {
+ int i = 0;
+ mpi_errno = bcast__mpich(&i, 1, MPI_BYTE, 0, shmem_comm);
+ if (mpi_errno) {
+ mpi_errno_ret += mpi_errno;
+ }
}
if (mpi_errno_ret)
if (rank < surfeit) {
/* get the fanin letter from the upper "half" process: */
dst = N2_prev + rank;
- Request::recv(NULL, 0, MPI_BYTE, dst, COLL_TAG_BARRIER,
- comm, MPI_STATUS_IGNORE);
+ Request::recv(nullptr, 0, MPI_BYTE, dst, COLL_TAG_BARRIER, comm, MPI_STATUS_IGNORE);
}
/* combine on embedded N2_prev power-of-two processes */
for (d = 1; d < N2_prev; d <<= 1) {
dst = (rank ^ d);
- Request::sendrecv(NULL, 0, MPI_BYTE, dst, COLL_TAG_BARRIER, NULL,
- 0, MPI_BYTE, dst, COLL_TAG_BARRIER, comm,
- MPI_STATUS_IGNORE);
+ Request::sendrecv(nullptr, 0, MPI_BYTE, dst, COLL_TAG_BARRIER, nullptr, 0, MPI_BYTE, dst, COLL_TAG_BARRIER,
+ comm, MPI_STATUS_IGNORE);
}
/* fanout data to nodes above N2_prev... */
if (rank < surfeit) {
dst = N2_prev + rank;
- Request::send(NULL, 0, MPI_BYTE, dst, COLL_TAG_BARRIER,
- comm);
+ Request::send(nullptr, 0, MPI_BYTE, dst, COLL_TAG_BARRIER, comm);
}
} else {
/* fanin data to power of 2 subset */
src = rank - N2_prev;
- Request::sendrecv(NULL, 0, MPI_BYTE, src, COLL_TAG_BARRIER,
- NULL, 0, MPI_BYTE, src, COLL_TAG_BARRIER,
- comm, MPI_STATUS_IGNORE);
+ Request::sendrecv(nullptr, 0, MPI_BYTE, src, COLL_TAG_BARRIER, nullptr, 0, MPI_BYTE, src, COLL_TAG_BARRIER,
+ comm, MPI_STATUS_IGNORE);
}
return mpi_errno;
right = ((rank+1)%size);
if (rank > 0) { /* receive message from the left */
- Request::recv((void*)NULL, 0, MPI_BYTE, left,
- COLL_TAG_BARRIER, comm,
- MPI_STATUS_IGNORE);
+ Request::recv(nullptr, 0, MPI_BYTE, left, COLL_TAG_BARRIER, comm, MPI_STATUS_IGNORE);
}
/* Send message to the right */
- Request::send((void*)NULL, 0, MPI_BYTE, right,
- COLL_TAG_BARRIER,
- comm);
+ Request::send(nullptr, 0, MPI_BYTE, right, COLL_TAG_BARRIER, comm);
/* root needs to receive from the last node */
if (rank == 0) {
- Request::recv((void*)NULL, 0, MPI_BYTE, left,
- COLL_TAG_BARRIER, comm,
- MPI_STATUS_IGNORE);
+ Request::recv(nullptr, 0, MPI_BYTE, left, COLL_TAG_BARRIER, comm, MPI_STATUS_IGNORE);
}
/* Allow nodes to exit */
if (rank > 0) { /* post Receive from left */
- Request::recv((void*)NULL, 0, MPI_BYTE, left,
- COLL_TAG_BARRIER, comm,
- MPI_STATUS_IGNORE);
+ Request::recv(nullptr, 0, MPI_BYTE, left, COLL_TAG_BARRIER, comm, MPI_STATUS_IGNORE);
}
/* send message to the right one */
- Request::send((void*)NULL, 0, MPI_BYTE, right,
- COLL_TAG_BARRIER,
- comm);
+ Request::send(nullptr, 0, MPI_BYTE, right, COLL_TAG_BARRIER, comm);
/* rank 0 post receive from the last node */
if (rank == 0) {
- Request::recv((void*)NULL, 0, MPI_BYTE, left,
- COLL_TAG_BARRIER, comm,
- MPI_STATUS_IGNORE);
+ Request::recv(nullptr, 0, MPI_BYTE, left, COLL_TAG_BARRIER, comm, MPI_STATUS_IGNORE);
}
return MPI_SUCCESS;
if (rank >= adjsize) {
/* send message to lower ranked node */
remote = rank - adjsize;
- Request::sendrecv(NULL, 0, MPI_BYTE, remote,
- COLL_TAG_BARRIER,
- NULL, 0, MPI_BYTE, remote,
- COLL_TAG_BARRIER,
- comm, MPI_STATUS_IGNORE);
+ Request::sendrecv(nullptr, 0, MPI_BYTE, remote, COLL_TAG_BARRIER, nullptr, 0, MPI_BYTE, remote,
+ COLL_TAG_BARRIER, comm, MPI_STATUS_IGNORE);
} else if (rank < (size - adjsize)) {
/* receive message from high level rank */
- Request::recv((void*)NULL, 0, MPI_BYTE, rank+adjsize,
- COLL_TAG_BARRIER, comm,
- MPI_STATUS_IGNORE);
-
+ Request::recv(nullptr, 0, MPI_BYTE, rank + adjsize, COLL_TAG_BARRIER, comm, MPI_STATUS_IGNORE);
}
}
if (remote >= adjsize) continue;
/* post receive from the remote node */
- Request::sendrecv(NULL, 0, MPI_BYTE, remote,
- COLL_TAG_BARRIER,
- NULL, 0, MPI_BYTE, remote,
- COLL_TAG_BARRIER,
- comm, MPI_STATUS_IGNORE);
+ Request::sendrecv(nullptr, 0, MPI_BYTE, remote, COLL_TAG_BARRIER, nullptr, 0, MPI_BYTE, remote,
+ COLL_TAG_BARRIER, comm, MPI_STATUS_IGNORE);
}
}
if (rank < (size - adjsize)) {
/* send enter message to higher ranked node */
remote = rank + adjsize;
- Request::send((void*)NULL, 0, MPI_BYTE, remote,
- COLL_TAG_BARRIER,
- comm);
-
+ Request::send(nullptr, 0, MPI_BYTE, remote, COLL_TAG_BARRIER, comm);
}
}
to = (rank + distance) % size;
/* send message to lower ranked node */
- Request::sendrecv(NULL, 0, MPI_BYTE, to,
- COLL_TAG_BARRIER,
- NULL, 0, MPI_BYTE, from,
- COLL_TAG_BARRIER,
- comm, MPI_STATUS_IGNORE);
+ Request::sendrecv(nullptr, 0, MPI_BYTE, to, COLL_TAG_BARRIER, nullptr, 0, MPI_BYTE, from, COLL_TAG_BARRIER,
+ comm, MPI_STATUS_IGNORE);
}
return MPI_SUCCESS;
"ompi_coll_tuned_barrier_ompi_two_procs rank %d", remote);
remote = (remote + 1) & 0x1;
- Request::sendrecv(NULL, 0, MPI_BYTE, remote,
- COLL_TAG_BARRIER,
- NULL, 0, MPI_BYTE, remote,
- COLL_TAG_BARRIER,
- comm, MPI_STATUS_IGNORE);
+ Request::sendrecv(nullptr, 0, MPI_BYTE, remote, COLL_TAG_BARRIER, nullptr, 0, MPI_BYTE, remote, COLL_TAG_BARRIER,
+ comm, MPI_STATUS_IGNORE);
return (MPI_SUCCESS);
}
/* All non-root send & receive zero-length message. */
if (rank > 0) {
- Request::send (NULL, 0, MPI_BYTE, 0,
- COLL_TAG_BARRIER,
- comm);
+ Request::send(nullptr, 0, MPI_BYTE, 0, COLL_TAG_BARRIER, comm);
- Request::recv (NULL, 0, MPI_BYTE, 0,
- COLL_TAG_BARRIER,
- comm, MPI_STATUS_IGNORE);
+ Request::recv(nullptr, 0, MPI_BYTE, 0, COLL_TAG_BARRIER, comm, MPI_STATUS_IGNORE);
}
/* The root collects and broadcasts the messages. */
requests = new MPI_Request[size];
for (i = 1; i < size; ++i) {
- requests[i] = Request::irecv(NULL, 0, MPI_BYTE, i, COLL_TAG_BARRIER, comm);
+ requests[i] = Request::irecv(nullptr, 0, MPI_BYTE, i, COLL_TAG_BARRIER, comm);
}
Request::waitall( size-1, requests+1, MPI_STATUSES_IGNORE );
for (i = 1; i < size; ++i) {
- requests[i] = Request::isend(NULL, 0, MPI_BYTE, i,
- COLL_TAG_BARRIER,
- comm
- );
+ requests[i] = Request::isend(nullptr, 0, MPI_BYTE, i, COLL_TAG_BARRIER, comm);
}
Request::waitall( size-1, requests+1, MPI_STATUSES_IGNORE );
delete[] requests;
partner = rank ^ jump;
if (!(partner & (jump-1)) && partner < size) {
if (partner > rank) {
- Request::recv (NULL, 0, MPI_BYTE, partner,
- COLL_TAG_BARRIER, comm,
- MPI_STATUS_IGNORE);
+ Request::recv(nullptr, 0, MPI_BYTE, partner, COLL_TAG_BARRIER, comm, MPI_STATUS_IGNORE);
} else if (partner < rank) {
- Request::send (NULL, 0, MPI_BYTE, partner,
- COLL_TAG_BARRIER,
- comm);
+ Request::send(nullptr, 0, MPI_BYTE, partner, COLL_TAG_BARRIER, comm);
}
}
}
partner = rank ^ jump;
if (!(partner & (jump-1)) && partner < size) {
if (partner > rank) {
- Request::send (NULL, 0, MPI_BYTE, partner,
- COLL_TAG_BARRIER,
- comm);
+ Request::send(nullptr, 0, MPI_BYTE, partner, COLL_TAG_BARRIER, comm);
} else if (partner < rank) {
- Request::recv (NULL, 0, MPI_BYTE, partner,
- COLL_TAG_BARRIER, comm,
- MPI_STATUS_IGNORE);
+ Request::recv(nullptr, 0, MPI_BYTE, partner, COLL_TAG_BARRIER, comm, MPI_STATUS_IGNORE);
}
}
}
rank = comm->rank();
//comm_size = comm->size();
-
- if (MV2_Bcast_function==NULL){
+ if (MV2_Bcast_function == nullptr) {
MV2_Bcast_function = bcast__mpich;
}
- if (MV2_Bcast_intra_node_function==NULL){
+ if (MV2_Bcast_intra_node_function == nullptr) {
MV2_Bcast_intra_node_function = bcast__mpich;
}
int mpi_errno = MPI_SUCCESS;
int src, dst, mask, relative_rank;
int k;
- if (MV2_Bcast_function==NULL){
+ if (MV2_Bcast_function == nullptr) {
MV2_Bcast_function = bcast__mpich;
}
- if (MV2_Bcast_intra_node_function==NULL){
+ if (MV2_Bcast_intra_node_function == nullptr) {
MV2_Bcast_intra_node_function = bcast__mpich;
}
if (count == 0)
return MPI_SUCCESS;
- if (MV2_Bcast_function==NULL){
+ if (MV2_Bcast_function == nullptr) {
MV2_Bcast_function = bcast__mpich;
}
- if (MV2_Bcast_intra_node_function==NULL){
+ if (MV2_Bcast_intra_node_function == nullptr) {
MV2_Bcast_intra_node_function = bcast__mpich;
}
char *tmpbuf;
ptrdiff_t extent;
MPI_Request recv_reqs[2] = {MPI_REQUEST_NULL, MPI_REQUEST_NULL};
- MPI_Request *send_reqs = NULL;
+ MPI_Request* send_reqs = nullptr;
int req_index;
/**
if (fanout<1) {
XBT_DEBUG("coll:tuned:topo_build_tree invalid fanout %d", fanout);
- return NULL;
+ return nullptr;
}
if (fanout>MAXTREEFANOUT) {
XBT_DEBUG("coll:tuned:topo_build_tree invalid fanout %d bigger than max %d", fanout, MAXTREEFANOUT);
- return NULL;
+ return nullptr;
}
/*
tree = new ompi_coll_tree_t;
if (not tree) {
XBT_DEBUG("coll:tuned:topo_build_tree PANIC::out of memory");
- return NULL;
+ return nullptr;
}
/*
tree = new ompi_coll_tree_t;
if (not tree) {
XBT_DEBUG("coll:tuned:topo_build_tree PANIC::out of memory");
- return NULL;
+ return nullptr;
}
/*
parent = size - 1;
delta = 0;
- while ( 1 ) {
- /* Compute the size of the right subtree */
- int rightsize = size >> 1;
-
- /* Determine the left and right child of this parent */
- int lchild = -1;
- int rchild = -1;
- if (size - 1 > 0) {
- lchild = parent - 1;
- if (lchild > 0) {
- rchild = rightsize - 1;
- }
+ while (true) {
+ /* Compute the size of the right subtree */
+ int rightsize = size >> 1;
+
+ /* Determine the left and right child of this parent */
+ int lchild = -1;
+ int rchild = -1;
+ if (size - 1 > 0) {
+ lchild = parent - 1;
+ if (lchild > 0) {
+ rchild = rightsize - 1;
}
+ }
- /* The following cases are possible: myrank can be
- - a parent,
- - belong to the left subtree, or
- - belong to the right subtee
- Each of the cases need to be handled differently.
+ /* The following cases are possible: myrank can be
+ - a parent,
+ - belong to the left subtree, or
+ - belong to the right subtree
+ Each of the cases needs to be handled differently.
+ */
+
+ if (myrank == parent) {
+ /* I am the parent:
+ - compute real ranks of my children, and exit the loop. */
+ if (lchild >= 0)
+ tree->tree_next[0] = lchild + delta;
+ if (rchild >= 0)
+ tree->tree_next[1] = rchild + delta;
+ break;
+ }
+ if (myrank > rchild) {
+ /* I belong to the left subtree:
+ - If I am the left child, compute real rank of my parent
+ - Iterate down through tree:
+ compute new size, shift ranks down, and update delta.
*/
-
- if (myrank == parent) {
- /* I am the parent:
- - compute real ranks of my children, and exit the loop. */
- if (lchild >= 0) tree->tree_next[0] = lchild + delta;
- if (rchild >= 0) tree->tree_next[1] = rchild + delta;
- break;
+ if (myrank == lchild) {
+ tree->tree_prev = parent + delta;
}
- if (myrank > rchild) {
- /* I belong to the left subtree:
- - If I am the left child, compute real rank of my parent
- - Iterate down through tree:
- compute new size, shift ranks down, and update delta.
- */
- if (myrank == lchild) {
- tree->tree_prev = parent + delta;
- }
- size = size - rightsize - 1;
- delta = delta + rightsize;
- myrank = myrank - rightsize;
- parent = size - 1;
-
- } else {
- /* I belong to the right subtree:
- - If I am the right child, compute real rank of my parent
- - Iterate down through tree:
- compute new size and parent,
- but the delta and rank do not need to change.
- */
- if (myrank == rchild) {
- tree->tree_prev = parent + delta;
- }
- size = rightsize;
- parent = rchild;
+ size = size - rightsize - 1;
+ delta = delta + rightsize;
+ myrank = myrank - rightsize;
+ parent = size - 1;
+
+ } else {
+ /* I belong to the right subtree:
+ - If I am the right child, compute real rank of my parent
+ - Iterate down through tree:
+ compute new size and parent,
+ but the delta and rank do not need to change.
+ */
+ if (myrank == rchild) {
+ tree->tree_prev = parent + delta;
}
+ size = rightsize;
+ parent = rchild;
+ }
}
if (tree->tree_next[0] >= 0) { tree->tree_nextsize = 1; }
ptr = *tree;
delete ptr;
- *tree = NULL; /* mark tree as gone */
+ *tree = nullptr; /* mark tree as gone */
return MPI_SUCCESS;
}
bmtree = new ompi_coll_tree_t;
if (not bmtree) {
XBT_DEBUG("coll:tuned:topo:build_bmtree PANIC out of memory");
- return NULL;
+ return nullptr;
}
bmtree->tree_bmtree = 1;
if (children==MAXTREEFANOUT) {
XBT_DEBUG("coll:tuned:topo:build_bmtree max fanout incorrect %d needed %d", MAXTREEFANOUT, children);
delete bmtree;
- return NULL;
+ return nullptr;
}
bmtree->tree_next[children] = remote;
mask <<= 1;
if (not bmtree) {
XBT_DEBUG("coll:tuned:topo:build_bmtree PANIC out of memory");
delete bmtree;
- return NULL;
+ return nullptr;
}
bmtree->tree_bmtree = 1;
if (children == MAXTREEFANOUT) {
XBT_DEBUG("coll:tuned:topo:build_bmtree max fanout incorrect %d needed %d", MAXTREEFANOUT, children);
delete bmtree;
- return NULL;
+ return nullptr;
}
}
mask <<= 1;
if (not chain) {
XBT_DEBUG("coll:tuned:topo:build_chain PANIC out of memory");
fflush(stdout);
- return NULL;
+ return nullptr;
}
for(i=0;i<fanout;i++) chain->tree_next[i] = -1;
int root,
MPI_Comm comm)
{
- unsigned char* leader_gather_buf = NULL;
+ unsigned char* leader_gather_buf = nullptr;
int comm_size, rank;
int local_rank, local_size;
int leader_comm_rank = -1, leader_comm_size = 0;
MPI_Aint sendtype_extent = 0, recvtype_extent = 0; /* Datatype extent */
MPI_Aint true_lb = 0, sendtype_true_extent = 0, recvtype_true_extent = 0;
MPI_Comm shmem_comm, leader_comm;
- unsigned char* tmp_buf = NULL;
+ unsigned char* tmp_buf = nullptr;
// if not set (use of the algo directly, without mvapich2 selector)
- if (MV2_Gather_intra_node_function == NULL)
+ if (MV2_Gather_intra_node_function == nullptr)
MV2_Gather_intra_node_function = gather__mpich;
if (comm->get_leaders_comm() == MPI_COMM_NULL) {
} else {
tmp_buf = smpi_get_tmp_sendbuffer(sendcnt * std::max(sendtype_extent, sendtype_true_extent) * local_size);
}
- if (tmp_buf == NULL) {
- mpi_errno = MPI_ERR_OTHER;
- return mpi_errno;
+ if (tmp_buf == nullptr) {
+ mpi_errno = MPI_ERR_OTHER;
+ return mpi_errno;
}
}
/*while testing mpich2 gather test, we see that
*/
if (not comm->is_uniform()) {
if (local_rank == 0) {
- int* displs = NULL;
- int* recvcnts = NULL;
+ int* displs = nullptr;
+ int* recvcnts = nullptr;
int* node_sizes;
int i = 0;
/* Node leaders have all the data. But, different nodes can have
leader_gather_buf =
smpi_get_tmp_sendbuffer(sendcnt * std::max(sendtype_extent, sendtype_true_extent) * comm_size);
}
- if (leader_gather_buf == NULL) {
+ if (leader_gather_buf == nullptr) {
mpi_errno = MPI_ERR_OTHER;
return mpi_errno;
}
/* The root of the Gather operation is not a node-level leader
*/
leader_gather_buf = smpi_get_tmp_sendbuffer(nbytes * comm_size);
- if (leader_gather_buf == NULL) {
- mpi_errno = MPI_ERR_OTHER;
- return mpi_errno;
+ if (leader_gather_buf == nullptr) {
+ mpi_errno = MPI_ERR_OTHER;
+ return mpi_errno;
}
}
if (root == leader_of_root) {
/* check if multiple threads are calling this collective function */
if (local_rank == 0 ) {
- if (tmp_buf != NULL) {
- smpi_free_tmp_buffer(tmp_buf);
- }
- if (leader_gather_buf != NULL) {
- smpi_free_tmp_buffer(leader_gather_buf);
- }
+ if (tmp_buf != nullptr) {
+ smpi_free_tmp_buffer(tmp_buf);
+ }
+ if (leader_gather_buf != nullptr) {
+ smpi_free_tmp_buffer(leader_gather_buf);
+ }
}
return (mpi_errno);
/* root is not on 0, allocate temp buffer for recv,
* rotate data at the end */
tempbuf = smpi_get_tmp_recvbuffer(rtrue_extent + (rcount * size - 1) * rextent);
- if (NULL == tempbuf) {
+ if (nullptr == tempbuf) {
err = MPI_ERR_OTHER;
line = __LINE__;
goto err_hndl;
* children, the most we need is half of the total data elements due
* to the property of binomial tree */
tempbuf = smpi_get_tmp_sendbuffer(strue_extent + (scount * size - 1) * sextent);
- if (NULL == tempbuf) {
+ if (nullptr == tempbuf) {
err = MPI_ERR_OTHER;
line = __LINE__;
goto err_hndl;
return MPI_SUCCESS;
err_hndl:
- if (NULL != tempbuf)
- smpi_free_tmp_buffer(tempbuf);
+ if (nullptr != tempbuf)
+ smpi_free_tmp_buffer(tempbuf);
- XBT_DEBUG("%s:%4d\tError occurred %d, rank %2d", __FILE__, line, err, rank);
- return err;
+ XBT_DEBUG("%s:%4d\tError occurred %d, rank %2d", __FILE__, line, err, rank);
+ return err;
}
/*
char* ptmp;
MPI_Request first_segment_req;
MPI_Request* reqs = new (std::nothrow) MPI_Request[size];
- if (NULL == reqs) {
+ if (nullptr == reqs) {
ret = -1;
line = __LINE__;
- goto error_hndl; }
+ goto error_hndl;
+ }
typelng=rdtype->size();
rdtype->extent(&lb, &extent);
int mask=0x1, k, comm_size, src, rank, relative_rank, lroot=0;
int orig_mask=0x1;
int recv_iter=0, send_iter=0;
- int *knomial_reduce_src_array=NULL;
+ int* knomial_reduce_src_array = nullptr;
comm_size = comm->size();
rank = comm->rank();
MPI_Aint true_lb, true_extent, extent;
MPI_Status status;
int recv_iter=0, dst=-1, expected_send_count, expected_recv_count;
- int *src_array=NULL;
+ int* src_array = nullptr;
if (count == 0) return MPI_SUCCESS;
delete[] requests;
}
- if(src_array != NULL) {
+ if (src_array != nullptr) {
delete[] src_array;
}
int intra_node_root=0;
//if not set (use of the algo directly, without mvapich2 selector)
- if(MV2_Reduce_function==NULL)
+ if (MV2_Reduce_function == nullptr)
MV2_Reduce_function = reduce__mpich;
- if(MV2_Reduce_intra_function==NULL)
+ if (MV2_Reduce_intra_function == nullptr)
MV2_Reduce_intra_function = reduce__mpich;
if(comm->get_leaders_comm()==MPI_COMM_NULL){
int creq = 0;
MPI_Request* sreq = new (std::nothrow) MPI_Request[max_outstanding_reqs];
- if (NULL == sreq) { line = __LINE__; ret = -1; goto error_hndl; }
+ if (nullptr == sreq) {
+ line = __LINE__;
+ ret = -1;
+ goto error_hndl;
+ }
/* post first group of requests */
for (segindex = 0; segindex < max_outstanding_reqs; segindex++) {
if ((root == rank) && (MPI_IN_PLACE == sendbuf)) {
tmp_sendbuf = smpi_get_tmp_sendbuffer(text + (count - 1) * ext);
- if (NULL == tmp_sendbuf) {
+ if (nullptr == tmp_sendbuf) {
return MPI_ERR_INTERN;
}
Datatype::copy(recvbuf, count, datatype, tmp_sendbuf, count, datatype);
use_this_sendbuf = tmp_sendbuf;
} else if (io_root == rank) {
tmp_recvbuf = smpi_get_tmp_recvbuffer(text + (count - 1) * ext);
- if (NULL == tmp_recvbuf) {
+ if (nullptr == tmp_recvbuf) {
return MPI_ERR_INTERN;
}
use_this_recvbuf = tmp_recvbuf;
int tag = COLL_TAG_REDUCE,temporary_buffer=0;
unsigned char *send_ptr, *recv_ptr, *tmp_buf;
- cnts = NULL;
- disps = NULL;
+ cnts = nullptr;
+ disps = nullptr;
MPI_Aint extent;
/* Allocate temporary receive buffer. */
unsigned char* recv_buf_free = smpi_get_tmp_recvbuffer(buf_size);
unsigned char* recv_buf = recv_buf_free - lb;
- if (NULL == recv_buf_free) {
- err = MPI_ERR_OTHER;
- goto cleanup;
+ if (nullptr == recv_buf_free) {
+ err = MPI_ERR_OTHER;
+ goto cleanup;
}
/* allocate temporary buffer for results */
cleanup:
delete[] disps;
- if (NULL != recv_buf_free) smpi_free_tmp_buffer(recv_buf_free);
- if (NULL != result_buf_free) smpi_free_tmp_buffer(result_buf_free);
+ if (nullptr != recv_buf_free)
+ smpi_free_tmp_buffer(recv_buf_free);
+ if (nullptr != result_buf_free)
+ smpi_free_tmp_buffer(result_buf_free);
return err;
}
{
int ret, line, rank, size, i, k, recv_from, send_to, total_count, max_block_count;
int inbi;
- unsigned char *tmpsend = NULL, *tmprecv = NULL, *accumbuf = NULL, *accumbuf_free = NULL;
- unsigned char *inbuf_free[2] = {NULL, NULL}, *inbuf[2] = {NULL, NULL};
+ unsigned char *tmpsend = nullptr, *tmprecv = nullptr, *accumbuf = nullptr, *accumbuf_free = nullptr;
+ unsigned char *inbuf_free[2] = {nullptr, nullptr}, *inbuf[2] = {nullptr, nullptr};
ptrdiff_t true_lb, true_extent, lb, extent, max_real_segsize;
- MPI_Request reqs[2] = {NULL, NULL};
+ MPI_Request reqs[2] = {nullptr, nullptr};
size = comm->size();
rank = comm->rank();
max_real_segsize = true_extent + (ptrdiff_t)(max_block_count - 1) * extent;
accumbuf_free = smpi_get_tmp_recvbuffer(true_extent + (ptrdiff_t)(total_count - 1) * extent);
- if (NULL == accumbuf_free) { ret = -1; line = __LINE__; goto error_hndl; }
+ if (nullptr == accumbuf_free) {
+ ret = -1;
+ line = __LINE__;
+ goto error_hndl;
+ }
accumbuf = accumbuf_free - lb;
inbuf_free[0] = smpi_get_tmp_sendbuffer(max_real_segsize);
- if (NULL == inbuf_free[0]) { ret = -1; line = __LINE__; goto error_hndl; }
+ if (nullptr == inbuf_free[0]) {
+ ret = -1;
+ line = __LINE__;
+ goto error_hndl;
+ }
inbuf[0] = inbuf_free[0] - lb;
if (size > 2) {
inbuf_free[1] = smpi_get_tmp_sendbuffer(max_real_segsize);
- if (NULL == inbuf_free[1]) {
+ if (nullptr == inbuf_free[1]) {
ret = -1;
line = __LINE__;
goto error_hndl;
if (ret < 0) { line = __LINE__; goto error_hndl; }
delete[] displs;
- if (NULL != accumbuf_free) smpi_free_tmp_buffer(accumbuf_free);
- if (NULL != inbuf_free[0]) smpi_free_tmp_buffer(inbuf_free[0]);
- if (NULL != inbuf_free[1]) smpi_free_tmp_buffer(inbuf_free[1]);
+ if (nullptr != accumbuf_free)
+ smpi_free_tmp_buffer(accumbuf_free);
+ if (nullptr != inbuf_free[0])
+ smpi_free_tmp_buffer(inbuf_free[0]);
+ if (nullptr != inbuf_free[1])
+ smpi_free_tmp_buffer(inbuf_free[1]);
return MPI_SUCCESS;
XBT_DEBUG( "%s:%4d\tRank %d Error occurred %d\n",
__FILE__, line, rank, ret);
delete[] displs;
- if (NULL != accumbuf_free) smpi_free_tmp_buffer(accumbuf_free);
- if (NULL != inbuf_free[0]) smpi_free_tmp_buffer(inbuf_free[0]);
- if (NULL != inbuf_free[1]) smpi_free_tmp_buffer(inbuf_free[1]);
+ if (nullptr != accumbuf_free)
+ smpi_free_tmp_buffer(accumbuf_free);
+ if (nullptr != inbuf_free[0])
+ smpi_free_tmp_buffer(inbuf_free[0]);
+ if (nullptr != inbuf_free[1])
+ smpi_free_tmp_buffer(inbuf_free[1]);
return ret;
}
}
int leader_root, leader_of_root = -1;
MPI_Comm shmem_comm, leader_comm;
//if not set (use of the algo directly, without mvapich2 selector)
- if(MV2_Scatter_intra_function==NULL)
+ if (MV2_Scatter_intra_function == nullptr)
MV2_Scatter_intra_function = scatter__mpich;
if(comm->get_leaders_comm()==MPI_COMM_NULL){
if (leader_comm_size > 1 && local_rank == 0) {
if (not comm->is_uniform()) {
- int* displs = NULL;
- int* sendcnts = NULL;
+ int* displs = nullptr;
+ int* sendcnts = nullptr;
int* node_sizes;
int i = 0;
node_sizes = comm->get_non_uniform_map();
//if not set (use of the algo directly, without mvapich2 selector)
- if(MV2_Scatter_intra_function==NULL)
+ if (MV2_Scatter_intra_function == nullptr)
MV2_Scatter_intra_function = scatter__mpich;
if(comm->get_leaders_comm()==MPI_COMM_NULL){
if (leader_comm_size > 1 && local_rank == 0) {
if (not comm->is_uniform()) {
- int* displs = NULL;
- int* sendcnts = NULL;
+ int* displs = nullptr;
+ int* sendcnts = nullptr;
int* node_sizes;
int i = 0;
node_sizes = comm->get_non_uniform_map();
MPI_Comm comm)
{
- if(mv2_alltoall_table_ppn_conf==NULL)
+ if (mv2_alltoall_table_ppn_conf == nullptr)
init_mv2_alltoall_tables_stampede();
int sendtype_size, recvtype_size, comm_size;
recvtype_size=recvtype->size();
nbytes = recvtype_size * recvcount;
- if(mv2_allgather_table_ppn_conf==NULL)
+ if (mv2_allgather_table_ppn_conf == nullptr)
init_mv2_allgather_tables_stampede();
if(comm->get_leaders_comm()==MPI_COMM_NULL){
MPI_Datatype recvtype,
int root, MPI_Comm comm)
{
- if(mv2_gather_thresholds_table==NULL)
+ if (mv2_gather_thresholds_table == nullptr)
init_mv2_gather_tables_stampede();
int mpi_errno = MPI_SUCCESS;
int range_threshold = 0;
long nbytes = 0;
- if(mv2_allgatherv_thresholds_table==NULL)
+ if (mv2_allgatherv_thresholds_table == nullptr)
init_mv2_allgatherv_tables_stampede();
comm_size = comm->size();
return MPI_SUCCESS;
}
- if (mv2_allreduce_thresholds_table == NULL)
+ if (mv2_allreduce_thresholds_table == nullptr)
init_mv2_allreduce_tables_stampede();
/* check if multiple threads are calling this collective function */
zcpy_knomial_factor = mv2_pipelined_zcpy_knomial_factor;
}
- if(MV2_Bcast_intra_node_function == NULL) {
- /* if tuning table do not have any intra selection, set func pointer to
- ** default one for mcast intra node */
- MV2_Bcast_intra_node_function = &MPIR_Shmem_Bcast_MV2;
+ if (MV2_Bcast_intra_node_function == nullptr) {
+ /* if tuning table do not have any intra selection, set func pointer to
+ ** default one for mcast intra node */
+ MV2_Bcast_intra_node_function = &MPIR_Shmem_Bcast_MV2;
}
/* Set value of pipeline segment size */
MPI_Datatype datatype,
MPI_Op op, int root, MPI_Comm comm)
{
- if(mv2_reduce_thresholds_table == NULL)
+ if (mv2_reduce_thresholds_table == nullptr)
init_mv2_reduce_tables_stampede();
int mpi_errno = MPI_SUCCESS;
0, nbytes = 0;
int* disps = new int[comm_size];
- if(mv2_red_scat_thresholds_table==NULL)
+ if (mv2_red_scat_thresholds_table == nullptr)
init_mv2_reduce_scatter_tables_stampede();
bool is_commutative = (op == MPI_OP_NULL || op->is_commutative());
int conf_index = 0;
MPI_Comm shmem_comm;
// MPID_Comm *shmem_commptr=NULL;
- if(mv2_scatter_thresholds_table==NULL)
- init_mv2_scatter_tables_stampede();
+ if (mv2_scatter_thresholds_table == nullptr)
+ init_mv2_scatter_tables_stampede();
- if(comm->get_leaders_comm()==MPI_COMM_NULL){
- comm->init_smp();
- }
+ if (comm->get_leaders_comm() == MPI_COMM_NULL) {
+ comm->init_smp();
+ }
comm_size = comm->size();
} else
#endif /*#if defined(_MCST_SUPPORT_) */
{
- if(mv2_scatter_thresholds_table[conf_index][range].inter_leader[range_threshold + 1].
- MV2_pt_Scatter_function != NULL) {
- MV2_Scatter_function = mv2_scatter_thresholds_table[conf_index][range].inter_leader[range_threshold + 1]
- .MV2_pt_Scatter_function;
- } else {
- /* Fallback! */
- MV2_Scatter_function = &MPIR_Scatter_MV2_Binomial;
- }
+ if (mv2_scatter_thresholds_table[conf_index][range].inter_leader[range_threshold + 1].MV2_pt_Scatter_function !=
+ nullptr) {
+ MV2_Scatter_function =
+ mv2_scatter_thresholds_table[conf_index][range].inter_leader[range_threshold + 1].MV2_pt_Scatter_function;
+ } else {
+ /* Fallback! */
+ MV2_Scatter_function = &MPIR_Scatter_MV2_Binomial;
+ }
}
}
};
int (*MV2_Alltoall_function)(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
- MPI_Datatype recvtype, MPI_Comm comm_ptr) = NULL;
+ MPI_Datatype recvtype, MPI_Comm comm_ptr) = nullptr;
/* Indicates number of processes per node */
-int* mv2_alltoall_table_ppn_conf = NULL;
+int* mv2_alltoall_table_ppn_conf = nullptr;
/* Indicates total number of configurations */
int mv2_alltoall_num_ppn_conf = 1;
-int* mv2_size_alltoall_tuning_table = NULL;
-mv2_alltoall_tuning_table** mv2_alltoall_thresholds_table = NULL;
+int* mv2_size_alltoall_tuning_table = nullptr;
+mv2_alltoall_tuning_table** mv2_alltoall_thresholds_table = nullptr;
#define MPIR_Alltoall_bruck_MV2 simgrid::smpi::alltoall__bruck
#define MPIR_Alltoall_RD_MV2 simgrid::smpi::alltoall__rdb
static void init_mv2_alltoall_tables_stampede()
{
int agg_table_sum = 0;
- mv2_alltoall_tuning_table** table_ptrs = NULL;
+ mv2_alltoall_tuning_table** table_ptrs = nullptr;
mv2_alltoall_num_ppn_conf = 3;
- if (simgrid::smpi::colls::smpi_coll_cleanup_callback == NULL)
+ if (simgrid::smpi::colls::smpi_coll_cleanup_callback == nullptr)
simgrid::smpi::colls::smpi_coll_cleanup_callback = &smpi_coll_cleanup_mvapich2;
mv2_alltoall_thresholds_table = new mv2_alltoall_tuning_table*[mv2_alltoall_num_ppn_conf];
table_ptrs = new mv2_alltoall_tuning_table*[mv2_alltoall_num_ppn_conf];
int (*MV2_Allgatherction)(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
MPI_Datatype recvtype, MPI_Comm comm);
-int* mv2_allgather_table_ppn_conf = NULL;
+int* mv2_allgather_table_ppn_conf = nullptr;
int mv2_allgather_num_ppn_conf = 1;
-int* mv2_size_allgather_tuning_table = NULL;
-mv2_allgather_tuning_table** mv2_allgather_thresholds_table = NULL;
+int* mv2_size_allgather_tuning_table = nullptr;
+mv2_allgather_tuning_table** mv2_allgather_thresholds_table = nullptr;
static int MPIR_Allgather_RD_Allgather_Comm_MV2(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf,
int recvcount, MPI_Datatype recvtype, MPI_Comm comm_ptr)
{
int agg_table_sum = 0;
- if (simgrid::smpi::colls::smpi_coll_cleanup_callback == NULL)
+ if (simgrid::smpi::colls::smpi_coll_cleanup_callback == nullptr)
simgrid::smpi::colls::smpi_coll_cleanup_callback = &smpi_coll_cleanup_mvapich2;
mv2_allgather_num_ppn_conf = 3;
mv2_allgather_thresholds_table = new mv2_allgather_tuning_table*[mv2_allgather_num_ppn_conf];
};
int mv2_size_gather_tuning_table = 7;
-mv2_gather_tuning_table* mv2_gather_thresholds_table = NULL;
+mv2_gather_tuning_table* mv2_gather_thresholds_table = nullptr;
typedef int (*MV2_Gather_function_ptr)(const void* sendbuf, int sendcnt, MPI_Datatype sendtype, void* recvbuf, int recvcnt,
MPI_Datatype recvtype, int root, MPI_Comm comm);
-MV2_Gather_function_ptr MV2_Gather_inter_leader_function = NULL;
-MV2_Gather_function_ptr MV2_Gather_intra_node_function = NULL;
+MV2_Gather_function_ptr MV2_Gather_inter_leader_function = nullptr;
+MV2_Gather_function_ptr MV2_Gather_intra_node_function = nullptr;
#define MPIR_Gather_MV2_Direct simgrid::smpi::gather__ompi_basic_linear
#define MPIR_Gather_MV2_two_level_Direct simgrid::smpi::gather__mvapich2_two_level
static void init_mv2_gather_tables_stampede()
{
- if (simgrid::smpi::colls::smpi_coll_cleanup_callback == NULL)
+ if (simgrid::smpi::colls::smpi_coll_cleanup_callback == nullptr)
simgrid::smpi::colls::smpi_coll_cleanup_callback = &smpi_coll_cleanup_mvapich2;
mv2_size_gather_tuning_table = 7;
mv2_gather_thresholds_table = new mv2_gather_tuning_table[mv2_size_gather_tuning_table];
const int* displs, MPI_Datatype recvtype, MPI_Comm comm);
int mv2_size_allgatherv_tuning_table = 0;
-mv2_allgatherv_tuning_table* mv2_allgatherv_thresholds_table = NULL;
+mv2_allgatherv_tuning_table* mv2_allgatherv_thresholds_table = nullptr;
#define MPIR_Allgatherv_Rec_Doubling_MV2 simgrid::smpi::allgatherv__mpich_rdb
#define MPIR_Allgatherv_Bruck_MV2 simgrid::smpi::allgatherv__ompi_bruck
static void init_mv2_allgatherv_tables_stampede()
{
- if (simgrid::smpi::colls::smpi_coll_cleanup_callback == NULL)
+ if (simgrid::smpi::colls::smpi_coll_cleanup_callback == nullptr)
simgrid::smpi::colls::smpi_coll_cleanup_callback = &smpi_coll_cleanup_mvapich2;
mv2_size_allgatherv_tuning_table = 6;
mv2_allgatherv_thresholds_table = new mv2_allgatherv_tuning_table[mv2_size_allgatherv_tuning_table];
};
int (*MV2_Allreducection)(const void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op,
- MPI_Comm comm) = NULL;
+ MPI_Comm comm) = nullptr;
int (*MV2_Allreduce_intra_function)(const void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op,
- MPI_Comm comm) = NULL;
+ MPI_Comm comm) = nullptr;
int mv2_size_allreduce_tuning_table = 0;
-mv2_allreduce_tuning_table* mv2_allreduce_thresholds_table = NULL;
+mv2_allreduce_tuning_table* mv2_allreduce_thresholds_table = nullptr;
static int MPIR_Allreduce_mcst_reduce_two_level_helper_MV2(const void* sendbuf, void* recvbuf, int count,
MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
static void init_mv2_allreduce_tables_stampede()
{
- if (simgrid::smpi::colls::smpi_coll_cleanup_callback == NULL)
+ if (simgrid::smpi::colls::smpi_coll_cleanup_callback == nullptr)
simgrid::smpi::colls::smpi_coll_cleanup_callback = &smpi_coll_cleanup_mvapich2;
mv2_size_allreduce_tuning_table = 8;
mv2_allreduce_thresholds_table = new mv2_allreduce_tuning_table[mv2_size_allreduce_tuning_table];
};
int mv2_size_bcast_tuning_table = 0;
-mv2_bcast_tuning_table* mv2_bcast_thresholds_table = NULL;
+mv2_bcast_tuning_table* mv2_bcast_thresholds_table = nullptr;
-int (*MV2_Bcast_function)(void* buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm_ptr) = NULL;
+int (*MV2_Bcast_function)(void* buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm_ptr) = nullptr;
int (*MV2_Bcast_intra_node_function)(void* buffer, int count, MPI_Datatype datatype, int root,
- MPI_Comm comm_ptr) = NULL;
+ MPI_Comm comm_ptr) = nullptr;
int zcpy_knomial_factor = 2;
int mv2_pipelined_zcpy_knomial_factor = -1;
static void init_mv2_bcast_tables_stampede()
{
// Stampede,
- if (simgrid::smpi::colls::smpi_coll_cleanup_callback == NULL)
+ if (simgrid::smpi::colls::smpi_coll_cleanup_callback == nullptr)
simgrid::smpi::colls::smpi_coll_cleanup_callback = &smpi_coll_cleanup_mvapich2;
mv2_size_bcast_tuning_table = 8;
mv2_bcast_thresholds_table = new mv2_bcast_tuning_table[mv2_size_bcast_tuning_table];
{{0, 8192, &MPIR_Shmem_Bcast_MV2, 8},
{8192, 16384, &MPIR_Shmem_Bcast_MV2, 4},
{16384, 524288, &MPIR_Shmem_Bcast_MV2, 2},
- {524288, -1, NULL, -1}}},
+ {524288, -1, nullptr, -1}}},
{256,
8192,
4,
};
int mv2_size_reduce_tuning_table = 0;
-mv2_reduce_tuning_table* mv2_reduce_thresholds_table = NULL;
+mv2_reduce_tuning_table* mv2_reduce_thresholds_table = nullptr;
int mv2_reduce_intra_knomial_factor = -1;
int mv2_reduce_inter_knomial_factor = -1;
int (*MV2_Reduce_function)(const void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root,
- MPI_Comm comm_ptr) = NULL;
+ MPI_Comm comm_ptr) = nullptr;
-int (*MV2_Reduce_intra_function)(const void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root,
- MPI_Comm comm_ptr) = NULL;
+int (*MV2_Reduce_intra_function)(const void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op,
+ int root, MPI_Comm comm_ptr) = nullptr;
#define MPIR_Reduce_inter_knomial_wrapper_MV2 simgrid::smpi::reduce__mvapich2_knomial
#define MPIR_Reduce_intra_knomial_wrapper_MV2 simgrid::smpi::reduce__mvapich2_knomial
static void init_mv2_reduce_tables_stampede()
{
- if (simgrid::smpi::colls::smpi_coll_cleanup_callback == NULL)
+ if (simgrid::smpi::colls::smpi_coll_cleanup_callback == nullptr)
simgrid::smpi::colls::smpi_coll_cleanup_callback = &smpi_coll_cleanup_mvapich2;
/*Stampede*/
mv2_size_reduce_tuning_table = 8;
};
int mv2_size_red_scat_tuning_table = 0;
-mv2_red_scat_tuning_table* mv2_red_scat_thresholds_table = NULL;
+mv2_red_scat_tuning_table* mv2_red_scat_thresholds_table = nullptr;
int (*MV2_Red_scat_function)(const void* sendbuf, void* recvbuf, const int* recvcnts, MPI_Datatype datatype, MPI_Op op,
MPI_Comm comm_ptr);
static void init_mv2_reduce_scatter_tables_stampede()
{
- if (simgrid::smpi::colls::smpi_coll_cleanup_callback == NULL)
+ if (simgrid::smpi::colls::smpi_coll_cleanup_callback == nullptr)
simgrid::smpi::colls::smpi_coll_cleanup_callback = &smpi_coll_cleanup_mvapich2;
mv2_size_red_scat_tuning_table = 6;
mv2_red_scat_thresholds_table = new mv2_red_scat_tuning_table[mv2_size_red_scat_tuning_table];
mv2_scatter_tuning_element intra_node[MV2_MAX_NB_THRESHOLDS];
};
-int* mv2_scatter_table_ppn_conf = NULL;
+int* mv2_scatter_table_ppn_conf = nullptr;
int mv2_scatter_num_ppn_conf = 1;
-int* mv2_size_scatter_tuning_table = NULL;
-mv2_scatter_tuning_table** mv2_scatter_thresholds_table = NULL;
+int* mv2_size_scatter_tuning_table = nullptr;
+mv2_scatter_tuning_table** mv2_scatter_thresholds_table = nullptr;
int (*MV2_Scatter_function)(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
- MPI_Datatype recvtype, int root, MPI_Comm comm) = NULL;
+ MPI_Datatype recvtype, int root, MPI_Comm comm) = nullptr;
-int (*MV2_Scatter_intra_function)(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
- MPI_Datatype recvtype, int root, MPI_Comm comm) = NULL;
+int (*MV2_Scatter_intra_function)(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf,
+ int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm) = nullptr;
int MPIR_Scatter_mcst_wrap_MV2(const void* sendbuf, int sendcnt, MPI_Datatype sendtype, void* recvbuf, int recvcnt,
MPI_Datatype recvtype, int root, MPI_Comm comm_ptr);
static void init_mv2_scatter_tables_stampede()
{
- if (simgrid::smpi::colls::smpi_coll_cleanup_callback == NULL)
+ if (simgrid::smpi::colls::smpi_coll_cleanup_callback == nullptr)
simgrid::smpi::colls::smpi_coll_cleanup_callback = &smpi_coll_cleanup_mvapich2;
int agg_table_sum = 0;
/* See MPI-1, 5.7.1. Freeing the keyval does not remove it if it
* is in use in an attribute */
smpi_key_elem elem = T::keyvals_.at(*keyval);
- if(elem==0){
+ if (elem == nullptr) {
return MPI_ERR_ARG;
}
if(elem->refcount==1){
return smpi_colors.find(state)->second.c_str();
}
for (const auto& pair : smpi_colors) { // Is an entry of our map a substring of this state name?
- if (std::strstr(state.c_str(), pair.first.c_str()) != 0)
+ if (std::strstr(state.c_str(), pair.first.c_str()) != nullptr)
return pair.second.c_str();
}
XBT_DEBUG("Copy %" PRIdMAX " bytes into %s", static_cast<intmax_t>(fdin_size), target.c_str());
#if SG_HAVE_SENDFILE
- ssize_t sent_size = sendfile(fdout, fdin, NULL, fdin_size);
+ ssize_t sent_size = sendfile(fdout, fdin, nullptr, fdin_size);
if (sent_size == fdin_size) {
close(fdin);
close(fdout);
/* First reserve memory area */
- void* allocated_ptr = mmap(NULL, allocated_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ void* allocated_ptr = mmap(nullptr, allocated_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
xbt_assert(allocated_ptr != MAP_FAILED, "Failed to allocate %zuMiB of memory. Run \"sysctl vm.overcommit_memory=1\" as root "
"to allow big allocations.\n",
int Request::grequest_complete(MPI_Request request)
{
- if ((!(request->flags_ & MPI_REQ_GENERALIZED)) || request->generalized_funcs->mutex==NULL)
+ if ((!(request->flags_ & MPI_REQ_GENERALIZED)) || request->generalized_funcs->mutex == nullptr)
return MPI_ERR_REQUEST;
request->generalized_funcs->mutex->lock();
request->flags_ |= MPI_REQ_COMPLETE; // in case wait would be called after complete
int Win::attach(void* /*base*/, MPI_Aint size)
{
- if (not(base_ == MPI_BOTTOM || base_ == 0))
+ if (not(base_ == MPI_BOTTOM || base_ == nullptr))
return MPI_ERR_ARG;
- base_=0;//actually the address will be given in the RMA calls, as being the disp.
+ base_ = nullptr; // actually the address will be given in the RMA calls, as being the disp.
size_+=size;
return MPI_SUCCESS;
}
ns3::NetDeviceContainer netA;
WifiZone* zone = WifiZone::by_name(name);
- xbt_assert(zone != 0, "Link name '%s' does not match the 'wifi_link' property of a host.", name.c_str());
+ xbt_assert(zone != nullptr, "Link name '%s' does not match the 'wifi_link' property of a host.", name.c_str());
NetPointNs3* netpoint_ns3 = zone->get_host()->get_netpoint()->extension<NetPointNs3>();
wifi.SetRemoteStationManager("ns3::ConstantRateWifiManager", "ControlMode", ns3::StringValue("HtMcs0"), "DataMode",
flow->remaining_);
return;
}
- int amountSent = sock->Send(0, toWrite, 0);
+ int amountSent = sock->Send(nullptr, toWrite, 0);
xbt_assert(amountSent > 0, "Since TxAvailable>0, amountSent should also >0");
flow->buffered_bytes_ += amountSent;
XBT_DEBUG("<router id=\"%s\"/>", cluster->router_id.c_str());
if (cluster->router_id.empty())
cluster->router_id = std::string(cluster->prefix) + cluster->id + "_router" + cluster->suffix;
- current_as->router_ = sg_platf_new_router(cluster->router_id, NULL);
+ current_as->router_ = sg_platf_new_router(cluster->router_id, nullptr);
//Make the backbone
if ((cluster->bb_bw > 0) || (cluster->bb_lat > 0)) {
/* to be returned. */
std::string path = std::string("/proc/") + std::to_string(pid) + "/maps";
std::ifstream fp;
- fp.rdbuf()->pubsetbuf(0, 0);
+ fp.rdbuf()->pubsetbuf(nullptr, 0);
fp.open(path);
if (not fp) {
std::perror("open failed");
exec->wait();
XBT_INFO("%s:%s Exec 1 complete %g", host_name, pr_name, simgrid::s4u::Engine::get_clock() - clock_sta);
- exec = NULL;
+ exec = nullptr;
simgrid::s4u::this_actor::sleep_for(1);
auto* Sys = new simgrid::kernel::lmm::System(false);
for (int i = 0; i < nb_cnst; i++) {
- cnst[i] = Sys->constraint_new(NULL, simgrid::xbt::random::uniform_real(0.0, 10.0));
+ cnst[i] = Sys->constraint_new(nullptr, simgrid::xbt::random::uniform_real(0.0, 10.0));
int l;
if (rate_no_limit > simgrid::xbt::random::uniform_real(0.0, 1.0)) {
// Look at what happens when there is no concurrency limit
}
for (int i = 0; i < nb_var; i++) {
- var[i] = Sys->variable_new(NULL, 1.0, -1.0, nb_elem);
+ var[i] = Sys->variable_new(nullptr, 1.0, -1.0, nb_elem);
//Have a few variables with a concurrency share of two (e.g. cross-traffic in some cases)
short concurrency_share = 1 + static_cast<short>(simgrid::xbt::random::uniform_int(0, max_share - 1));
var[i]->set_concurrency_share(concurrency_share);
unsigned long mask = ~((unsigned long)xbt_pagesize - 1);
auto* addr = reinterpret_cast<void*>(((unsigned long)sbrk(0) + BUFFSIZE) & mask);
heapA = xbt_mheap_new(-1, addr);
- if (heapA == NULL) {
+ if (heapA == nullptr) {
perror("attach 1 failed");
fprintf(stderr, "bye\n");
exit(1);
}
timeout = atof(argv[2]);
if (argc == 4)
- modes = static_cast<unsigned>(strtoul(argv[2], NULL, 0));
+ modes = static_cast<unsigned>(strtoul(argv[2], nullptr, 0));
XBT_INFO("Parmap benchmark with %d workers (modes = %#x)...", nthreads, modes);
XBT_INFO("%s", "");