join(known_id_);
}
- if (!joined)
+ if (not joined)
return;
ChordMessage* message = nullptr;
void* data = nullptr;
while ((now < (start_time_ + deadline_)) && now < MAX_SIMULATION_TIME) {
data = nullptr;
simgrid::s4u::Comm& comm_receive = simgrid::s4u::this_actor::irecv(mailbox_, &data);
- while ((now < (start_time_ + deadline_)) && now < MAX_SIMULATION_TIME && !comm_receive.test()) {
+ while ((now < (start_time_ + deadline_)) && now < MAX_SIMULATION_TIME && not comm_receive.test()) {
// no task was received: make some periodic calls
if (now >= next_stabilize_date) {
stabilize();
void set_continuation(simgrid::xbt::Task<void()>&& continuation)
{
- xbt_assert(!continuation_);
+ xbt_assert(not continuation_);
switch (status_) {
case FutureStatus::done:
// This is not supposed to happen if continuation is set
* the future is ready
* @exception std::future_error no state is associated with the future
*/
- template<class F>
- auto then(F continuation)
- -> typename std::enable_if<
- !is_future<decltype(continuation(std::move(*this)))>::value,
- Future<decltype(continuation(std::move(*this)))>
- >::type
+ template <class F>
+ auto then(F continuation) -> typename std::enable_if<not is_future<decltype(continuation(std::move(*this)))>::value,
+ Future<decltype(continuation(std::move(*this)))>>::type
{
return this->thenNoUnwrap(std::move(continuation));
}
void wait(std::unique_lock<Mutex> & lock);
template <class P> void wait(std::unique_lock<Mutex> & lock, P pred)
{
- while (!pred())
+ while (not pred())
wait(lock);
}
std::cv_status wait_for(std::unique_lock<Mutex> & lock, double duration);
template <class P> bool wait_until(std::unique_lock<Mutex> & lock, double timeout_time, P pred)
{
- while (!pred())
+ while (not pred())
if (this->wait_until(lock, timeout_time) == std::cv_status::timeout)
return pred();
return true;
/** Returns if that host is currently up and running */
bool isOn();
/** Returns if that host is currently down and offline */
- bool isOff() { return !isOn(); }
+ bool isOff() { return not isOn(); }
double speed();
int coreCount();
bool valid() const { return future_.valid(); }
T get()
{
- if (!valid())
+ if (not valid())
throw std::future_error(std::future_errc::no_state);
smx_actor_t self = SIMIX_process_self();
simgrid::xbt::Result<T> result;
}
bool is_ready() const
{
- if (!valid())
+ if (not valid())
throw std::future_error(std::future_errc::no_state);
return future_.is_ready();
}
bindFlag(T& value, const char* name, const char* description,
F callback)
{
- declareFlag(name, description, value, [&value,callback](const T& val) {
- if (!callback(val))
+ declareFlag(name, description, value, [&value, callback](const T& val) {
+ if (not callback(val))
throw std::range_error("invalid value");
value = std::move(val);
});
class WithContext : public E, public WithContextException
{
public:
-
- static_assert(!std::is_base_of<WithContextException,E>::value,
- "Trying to appli WithContext twice");
+ static_assert(not std::is_base_of<WithContextException, E>::value, "Trying to apply WithContext twice");
WithContext(E exception) :
E(std::move(exception)) {}
vtable_ = &vtable;
}
- template<class F>
- typename std::enable_if<!canSBO<F>()>::type
- init(F code)
+ template <class F> typename std::enable_if<not canSBO<F>()>::type init(F code)
{
const static TaskVtable vtable {
// Call:
if (not TRACE_is_enabled() || not TRACE_needs_platform())
return;
- if (!(TRACE_categorized() && category != nullptr))
+ if (not(TRACE_categorized() && category != nullptr))
return;
//check if category is already created
continue;
current_name = pathname;
- if (!(reg.prot & PROT_READ) && (reg.prot & PROT_EXEC))
+ if (not(reg.prot & PROT_READ) && (reg.prot & PROT_EXEC))
continue;
const bool is_executable = not i;
// Heap access:
xbt_mheap_t get_heap()
{
- if (!(this->cache_flags_ & Process::cache_heap))
+ if (not(this->cache_flags_ & Process::cache_heap))
this->refresh_heap();
return this->heap.get();
}
const malloc_info* get_malloc_info()
{
- if (!(this->cache_flags_ & Process::cache_malloc))
+ if (not(this->cache_flags_ & Process::cache_malloc))
this->refresh_malloc_info();
return this->heap_info.data();
}
simgrid::mc::ActorInformation* resolveActorInfo(simgrid::mc::RemotePtr<simgrid::simix::ActorImpl> actor)
{
xbt_assert(mc_model_checker != nullptr);
- if (!actor)
+ if (not actor)
return nullptr;
this->refresh_simix();
for (auto& actor_info : this->smx_actors_infos)
void _mc_cfg_cb_timeout(const char *name)
{
- if (_sg_cfg_init_status && !(_sg_do_model_check || MC_record_path))
+ if (_sg_cfg_init_status && not(_sg_do_model_check || MC_record_path))
xbt_die("You are specifying a value to enable/disable timeout for wait requests after the initialization (through MSG_config?), but model-checking was not activated at config time (through bu the program was not runned under the model-checker (with simgrid-mc)). This won't work, sorry.");
_sg_mc_timeout = xbt_cfg_get_boolean(name);
size_t n = snapshot->snapshot_regions.size();
for (size_t i = 0; i != n; ++i) {
mc_mem_region_t region = snapshot->snapshot_regions[i].get();
- if (!(region && region->contain(simgrid::mc::remote(addr))))
+ if (not(region && region->contain(simgrid::mc::remote(addr))))
continue;
if (region->storage_type() == simgrid::mc::StorageType::Privatized) {
T* local() const { return (T*)address_; }
operator bool() const { return address_; }
- bool operator!() const { return !address_; }
+ bool operator!() const { return not address_; }
operator RemotePtr<void>() const { return RemotePtr<void>(address_); }
RemotePtr<T> operator+(std::uint64_t n) const { return RemotePtr<T>(address_ + n * sizeof(T)); }
RemotePtr<T> operator-(std::uint64_t n) const { return RemotePtr<T>(address_ - n * sizeof(T)); }
}
/* Determine neighbors, order in which blocks will arrive, etc. */
- even_rank = !(rank % 2);
+ even_rank = not(rank % 2);
if (even_rank) {
neighbor[0] = (rank + 1) % size;
neighbor[1] = (rank - 1 + size) % size;
{
ompi_coll_tree_t *ptr;
- if ((not tree) || (!*tree)) {
+ if ((tree == nullptr) || (*tree == nullptr)) {
return MPI_SUCCESS;
}
}
}
total_send = scount;
- } else if (!(vrank % 2)) {
+ } else if (not(vrank % 2)) {
/* non-root, non-leaf nodes, allocate temp buffer for recv
* the most we need is rcount*size/2 */
tempbuf = (char*)smpi_get_tmp_recvbuffer(rtrue_extent + (rcount * size - 1) * rextent);
ptmp = (char*)rbuf;
}
- if (!(vrank % 2)) {
+ if (not(vrank % 2)) {
if (rank != root) {
/* recv from parent on non-root */
Request::recv(ptmp, rcount * size, rdtype, bmtree->tree_prev, COLL_TAG_SCATTER, comm, &status);
if (MV2_Allgatherv_function == &MPIR_Allgatherv_Rec_Doubling_MV2)
{
- if(!(comm_size & (comm_size - 1)))
- {
- mpi_errno =
- MPIR_Allgatherv_Rec_Doubling_MV2(sendbuf, sendcount,
- sendtype, recvbuf,
- recvcounts, displs,
- recvtype, comm);
+ if (not(comm_size & (comm_size - 1))) {
+ mpi_errno =
+ MPIR_Allgatherv_Rec_Doubling_MV2(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm);
} else {
mpi_errno =
MPIR_Allgatherv_Bruck_MV2(sendbuf, sendcount,
*ret = MPI_SUCCESS;
if(datatype->name_)
name_ = xbt_strdup(datatype->name_);
-
- if(!(datatype->attributes()->empty())){
+
+ if (not datatype->attributes()->empty()) {
int flag;
void* value_out;
for(auto it = datatype->attributes()->begin(); it != datatype->attributes()->end(); it++){
if (datatype->refcount_ > 0)
datatype->refcount_--;
- if (datatype->refcount_ == 0 && !(datatype->flags_ & DT_FLAG_PREDEFINED))
+ if (datatype->refcount_ == 0 && not(datatype->flags_ & DT_FLAG_PREDEFINED))
delete datatype;
#if SIMGRID_HAVE_MC
recvcount *= recvtype->size();
count = sendcount < recvcount ? sendcount : recvcount;
- if(!(sendtype->flags() & DT_FLAG_DERIVED) && !(recvtype->flags() & DT_FLAG_DERIVED)) {
+ if (not(sendtype->flags() & DT_FLAG_DERIVED) && not(recvtype->flags() & DT_FLAG_DERIVED)) {
if (not smpi_process()->replaying())
memcpy(recvbuf, sendbuf, count);
- }
- else if (!(sendtype->flags() & DT_FLAG_DERIVED))
- {
+ } else if (not(sendtype->flags() & DT_FLAG_DERIVED)) {
recvtype->unserialize( sendbuf, recvbuf, recvcount/recvtype->size(), MPI_REPLACE);
- }
- else if (!(recvtype->flags() & DT_FLAG_DERIVED))
- {
+ } else if (not(recvtype->flags() & DT_FLAG_DERIVED)) {
sendtype->serialize(sendbuf, recvbuf, sendcount/sendtype->size());
}else{
void Type_Vector::serialize( void* noncontiguous_buf, void *contiguous_buf,
int count){
- int i;
char* contiguous_buf_char = static_cast<char*>(contiguous_buf);
char* noncontiguous_buf_char = static_cast<char*>(noncontiguous_buf);
- for (i = 0; i < block_count_ * count; i++) {
- if (!(old_type_->flags() & DT_FLAG_DERIVED))
- memcpy(contiguous_buf_char, noncontiguous_buf_char, block_length_ * old_type_->size());
- else
- old_type_->serialize(noncontiguous_buf_char, contiguous_buf_char, block_length_);
+ for (int i = 0; i < block_count_ * count; i++) {
+ if (not(old_type_->flags() & DT_FLAG_DERIVED))
+ memcpy(contiguous_buf_char, noncontiguous_buf_char, block_length_ * old_type_->size());
+ else
+ old_type_->serialize(noncontiguous_buf_char, contiguous_buf_char, block_length_);
contiguous_buf_char += block_length_*old_type_->size();
if((i+1)%block_count_ ==0)
void Type_Vector::unserialize( void* contiguous_buf, void *noncontiguous_buf,
int count, MPI_Op op){
- int i;
char* contiguous_buf_char = static_cast<char*>(contiguous_buf);
char* noncontiguous_buf_char = static_cast<char*>(noncontiguous_buf);
- for (i = 0; i < block_count_ * count; i++) {
- if (!(old_type_->flags() & DT_FLAG_DERIVED)){
+ for (int i = 0; i < block_count_ * count; i++) {
+ if (not(old_type_->flags() & DT_FLAG_DERIVED)) {
if(op != MPI_OP_NULL)
op->apply(contiguous_buf_char, noncontiguous_buf_char, &block_length_,
old_type_);
void Type_Hvector::serialize( void* noncontiguous_buf, void *contiguous_buf,
int count){
- int i;
char* contiguous_buf_char = static_cast<char*>(contiguous_buf);
char* noncontiguous_buf_char = static_cast<char*>(noncontiguous_buf);
- for (i = 0; i < block_count_ * count; i++) {
- if (!(old_type_->flags() & DT_FLAG_DERIVED))
+ for (int i = 0; i < block_count_ * count; i++) {
+ if (not(old_type_->flags() & DT_FLAG_DERIVED))
memcpy(contiguous_buf_char, noncontiguous_buf_char, block_length_ * old_type_->size());
else
old_type_->serialize( noncontiguous_buf_char, contiguous_buf_char, block_length_);
void Type_Hvector::unserialize( void* contiguous_buf, void *noncontiguous_buf,
int count, MPI_Op op){
- int i;
char* contiguous_buf_char = static_cast<char*>(contiguous_buf);
char* noncontiguous_buf_char = static_cast<char*>(noncontiguous_buf);
- for (i = 0; i < block_count_ * count; i++) {
- if (!(old_type_->flags() & DT_FLAG_DERIVED)){
+ for (int i = 0; i < block_count_ * count; i++) {
+ if (not(old_type_->flags() & DT_FLAG_DERIVED)) {
if(op!=MPI_OP_NULL)
op->apply( contiguous_buf_char, noncontiguous_buf_char, &block_length_, old_type_);
}else
char* noncontiguous_buf_char = static_cast<char*>(noncontiguous_buf)+block_indices_[0] * old_type_->size();
for (int j = 0; j < count; j++) {
for (int i = 0; i < block_count_; i++) {
- if (!(old_type_->flags() & DT_FLAG_DERIVED))
+ if (not(old_type_->flags() & DT_FLAG_DERIVED))
memcpy(contiguous_buf_char, noncontiguous_buf_char, block_lengths_[i] * old_type_->size());
else
old_type_->serialize( noncontiguous_buf_char, contiguous_buf_char, block_lengths_[i]);
static_cast<char*>(noncontiguous_buf)+block_indices_[0]*old_type_->get_extent();
for (int j = 0; j < count; j++) {
for (int i = 0; i < block_count_; i++) {
- if (!(old_type_->flags() & DT_FLAG_DERIVED)){
+ if (not(old_type_->flags() & DT_FLAG_DERIVED)) {
if(op!=MPI_OP_NULL)
op->apply( contiguous_buf_char, noncontiguous_buf_char, &block_lengths_[i],
old_type_);
char* noncontiguous_buf_char = static_cast<char*>(noncontiguous_buf)+ block_indices_[0];
for (int j = 0; j < count; j++) {
for (int i = 0; i < block_count_; i++) {
- if (!(old_type_->flags() & DT_FLAG_DERIVED))
+ if (not(old_type_->flags() & DT_FLAG_DERIVED))
memcpy(contiguous_buf_char, noncontiguous_buf_char, block_lengths_[i] * old_type_->size());
else
old_type_->serialize(noncontiguous_buf_char, contiguous_buf_char,block_lengths_[i]);
char* noncontiguous_buf_char = static_cast<char*>(noncontiguous_buf)+ block_indices_[0];
for (int j = 0; j < count; j++) {
for (int i = 0; i < block_count_; i++) {
- if (!(old_type_->flags() & DT_FLAG_DERIVED)){
+ if (not(old_type_->flags() & DT_FLAG_DERIVED)) {
if(op!=MPI_OP_NULL)
op->apply( contiguous_buf_char, noncontiguous_buf_char, &block_lengths_[i],
old_type_);
char* noncontiguous_buf_char = static_cast<char*>(noncontiguous_buf)+ block_indices_[0];
for (int j = 0; j < count; j++) {
for (int i = 0; i < block_count_; i++) {
- if (!(old_types_[i]->flags() & DT_FLAG_DERIVED))
+ if (not(old_types_[i]->flags() & DT_FLAG_DERIVED))
memcpy(contiguous_buf_char, noncontiguous_buf_char, block_lengths_[i] * old_types_[i]->size());
else
old_types_[i]->serialize( noncontiguous_buf_char,contiguous_buf_char,block_lengths_[i]);
char* noncontiguous_buf_char = static_cast<char*>(noncontiguous_buf)+ block_indices_[0];
for (int j = 0; j < count; j++) {
for (int i = 0; i < block_count_; i++) {
- if (!(old_types_[i]->flags() & DT_FLAG_DERIVED)){
+ if (not(old_types_[i]->flags() & DT_FLAG_DERIVED)) {
if(op!=MPI_OP_NULL)
op->apply( contiguous_buf_char, noncontiguous_buf_char, &block_lengths_[i], old_types_[i]);
}else
}
template <typename T> void Keyval::cleanup_attr(){
- if(!attributes()->empty()){
+ if (not attributes()->empty()) {
int flag=0;
for(auto it : attributes_){
try{
#define PROD_OP(a, b) (b) *= (a)
#define LAND_OP(a, b) (b) = (a) && (b)
#define LOR_OP(a, b) (b) = (a) || (b)
-#define LXOR_OP(a, b) (b) = (!(a) && (b)) || ((a) && !(b))
+#define LXOR_OP(a, b) (b) = (not(a) && (b)) || ((a) && not(b))
#define BAND_OP(a, b) (b) &= (a)
#define BOR_OP(a, b) (b) |= (a)
#define BXOR_OP(a, b) (b) ^= (a)
detached_ = 1;
XBT_DEBUG("Send request %p is detached", this);
refcount_++;
- if(!(old_type_->flags() & DT_FLAG_DERIVED)){
+ if (not(old_type_->flags() & DT_FLAG_DERIVED)) {
oldbuf = buf_;
if (not process->replaying() && oldbuf != nullptr && size_ != 0) {
if((smpi_privatize_global_variables != 0)
if (async_small_thresh != 0 || (flags_ & RMA) != 0)
xbt_mutex_acquire(mut);
- if (!(async_small_thresh != 0 || (flags_ & RMA) !=0)) {
+ if (not(async_small_thresh != 0 || (flags_ & RMA) != 0)) {
mailbox = process->mailbox();
} else if (((flags_ & RMA) != 0) || static_cast<int>(size_) < async_small_thresh) { // eager mode
mailbox = process->mailbox();
std::vector<int> map; /** Maps all matching comms back to their location in requests **/
for(i = 0; i < count; i++) {
- if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action_ && !(requests[i]->flags_ & PREPARED)) {
- comms.push_back(requests[i]->action_);
- map.push_back(i);
+ if ((requests[i] != MPI_REQUEST_NULL) && requests[i]->action_ && not(requests[i]->flags_ & PREPARED)) {
+ comms.push_back(requests[i]->action_);
+ map.push_back(i);
}
}
if (not map.empty()) {
MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
int flag=1;
for(int i=0; i<count; i++){
- if (requests[i] != MPI_REQUEST_NULL && !(requests[i]->flags_ & PREPARED)) {
+ if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & PREPARED)) {
if (test(&requests[i], pstat)!=1){
flag=0;
}else{
MPI_Request req = *request;
Status::empty(status);
- if(!((req->detached_ != 0) && ((req->flags_ & SEND) != 0)) && ((req->flags_ & PREPARED) == 0)){
+ if (not((req->detached_ != 0) && ((req->flags_ & SEND) != 0)) && ((req->flags_ & PREPARED) == 0)) {
if(status != MPI_STATUS_IGNORE) {
int src = req->src_ == MPI_ANY_SOURCE ? req->real_src_ : req->src_;
status->MPI_SOURCE = req->comm_->group()->rank(src);
map = xbt_new(int, count);
XBT_DEBUG("Wait for one of %d", count);
for(i = 0; i < count; i++) {
- if (requests[i] != MPI_REQUEST_NULL && !(requests[i]->flags_ & PREPARED) && !(requests[i]->flags_ & FINISHED)) {
+ if (requests[i] != MPI_REQUEST_NULL && not(requests[i]->flags_ & PREPARED) &&
+ not(requests[i]->flags_ & FINISHED)) {
if (requests[i]->action_ != nullptr) {
XBT_DEBUG("Waiting any %p ", requests[i]);
xbt_dynar_push(&comms, &requests[i]->action_);
if (i != -1) {
index = map[i];
//in case of an accumulate, we have to wait the end of all requests to apply the operation, ordered correctly.
- if ((requests[index] == MPI_REQUEST_NULL)
- || (!((requests[index]->flags_ & ACCUMULATE) && (requests[index]->flags_ & RECV)))){
+ if ((requests[index] == MPI_REQUEST_NULL) ||
+ (not((requests[index]->flags_ & ACCUMULATE) && (requests[index]->flags_ & RECV)))) {
finish_wait(&requests[index],status);
if (requests[i] != MPI_REQUEST_NULL && (requests[i]->flags_ & NON_PERSISTENT))
requests[index] = MPI_REQUEST_NULL;
}
int Win::attach (void *base, MPI_Aint size){
- if (!(base_ == MPI_BOTTOM || base_ == 0))
+ if (not(base_ == MPI_BOTTOM || base_ == 0))
return MPI_ERR_ARG;
base_=0;//actually the address will be given in the RMA calls, as being the disp.
size_+=size;
if(lock_type == MPI_LOCK_SHARED){//the window used to be exclusive, it's now shared.
xbt_mutex_release(target_win->lock_mut_);
}
- } else if(!(target_win->mode_==MPI_LOCK_SHARED && lock_type == MPI_LOCK_EXCLUSIVE))
- target_win->mode_+= lock_type; // don't set to exclusive if it's already shared
+ } else if (not(target_win->mode_ == MPI_LOCK_SHARED && lock_type == MPI_LOCK_EXCLUSIVE))
+ target_win->mode_ += lock_type; // don't set to exclusive if it's already shared
target_win->lockers_.push_back(comm_->rank());
static s_xbt_swag_t cnst_to_update;
- if (!(sys->modified))
+ if (not sys->modified)
return;
/* Init */
lmm_print(sys);
}
- if (!(sys->modified))
+ if (not sys->modified)
return;
/* Initialize lambda. */
lmm_variable_t lmm_get_var_from_cnst(lmm_system_t /*sys*/, lmm_constraint_t cnst, lmm_element_t * elem)
{
- if (!(*elem)) {
+ if (*elem == nullptr) {
// That is the first call, pick the first element among enabled_element_set (or disabled_element_set if
// enabled_element_set is empty)
*elem = (lmm_element_t) xbt_swag_getFirst(&(cnst->enabled_element_set));
- if (!(*elem))
+ if (*elem == nullptr)
*elem = (lmm_element_t) xbt_swag_getFirst(&(cnst->disabled_element_set));
} else {
//elem is not null, so we carry on
if(xbt_swag_belongs(*elem,&(cnst->enabled_element_set))){
//Look at enabled_element_set, and jump to disabled_element_set when finished
*elem = (lmm_element_t) xbt_swag_getNext(*elem, cnst->enabled_element_set.offset);
- if (!(*elem))
+ if (*elem == nullptr)
*elem = (lmm_element_t) xbt_swag_getFirst(&(cnst->disabled_element_set));
} else {
*elem = (lmm_element_t) xbt_swag_getNext(*elem, cnst->disabled_element_set.offset);
lmm_variable_t lmm_get_var_from_cnst_safe(lmm_system_t /*sys*/, lmm_constraint_t cnst, lmm_element_t * elem,
lmm_element_t * nextelem, int * numelem)
{
- if (!(*elem)){
+ if (*elem == nullptr) {
*elem = (lmm_element_t) xbt_swag_getFirst(&(cnst->enabled_element_set));
*numelem = xbt_swag_size(&(cnst->enabled_element_set))+xbt_swag_size(&(cnst->disabled_element_set))-1;
- if (!(*elem))
+ if (*elem == nullptr)
*elem = (lmm_element_t) xbt_swag_getFirst(&(cnst->disabled_element_set));
}else{
*elem = *nextelem;
if(xbt_swag_belongs(*elem,&(cnst->enabled_element_set))){
//Look at enabled_element_set, and jump to disabled_element_set when finished
*nextelem = (lmm_element_t) xbt_swag_getNext(*elem, cnst->enabled_element_set.offset);
- if (!(*nextelem))
+ if (*nextelem == nullptr)
*nextelem = (lmm_element_t) xbt_swag_getFirst(&(cnst->disabled_element_set));
} else {
*nextelem = (lmm_element_t) xbt_swag_getNext(*elem, cnst->disabled_element_set.offset);
double min_usage = -1;
double min_bound = -1;
- if (!(sys->modified))
+ if (not sys->modified)
return;
XBT_IN("(sys=%p)", sys);
double_update(&(deltap), action->latency_, sg_surf_precision);
action->latency_ = 0.0;
}
- if (action->latency_ <= 0.0 && !(action->isSuspended()))
+ if (action->latency_ <= 0.0 && not action->isSuspended())
lmm_update_variable_weight(maxminSystem_, action->getVariable(), action->weight_);
}
if (TRACE_is_enabled()) {
explicit DatedValue() = default;
explicit DatedValue(double d, double v) : date_(d), value_(v) {}
bool operator==(DatedValue e2);
- bool operator!=(DatedValue e2) { return !(*this == e2); }
+ bool operator!=(DatedValue e2) { return not(*this == e2); }
};
std::ostream& operator<<(std::ostream& out, const DatedValue& e);
XBT_DEBUG("REHASH (%d->%d)", oldsize, newsize);
for (unsigned i = 0; i < oldsize; i++, currcell++) {
- if (!*currcell) /* empty cell */
+ if (*currcell == nullptr) /* empty cell */
continue;
xbt_dictelm_t *twincell = currcell + oldsize;
if ((bucklet->hash_code & newsize) != i) { /* Move to b */
*pprev = bucklet->next;
bucklet->next = *twincell;
- if (!*twincell)
+ if (*twincell == nullptr)
dict->fill++;
*twincell = bucklet;
} else {
}
}
- if (!*currcell) /* everything moved */
+ if (*currcell == nullptr) /* everything moved */
dict->fill--;
}
}
xbt_dict_t head = xbt_dict_new_homogeneous(&free);
xbt_test_add("Fill %d elements, with keys being the number of element", NB_ELM);
for (int j = 0; j < NB_ELM; j++) {
- /* if (!(j%1000)) { printf("."); fflush(stdout); } */
char* key = (char*)xbt_malloc(10);
snprintf(key,10, "%d", j);
xbt_test_add("Remove my %d elements", NB_ELM);
key = (char*) xbt_malloc(10);
for (int j = 0; j < NB_ELM; j++) {
- /* if (!(j%10000)) printf("."); fflush(stdout); */
snprintf(key,10, "%d", j);
xbt_dict_remove(head, key);
}
c = 1;
if (v1 != 5678)
xbt_test_fail("v1 = %d (!= 5678)", v1);
- if (!(ex.category == 1 && ex.value == 2 && not strcmp(ex.what(), "blah")))
+ if (not(ex.category == 1 && ex.value == 2 && not strcmp(ex.what(), "blah")))
xbt_test_fail("unexpected exception contents");
}
if (not c)