/tools/doxygen.log
/doc
+/projects/Win/vc12/cds.opensdf
+/projects/Win/vc12/cds.sdf
+/projects/Win/vc12/cds.v12.suo
+/tests/cppunit/*.o
+*.o
+/todo-2.0.txt
+/tests/data/dictionary.txt
\endcode
*/
- template <class Duration = cds::chrono::milliseconds, typename Tag=void >
+ template <class Duration = std::chrono::milliseconds, typename Tag=void >
class delay
{
public:
The declaration <tt>cds::backoff::delay_of< 5 > bkoff</tt> is equal for
<tt>cds::backoff::delay<> bkoff(5)</tt>.
*/
- template <unsigned int Timeout, class Duration = cds::chrono::milliseconds >
+ template <unsigned int Timeout, class Duration = std::chrono::milliseconds >
class delay_of: public delay<Duration>
{
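A minimal usage sketch of such a duration-based back-off follows; it is illustrative only. The simple_delay functor stands in for cds::backoff::delay<> and the retry loop is hypothetical, assuming the back-off object is a no-argument callable (as the class above suggests); with libcds one would pass, e.g., cds::backoff::delay<>( 5 ) instead.
\code
#include <atomic>
#include <chrono>
#include <thread>

// Illustrative stand-in for a duration-based back-off functor such as
// cds::backoff::delay<>: each call sleeps for a fixed number of milliseconds.
struct simple_delay {
    std::chrono::milliseconds timeout;
    explicit simple_delay( unsigned ms ): timeout( ms ) {}
    void operator()() const { std::this_thread::sleep_for( timeout ); }
};

// A CAS retry loop that backs off between attempts.
template <class BackOff>
int fetch_and_double( std::atomic<int>& v, BackOff bkoff )
{
    int cur = v.load( std::memory_order_relaxed );
    while ( !v.compare_exchange_weak( cur, cur * 2,
                std::memory_order_acq_rel, std::memory_order_relaxed ))
    {
        bkoff();    // wait before the next attempt
    }
    return cur;
}
\endcode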
//@cond
// cas
static bool atomic_compare_exchange_weak_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
- assert( expected != NULL );
+ assert( expected );
return platform::cas8_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail );
}
static bool atomic_compare_exchange_weak_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
- assert( expected != NULL );
+ assert( expected );
return platform::cas8_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail );
}
static bool atomic_compare_exchange_weak( T volatile * pDest, T * expected, T desired ) CDS_NOEXCEPT
}
static bool atomic_compare_exchange_strong_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
- assert( expected != NULL );
+ assert( expected );
return platform::cas8_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail );
}
static bool atomic_compare_exchange_strong_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
- assert( expected != NULL );
+ assert( expected );
return platform::cas8_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail );
}
static bool atomic_compare_exchange_strong( T volatile * pDest, T * expected, T desired ) CDS_NOEXCEPT
// cas
static bool atomic_compare_exchange_weak_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
- assert( expected != NULL );
+ assert( expected );
return platform::cas16_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail );
}
static bool atomic_compare_exchange_weak_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
- assert( expected != NULL );
+ assert( expected );
return platform::cas16_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail );
}
static bool atomic_compare_exchange_weak( T volatile * pDest, T * expected, T desired ) CDS_NOEXCEPT
}
static bool atomic_compare_exchange_strong_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
- assert( expected != NULL );
+ assert( expected );
return platform::cas16_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail );
}
static bool atomic_compare_exchange_strong_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
- assert( expected != NULL );
+ assert( expected );
return platform::cas16_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail );
}
static bool atomic_compare_exchange_strong( T volatile * pDest, T * expected, T desired ) CDS_NOEXCEPT
// cas
static bool atomic_compare_exchange_weak_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
- assert( expected != NULL );
+ assert( expected );
return platform::cas32_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail );
}
static bool atomic_compare_exchange_weak_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
- assert( expected != NULL );
+ assert( expected );
return platform::cas32_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail );
}
static bool atomic_compare_exchange_weak( T volatile * pDest, T * expected, T desired ) CDS_NOEXCEPT
}
static bool atomic_compare_exchange_strong_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
- assert( expected != NULL );
+ assert( expected );
return platform::cas32_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail );
}
static bool atomic_compare_exchange_strong_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
- assert( expected != NULL );
+ assert( expected );
return platform::cas32_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail );
}
static bool atomic_compare_exchange_strong( T volatile * pDest, T * expected, T desired ) CDS_NOEXCEPT
// cas
static bool atomic_compare_exchange_weak_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
- assert( expected != NULL );
+ assert( expected );
return platform::cas64_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail );
}
static bool atomic_compare_exchange_weak_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
- assert( expected != NULL );
+ assert( expected );
return platform::cas64_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail );
}
static bool atomic_compare_exchange_weak( T volatile * pDest, T * expected, T desired ) CDS_NOEXCEPT
}
static bool atomic_compare_exchange_strong_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
- assert( expected != NULL );
+ assert( expected );
return platform::cas64_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail );
}
static bool atomic_compare_exchange_strong_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
- assert( expected != NULL );
+ assert( expected );
return platform::cas64_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail );
}
static bool atomic_compare_exchange_strong( T volatile * pDest, T * expected, T desired ) CDS_NOEXCEPT
// cas
static bool atomic_compare_exchange_weak_explicit( T * volatile * pDest, T * * expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
- assert( expected != NULL );
+ assert( expected );
return platform::cas_ptr_weak( pDest, *expected, desired, mo_success, mo_fail );
}
static bool atomic_compare_exchange_weak_explicit( T * * pDest, T * * expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
- assert( expected != NULL );
+ assert( expected );
return platform::cas_ptr_weak( pDest, *expected, desired, mo_success, mo_fail );
}
static bool atomic_compare_exchange_weak( T * volatile * pDest, T ** expected, T * desired ) CDS_NOEXCEPT
}
static bool atomic_compare_exchange_strong_explicit( T * volatile * pDest, T ** expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
- assert( expected != NULL );
+ assert( expected );
return platform::cas_ptr_strong( pDest, *expected, desired, mo_success, mo_fail );
}
static bool atomic_compare_exchange_strong_explicit( T ** pDest, T ** expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
- assert( expected != NULL );
+ assert( expected );
return platform::cas_ptr_strong( pDest, *expected, desired, mo_success, mo_fail );
}
static bool atomic_compare_exchange_strong( T * volatile * pDest, T ** expected, T * desired ) CDS_NOEXCEPT
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
- assert( pSrc != NULL );
+ assert( pSrc );
assert( cds::details::is_aligned( pSrc, 8 ));
T v = *pSrc;
|| order == memory_order_release
|| order == memory_order_seq_cst
);
- assert( pDest != NULL );
+ assert( pDest );
assert( cds::details::is_aligned( pDest, 8 ));
if (order != memory_order_seq_cst) {
|| order == memory_order_release
|| order == memory_order_seq_cst
);
- assert( pDest != NULL );
+ assert( pDest );
if ( order != memory_order_seq_cst ) {
fence_before( order );
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
- assert( pSrc != NULL );
+ assert( pSrc );
T * v = *pSrc;
fence_after_load( order );
|| order == memory_order_acquire \
|| order == memory_order_seq_cst \
) ; \
- assert( pSrc != NULL ) ; \
+ assert( pSrc ) ; \
T val ; \
__asm__ __volatile__ ( \
"ld" #n_bytes ".acq %[val] = [%[pSrc]] \n\t" \
|| order == memory_order_release \
|| order == memory_order_seq_cst \
) ; \
- assert( pDest != NULL ) ; \
+ assert( pDest ) ; \
if ( order == memory_order_seq_cst ) { \
__asm__ __volatile__ ( \
"st" #n_bytes ".rel [%[pDest]] = %[val] \n\t" \
static inline T exchange##n_bits( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT \
{ \
static_assert( sizeof(T) == n_bytes, "Illegal size of operand" ) ; \
- assert( pDest != NULL ) ; \
+ assert( pDest ) ; \
T current ; \
switch(order) \
{ \
static inline T fetch32_add( T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT
{
static_assert( sizeof(T) == 4, "Illegal size of operand" );
- assert( pDest != NULL );
+ assert( pDest );
T cur;
switch ( val ) {
static inline T fetch32_sub( T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT
{
static_assert( sizeof(T) == 4, "Illegal size of operand" );
- assert( pDest != NULL );
+ assert( pDest );
T cur;
switch ( val ) {
case 1:
static inline T fetch64_add( T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
- assert( pDest != NULL );
+ assert( pDest );
T cur;
switch ( val ) {
static inline T fetch64_sub( T volatile * pDest, T val, memory_order order) CDS_NOEXCEPT
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
- assert( pDest != NULL );
+ assert( pDest );
T cur;
switch ( val ) {
case 1:
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
- assert( pSrc != NULL );
+ assert( pSrc );
T * val;
__asm__ __volatile__ (
"ld8.acq %[val] = [%[pSrc]] \n\t"
|| order == memory_order_release
|| order == memory_order_seq_cst
);
- assert( pDest != NULL );
+ assert( pDest );
if ( order == memory_order_seq_cst ) {
__asm__ __volatile__ (
static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
static_assert( sizeof(T *) == 8, "Illegal size of operand" );
- assert( pDest != NULL );
+ assert( pDest );
T * current;
static inline T * exchange_ptr( T * volatile * pDest, T * val, memory_order order ) CDS_NOEXCEPT
{
static_assert( sizeof(T *) == 8, "Illegal size of operand" );
- assert( pDest != NULL );
+ assert( pDest );
T * current;
switch(order) {
template <> struct atomic_pointer_sizeof<void> { enum { value = 1 }; };
// It does not work properly
- // atomic.fetch_add( ... ) returns NULL, why?..
+ // atomic.fetch_add( ... ) returns nullptr, why?..
//# define CDS_ATOMIC_fetch_ptr_add_defined
template <typename T>
static inline T * fetch_ptr_add( T * volatile * pDest, ptrdiff_t val, memory_order order) CDS_NOEXCEPT
{
static_assert( sizeof(T *) == 8, "Illegal size of operand" );
- assert( pDest != NULL );
+ assert( pDest );
T * cur;
val *= atomic_pointer_sizeof<T>::value;
}
// It does not work properly
- // atomic.fetch_sub( ... ) returns NULL, why?..
+ // atomic.fetch_sub( ... ) returns nullptr, why?..
//# define CDS_ATOMIC_fetch_ptr_sub_defined
template <typename T>
static inline T * fetch_ptr_sub( T * volatile * pDest, ptrdiff_t val, memory_order order) CDS_NOEXCEPT
{
static_assert( sizeof(T *) == 8, "Illegal size of operand" );
- assert( pDest != NULL );
+ assert( pDest );
T * cur;
val *= atomic_pointer_sizeof<T>::value;
switch ( val ) {
|| order == memory_order_release
|| order == memory_order_seq_cst
);
- assert( pDest != NULL );
+ assert( pDest );
fence_before(order);
*pDest = src;
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
- assert( pSrc != NULL );
+ assert( pSrc );
fence_before(order);
T v = *pSrc;
static inline bool cas32_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
static_assert( sizeof(T) == 4, "Illegal size of operand" );
- assert( pDest != NULL );
+ assert( pDest );
fence_before( mo_success );
__asm__ __volatile__(
static inline T exchange32( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT
{
static_assert( sizeof(T) == 4, "Illegal size of operand" );
- assert( pDest != NULL );
+ assert( pDest );
// This primitive could be implemented via "swap" instruction but "swap" is deprecated in UltraSparc
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
- assert( pSrc != NULL );
+ assert( pSrc );
fence_before(order);
T v = *pSrc;
|| order == memory_order_release
|| order == memory_order_seq_cst
);
- assert( pDest != NULL );
+ assert( pDest );
fence_before(order);
*pDest = val;
static inline bool cas64_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
- assert( pDest != NULL );
+ assert( pDest );
fence_before( mo_success );
__asm__ __volatile__(
static inline T exchange64( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT
{
static_assert( sizeof(T) == 8, "Illegal size of operand" );
- assert( pDest != NULL );
+ assert( pDest );
T cur = load64( pDest, memory_order_relaxed );
do {} while ( !cas64_strong( pDest, cur, v, order, memory_order_relaxed ));
|| order == memory_order_release
|| order == memory_order_seq_cst
);
- assert( pDest != NULL );
+ assert( pDest );
fence_before( order );
*pDest = src;
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
- assert( pSrc != NULL );
+ assert( pSrc );
fence_before( order );
T v = *pSrc;
static inline bool cas8_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
static_assert( sizeof(T) == 1, "Illegal size of operand" );
- assert( pDest != NULL );
+ assert( pDest );
union u32 {
uint32_t w;
static inline bool cas8_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
static_assert( sizeof(T) == 1, "Illegal size of operand" );
- assert( pDest != NULL );
+ assert( pDest );
union u32 {
uint32_t w;
static inline T exchange8( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT
{
static_assert( sizeof(T) == 1, "Illegal size of operand" );
- assert( pDest != NULL );
+ assert( pDest );
T cur = load8( pDest, memory_order_relaxed );
do {} while ( !cas8_strong( pDest, cur, v, order, memory_order_relaxed ));
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
- assert( pSrc != NULL );
+ assert( pSrc );
fence_before( order );
T v = *pSrc;
|| order == memory_order_release
|| order == memory_order_seq_cst
);
- assert( pDest != NULL );
+ assert( pDest );
fence_before(order);
*pDest = src;
static inline bool cas16_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
static_assert( sizeof(T) == 2, "Illegal size of operand" );
- assert( pDest != NULL );
+ assert( pDest );
union u32 {
uint32_t w;
static inline bool cas16_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
{
static_assert( sizeof(T) == 2, "Illegal size of operand" );
- assert( pDest != NULL );
+ assert( pDest );
union u32 {
uint32_t w;
static inline T exchange16( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT
{
static_assert( sizeof(T) == 2, "Illegal size of operand" );
- assert( pDest != NULL );
+ assert( pDest );
T cur = load16( pDest, memory_order_relaxed );
do {} while ( !cas16_strong( pDest, cur, v, order, memory_order_relaxed ));
|| order == memory_order_release
|| order == memory_order_seq_cst
);
- assert( pDest != NULL );
+ assert( pDest );
fence_before(order);
*pDest = src;
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
- assert( pSrc != NULL );
+ assert( pSrc );
fence_before( order );
T * v = *pSrc;
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
- assert( pSrc != NULL );
+ assert( pSrc );
assert( cds::details::is_aligned( pSrc, 8 ));
T CDS_DATA_ALIGNMENT(8) v;
|| order == memory_order_release
|| order == memory_order_seq_cst
);
- assert( pDest != NULL );
+ assert( pDest );
assert( cds::details::is_aligned( pDest, 8 ));
if ( order != memory_order_seq_cst ) {
|| order == memory_order_release
|| order == memory_order_seq_cst
);
- assert( pDest != NULL );
+ assert( pDest );
if ( order != memory_order_seq_cst ) {
fence_before( order );
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
- assert( pSrc != NULL );
+ assert( pSrc );
T * v = *pSrc;
fence_after_load( order );
|| order == memory_order_release
|| order == memory_order_seq_cst
);
- assert( pDest != NULL );
+ assert( pDest );
if ( order != memory_order_seq_cst ) {
fence_before( order );
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
- assert( pSrc != NULL );
+ assert( pSrc );
T v = *pSrc;
fence_after_load( order );
|| order == memory_order_release
|| order == memory_order_seq_cst
);
- assert( pDest != NULL );
+ assert( pDest );
assert( cds::details::is_aligned( pDest, 2 ));
if ( order != memory_order_seq_cst ) {
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
- assert( pSrc != NULL );
+ assert( pSrc );
assert( cds::details::is_aligned( pSrc, 2 ));
T v = *pSrc;
|| order == memory_order_release
|| order == memory_order_seq_cst
);
- assert( pDest != NULL );
+ assert( pDest );
assert( cds::details::is_aligned( pDest, 4 ));
if ( order != memory_order_seq_cst ) {
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
- assert( pSrc != NULL );
+ assert( pSrc );
assert( cds::details::is_aligned( pSrc, 4 ));
T v = *pSrc;
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
- assert( pSrc != NULL );
+ assert( pSrc );
assert( cds::details::is_aligned( pSrc, 8 ));
T v = *pSrc;
|| order == memory_order_release
|| order == memory_order_seq_cst
);
- assert( pDest != NULL );
+ assert( pDest );
assert( cds::details::is_aligned( pDest, 8 ));
if ( order != memory_order_seq_cst ) {
|| order == memory_order_release
|| order == memory_order_seq_cst
);
- assert( pDest != NULL );
+ assert( pDest );
if ( order != memory_order_seq_cst ) {
fence_before( order );
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
- assert( pSrc != NULL );
+ assert( pSrc );
T * v = *pSrc;
fence_after_load( order );
|| order == memory_order_release
|| order == memory_order_seq_cst
);
- assert( pDest != NULL );
+ assert( pDest );
if ( order != memory_order_seq_cst ) {
fence_before( order );
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
- assert( pSrc != NULL );
+ assert( pSrc );
T v = *pSrc;
fence_after_load( order );
|| order == memory_order_release
|| order == memory_order_seq_cst
);
- assert( pDest != NULL );
+ assert( pDest );
assert( cds::details::is_aligned( pDest, 2 ));
if ( order != memory_order_seq_cst ) {
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
- assert( pSrc != NULL );
+ assert( pSrc );
assert( cds::details::is_aligned( pSrc, 2 ));
T v = *pSrc;
|| order == memory_order_release
|| order == memory_order_seq_cst
);
- assert( pDest != NULL );
+ assert( pDest );
assert( cds::details::is_aligned( pDest, 4 ));
if ( order != memory_order_seq_cst ) {
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
- assert( pSrc != NULL );
+ assert( pSrc );
assert( cds::details::is_aligned( pSrc, 4 ));
T v( *pSrc );
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
- assert( pSrc != NULL );
+ assert( pSrc );
assert( cds::details::is_aligned( pSrc, 8 ));
// Atomically loads 64bit value by SSE intrinsics
|| order == memory_order_release
|| order == memory_order_seq_cst
);
- assert( pDest != NULL );
+ assert( pDest );
assert( cds::details::is_aligned( pDest, 8 ));
if ( order != memory_order_seq_cst ) {
|| order == memory_order_release
|| order == memory_order_seq_cst
);
- assert( pDest != NULL );
+ assert( pDest );
if ( order != memory_order_seq_cst ) {
fence_before( order );
|| order == memory_order_acquire
|| order == memory_order_seq_cst
);
- assert( pSrc != NULL );
+ assert( pSrc );
T * v = *pSrc;
fence_after_load( order );
/// Clear the queue
/**
- The function repeatedly calls \ref dequeue until it returns NULL.
+ The function repeatedly calls \ref dequeue until it returns \p nullptr.
*/
void clear()
{
The <tt>insert(x)</tt> successively "kicks out" conflicting items until every key has a slot.
To add \p x, the method swaps \p x with \p y, the current occupant of <tt>table[0][h0(x)]</tt>.
- If the prior value was \p NULL, it is done. Otherwise, it swaps the newly nest-less value \p y
+ If the prior value was \p nullptr, it is done. Otherwise, it swaps the newly nest-less value \p y
for the current occupant of <tt>table[1][h1(y)]</tt> in the same way. As before, if the prior value
- was \p NULL, it is done. Otherwise, the method continues swapping entries (alternating tables)
+ was \p nullptr, it is done. Otherwise, the method continues swapping entries (alternating tables)
until it finds an empty slot. We might not find an empty slot, either because the table is full,
or because the sequence of displacement forms a cycle. We therefore need an upper limit on the
number of successive displacements we are willing to undertake. When this limit is exceeded,
The <tt>insert(x)</tt> successively "kicks out" conflicting items until every key has a slot.
To add \p x, the method swaps \p x with \p y, the current occupant of <tt>table[0][h0(x)]</tt>.
- If the prior value was \p NULL, it is done. Otherwise, it swaps the newly nest-less value \p y
+ If the prior value was \p nullptr, it is done. Otherwise, it swaps the newly nest-less value \p y
for the current occupant of <tt>table[1][h1(y)]</tt> in the same way. As before, if the prior value
- was \p NULL, it is done. Otherwise, the method continues swapping entries (alternating tables)
+ was \p nullptr, it is done. Otherwise, the method continues swapping entries (alternating tables)
until it finds an empty slot. We might not find an empty slot, either because the table is full,
or because the sequence of displacement forms a cycle. We therefore need an upper limit on the
number of successive displacements we are willing to undertake. When this limit is exceeded,
/// Finds \p key and return the item found
/** \anchor cds_nonintrusive_EllenBinTreeMap_rcu_get
The function searches the item with key equal to \p key and returns the pointer to item found.
- If \p key is not found it returns \p NULL.
+ If \p key is not found it returns \p nullptr.
RCU should be locked before call the function.
Returned pointer is valid while RCU is locked.
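A hedged sketch of this get-under-RCU-lock usage pattern is shown below. The member names rcu_lock, value_type, mapped_type and get() are assumptions taken from the surrounding documentation; the point is to copy what you need out of the container while the lock is held.
\code
// Hedged sketch of the get() usage pattern described above: the returned
// pointer may be dereferenced only while the RCU read-side lock is held,
// so copy the needed data out under the lock. Map is assumed to be one of
// the *_rcu map containers exposing rcu_lock, value_type and get().
template <class Map, class Key>
typename Map::mapped_type get_copy( Map& theMap, Key const& key,
                                    typename Map::mapped_type def )
{
    typename Map::rcu_lock lock;                      // lock RCU before get()
    typename Map::value_type * p = theMap.get( key );
    return p ? p->second : def;                       // safe only under the lock
}   // RCU unlocked here; p must not be used after this point
\endcode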
/// Finds \p key and return the item found
/** \anchor cds_nonintrusive_EllenBinTreeSet_rcu_get
The function searches the item with key equal to \p key and returns the pointer to item found.
- If \p key is not found it returns \p NULL.
+ If \p key is not found it returns \p nullptr.
RCU should be locked before call the function.
Returned pointer is valid while RCU is locked.
/// Returns an iterator that addresses the location succeeding the last element in a list
/**
Do not use the value returned by <tt>end</tt> function to access any item.
- Internally, <tt>end</tt> returning value equals to <tt>NULL</tt>.
+ Internally, <tt>end</tt> returning value equals to \p nullptr.
The returned value can be used only to control reaching the end of the list.
For empty list \code begin() == end() \endcode
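A small sketch of the begin()/end() contract described above: end() serves only as a loop sentinel and is never dereferenced. The generic helper is illustrative, assuming the container exposes forward iterators.
\code
// Illustrative only: iterate any container exposing begin()/end();
// the iterator returned by end() is compared against, never dereferenced.
template <class List, class Fn>
void for_each_item( List& list, Fn fn )
{
    for ( typename List::iterator it = list.begin(); it != list.end(); ++it )
        fn( *it );      // dereference only while it != end()
}
\endcode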
/// Returns an iterator that addresses the location succeeding the last element in a list
/**
Do not use the value returned by <tt>end</tt> function to access any item.
- Internally, <tt>end</tt> returning value equals to <tt>NULL</tt>.
+ Internally, <tt>end</tt> returning value equals to \p nullptr.
The returned value can be used only to control reaching the end of the list.
For empty list \code begin() == end() \endcode
/// Finds \p key and return the item found
/** \anchor cds_nonintrusive_LazyKVList_rcu_get
The function searches the item with \p key and returns the pointer to item found.
- If \p key is not found it returns \p NULL.
+ If \p key is not found it returns \p nullptr.
Note the compare functor should accept a parameter of type \p K that can be not the same as \p key_type.
/// Finds the key \p val and return the item found
/** \anchor cds_nonintrusive_LazyList_rcu_get
The function searches the item with key equal to \p val and returns the pointer to item found.
- If \p val is not found it returns \p NULL.
+ If \p val is not found it returns \p nullptr.
Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
/// Clear the deque
/**
- The function repeatedly calls \ref pop_back until it returns \p NULL.
+ The function repeatedly calls \ref pop_back until it returns \p nullptr.
*/
void clear()
{
/// Returns an iterator that addresses the location succeeding the last element in a list
/**
Do not use the value returned by <tt>end</tt> function to access any item.
- Internally, <tt>end</tt> returning value equals to <tt>NULL</tt>.
+ Internally, <tt>end</tt> returning value equals to \p nullptr.
The returned value can be used only to control reaching the end of the list.
For empty list \code begin() == end() \endcode
/// Returns an iterator that addresses the location succeeding the last element in a list
/**
Do not use the value returned by <tt>end</tt> function to access any item.
- Internally, <tt>end</tt> returning value equals to <tt>NULL</tt>.
+ Internally, <tt>end</tt> returning value equals to \p nullptr.
The returned value can be used only to control reaching the end of the list.
For empty list \code begin() == end() \endcode
/// Returns an iterator that addresses the location succeeding the last element in a list
/**
Do not use the value returned by <tt>end</tt> function to access any item.
- Internally, <tt>end</tt> returning value equals to <tt>NULL</tt>.
+ Internally, <tt>end</tt> returning value equals to \p nullptr.
The returned value can be used only to control reaching the end of the list.
For empty list \code begin() == end() \endcode
/// Finds \p key and return the item found
/** \anchor cds_nonintrusive_MichaelKVList_rcu_get
The function searches the item with \p key and returns the pointer to item found.
- If \p key is not found it returns \p NULL.
+ If \p key is not found it returns \p nullptr.
Note the compare functor should accept a parameter of type \p K that can be not the same as \p key_type.
value_ptr operator ->() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- return p ? &(p->m_Value) : reinterpret_cast<value_ptr>(NULL);
+ return p ? &(p->m_Value) : nullptr;
}
value_ref operator *() const
/// Returns an iterator that addresses the location succeeding the last element in a list
/**
Do not use the value returned by <tt>end</tt> function to access any item.
- Internally, <tt>end</tt> returning value equals to <tt>NULL</tt>.
+ Internally, <tt>end</tt> returning value equals to \p nullptr.
The returned value can be used only to control reaching the end of the list.
For empty list \code begin() == end() \endcode
//@cond
bool insert_node_at( head_type& refHead, node_type * pNode )
{
- assert( pNode != NULL );
+ assert( pNode );
scoped_node_ptr p(pNode);
if ( base_class::insert_at( refHead, *pNode )) {
p.release();
node_type * m_pItemFound;
ensure_functor()
- : m_pItemFound( NULL )
+ : m_pItemFound( nullptr )
{}
void operator ()(bool, node_type& item, node_type& )
value_ptr operator ->() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- return p ? &(p->m_Value) : reinterpret_cast<value_ptr>(NULL);
+ return p ? &(p->m_Value) : nullptr;
}
value_ref operator *() const
/// Returns an iterator that addresses the location succeeding the last element in a list
/**
Do not use the value returned by <tt>end</tt> function to access any item.
- Internally, <tt>end</tt> returning value equals to <tt>NULL</tt>.
+ Internally, <tt>end</tt> returning value equals to \p nullptr.
The returned value can be used only to control reaching the end of the list.
For empty list \code begin() == end() \endcode
value_ptr operator ->() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- return p ? &(p->m_Value) : reinterpret_cast<value_ptr>(NULL);
+ return p ? &(p->m_Value) : nullptr;
}
value_ref operator *() const
/// Returns an iterator that addresses the location succeeding the last element in a list
/**
Do not use the value returned by <tt>end</tt> function to access any item.
- Internally, <tt>end</tt> returning value equals to <tt>NULL</tt>.
+ Internally, <tt>end</tt> returning value equals to \p nullptr.
The returned value can be used only to control reaching the end of the list.
For empty list \code begin() == end() \endcode
/// Finds the key \p val and return the item found
/** \anchor cds_nonintrusive_MichaelList_rcu_get
The function searches the item with key equal to \p val and returns the pointer to item found.
- If \p val is not found it returns \p NULL.
+ If \p val is not found it returns \p nullptr.
Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
//@cond
bool insert_node_at( head_type& refHead, node_type * pNode )
{
- assert( pNode != NULL );
+ assert( pNode );
scoped_node_ptr p(pNode);
if ( base_class::insert_at( refHead, *pNode )) {
p.release();
bool operator !=(iterator const& i ) const;
};
\endcode
- Note, the iterator object returned by \ref end, \p cend member functions points to \p NULL and should not be dereferenced.
+ Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced.
\anchor cds_nonintrusive_MichaelHashMap_how_touse
<b>How to use</b>
/// Finds \p key and return the item found
/** \anchor cds_nonintrusive_MichaelHashMap_rcu_get
The function searches the item with key equal to \p key and returns the pointer to item found.
- If \p key is not found it returns \p NULL.
+ If \p key is not found it returns \p nullptr.
Note the compare functor should accept a parameter of type \p K that can be not the same as \p key_type.
bool operator !=(iterator const& i ) const;
};
\endcode
- Note, the iterator object returned by \ref end, \p cend member functions points to \p NULL and should not be dereferenced.
+ Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced.
<b>How to use</b>
/// Finds the key \p val and return the item found
/** \anchor cds_nonintrusive_MichaelHashSet_rcu_get
The function searches the item with key equal to \p val and returns the pointer to item found.
- If \p val is not found it returns \p NULL.
+ If \p val is not found it returns \p nullptr.
Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
/// Clear the queue
/**
- The function repeatedly calls \ref dequeue until it returns NULL.
+ The function repeatedly calls \ref dequeue until it returns \p nullptr.
The disposer defined in template \p Options is called for each item
that can be safely disposed.
*/
/// Clear the queue
/**
- The function repeatedly calls \ref dequeue until it returns NULL.
+ The function repeatedly calls \ref dequeue until it returns \p nullptr.
*/
void clear()
{
/// Clear the queue
/**
- The function repeatedly calls \ref dequeue until it returns NULL.
+ The function repeatedly calls \ref dequeue until it returns \p nullptr.
*/
void clear()
{
bool operator !=(iterator const& i ) const;
};
\endcode
- Note, the iterator object returned by \ref end, \ cend member functions points to \p NULL and should not be dereferenced.
+ Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced.
*/
template <
/// Gets minimum key from the map
/**
- If the map is empty the function returns \p NULL
+ If the map is empty the function returns \p nullptr
*/
value_type * get_min() const
{
/// Gets maximum key from the map
/**
- The function returns \p NULL if the map is empty
+ The function returns \p nullptr if the map is empty
*/
value_type * get_max() const
{
bool operator !=(iterator const& i ) const;
};
\endcode
- Note, the iterator object returned by \ref end, \p cend member functions points to \p NULL and should not be dereferenced.
+ Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced.
*/
template <
/// Finds the key \p key and return the item found
/** \anchor cds_nonintrusive_SkipListMap_rcu_get
The function searches the item with key equal to \p key and returns the pointer to item found.
- If \p key is not found it returns \p NULL.
+ If \p key is not found it returns \p nullptr.
Note the compare functor in \p Traits class' template argument
should accept a parameter of type \p K that can be not the same as \p key_type.
bool operator !=(iterator const& i ) const;
};
\endcode
- Note, the iterator object returned by \ref end, \p cend member functions points to \p NULL and should not be dereferenced.
+ Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced.
*/
template <
/// Gets minimum key from the set
/**
- If the set is empty the function returns \p NULL
+ If the set is empty the function returns \p nullptr
*/
value_type * get_min() const
{
/// Gets maximum key from the set
/**
- The function returns \p NULL if the set is empty
+ The function returns \p nullptr if the set is empty
*/
value_type * get_max() const
{
bool operator !=(iterator const& i ) const;
};
\endcode
- Note, the iterator object returned by \ref end, \p cend member functions points to \p NULL and should not be dereferenced.
+ Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced.
*/
template <
typename RCU,
/// Finds \p key and return the item found
/** \anchor cds_nonintrusive_SkipListSet_rcu_get
The function searches the item with key equal to \p key and returns the pointer to item found.
- If \p key is not found it returns \p NULL.
+ If \p key is not found it returns \p nullptr.
Note the compare functor in \p Traits class' template argument
should accept a parameter of type \p Q that can be not the same as \p value_type.
bool operator !=(iterator const& i ) const;
};
\endcode
- Note, the iterator object returned by \ref end, \p cend member functions points to \p NULL and should not be dereferenced.
+ Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced.
\par Usage
/// Finds \p key and return the item found
/** \anchor cds_intrusive_SplitListMap_rcu_get
The function searches the item with key equal to \p key and returns the pointer to item found.
- If \p key is not found it returns \p NULL.
+ If \p key is not found it returns \p nullptr.
Note the compare functor should accept a parameter of type \p K that can be not the same as \p value_type.
bool operator !=(iterator const& i ) const;
};
\endcode
- Note, the iterator object returned by \ref end, \p cend member functions points to \p NULL and should not be dereferenced.
+ Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced.
\par Usage
/// Finds the key \p val and return the item found
/** \anchor cds_nonintrusive_SplitListSet_rcu_get
The function searches the item with key equal to \p val and returns the pointer to item found.
- If \p val is not found it returns \p NULL.
+ If \p val is not found it returns \p nullptr.
Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
/// Clear the queue
/**
- The function repeatedly calls \ref dequeue until it returns NULL.
+ The function repeatedly calls \ref dequeue until it returns \p nullptr.
*/
void clear()
{
#ifndef cds_bitop_complement32_DEFINED
static inline bool complement32( atomic32u_t * pArg, unsigned int nBit )
{
- assert( pArg != NULL );
+ assert( pArg );
atomic32u_t nVal = *pArg & (1 << nBit);
*pArg ^= 1 << nBit;
return nVal != 0;
#ifndef cds_bitop_complement64_DEFINED
static inline bool complement64( atomic64u_t * pArg, unsigned int nBit )
{
- assert( pArg != NULL );
+ assert( pArg );
atomic64u_t nVal = *pArg & (atomic64u_t(1) << nBit);
*pArg ^= atomic64u_t(1) << nBit;
return nVal != 0;
func_ref get()
{
- assert( m_func != NULL );
+ assert( m_func );
return *m_func;
}
};
func_ref get()
{
- assert( m_func != NULL );
+ assert( m_func );
return *m_func;
}
};
func_ref get()
{
- assert( m_func != NULL );
+ assert( m_func );
return *m_func;
}
};
func_ref get()
{
- assert( m_func != NULL );
+ assert( m_func );
return *m_func;
}
};
func_ref get()
{
- assert( m_func != NULL );
+ assert( m_func );
return *m_func;
}
};
return p1.m_p < p2.m_p;
}
- /// Default ctor initializes pointer to NULL
+ /// Default ctor initializes pointer to \p nullptr
retired_ptr()
- : m_p( NULL )
- , m_funcFree( NULL )
+ : m_p( nullptr )
+ , m_funcFree( nullptr )
{}
/// Ctor
/// Invokes destructor function for the pointer
void free()
{
- assert( m_funcFree != NULL );
- assert( m_p != NULL );
+ assert( m_funcFree );
+ assert( m_p );
m_funcFree( m_p );
CDS_STRICT_DO( m_p = nullptr );
return *value_cast()( m_guard.template get<guarded_type>() );
}
- /// Checks if the guarded pointer is \p NULL
+ /// Checks if the guarded pointer is \p nullptr
bool empty() const CDS_NOEXCEPT
{
return m_guard.template get<guarded_type>() == nullptr;
return base_class::operator =(p);
}
+ //@cond
+ std::nullptr_t assign( std::nullptr_t )
+ {
+ return base_class::operator =(nullptr);
+ }
+ //@endcond
+
/// Copy from \p src guard to \p this guard
void copy( Guard const& src )
{
/// Clear value of the guard
void clear()
{
- assign( reinterpret_cast<void *>(NULL) );
+ assign( nullptr );
}
/// Get the value currently protected
for all x where link[x] of node is reference-counted do
retry:
node1 := link[x];
- if node1 != NULL and node1.m_bDeleted then
+ if node1 != nullptr and node1.m_bDeleted then
node2 := node1->link[x];
pGC->CASRef( this->link[x], node1, node2 );
pGC->releaseRef( node2 );
void terminate( ThreadGC * pGC, bool bConcurrent)
if !bConcurrent
for all this->link where link is reference-counted do
- link := NULL;
+ link := nullptr;
else
for all this->link where link is reference-counted do
repeat node1 := link;
- until pGC->CASRef(link,node1,NULL);
+ until pGC->CASRef(link,node1,nullptr);
\endcode
*/
virtual void terminate( ThreadGC * pGC, bool bConcurrent ) = 0;
#endif
};
- /// "Global GC object is NULL" exception
+ /// "Global GC object is nullptr" exception
CDS_DECLARE_EXCEPTION( HRCGarbageCollectorEmpty, "Global cds::gc::hrc::GarbageCollector is NULL" );
/// Not enough required Hazard Pointer count
return p;
}
+ //@cond
+ std::nullptr_t operator=( std::nullptr_t ) CDS_NOEXCEPT
+ {
+ clear();
+ return nullptr;
+ }
+ //@endcond
+
/// Returns current value of hazard pointer
/**
Loading has acquire semantics
return m_arr[nIndex];
}
- /// Clears (sets to NULL) hazard pointer \p nIndex
+ /// Clears (sets to \p nullptr) hazard pointer \p nIndex
void clear( size_t nIndex ) CDS_NOEXCEPT
{
assert( nIndex < capacity() );
//@cond
hplist_node( const GarbageCollector& HzpMgr )
: HPRec( HzpMgr ),
- m_pNextNode(NULL),
+ m_pNextNode( nullptr ),
m_idOwner( OS::c_NullThreadId ),
m_bFree( true )
{}
/// Returns pointer to GarbageCollector instance
static GarbageCollector& instance()
{
- if ( m_pHZPManager == NULL )
+ if ( !m_pHZPManager )
throw HZPManagerEmpty();
return *m_pHZPManager;
}
/// Checks if global GC object is constructed and may be used
static bool isUsed()
{
- return m_pHZPManager != NULL;
+ return m_pHZPManager != nullptr;
}
/// Returns max Hazard Pointer count defined in construction time
public:
ThreadGC()
: m_HzpManager( GarbageCollector::instance() ),
- m_pHzpRec( NULL )
+ m_pHzpRec( nullptr )
{}
~ThreadGC()
{
}
/// Checks if thread GC is initialized
- bool isInitialized() const { return m_pHzpRec != NULL ; }
+ bool isInitialized() const { return m_pHzpRec != nullptr; }
/// Initialization. Repeat call is available
void init()
{
if ( m_pHzpRec ) {
details::HPRec * pRec = m_pHzpRec;
- m_pHzpRec = NULL;
+ m_pHzpRec = nullptr;
m_HzpManager.RetireHPRec( pRec );
}
}
/// Initializes HP guard \p guard
details::HPGuard& allocGuard()
{
- assert( m_pHzpRec != NULL );
+ assert( m_pHzpRec );
return m_pHzpRec->m_hzp.alloc();
}
/// Frees HP guard \p guard
void freeGuard( details::HPGuard& guard )
{
- assert( m_pHzpRec != NULL );
+ assert( m_pHzpRec );
m_pHzpRec->m_hzp.free( guard );
}
template <size_t Count>
void allocGuard( details::HPArray<Count>& arr )
{
- assert( m_pHzpRec != NULL );
+ assert( m_pHzpRec );
m_pHzpRec->m_hzp.alloc( arr );
}
template <size_t Count>
void freeGuard( details::HPArray<Count>& arr )
{
- assert( m_pHzpRec != NULL );
+ assert( m_pHzpRec );
m_pHzpRec->m_hzp.free( arr );
}
}
//@cond
+ std::nullptr_t operator =(std::nullptr_t)
+ {
+ return m_hp = nullptr;
+ }
+
hazard_ptr get() const
{
return m_hp;
return p;
}
+ //@cond
+ std::nullptr_t operator=(std::nullptr_t)
+ {
+ clear();
+ return nullptr;
+ }
+ //@endcond
+
public: // for ThreadGC.
/*
GCC cannot compile code for template versions of ThreasGC::allocGuard/freeGuard,
{
return base_class::operator =<T>( p );
}
+
+ //@cond
+ std::nullptr_t operator=(std::nullptr_t)
+ {
+ return base_class::operator =(nullptr);
+ }
+ //@endcond
};
/// Array of guards
m_arr[nIndex].set( p );
}
- /// Clears (sets to NULL) the guard \p nIndex
+ /// Clears (sets to \p nullptr) the guard \p nIndex
void clear( size_t nIndex )
{
assert( nIndex < capacity() );
return base_class::operator =(p);
}
+ //@cond
+ std::nullptr_t assign( std::nullptr_t )
+ {
+ return base_class::operator =(nullptr);
+ }
+ //@endcond
+
/// Store marked pointer \p p to the guard
/**
The function equals to a simple assignment of <tt>p.ptr()</tt>, no loop is performed.
/// Dequeues a value from the queue
/** @anchor cds_intrusive_BasketQueue_dequeue
- If the queue is empty the function returns \p NULL.
+ If the queue is empty the function returns \p nullptr.
<b>Warning</b>: see MSQueue::deque note about item disposing
*/
/// Clear the queue
/**
- The function repeatedly calls \ref dequeue until it returns \p NULL.
+ The function repeatedly calls \ref dequeue until it returns \p nullptr.
The disposer defined in template \p Options is called for each item
that can be safely disposed.
*/
The <tt>insert(x)</tt> successively "kicks out" conflicting items until every key has a slot.
To add \p x, the method swaps \p x with \p y, the current occupant of <tt>table[0][h0(x)]</tt>.
- If the prior value was \p NULL, it is done. Otherwise, it swaps the newly nest-less value \p y
+ If the prior value was \p nullptr, it is done. Otherwise, it swaps the newly nest-less value \p y
for the current occupant of <tt>table[1][h1(y)]</tt> in the same way. As before, if the prior value
- was \p NULL, it is done. Otherwise, the method continues swapping entries (alternating tables)
+ was \p nullptr, it is done. Otherwise, the method continues swapping entries (alternating tables)
until it finds an empty slot. We might not find an empty slot, either because the table is full,
or because the sequence of displacement forms a cycle. We therefore need an upper limit on the
number of successive displacements we are willing to undertake. When this limit is exceeded,
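The displacement ("kick out") loop described in the excerpt above can be sketched as follows. This is a simplified, single-threaded illustration with int keys and std::optional slots, not the library's implementation; duplicate keys are not checked, and on failure the caller would rehash or expand the tables.
\code
#include <array>
#include <cstddef>
#include <functional>
#include <optional>

class cuckoo_sketch {
    static const size_t TABLE_SIZE = 64;   // illustrative sizes
    static const size_t MAX_KICKS  = 32;   // upper limit on displacements
    std::array<std::optional<int>, TABLE_SIZE> table0, table1;

    static size_t h0( int x ) { return std::hash<int>()( x ) % TABLE_SIZE; }
    static size_t h1( int x ) { return ( static_cast<size_t>( x ) * 2654435761u ) % TABLE_SIZE; }

public:
    // Returns false when the displacement limit is exceeded (rehash needed).
    bool insert( int x )
    {
        for ( size_t kick = 0; kick < MAX_KICKS; ++kick ) {
            std::optional<int>& slot0 = table0[ h0( x ) ];
            std::optional<int>  y = slot0;      // current occupant of table[0][h0(x)]
            slot0 = x;
            if ( !y )
                return true;                    // the slot was empty - done

            std::optional<int>& slot1 = table1[ h1( *y ) ];
            std::optional<int>  z = slot1;      // swap y into table[1][h1(y)]
            slot1 = *y;
            if ( !z )
                return true;
            x = *z;                             // keep displacing the evicted value
        }
        return false;   // limit exceeded: table is full or displacements form a cycle
    }
};
\endcode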
The function searches an item with key equal to \p val in the set,
unlinks it from the set, and returns a pointer to unlinked item.
- If the item with key equal to \p val is not found the function return \p NULL.
+ If the item with key equal to \p val is not found the function returns \p nullptr.
Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
*/
\endcode
The functor may be passed by reference with <tt>boost:ref</tt>
- If the item with key equal to \p val is not found the function return \p NULL.
+ If the item with key equal to \p val is not found the function returns \p nullptr.
Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
*/
/// Finds \p key and return the item found
/** \anchor cds_intrusive_EllenBinTree_rcu_get
The function searches the item with key equal to \p key and returns the pointer to item found.
- If \p key is not found it returns \p NULL.
+ If \p key is not found it returns \p nullptr.
RCU should be locked before call the function.
Returned pointer is valid while RCU is locked.
typedef Node node_type;
//@endcond
- /// Checks if the link field of node \p pNode is NULL
+ /// Checks if the link field of node \p pNode is \p nullptr
/**
- An asserting is generated if \p pNode link field is not NULL
+ An assertion is generated if the \p pNode link field is not \p nullptr
*/
static void is_empty( node_type const * pNode )
{
/// Finds the key \p val
/** \anchor cds_intrusive_LazyList_nogc_find_val
The function searches the item with key equal to \p val
- and returns pointer to value found or \p NULL.
+ and returns pointer to value found or \p nullptr.
*/
template <typename Q>
value_type * find( Q const& val )
/// Finds the key \p val and return the item found
/** \anchor cds_intrusive_LazyList_rcu_get
The function searches the item with key equal to \p val and returns the pointer to item found.
- If \p val is not found it returns \p NULL.
+ If \p val is not found it returns \p nullptr.
Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
/// Pop back
/**
- Pops rightmost item from the deque. If the deque is empty then returns \p NULL.
+ Pops rightmost item from the deque. If the deque is empty then returns \p nullptr.
For popped object the disposer specified in \p Options template parameters is called.
*/
/// Pop front
/**
- Pops leftmost item from the deque. If the deque is empty then returns \p NULL.
+ Pops leftmost item from the deque. If the deque is empty then returns \p nullptr.
For popped object the disposer specified in \p Options template parameters is called.
*/
/// Clear the deque
/**
- The function repeatedly calls \ref pop_back until it returns \p NULL.
+ The function repeatedly calls \ref pop_back until it returns \p nullptr.
The disposer defined in template \p Options is called for each item
that can be safely disposed.
*/
typedef Node node_type;
//@endcond
- /// Checks if the link field of node \p pNode is \p NULL
+ /// Checks if the link field of node \p pNode is \p nullptr
/**
- An asserting is generated if \p pNode link field is not \p NULL
+ An assertion is generated if the \p pNode link field is not \p nullptr
*/
static void is_empty( const node_type * pNode )
{
protected:
virtual void cleanUp( cds::gc::hrc::ThreadGC * pGC )
{
- assert( pGC != NULL );
+ assert( pGC );
typename gc::GuardArray<2> aGuards( *pGC );
while ( true ) {
/// Returns an iterator that addresses the location succeeding the last element in a list
/**
Do not use the value returned by <tt>end</tt> function to access any item.
- Internally, <tt>end</tt> returning value equals to <tt>NULL</tt>.
+ Internally, <tt>end</tt> returning value equals to \p nullptr.
The returned value can be used only to control reaching the end of the list.
For empty list <tt>begin() == end()</tt>
/// Returns an iterator that addresses the location succeeding the last element in a list
/**
Do not use the value returned by <tt>end</tt> function to access any item.
- Internally, <tt>end</tt> returning value equals to <tt>NULL</tt>.
+ Internally, <tt>end</tt> returning value equals to \p nullptr.
The returned value can be used only to control reaching the end of the list.
For empty list \code begin() == end() \endcode
/// Finds the key \p val
/** \anchor cds_intrusive_MichaelList_nogc_find_val
The function searches the item with key equal to \p val
- and returns pointer to value found or \p NULL.
+ and returns pointer to value found or \p nullptr.
*/
template <typename Q>
value_type * find( Q const & val )
/// Returns an iterator that addresses the location succeeding the last element in a list
/**
Do not use the value returned by <tt>end</tt> function to access any item.
- Internally, <tt>end</tt> returning value equals to \p NULL.
+ Internally, <tt>end</tt> returning value equals to \p nullptr.
The returned value can be used only to control reaching the end of the list.
For empty list \code begin() == end() \endcode
/// Finds the key \p val and return the item found
/** \anchor cds_intrusive_MichaelList_rcu_get
The function searches the item with key equal to \p val and returns the pointer to item found.
- If \p val is not found it returns \p NULL.
+ If \p val is not found it returns \p nullptr.
Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
/// Finds the key \p val
/** \anchor cds_intrusive_MichaelHashSet_nogc_find_val
The function searches the item with key equal to \p val
- and returns pointer to item found, otherwise \p NULL.
+ and returns pointer to item found, otherwise \p nullptr.
Note the hash functor specified for class \p Traits template parameter
should accept a parameter of type \p Q that can be not the same as \p value_type.
/// Finds the key \p val and return the item found
/** \anchor cds_intrusive_MichaelHashSet_rcu_get
The function searches the item with key equal to \p val and returns the pointer to item found.
- If \p val is not found it returns \p NULL.
+ If \p val is not found it returns \p nullptr.
Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
/// Dequeues a value from the queue
/** @anchor cds_intrusive_MSQueue_dequeue
- If the queue is empty the function returns \p NULL.
+ If the queue is empty the function returns \p nullptr.
\par Warning
The queue algorithm has following feature: when \p dequeue is called,
/// Clear the queue
/**
- The function repeatedly calls \ref dequeue until it returns \p NULL.
+ The function repeatedly calls \ref dequeue until it returns \p nullptr.
The disposer defined in template \p Options is called for each item
that can be safely disposed.
*/
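A hedged sketch of the clear-by-dequeue behaviour described above (illustrative, not the library's clear() implementation): keep dequeuing until the queue reports empty by returning nullptr.
\code
// Illustrative only: drain a queue whose dequeue() returns a pointer,
// or nullptr when the queue is empty; the disposer configured for the
// queue reclaims each removed item when it is safe to do so.
template <class Queue>
void drain( Queue& q )
{
    while ( q.dequeue() != nullptr )
        ;   // nothing to do here - items are handed to the disposer
}
\endcode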
}
static node_type * to_node_ptr( value_type * v )
{
- return v ? static_cast<node_type *>( v ) : reinterpret_cast<node_type *>( NULL );
+ return v ? static_cast<node_type *>(v) : nullptr;
}
static const node_type * to_node_ptr( const value_type& v )
{
}
static const node_type * to_node_ptr( const value_type * v )
{
- return v ? static_cast<const node_type *>( v ) : reinterpret_cast<const node_type *>( NULL );
+ return v ? static_cast<const node_type *>(v) : nullptr;
}
static value_type * to_value_ptr( node_type& n )
{
}
static value_type * to_value_ptr( node_type * n )
{
- return n ? static_cast<value_type *>( n ) : reinterpret_cast<value_type *>( NULL );
+ return n ? static_cast<value_type *>(n) : nullptr;
}
static const value_type * to_value_ptr( const node_type& n )
{
}
static const value_type * to_value_ptr( const node_type * n )
{
- return n ? static_cast<const value_type *>( n ) : reinterpret_cast<const value_type *>( NULL );
+ return n ? static_cast<const value_type *>(n) : nullptr;
}
};
}
static node_type * to_node_ptr( value_type * v )
{
- return v ? to_node_ptr(*v) : reinterpret_cast<node_type *>( NULL );
+ return v ? to_node_ptr( *v ) : nullptr;
}
static const node_type * to_node_ptr( const value_type& v )
{
}
static const node_type * to_node_ptr( const value_type * v )
{
- return v ? to_node_ptr(*v) : reinterpret_cast<const node_type *>( NULL );
+ return v ? to_node_ptr( *v ) : nullptr;
}
static value_type * to_value_ptr( node_type& n )
{
}
static value_type * to_value_ptr( node_type * n )
{
- return n ? to_value_ptr(*n) : reinterpret_cast<value_type *>( NULL );
+ return n ? to_value_ptr( *n ) : nullptr;
}
static const value_type * to_value_ptr( const node_type& n )
{
}
static const value_type * to_value_ptr( const node_type * n )
{
- return n ? to_value_ptr(*n) : reinterpret_cast<const value_type *>( NULL );
+ return n ? to_value_ptr( *n ) : nullptr;
}
};
typedef Node node_type;
//@endcond
- /// Checks if the link fields of node \p pNode is NULL
+ /// Checks if the link fields of node \p pNode are \p nullptr
/**
- An asserting is generated if \p pNode link fields is not NULL
+ An assertion is generated if the \p pNode link fields are not \p nullptr
*/
static void is_empty( const node_type * pNode )
{
/// Dequeues a value from the queue
/** @anchor cds_intrusive_OptimisticQueue_dequeue
- If the queue is empty the function returns \a NULL
+ If the queue is empty the function returns \p nullptr
\par Warning
The queue algorithm has following feature: when \p dequeue is called,
/// Clear the stack
/**
- The function repeatedly calls \ref dequeue until it returns NULL.
+ The function repeatedly calls \ref dequeue until it returns \p nullptr.
The disposer defined in template \p Options is called for each item
that can be safely disposed.
*/
- \ref always_check_link - check in debug and release build (not yet implemented for release mode).
When link checking is on, the container tests that the node's link fields
- must be NULL before inserting the item. If the link is not NULL an assertion is generated
+ must be \p nullptr before inserting the item. If the link is not \p nullptr an assertion is generated
*/
template <link_check_type Value>
struct link_checker {
typedef Node node_type;
//@endcond
- /// Checks if the link field of node \p pNode is NULL
+ /// Checks if the link field of node \p pNode is \p nullptr
/**
- An asserting is generated if \p pNode link field is not NULL
+ An assertion is generated if the \p pNode link field is not \p nullptr
*/
static void is_empty( const node_type * pNode )
{
protected:
atomic_marked_ptr m_pNext ; ///< Next item in bottom-list (list at level 0)
unsigned int m_nHeight ; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1.
- atomic_marked_ptr * m_arrNext ; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p NULL
+ atomic_marked_ptr * m_arrNext ; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p nullptr
public:
/// Constructs a node of height 1 (a bottom-list node)
protected:
atomic_marked_ptr m_pNext ; ///< Next item in bottom-list (list at level 0)
unsigned int m_nHeight ; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1.
- atomic_marked_ptr * m_arrNext ; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p NULL
+ atomic_marked_ptr * m_arrNext ; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p nullptr
public:
bool m_bDel;
protected:
virtual void cleanUp( cds::gc::hrc::ThreadGC * pGC )
{
- assert( pGC != NULL );
+ assert( pGC );
typename gc::GuardArray<2> aGuards( *pGC );
unsigned int const nHeight = height();
bool operator !=(iterator const& i ) const;
};
\endcode
- Note, the iterator object returned by \ref end, \p cend member functions points to \p NULL and should not be dereferenced.
+ Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced.
<b>How to use</b>
protected:
atomic_ptr m_pNext ; ///< Next item in bottom-list (list at level 0)
unsigned int m_nHeight ; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1.
- atomic_ptr * m_arrNext ; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p NULL
+ atomic_ptr * m_arrNext ; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p nullptr
public:
/// Constructs a node of height 1 (a bottom-list node)
bool operator !=(iterator const& i ) const;
};
\endcode
- Note, the iterator object returned by \ref end, \p cend member functions points to \p NULL and should not be dereferenced.
+ Note that the iterator object returned by the \ref end and \p cend member functions points to \p nullptr and should not be dereferenced.
<b>How to use</b>
/// Gets minimum key from the set
/**
- If the set is empty the function returns \p NULL
+ If the set is empty the function returns \p nullptr
*/
value_type * get_min() const
{
/// Gets maximum key from the set
/**
- The function returns \p NULL if the set is empty
+ The function returns \p nullptr if the set is empty
*/
value_type * get_max() const
{
# endif
protected:
unsigned int m_nHeight ; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1.
- atomic_marked_ptr * m_arrNext ; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p NULL
+ atomic_marked_ptr * m_arrNext ; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p nullptr
public:
/// Constructs a node of height 1 (a bottom-list node)
bool operator !=(iterator const& i ) const;
};
\endcode
- Note, the iterator object returned by \ref end, \p cend member functions points to \p NULL and should not be dereferenced.
+ Note that the iterator object returned by the \ref end and \p cend member functions points to \p nullptr and should not be dereferenced.
<b>How to use</b>
static void dispose_node( value_type * pVal )
{
- assert( pVal != NULL );
+ assert( pVal );
typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal) );
disposer()( pVal );
/// Finds the key \p val and return the item found
/** \anchor cds_intrusive_SkipListSet_rcu_get
The function searches the item with key equal to \p val and returns the pointer to item found.
- If \p val is not found it returns \p NULL.
+ If \p val is not found it returns \p nullptr.
Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
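A typical usage sketch (hypothetical set \p s and key \p key; \p use stands for arbitrary read-only user code): the returned pointer is valid only while the RCU read-side lock is held.
\code
{
    typename set_type::rcu_lock lock;   // enter RCU read-side critical section
    value_type * p = s.get( key );
    if ( p )
        use( *p );  // safe: the item cannot be reclaimed while the lock is held
}   // leaving the critical section - do not dereference p after this point
\endcode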
/// Finds the key \p val
/** \anchor cds_intrusive_SplitListSet_nogc_find_val
The function searches the item with key equal to \p val
- and returns pointer to item found or , and \p NULL otherwise.
+ and returns a pointer to the item found, or \p nullptr otherwise.
Note the hash functor specified for class \p Traits template parameter
should accept a parameter of type \p Q that can be not the same as \p value_type.
/// Finds the key \p val and return the item found
/** \anchor cds_intrusive_SplitListSet_rcu_get
The function searches the item with key equal to \p val and returns the pointer to item found.
- If \p val is not found it returns \p NULL.
+ If \p val is not found it returns \p nullptr.
Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
The function searches an item with key equal to \p val in the set,
unlinks it from the set, and returns a pointer to unlinked item.
- If the item with key equal to \p val is not found the function return \p NULL.
+ If the item with key equal to \p val is not found, the function returns \p nullptr.
Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
*/
myData * p;
p = s1.pop() ; // pop i1 from s1
- p = s1.pop() ; // p == NULL, s1 is empty
+ p = s1.pop() ; // p == nullptr, s1 is empty
p = s2.pop() ; // pop i1 from s2
p = s2.pop() ; // pop i2 from s2
- p = s2.pop() ; // p == NULL, s2 is empty
+ p = s2.pop() ; // p == nullptr, s2 is empty
}
\endcode
/// Pop an item from the stack
/**
- If stack is empty, returns \p NULL.
+ If stack is empty, returns \p nullptr.
The disposer is <b>not</b> called for popped item.
See \ref cds_intrusive_item_destroying "Destroying items of intrusive containers".
*/
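A short usage sketch (hypothetical stack \p s; \p process stands for user code):
\code
if ( value_type * p = s.pop() ) {
    process( *p );
    // pop() does not invoke the disposer, so the caller is responsible for
    // destroying the item once no other thread can still access it.
}
\endcode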
m_stat.onPopRace();
if ( m_Backoff.backoff( op, m_stat )) {
- // may return NULL if stack is empty
+ // may return nullptr if stack is empty
return op.pVal;
}
}
/// Dequeues item from the queue
/** @anchor cds_intrusive_TsigasQueue_dequeue
- If the queue is empty the function returns \a NULL
+ If the queue is empty the function returns \p nullptr
Dequeue does not call value disposer. You can manually dispose returned value if it is needed.
*/
if ( th != m_nHead.load(memory_model::memory_order_relaxed) )
goto TryAgain;
- // two consecutive NULL means queue empty
+ // two consecutive nullptr values mean the queue is empty
if ( temp == m_nTail.load(memory_model::memory_order_acquire) )
- return NULL;
+ return nullptr;
temp = ( temp + 1 ) & nModulo;
tt = m_buffer[ temp ].load(memory_model::memory_order_relaxed);
while ( is_free( tt ) ) {
if ( th != m_nHead.load(memory_model::memory_order_relaxed) )
goto TryAgain;
- // two consecutive NULL means queue empty
+ // two consecutive nullptr values mean the queue is empty
if ( temp == m_nTail.load(memory_model::memory_order_relaxed) )
return true;
temp = ( temp + 1 ) & nModulo;
/// Dequeues an item from queue
/**
- If queue is empty, returns \p NULL.
+ If queue is empty, returns \p nullptr.
*/
value_type * dequeue()
{
#include <cds/details/allocator.h>
#include <cds/lock/scoped_lock.h>
-#include <cds/int_algo.h>
+#include <cds/algo/int_algo.h>
#include <boost/mpl/if.hpp>
/// Processor heap's \p active field
/**
The \p active field in the processor heap structure is primarily a pointer to the descriptor
- of the active superblock owned by the processor heap. If the value of \p active is not \p NULL, it is
+ of the active superblock owned by the processor heap. If the value of \p active is not \p nullptr, it is
guaranteed that the active superblock has at least one block available for reservation.
Since the addresses of superblock descriptors can be guaranteed to be aligned to some power
of 2 (e.g., 64), as an optimization, we can carve a credits subfield to hold the number
of credits is n, then the active superblock contains n+1 blocks available for reservation
through the \p active field. Note that the number of blocks in a superblock is not limited
to the maximum reservations that can be held in the credits subfield. In a typical malloc operation
- (i.e., when \p active != \p NULL and \p credits > 0), the thread reads \p active and then
+ (i.e., when \p active != \p nullptr and \p credits > 0), the thread reads \p active and then
atomically decrements credits while validating that the active superblock is still valid.
*/
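For illustration only (this is not the library's actual \p active_tag layout): assuming descriptors are aligned to 64 bytes, a pointer and a small credits counter can share one word as in the sketch below.
\code
#include <cstdint>
#include <cassert>

// Sketch: pack a 64-byte-aligned descriptor pointer together with a credits
// counter kept in the low (always-zero) bits of the pointer value.
class packed_active {
    std::uintptr_t m_val;
    static constexpr std::uintptr_t c_nCreditsMask = 63;   // low 6 bits
public:
    packed_active() : m_val(0) {}

    void set( void * pDesc, unsigned int nCredits )
    {
        assert( ( reinterpret_cast<std::uintptr_t>( pDesc ) & c_nCreditsMask ) == 0 );
        assert( nCredits <= c_nCreditsMask );
        m_val = reinterpret_cast<std::uintptr_t>( pDesc ) | nCredits;
    }
    void * ptr() const           { return reinterpret_cast<void *>( m_val & ~c_nCreditsMask ); }
    unsigned int credits() const { return static_cast<unsigned int>( m_val & c_nCreditsMask ); }
};
\endcode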
class active_tag {
CDS_DATA_ALIGNMENT(8) CDS_ATOMIC::atomic<active_tag> active; ///< pointer to the descriptor of active superblock owned by processor heap
processor_desc * pProcDesc ; ///< pointer to parent processor descriptor
const size_class * pSizeClass ; ///< pointer to size class
- CDS_ATOMIC::atomic<superblock_desc *> pPartial ; ///< pointer to partial filled superblock (may be NULL)
+ CDS_ATOMIC::atomic<superblock_desc *> pPartial ; ///< pointer to partial filled superblock (may be \p nullptr)
partial_list partialList ; ///< list of partial filled superblocks owned by the processor heap
unsigned int nPageIdx ; ///< page size-class index, \ref c_nPageSelfAllocation - "small page"
}
} while ( !pPartial.compare_exchange_weak( pDesc, nullptr, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) );
- //assert( pDesc == NULL || free_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_free_list_hook *>(pDesc) ));
- //assert( pDesc == NULL || partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc) ) );
+ //assert( pDesc == nullptr || free_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_free_list_hook *>(pDesc) ));
+ //assert( pDesc == nullptr || partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc) ) );
return pDesc;
}
/// Reallocate memory block
/**
If \p nNewSize is zero, then the block pointed to by \p pMemory is freed;
- the return value is \p NULL, and \p pMemory is left pointing at a freed block.
+ the return value is \p nullptr, and \p pMemory is left pointing at a freed block.
If there is not enough available memory to expand the block to the given size,
- the original block is left unchanged, and \p NULL is returned.
+ the original block is left unchanged, and \p nullptr is returned.
Aligned memory block cannot be realloc'ed: if \p pMemory has been allocated by \ref alloc_aligned,
- then the return value is \p NULL and the original block is left unchanged.
+ then the return value is \p nullptr and the original block is left unchanged.
*/
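A defensive usage sketch (hypothetical heap object \p h): keep the original pointer until the call succeeds, because a failed expansion returns \p nullptr and leaves the old block intact.
\code
void * pNew = h.realloc( pOld, nNewSize );
if ( pNew )
    pOld = pNew;        // success: adopt the (possibly moved) block
else if ( nNewSize == 0 )
    pOld = nullptr;     // the old block has been freed
// otherwise the request failed and pOld still points to the valid original block
\endcode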
void * realloc(
void * pMemory, ///< Pointer to previously allocated memory block
The cleaner is a functor called when an item is removed from a container.
Note, the cleaner should not delete (deallocate) the value \p val passed in.
However, if the \p value_type type is a structure that contains dynamically allocated
- field(s), the cleaning functor may deallocate it and iniitalize to default value (usually, \p NULL).
+ field(s), the cleaning functor may deallocate it and initialize it to a default value (usually \p nullptr).
The interface for type \p value_type is:
\code
#include <memory>
#include <cds/details/is_aligned.h>
-#include <cds/int_algo.h>
+#include <cds/algo/int_algo.h>
namespace cds {
/// OS specific wrappers
size_t len = sizeof(nCPU);
/* get the number of CPUs from the system */
- return ::sysctl(mib, 2, &nCPU, &len, NULL, 0) == 0 && nCPU > 0 ? (unsigned int) nCPU : 1;
+ return ::sysctl( mib, 2, &nCPU, &len, nullptr, 0 ) == 0 && nCPU > 0 ? (unsigned int)nCPU : 1;
}
/// Get current processor number
)
{
void * pMem;
- return ::posix_memalign( &pMem, nAlignment, nSize ) == 0 ? pMem : NULL;
+ return ::posix_memalign( &pMem, nAlignment, nSize ) == 0 ? pMem : nullptr;
}
/// Frees a block of memory that was allocated with aligned_malloc.
#define __CDS_OS_POSIX_THREAD_H
#include <pthread.h>
+#include <signal.h>
namespace cds { namespace OS {
/// posix-related wrappers
namespace posix {
/// Posix thread id type
- typedef std::thread::native_thread_handle ThreadId;
+ typedef std::thread::native_handle_type ThreadId;
/// Get current thread id
- static inline ThreadId getCurrentThreadId() { return pthread_self(); }
+ static inline ThreadId getCurrentThreadId()
+ {
+ return pthread_self();
+ }
/// Checks if thread \p id is alive
static inline bool isThreadAlive( ThreadId id )
{
// if sig is zero, error checking is performed but no signal is actually sent.
// ESRCH - No thread could be found corresponding to that specified by the given thread ID
- // Unresolved problem: Linux may crash on dead thread_id. Workaround unknown (except signal handler...)
+ // Unresolved problem: Linux may crash on dead thread_id. Workaround unknown (except signal handler...)
return pthread_kill( id, 0 ) != ESRCH;
}
-
- /// Default back-off thread strategy (yield)
- static inline void backoff()
- {
- std::this_thread::yield();
- }
-
} // namespace posix
using posix::ThreadId;
using posix::getCurrentThreadId;
using posix::isThreadAlive;
- using posix::backoff;
}} // namespace cds::OS
# include <cds/os/posix/thread.h>
#endif
+namespace cds { namespace OS {
+
+ /// Default backoff::yield implementation
+ static inline void backoff()
+ {
+ std::this_thread::yield();
+ }
+}} // namespace cds::OS
+
#endif // #ifndef __CDS_OS_THREAD_H
{
char *ptmp = 0;
if ( !FormatMessage( FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
- NULL,
+ nullptr,
nCode,
MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
(LPTSTR) &ptmp, 0, 0 )
static inline bool isThreadAlive( ThreadId id )
{
HANDLE h = ::OpenThread( SYNCHRONIZE, FALSE, id );
- if ( h == NULL )
+ if ( h == nullptr )
return false;
::CloseHandle( h );
return true;
}
-
- /// Default backoff::yield implementation
- static inline void backoff()
- {
- std::this_thread::yield();
- }
} // namespace Win32
using Win32::ThreadId;
using Win32::getCurrentThreadId;
using Win32::isThreadAlive;
- using Win32::backoff;
}} // namespace cds::OS
//@cond
ThreadData()
- : m_pGPIRCU( NULL )
- , m_pGPBRCU( NULL )
- , m_pGPTRCU( NULL )
+ : m_pGPIRCU( nullptr )
+ , m_pGPBRCU( nullptr )
+ , m_pGPTRCU( nullptr )
#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
- , m_pSHBRCU( NULL )
- , m_pSHTRCU( NULL )
+ , m_pSHBRCU( nullptr )
+ , m_pSHTRCU( nullptr )
#endif
, m_nFakeProcessorNumber( s_nLastUsedProcNo.fetch_add(1, CDS_ATOMIC::memory_order_relaxed) % s_nProcCount )
, m_nAttachCount(0)
m_ptbManager = nullptr;
}
- assert( m_pGPIRCU == NULL );
- assert( m_pGPBRCU == NULL );
- assert( m_pGPTRCU == NULL );
+ assert( m_pGPIRCU == nullptr );
+ assert( m_pGPBRCU == nullptr );
+ assert( m_pGPTRCU == nullptr );
#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
- assert( m_pSHBRCU == NULL );
- assert( m_pSHTRCU == NULL );
+ assert( m_pSHBRCU == nullptr );
+ assert( m_pSHTRCU == nullptr );
#endif
}
if ( cds::urcu::details::singleton<cds::urcu::general_instant_tag>::isUsed() ) {
cds::urcu::details::singleton<cds::urcu::general_instant_tag>::detach_thread( m_pGPIRCU );
- m_pGPIRCU = NULL;
+ m_pGPIRCU = nullptr;
}
if ( cds::urcu::details::singleton<cds::urcu::general_buffered_tag>::isUsed() ) {
cds::urcu::details::singleton<cds::urcu::general_buffered_tag>::detach_thread( m_pGPBRCU );
- m_pGPBRCU = NULL;
+ m_pGPBRCU = nullptr;
}
if ( cds::urcu::details::singleton<cds::urcu::general_threaded_tag>::isUsed() ) {
cds::urcu::details::singleton<cds::urcu::general_threaded_tag>::detach_thread( m_pGPTRCU );
- m_pGPTRCU = NULL;
+ m_pGPTRCU = nullptr;
}
#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
if ( cds::urcu::details::singleton<cds::urcu::signal_buffered_tag>::isUsed() ) {
cds::urcu::details::singleton<cds::urcu::signal_buffered_tag>::detach_thread( m_pSHBRCU );
- m_pSHBRCU = NULL;
+ m_pSHBRCU = nullptr;
}
if ( cds::urcu::details::singleton<cds::urcu::signal_threaded_tag>::isUsed() ) {
cds::urcu::details::singleton<cds::urcu::signal_threaded_tag>::detach_thread( m_pSHTRCU );
- m_pSHTRCU = NULL;
+ m_pSHTRCU = nullptr;
}
#endif
return true;
{
if ( cxx11_internal::s_pThreadData ) {
ThreadData * p = cxx11_internal::s_pThreadData;
- cxx11_internal::s_pThreadData = NULL;
+ cxx11_internal::s_pThreadData = nullptr;
p->ThreadData::~ThreadData();
}
}
/// Checks whether current thread is attached to \p libcds feature or not.
static bool isThreadAttached()
{
- return _threadData() != NULL;
+ return _threadData() != nullptr;
}
/// This method must be called in beginning of thread execution
*/
static gc::HP::thread_gc_impl& getHZPGC()
{
- assert( _threadData()->m_hpManager != NULL );
+ assert( _threadData()->m_hpManager != nullptr );
return *(_threadData()->m_hpManager);
}
*/
static gc::HRC::thread_gc_impl& getHRCGC()
{
- assert( _threadData()->m_hrcManager != NULL );
+ assert( _threadData()->m_hrcManager != nullptr );
return *(_threadData()->m_hrcManager);
}
*/
static gc::PTB::thread_gc_impl& getPTBGC()
{
- assert( _threadData()->m_ptbManager != NULL );
+ assert( _threadData()->m_ptbManager != nullptr );
return *(_threadData()->m_ptbManager);
}
{
if ( gcc_internal::s_pThreadData ) {
ThreadData * p = gcc_internal::s_pThreadData;
- gcc_internal::s_pThreadData = NULL;
+ gcc_internal::s_pThreadData = nullptr;
p->ThreadData::~ThreadData();
}
}
/// Checks whether current thread is attached to \p libcds feature or not.
static bool isThreadAttached()
{
- return _threadData() != NULL;
+ return _threadData() != nullptr;
}
/// This method must be called in beginning of thread execution
*/
static gc::HP::thread_gc_impl& getHZPGC()
{
- assert( _threadData()->m_hpManager != NULL );
+ assert( _threadData()->m_hpManager );
return *(_threadData()->m_hpManager);
}
*/
static gc::HRC::thread_gc_impl& getHRCGC()
{
- assert( _threadData()->m_hrcManager != NULL );
+ assert( _threadData()->m_hrcManager );
return *(_threadData()->m_hrcManager);
}
*/
static gc::PTB::thread_gc_impl& getPTBGC()
{
- assert( _threadData()->m_ptbManager != NULL );
+ assert( _threadData()->m_ptbManager );
return *(_threadData()->m_ptbManager);
}
{
if ( msvc_internal::s_pThreadData ) {
msvc_internal::s_pThreadData->ThreadData::~ThreadData();
- msvc_internal::s_pThreadData = NULL;
+ msvc_internal::s_pThreadData = nullptr;
}
}
static bool isThreadAttached()
{
ThreadData * pData = _threadData();
- return pData != NULL;
+ return pData != nullptr;
}
/// This method must be called in beginning of thread execution
*/
static gc::HP::thread_gc_impl& getHZPGC()
{
- assert( _threadData()->m_hpManager != NULL );
+ assert( _threadData()->m_hpManager );
return *(_threadData()->m_hpManager);
}
*/
static gc::HRC::thread_gc_impl& getHRCGC()
{
- assert( _threadData()->m_hrcManager != NULL );
+ assert( _threadData()->m_hrcManager );
return *(_threadData()->m_hrcManager);
}
*/
static gc::PTB::thread_gc_impl& getPTBGC()
{
- assert( _threadData()->m_ptbManager != NULL );
+ assert( _threadData()->m_ptbManager );
return *(_threadData()->m_ptbManager);
}
static void free()
{
ThreadData * p = get();
- pthread_setspecific( m_key, NULL );
+ pthread_setspecific( m_key, nullptr );
if ( p )
delete p;
}
case do_checkData:
return Holder::get();
case do_attachThread:
- if ( Holder::get() == NULL )
+ if ( Holder::get() == nullptr )
Holder::alloc();
return Holder::get();
case do_detachThread:
Holder::free();
- return NULL;
+ return nullptr;
case init_holder:
case fini_holder:
break;
assert( false ) ; // anything forgotten?..
}
assert(false) ; // how did we get here?
- return NULL;
+ return nullptr;
}
//@endcond
/// Checks whether current thread is attached to \p libcds feature or not.
static bool isThreadAttached()
{
- return _threadData( do_checkData ) != NULL;
+ return _threadData( do_checkData ) != nullptr;
}
/// This method must be called in beginning of thread execution
/**
- If TLS pointer to manager's data is NULL, pthread_exception is thrown
+ If TLS pointer to manager's data is \p nullptr, pthread_exception is thrown
with code = -1.
If an error occurs in call of pthread API function, pthread_exception is thrown
with pthread error code.
static void attachThread()
{
ThreadData * pData = _threadData( do_attachThread );
- assert( pData != NULL );
+ assert( pData );
if ( pData ) {
pData->init();
/// This method must be called in end of thread execution
/**
- If TLS pointer to manager's data is NULL, pthread_exception is thrown
+ If TLS pointer to manager's data is \p nullptr, pthread_exception is thrown
with code = -1.
If an error occurs in call of pthread API function, pthread_exception is thrown
with pthread error code.
static void detachThread()
{
ThreadData * pData = _threadData( do_getData );
- assert( pData != NULL );
+ assert( pData );
if ( pData ) {
if ( pData->fini() )
{
api_error_code nErr;
void * pData = ::TlsGetValue( m_key );
- if ( pData == NULL && (nErr = ::GetLastError()) != ERROR_SUCCESS )
+ if ( pData == nullptr && (nErr = ::GetLastError()) != ERROR_SUCCESS )
throw api_exception( nErr, "TlsGetValue" );
return reinterpret_cast<ThreadData *>( pData );
}
static void free()
{
ThreadData * p = get();
- ::TlsSetValue( m_key, NULL );
+ ::TlsSetValue( m_key, nullptr );
if ( p )
delete p;
}
# ifdef _DEBUG
{
ThreadData * p = Holder::get();
- assert( p != NULL );
+ assert( p );
return p;
}
# else
case do_checkData:
return Holder::get();
case do_attachThread:
- if ( Holder::get() == NULL )
+ if ( Holder::get() == nullptr )
Holder::alloc();
return Holder::get();
case do_detachThread:
Holder::free();
- return NULL;
+ return nullptr;
default:
assert( false ) ; // anything forgotten?..
}
- return NULL;
+ return nullptr;
}
//@endcond
/// Checks whether current thread is attached to \p libcds feature or not.
static bool isThreadAttached()
{
- return _threadData( do_checkData ) != NULL;
+ return _threadData( do_checkData ) != nullptr;
}
/// This method must be called in beginning of thread execution
/**
- If TLS pointer to manager's data is NULL, api_exception is thrown
+ If TLS pointer to manager's data is \p nullptr, api_exception is thrown
with code = -1.
If an error occurs in call of Win TLS API function, api_exception is thrown
with Windows error code.
static void attachThread()
{
ThreadData * pData = _threadData( do_attachThread );
- assert( pData != NULL );
+ assert( pData );
if ( pData ) {
pData->init();
/// This method must be called in end of thread execution
/**
- If TLS pointer to manager's data is NULL, api_exception is thrown
+ If TLS pointer to manager's data is \p nullptr, api_exception is thrown
with code = -1.
If an error occurs in call of Win TLS API function, api_exception is thrown
with Windows error code.
static void detachThread()
{
ThreadData * pData = _threadData( do_getData );
- assert( pData != NULL );
+ assert( pData );
if ( pData ) {
if ( pData->fini() )
inline cds::urcu::details::thread_data<cds::urcu::signal_buffered_tag> * getRCU<cds::urcu::signal_buffered_tag>()
{
ThreadData * p = Manager::thread_data();
- return p ? p->m_pSHBRCU : NULL;
+ return p ? p->m_pSHBRCU : nullptr;
}
template<>
inline cds::urcu::details::thread_data<cds::urcu::signal_threaded_tag> * getRCU<cds::urcu::signal_threaded_tag>()
{
ThreadData * p = Manager::thread_data();
- return p ? p->m_pSHTRCU : NULL;
+ return p ? p->m_pSHTRCU : nullptr;
}
#endif
static bool isUsed()
{
- return rcu_instance::s_pRCU != NULL;
+ return rcu_instance::s_pRCU != nullptr;
}
public:
#ifndef _CDS_URCU_DETAILS_SH_H
#define _CDS_URCU_DETAILS_SH_H
+#include <memory.h> //memset
#include <cds/urcu/details/sh_decl.h>
#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
sigaction( m_nSigNo, &sigact, nullptr );
sigaddset( &sigact.sa_mask, m_nSigNo );
- pthread_sigmask( SIG_UNBLOCK, &sigact.sa_mask, NULL );
+ pthread_sigmask( SIG_UNBLOCK, &sigact.sa_mask, nullptr );
}
template <typename RCUtag>
static bool isUsed()
{
- return rcu_instance::s_pRCU != NULL;
+ return rcu_instance::s_pRCU != nullptr;
}
int signal_no() const
release();
}
- /// Checks if the pointer is \p NULL
+ /// Checks if the pointer is \p nullptr
bool empty() const CDS_NOEXCEPT
{
return m_pNode == nullptr;
release();
}
- /// Checks if the pointer is \p NULL
+ /// Checks if the pointer is \p nullptr
bool empty() const CDS_NOEXCEPT
{
return m_pNode == nullptr;
<ClInclude Include="..\..\..\cds\algo\elimination_opt.h" />\r
<ClInclude Include="..\..\..\cds\algo\elimination_tls.h" />\r
<ClInclude Include="..\..\..\cds\algo\flat_combining.h" />\r
+ <ClInclude Include="..\..\..\cds\algo\int_algo.h" />\r
<ClInclude Include="..\..\..\cds\compiler\clang\cxx11_atomic_prepatches.h" />\r
<ClInclude Include="..\..\..\cds\compiler\clang\defs.h" />\r
<ClInclude Include="..\..\..\cds\compiler\cxx11_atomic.h" />\r
<ClInclude Include="..\..\..\cds\intrusive\striped_set\boost_unordered_set.h" />\r
<ClInclude Include="..\..\..\cds\intrusive\striped_set\resizing_policy.h" />\r
<ClInclude Include="..\..\..\cds\intrusive\striped_set\striping_policy.h" />\r
+ <ClInclude Include="..\..\..\cds\lock\array.h" />\r
+ <ClInclude Include="..\..\..\cds\lock\scoped_lock.h" />\r
<ClInclude Include="..\..\..\cds\memory\mapper.h" />\r
<ClInclude Include="..\..\..\cds\memory\pool_allocator.h" />\r
<ClInclude Include="..\..\..\cds\memory\vyukov_queue_pool.h" />\r
<ClInclude Include="..\..\..\cds\algo\flat_combining.h">\r
<Filter>Header Files\cds\algo</Filter>\r
</ClInclude>\r
+ <ClInclude Include="..\..\..\cds\algo\int_algo.h">\r
+ <Filter>Header Files\cds\algo</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\..\cds\lock\array.h">\r
+ <Filter>Header Files\cds\lock</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\..\cds\lock\scoped_lock.h">\r
+ <Filter>Header Files\cds\lock</Filter>\r
+ </ClInclude>\r
</ItemGroup>\r
</Project>
\ No newline at end of file
#endif
static cds::OS::ThreadId s_MainThreadId = 0;
-static HINSTANCE s_DllInstance = NULL;
+static HINSTANCE s_DllInstance = nullptr;
#if _WIN32_WINNT < 0x0601
// For Windows below Windows 7
// Array of processor - cell relationship
// Array size is s_nProcessorCount
// s_arrProcessorCellRelationship[i] is the cell (the processor group) number for i-th processor
-// static unsigned int * s_arrProcessorCellRelationship = NULL;
+// static unsigned int * s_arrProcessorCellRelationship = nullptr;
static void discover_topology()
{
LPFN_GLPI glpi;
bool bDone = false;
- PSYSTEM_LOGICAL_PROCESSOR_INFORMATION buffer = NULL;
- PSYSTEM_LOGICAL_PROCESSOR_INFORMATION ptr = NULL;
+ PSYSTEM_LOGICAL_PROCESSOR_INFORMATION buffer = nullptr;
+ PSYSTEM_LOGICAL_PROCESSOR_INFORMATION ptr = nullptr;
DWORD returnLength = 0;
DWORD logicalProcessorCount = 0;
DWORD numaNodeCount = 0;
s_nProcessorGroupCount = 1;
glpi = (LPFN_GLPI) GetProcAddress( GetModuleHandle("kernel32"), "GetLogicalProcessorInformation" );
- if (NULL == glpi) {
+ if ( glpi == nullptr ) {
return;
}
buffer = reinterpret_cast<PSYSTEM_LOGICAL_PROCESSOR_INFORMATION>( ::malloc( returnLength ) );
- if (NULL == buffer) {
+ if ( buffer == nullptr ) {
// allocation failed
return;
}
static void prepare_current_processor_call()
{
s_fnGetCurrentProcessorNumber = (fnGetCurrentProcessorNumber) GetProcAddress( GetModuleHandle("kernel32"), "GetCurrentProcessorNumber" );
- if ( s_fnGetCurrentProcessorNumber == NULL )
+ if ( s_fnGetCurrentProcessorNumber == nullptr )
s_fnGetCurrentProcessorNumber = (fnGetCurrentProcessorNumber) GetProcAddress( GetModuleHandle("ntdll"), "NtGetCurrentProcessorNumber" );
}
namespace cds { namespace OS { namespace Win32 {
unsigned int topology::current_processor()
{
- if ( s_fnGetCurrentProcessorNumber != NULL )
+ if ( s_fnGetCurrentProcessorNumber != nullptr )
return s_fnGetCurrentProcessorNumber();
return 0;
}
case DLL_PROCESS_DETACH:
/*
#if _WIN32_WINNT < 0x0601
- if ( s_arrProcessorCellRelationship != NULL ) {
+ if ( s_arrProcessorCellRelationship != nullptr ) {
delete [] s_arrProcessorCellRelationship;
- s_arrProcessorCellRelationship = NULL;
+ s_arrProcessorCellRelationship = nullptr;
}
#endif
*/
/// Max array size of retired pointers
static const size_t c_nMaxRetireNodeCount = c_nHazardPointerPerThread * c_nMaxThreadCount * 2;
- GarbageCollector * GarbageCollector::m_pHZPManager = NULL;
+ GarbageCollector * GarbageCollector::m_pHZPManager = nullptr;
void CDS_STDCALL GarbageCollector::Construct( size_t nHazardPtrCount, size_t nMaxThreadCount, size_t nMaxRetiredPtrCount, scan_type nScanType )
{
m_pHZPManager->detachAllThread();
delete m_pHZPManager;
- m_pHZPManager = NULL;
+ m_pHZPManager = nullptr;
}
}
size_t nMaxRetiredPtrCount,
scan_type nScanType
)
- : m_pListHead(NULL)
+ : m_pListHead( nullptr )
,m_bStatEnabled( true )
,m_nHazardPointerCount( nHazardPtrCount == 0 ? c_nHazardPointerPerThread : nHazardPtrCount )
,m_nMaxThreadCount( nMaxThreadCount == 0 ? c_nMaxThreadCount : nMaxThreadCount )
hplist_node * pHead = m_pListHead.load( CDS_ATOMIC::memory_order_relaxed );
m_pListHead.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
- hplist_node * pNext = NULL;
+ hplist_node * pNext = nullptr;
for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
assert( hprec->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == nullThreadId
|| hprec->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == mainThreadId
void GarbageCollector::RetireHPRec( details::HPRec * pRec )
{
- assert( pRec != NULL );
+ assert( pRec != nullptr );
CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_RetireHPRec );
pRec->clear();
void GarbageCollector::detachAllThread()
{
- hplist_node * pNext = NULL;
+ hplist_node * pNext = nullptr;
const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
for ( hplist_node * hprec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); hprec; hprec = pNext ) {
pNext = hprec->m_pNextNode;
CDS_EXPORT_API DWORD cds::threading::wintls::Manager::Holder::m_key = TLS_OUT_OF_INDEXES;
__declspec( thread ) threading::msvc_internal::ThreadDataPlaceholder threading::msvc_internal::s_threadData;
- __declspec( thread ) threading::ThreadData * threading::msvc_internal::s_pThreadData = NULL;
+ __declspec(thread) threading::ThreadData * threading::msvc_internal::s_pThreadData = nullptr;
#else
pthread_key_t threading::pthread::Manager::Holder::m_key;
# if CDS_COMPILER == CDS_COMPILER_GCC || CDS_COMPILER == CDS_COMPILER_CLANG
__thread threading::gcc_internal::ThreadDataPlaceholder CDS_DATA_ALIGNMENT(8) threading::gcc_internal::s_threadData;
- __thread threading::ThreadData * threading::gcc_internal::s_pThreadData = NULL;
+ __thread threading::ThreadData * threading::gcc_internal::s_pThreadData = nullptr;
# endif
#endif
#ifdef CDS_CXX11_THREAD_LOCAL_SUPPORT
thread_local threading::cxx11_internal::ThreadDataPlaceholder CDS_DATA_ALIGNMENT(8) threading::cxx11_internal::s_threadData;
- thread_local threading::ThreadData * threading::cxx11_internal::s_pThreadData = NULL;
+ thread_local threading::ThreadData * threading::cxx11_internal::s_pThreadData = nullptr;
#endif
namespace details {
};
}
- GarbageCollector * GarbageCollector::m_pManager = NULL;
+ GarbageCollector * GarbageCollector::m_pManager = nullptr;
void CDS_STDCALL GarbageCollector::Construct(
size_t nLiberateThreshold
{
if ( m_pManager ) {
delete m_pManager;
- m_pManager = NULL;
+ m_pManager = nullptr;
}
}
namespace cds { namespace OS { CDS_CXX11_INLINE_NAMESPACE namespace Hpux {
size_t topology::s_nProcMapSize = 0;
- topology::processor_map * topology::s_procMap = NULL;
+ topology::processor_map * topology::s_procMap = nullptr;
void topology::make_processor_map()
{
void topology::init()
{
- assert( s_procMap == NULL );
+ assert( s_procMap == nullptr );
make_processor_map();
}
void topology::fini()
{
- assert( s_procMap != NULL );
+ assert( s_procMap );
if ( s_procMap ) {
::free( s_procMap );
- s_procMap = NULL;
+ s_procMap = nullptr;
}
}
void topology::init()
{
size_t len = sizeof( s_nProcessorCount );
- if ( sysctlbyname("hw.logicalcpu", &s_nProcessorCount, &len, NULL, 0 ) != 0 )
+ if ( sysctlbyname("hw.logicalcpu", &s_nProcessorCount, &len, nullptr, 0 ) != 0 )
s_nProcessorCount = 1;
}
static TestCase * current_test()
{
- assert( m_pCurTestCase != NULL );
+ assert( m_pCurTestCase );
return m_pCurTestCase;
}
std::string TestCase::m_strTestDataDir(".");
Config TestCase::m_Cfg;
- TestCase * TestCase::m_pCurTestCase = NULL;
+ TestCase * TestCase::m_pCurTestCase = nullptr;
TestCase *TestCase::m_root = 0;
Reporter *TestCase::m_reporter = 0;
char buf[ 4096 ];
- TestCfg * pMap = NULL;
+ TestCfg * pMap = nullptr;
while ( !s.eof() ) {
s.getline( buf, sizeof(buf)/sizeof(buf[0]) );
char * pszStr = buf;
TestThread( TestThread& src )
: m_Pool( src.m_Pool )
- , m_pThread( NULL )
+ , m_pThread( nullptr )
, m_bTimeElapsed( false )
, m_nDuration( 0 )
, m_nThreadNo( 0 )
public:
TestThread( ThreadPool& pool )
: m_Pool( pool )
- , m_pThread( NULL )
+ , m_pThread( nullptr )
, m_nDuration( 0 )
, m_nThreadNo( 0 )
{}
public:
ThreadPool( TestCase& tc )
: m_Test( tc )
- , m_pBarrierStart( NULL )
- , m_pBarrierDone( NULL )
+ , m_pBarrierStart( nullptr )
+ , m_pBarrierDone( nullptr )
{}
~ThreadPool();
value_type * pv;
pv = q.pop_back();
Deque::gc::scan();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 1 );
CPPUNIT_ASSERT( !q.empty() );
CPPUNIT_ASSERT( check_ic( q.size(), 2 ));
pv = q.pop_back();
Deque::gc::scan();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 2 );
CPPUNIT_ASSERT( !q.empty() );
CPPUNIT_ASSERT( check_ic( q.size(), 1 ));
pv = q.pop_back();
Deque::gc::scan();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 3 );
CPPUNIT_ASSERT( q.empty() );
CPPUNIT_ASSERT( check_ic( q.size(), 0 ));
pv = q.pop_back();
Deque::gc::scan();
- CPPUNIT_ASSERT( pv == NULL );
+ CPPUNIT_ASSERT( pv == nullptr );
CPPUNIT_ASSERT( q.empty() );
CPPUNIT_ASSERT( check_ic( q.size(), 0 ));
CPPUNIT_ASSERT( v1.nDisposeCount == 1 );
pv = q.pop_front();
Deque::gc::scan();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 1 );
CPPUNIT_ASSERT( !q.empty() );
CPPUNIT_ASSERT( check_ic( q.size(), 2 ));
pv = q.pop_front();
Deque::gc::scan();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 2 );
CPPUNIT_ASSERT( !q.empty() );
CPPUNIT_ASSERT( check_ic( q.size(), 1 ));
pv = q.pop_front();
Deque::gc::scan();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 3 );
CPPUNIT_ASSERT( q.empty() );
CPPUNIT_ASSERT( check_ic( q.size(), 0 ));
pv = q.pop_front();
Deque::gc::scan();
- CPPUNIT_ASSERT( pv == NULL );
+ CPPUNIT_ASSERT( pv == nullptr );
CPPUNIT_ASSERT( q.empty() );
CPPUNIT_ASSERT( check_ic( q.size(), 0 ));
CPPUNIT_ASSERT( v1.nDisposeCount == 2 );
pv = q.pop_back();
Deque::gc::scan();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 3 );
CPPUNIT_ASSERT( !q.empty() );
CPPUNIT_ASSERT( check_ic( q.size(), 2 ));
pv = q.pop_back();
Deque::gc::scan();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 2 );
CPPUNIT_ASSERT( !q.empty() );
CPPUNIT_ASSERT( check_ic( q.size(), 1 ));
pv = q.pop_back();
Deque::gc::scan();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 1 );
CPPUNIT_ASSERT( q.empty() );
CPPUNIT_ASSERT( check_ic( q.size(), 0 ));
pv = q.pop_back();
Deque::gc::scan();
- CPPUNIT_ASSERT( pv == NULL );
+ CPPUNIT_ASSERT( pv == nullptr );
CPPUNIT_ASSERT( q.empty() );
CPPUNIT_ASSERT( check_ic( q.size(), 0 ));
CPPUNIT_ASSERT( v1.nDisposeCount == 3 );
pv = q.pop_front();
Deque::gc::scan();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 3 );
CPPUNIT_ASSERT( !q.empty() );
CPPUNIT_ASSERT( check_ic( q.size(), 2 ));
pv = q.pop_front();
Deque::gc::scan();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 2 );
CPPUNIT_ASSERT( !q.empty() );
CPPUNIT_ASSERT( check_ic( q.size(), 1 ));
pv = q.pop_front();
Deque::gc::scan();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 1 );
CPPUNIT_ASSERT( q.empty() );
CPPUNIT_ASSERT( check_ic( q.size(), 0 ));
pv = q.pop_front();
Deque::gc::scan();
- CPPUNIT_ASSERT( pv == NULL );
+ CPPUNIT_ASSERT( pv == nullptr );
CPPUNIT_ASSERT( q.empty() );
CPPUNIT_ASSERT( check_ic( q.size(), 0 ));
CPPUNIT_ASSERT( v1.nDisposeCount == 4 );
{
rcu_lock l;
pVal = m.get( nKey );
- CPPUNIT_ASSERT( pVal != NULL );
+ CPPUNIT_ASSERT( pVal != nullptr );
CPPUNIT_CHECK( pVal->first == nKey );
CPPUNIT_CHECK( pVal->second.m_val == nKey );
ep.release();
{
rcu_lock l;
- CPPUNIT_CHECK( m.get( nKey ) == NULL );
+ CPPUNIT_CHECK( m.get( nKey ) == nullptr );
CPPUNIT_CHECK( !m.extract( ep, nKey ));
CPPUNIT_CHECK( ep.empty() );
nKey = arr[i+1];
pVal = m.get_with( other_item(nKey), other_less() );
- CPPUNIT_ASSERT( pVal != NULL );
+ CPPUNIT_ASSERT( pVal != nullptr );
CPPUNIT_CHECK( pVal->first == nKey );
CPPUNIT_CHECK( pVal->second.m_val == nKey );
ep.release();
{
rcu_lock l;
- CPPUNIT_CHECK( m.get_with( other_item(nKey), other_less() ) == NULL );
+ CPPUNIT_CHECK( m.get_with( other_item(nKey), other_less() ) == nullptr );
CPPUNIT_CHECK( !m.extract_with( ep, other_item(nKey), other_less() ));
CPPUNIT_CHECK( ep.empty() );
}
CPPUNIT_CHECK( check_size( m, 0 ));
{
rcu_lock l;
- CPPUNIT_CHECK( m.get( int(nLimit / 2) ) == NULL );
+ CPPUNIT_CHECK( m.get( int(nLimit / 2) ) == nullptr );
CPPUNIT_CHECK( !m.extract( ep, int(nLimit / 2) ));
CPPUNIT_CHECK( ep.empty() );
}
CPPUNIT_ASSERT( m.empty());
}
- CPPUNIT_MSG( PrintStat()(m, NULL) );
+ CPPUNIT_MSG( PrintStat()(m, nullptr) );
}
template <class Map, typename PrintStat >
CPPUNIT_ASSERT( m.insert( i, i * 2 ) != m.end() );
typename Map::value_type * pVal = m.get_min();
- CPPUNIT_ASSERT( pVal != NULL );
+ CPPUNIT_ASSERT( pVal != nullptr );
CPPUNIT_CHECK( pVal->first == i );
CPPUNIT_CHECK( pVal->second.m_val == i * 2 );
}
m.clear();
CPPUNIT_ASSERT( m.empty() );
CPPUNIT_ASSERT( check_size( m, 0 ));
- CPPUNIT_CHECK( m.get_min() == NULL );
- CPPUNIT_CHECK( m.get_max() == NULL );
+ CPPUNIT_CHECK( m.get_min() == nullptr );
+ CPPUNIT_CHECK( m.get_max() == nullptr );
// iterator test
CPPUNIT_ASSERT( m.insert( i, i * 2 ) != m.end() );
typename Map::value_type * pVal = m.get_max();
- CPPUNIT_ASSERT( pVal != NULL );
+ CPPUNIT_ASSERT( pVal != nullptr );
CPPUNIT_CHECK( pVal->first == i );
CPPUNIT_CHECK( pVal->second.m_val == i * 2 );
}
{
rcu_lock l;
value_type * pVal = m.get( nKey );
- CPPUNIT_ASSERT( pVal != NULL );
+ CPPUNIT_ASSERT( pVal != nullptr );
CPPUNIT_CHECK( pVal->first == nKey );
CPPUNIT_CHECK( pVal->second.m_val == nKey * 2 );
}
{
rcu_lock l;
- CPPUNIT_CHECK( m.get( nKey ) == NULL );
+ CPPUNIT_CHECK( m.get( nKey ) == nullptr );
}
CPPUNIT_CHECK( !m.extract(ep, nKey) );
}
{
rcu_lock l;
value_type * pVal = m.get_with( wrapped_item(nKey), wrapped_less() );
- CPPUNIT_ASSERT( pVal != NULL );
+ CPPUNIT_ASSERT( pVal != nullptr );
CPPUNIT_CHECK( pVal->first == nKey );
CPPUNIT_CHECK( pVal->second.m_val == nKey * 2 );
}
{
rcu_lock l;
- CPPUNIT_CHECK( m.get_with( wrapped_item(nKey), wrapped_less() ) == NULL );
+ CPPUNIT_CHECK( m.get_with( wrapped_item(nKey), wrapped_less() ) == nullptr );
}
CPPUNIT_CHECK( !m.extract_with(ep, wrapped_item(nKey), wrapped_less()) );
}
CPPUNIT_CHECK( !m.extract_max(ep) );
}
- CPPUNIT_MSG( PrintStat()(m, NULL) );
+ CPPUNIT_MSG( PrintStat()(m, nullptr) );
}
public:
size_t nCurIdx = 0;
while ( nTotalAllocated < s_nAllocPerPass ) {
size_t nSize = m_arrSize[nCurIdx] + 4;
- char * p = a.allocate( nSize, NULL );
- CPPUNIT_ASSERT( p != NULL );
+ char * p = a.allocate( nSize, nullptr );
+ CPPUNIT_ASSERT( p != nullptr );
memset( p, 0x96, nSize );
nTotalAllocated += nSize;
a.deallocate( p, 1 );
for ( size_t nPass = 0; nPass < s_nPassCount; ++nPass ) {
unsigned long long nTotalAllocated = 0;
- char * pHead = a.allocate( sizeof(void *), NULL );
- CPPUNIT_ASSERT( pHead != NULL );
+ char * pHead = a.allocate( sizeof(void *), nullptr );
+ CPPUNIT_ASSERT( pHead != nullptr );
char * pCur = pHead;
size_t nCurIdx = 0;
while ( nTotalAllocated < s_nAllocPerPass ) {
size_t nSize = m_arrSize[nCurIdx] + sizeof(void *);
- char * p = a.allocate( nSize, NULL );
- CPPUNIT_ASSERT( p != NULL );
+ char * p = a.allocate( nSize, nullptr );
+ CPPUNIT_ASSERT( p != nullptr );
memset( p, 0x96, nSize );
*((char **) pCur) = p;
pCur = p;
if ( ++nCurIdx > s_nArrSizeSize )
nCurIdx = 0;
}
- *((char **) pCur) = NULL;
+ *((char **) pCur) = nullptr;
pCur = pHead;
- while ( pCur != NULL ) {
+ while ( pCur != nullptr ) {
char * pNext = *((char **) pCur);
a.deallocate( pCur, 0 );
pCur = pNext;
{
rcu_lock lock;
value_type * pGet = l.get( a[i] );
- CPPUNIT_ASSERT( pGet != NULL );
+ CPPUNIT_ASSERT( pGet != nullptr );
CPPUNIT_CHECK( pGet->nKey == a[i] );
CPPUNIT_CHECK( pGet->nVal == a[i] * 2 );
ep.release();
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get( a[i]) == NULL );
+ CPPUNIT_CHECK( l.get( a[i] ) == nullptr );
CPPUNIT_CHECK( !l.extract( ep, a[i] ) );
CPPUNIT_CHECK( ep.empty() );
}
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get( a[0] ) == NULL );
+ CPPUNIT_CHECK( l.get( a[0] ) == nullptr );
CPPUNIT_CHECK( !l.extract( ep, a[0] ));
CPPUNIT_CHECK( ep.empty() );
}
{
rcu_lock lock;
value_type * pGet = l.get_with( itm, other_less() );
- CPPUNIT_ASSERT( pGet != NULL );
+ CPPUNIT_ASSERT( pGet != nullptr );
CPPUNIT_CHECK( pGet->nKey == a[i] );
CPPUNIT_CHECK( pGet->nVal == a[i] * 2 );
ep.release();
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get_with( itm, other_less()) == NULL );
+ CPPUNIT_CHECK( l.get_with( itm, other_less() ) == nullptr );
CPPUNIT_CHECK( !l.extract_with( ep, itm, other_less() ));
CPPUNIT_CHECK( ep.empty() );
}
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get_with( other_item(0), other_less() ) == NULL );
+ CPPUNIT_CHECK( l.get_with( other_item( 0 ), other_less() ) == nullptr );
CPPUNIT_CHECK( !l.extract_with( ep, other_item(0), other_less() ));
CPPUNIT_CHECK( ep.empty() );
}
CPPUNIT_ASSERT( l.find( v1.key(), find_functor() ));
CPPUNIT_ASSERT( v1.s.nFindCall == 1 );
- CPPUNIT_ASSERT( l.find_with( v2.key(), less<value_type>() ) == NULL );
- CPPUNIT_ASSERT( l.find( v3.key() ) == NULL );
+ CPPUNIT_ASSERT( l.find_with( v2.key(), less<value_type>() ) == nullptr );
+ CPPUNIT_ASSERT( l.find( v3.key() ) == nullptr );
CPPUNIT_ASSERT( !l.empty() );
//CPPUNIT_ASSERT( !l.insert( v1 )) ; // assertion "is_empty" is raised
CPPUNIT_ASSERT( !l.find_with( v3.key(), less<value_type>() ));
CPPUNIT_ASSERT( !l.empty() );
- CPPUNIT_ASSERT( !l.insert( v1 )) ; // assertion "is_empty" is not raised since pNext is NULL
+ CPPUNIT_ASSERT( !l.insert( v1 )) ; // assertion "is_empty" is not raised since pNext is nullptr
{
value_type v( v1 );
{
rcu_lock lock;
value_type * pGet = l.get( a[i] );
- CPPUNIT_ASSERT( pGet != NULL );
+ CPPUNIT_ASSERT( pGet != nullptr );
CPPUNIT_CHECK( pGet->nKey == a[i] );
CPPUNIT_CHECK( pGet->nVal == a[i] * 2 );
ep.release();
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get( a[i]) == NULL );
+ CPPUNIT_CHECK( l.get( a[i] ) == nullptr );
CPPUNIT_CHECK( !l.extract( ep, a[i] ));
CPPUNIT_CHECK( ep.empty() );
}
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get( a[0] ) == NULL );
+ CPPUNIT_CHECK( l.get( a[0] ) == nullptr );
CPPUNIT_CHECK( !l.extract( ep, a[0] ) );
CPPUNIT_CHECK( ep.empty() );
}
{
rcu_lock lock;
value_type * pGet = l.get_with( itm, other_less() );
- CPPUNIT_ASSERT( pGet != NULL );
+ CPPUNIT_ASSERT( pGet != nullptr );
CPPUNIT_CHECK( pGet->nKey == a[i] );
CPPUNIT_CHECK( pGet->nVal == a[i] * 2 );
ep.release();
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get_with( itm, other_less()) == NULL );
+ CPPUNIT_CHECK( l.get_with( itm, other_less() ) == nullptr );
CPPUNIT_CHECK( !l.extract_with( ep, itm, other_less() ));
CPPUNIT_CHECK( ep.empty() );
}
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get_with( other_item(0), other_less() ) == NULL );
+ CPPUNIT_CHECK( l.get_with( other_item( 0 ), other_less() ) == nullptr );
CPPUNIT_CHECK( !l.extract_with( ep, other_item(0), other_less() ));
CPPUNIT_CHECK( ep.empty() );
}
CPPUNIT_ASSERT( l.find( v1.key(), find_functor() ));
CPPUNIT_ASSERT( v1.s.nFindCall == 1 );
- CPPUNIT_ASSERT( l.find_with( v2.key(), less<value_type>() ) == NULL );
+ CPPUNIT_ASSERT( l.find_with( v2.key(), less<value_type>() ) == nullptr );
CPPUNIT_ASSERT( !l.find_with( v3.key(), less<value_type>(), find_functor() ));
CPPUNIT_ASSERT( !l.empty() );
- CPPUNIT_ASSERT( !l.insert( v1 )) ; // assertion "is_empty" is not raised since pNext is NULL
+ CPPUNIT_ASSERT( !l.insert( v1 )) ; // assertion "is_empty" is not raised since pNext is nullptr
{
value_type v( v1 );
{
rcu_lock lock;
value_type * pGet = l.get( a[i] );
- CPPUNIT_ASSERT( pGet != NULL );
+ CPPUNIT_ASSERT( pGet != nullptr );
CPPUNIT_CHECK( pGet->nKey == a[i] );
CPPUNIT_CHECK( pGet->nVal == a[i] * 2 );
ep.release();
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get( a[i]) == NULL );
+ CPPUNIT_CHECK( l.get( a[i] ) == nullptr );
CPPUNIT_CHECK( !l.extract( ep, a[i] ));
CPPUNIT_CHECK( ep.empty() );
}
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get( a[0] ) == NULL );
+ CPPUNIT_CHECK( l.get( a[0] ) == nullptr );
CPPUNIT_CHECK( !l.extract( ep, a[0] ) );
CPPUNIT_CHECK( ep.empty() );
}
{
rcu_lock lock;
value_type * pGet = l.get_with( itm, other_less() );
- CPPUNIT_ASSERT( pGet != NULL );
+ CPPUNIT_ASSERT( pGet != nullptr );
CPPUNIT_CHECK( pGet->nKey == a[i] );
CPPUNIT_CHECK( pGet->nVal == a[i] * 2 );
ep.release();
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get_with( itm, other_less()) == NULL );
+ CPPUNIT_CHECK( l.get_with( itm, other_less() ) == nullptr );
CPPUNIT_CHECK( !l.extract_with( ep, itm, other_less() ));
CPPUNIT_CHECK( ep.empty() );
}
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get_with( other_item(0), other_less() ) == NULL );
+ CPPUNIT_CHECK( l.get_with( other_item( 0 ), other_less() ) == nullptr );
CPPUNIT_CHECK( !l.extract_with( ep, other_item(0), other_less() ));
CPPUNIT_CHECK( ep.empty() );
}
{
rcu_lock lock;
value_type * pGet = l.get( a[i] );
- CPPUNIT_ASSERT( pGet != NULL );
+ CPPUNIT_ASSERT( pGet != nullptr );
CPPUNIT_CHECK( pGet->first == a[i] );
CPPUNIT_CHECK( pGet->second.m_val == a[i] * 2 );
ep.release();
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get( a[i]) == NULL );
+ CPPUNIT_CHECK( l.get( a[i] ) == nullptr );
CPPUNIT_CHECK( !l.extract( ep, a[i] ));
CPPUNIT_CHECK( ep.empty() );
}
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get( a[0] ) == NULL );
+ CPPUNIT_CHECK( l.get( a[0] ) == nullptr );
CPPUNIT_CHECK( !l.extract( ep, a[0] ) );
CPPUNIT_CHECK( ep.empty() );
}
{
rcu_lock lock;
value_type * pGet = l.get_with( itm, other_less() );
- CPPUNIT_ASSERT( pGet != NULL );
+ CPPUNIT_ASSERT( pGet != nullptr );
CPPUNIT_CHECK( pGet->first == a[i] );
CPPUNIT_CHECK( pGet->second.m_val == a[i] * 2 );
ep.release();
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get_with( itm, other_less()) == NULL );
+ CPPUNIT_CHECK( l.get_with( itm, other_less()) == nullptr );
CPPUNIT_CHECK( !l.extract_with( ep, itm, other_less() ));
CPPUNIT_CHECK( ep.empty() );
}
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get_with( 3.14f, other_less() ) == NULL );
+ CPPUNIT_CHECK( l.get_with( 3.14f, other_less() ) == nullptr );
CPPUNIT_CHECK( !l.extract_with( ep, 3.14f, other_less() ));
CPPUNIT_CHECK( ep.empty() );
}
{
rcu_lock lock;
value_type * pGet = l.get( a[i] );
- CPPUNIT_ASSERT( pGet != NULL );
+ CPPUNIT_ASSERT( pGet != nullptr );
CPPUNIT_CHECK( pGet->nKey == a[i] );
CPPUNIT_CHECK( pGet->nVal == a[i] * 2 );
ep.release();
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get( a[i]) == NULL );
+ CPPUNIT_CHECK( l.get( a[i] ) == nullptr );
CPPUNIT_CHECK( !l.extract( ep, a[i] ));
CPPUNIT_CHECK( ep.empty() );
}
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get( a[0] ) == NULL );
+ CPPUNIT_CHECK( l.get( a[0] ) == nullptr );
CPPUNIT_CHECK( !l.extract( ep, a[0] ) );
CPPUNIT_CHECK( ep.empty() );
}
{
rcu_lock lock;
value_type * pGet = l.get_with( itm, other_less() );
- CPPUNIT_ASSERT( pGet != NULL );
+ CPPUNIT_ASSERT( pGet != nullptr );
CPPUNIT_CHECK( pGet->nKey == a[i] );
CPPUNIT_CHECK( pGet->nVal == a[i] * 2 );
ep.release();
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get_with( itm, other_less()) == NULL );
+ CPPUNIT_CHECK( l.get_with( itm, other_less() ) == nullptr );
CPPUNIT_CHECK( !l.extract_with( ep, itm, other_less() ));
CPPUNIT_CHECK( ep.empty() );
}
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get_with( other_item(0), other_less() ) == NULL );
+ CPPUNIT_CHECK( l.get_with( other_item( 0 ), other_less() ) == nullptr );
CPPUNIT_CHECK( !l.extract_with( ep, other_item(0), other_less() ));
CPPUNIT_CHECK( ep.empty() );
}
{
rcu_lock lock;
value_type * pGet = l.get( a[i] );
- CPPUNIT_ASSERT( pGet != NULL );
+ CPPUNIT_ASSERT( pGet != nullptr );
CPPUNIT_CHECK( pGet->first == a[i] );
CPPUNIT_CHECK( pGet->second.m_val == a[i] * 2 );
ep.release();
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get( a[i]) == NULL );
+ CPPUNIT_CHECK( l.get( a[i] ) == nullptr );
CPPUNIT_CHECK( !l.extract( ep, a[i] ));
CPPUNIT_CHECK( ep.empty() );
}
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get( a[0] ) == NULL );
+ CPPUNIT_CHECK( l.get( a[0] ) == nullptr );
CPPUNIT_CHECK( !l.extract( ep, a[0] ) );
CPPUNIT_CHECK( ep.empty() );
}
{
rcu_lock lock;
value_type * pGet = l.get_with( itm, other_less() );
- CPPUNIT_ASSERT( pGet != NULL );
+ CPPUNIT_ASSERT( pGet != nullptr );
CPPUNIT_CHECK( pGet->first == a[i] );
CPPUNIT_CHECK( pGet->second.m_val == a[i] * 2 );
ep.release();
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get_with( itm, other_less()) == NULL );
+ CPPUNIT_CHECK( l.get_with( itm, other_less() ) == nullptr );
CPPUNIT_CHECK( !l.extract_with( ep, itm, other_less() ));
CPPUNIT_CHECK( ep.empty() );
}
{
rcu_lock lock;
- CPPUNIT_CHECK( l.get_with( 3.14f, other_less() ) == NULL );
+ CPPUNIT_CHECK( l.get_with( 3.14f, other_less() ) == nullptr );
CPPUNIT_CHECK( !l.extract_with( ep, 3.14f, other_less() ));
CPPUNIT_CHECK( ep.empty() );
}
// Pop test
key_type nPrev = c_nMinValue + key_type(pq.capacity()) - 1;
key_type * p = pq.pop();
- CPPUNIT_ASSERT( p != NULL );
+ CPPUNIT_ASSERT( p != nullptr );
CPPUNIT_CHECK_EX( *p == nPrev, "Expected=" << nPrev << ", current=" << *p );
CPPUNIT_ASSERT( pq.size() == pq.capacity() - 1 );
nSize = pq.size();
while ( pq.size() > 1 ) {
p = pq.pop();
- CPPUNIT_ASSERT( p != NULL );
+ CPPUNIT_ASSERT( p != nullptr );
CPPUNIT_CHECK_EX( *p == nPrev - 1, "Expected=" << nPrev - 1 << ", current=" << *p );
nPrev = *p;
--nSize;
CPPUNIT_ASSERT( pq.size() == 1 );
p = pq.pop();
- CPPUNIT_ASSERT( p != NULL );
+ CPPUNIT_ASSERT( p != nullptr );
CPPUNIT_CHECK_EX( *p == c_nMinValue, "Expected=" << c_nMinValue << ", current=" << *p );
CPPUNIT_ASSERT( !pq.full() );
value_type * pv;
pv = q.pop();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 1 );
CPPUNIT_ASSERT( !q.empty() );
CPPUNIT_CHECK( v1.nDisposeCount == 0 );
CPPUNIT_CHECK( v3.nDisposeCount == 0 );
pv = q.pop();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 2 );
CPPUNIT_ASSERT( !q.empty() );
CPPUNIT_CHECK( v1.nDisposeCount == 0 );
CPPUNIT_CHECK( v3.nDisposeCount == 0 );
pv = q.dequeue();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 3 );
CPPUNIT_ASSERT( q.empty() );
CPPUNIT_CHECK( v1.nDisposeCount == 0 );
CPPUNIT_CHECK( v3.nDisposeCount == 0 );
pv = q.dequeue();
- CPPUNIT_ASSERT( pv == NULL );
+ CPPUNIT_ASSERT( pv == nullptr );
CPPUNIT_CHECK( v1.nDisposeCount == 0 );
CPPUNIT_CHECK( v2.nDisposeCount == 0 );
CPPUNIT_CHECK( v3.nDisposeCount == 0 );
value_type * pv;
pv = q.pop();
Queue::gc::scan();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 1 );
CPPUNIT_ASSERT( !q.empty() );
CPPUNIT_CHECK( v1.nDisposeCount == 0 );
pv = q.pop();
Queue::gc::scan();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 2 );
CPPUNIT_ASSERT( !q.empty() );
CPPUNIT_CHECK( v1.nDisposeCount == 1 );
pv = q.pop();
Queue::gc::scan();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 3 );
CPPUNIT_ASSERT( q.empty() );
CPPUNIT_CHECK( v1.nDisposeCount == 1 );
CPPUNIT_CHECK( v3.nDisposeCount == 0 );
pv = q.pop();
- CPPUNIT_ASSERT( pv == NULL );
+ CPPUNIT_ASSERT( pv == nullptr );
CPPUNIT_CHECK( v1.nDisposeCount == 1 );
CPPUNIT_CHECK( v2.nDisposeCount == 1 );
CPPUNIT_CHECK( v3.nDisposeCount == 0 );
value_type * pv;
pv = q.pop();
Queue::gc::scan();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 1 );
CPPUNIT_ASSERT( !q.empty() );
CPPUNIT_CHECK( v1.nDisposeCount == 0 );
pv = q.pop();
Queue::gc::scan();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 2 );
CPPUNIT_ASSERT( !q.empty() );
CPPUNIT_CHECK( v1.nDisposeCount == 0 );
pv = q.pop();
Queue::gc::scan();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 3 );
CPPUNIT_ASSERT( !q.empty() );
CPPUNIT_CHECK( v1.nDisposeCount == 0 );
pv = q.pop();
Queue::gc::scan();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 4 );
CPPUNIT_ASSERT( q.empty() );
CPPUNIT_CHECK( v1.nDisposeCount == 1 );
pv = q.pop();
Queue::gc::scan();
- CPPUNIT_CHECK( pv == NULL );
+ CPPUNIT_CHECK( pv == nullptr );
CPPUNIT_CHECK( v1.nDisposeCount == 1 );
CPPUNIT_CHECK( v2.nDisposeCount == 1 );
CPPUNIT_CHECK( v3.nDisposeCount == 1 );
value_type * pv;
pv = q.pop();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 1 );
CPPUNIT_ASSERT( !q.empty() );
CPPUNIT_CHECK( v1.nDisposeCount == 0 );
CPPUNIT_CHECK( v3.nDisposeCount == 0 );
pv = q.pop();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 2 );
CPPUNIT_ASSERT( !q.empty() );
CPPUNIT_CHECK( v1.nDisposeCount == 0 );
CPPUNIT_CHECK( v3.nDisposeCount == 0 );
pv = q.pop();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 3 );
CPPUNIT_ASSERT( q.empty() );
CPPUNIT_CHECK( v1.nDisposeCount == 0 );
CPPUNIT_CHECK( v3.nDisposeCount == 0 );
pv = q.pop();
- CPPUNIT_ASSERT( pv == NULL );
+ CPPUNIT_ASSERT( pv == nullptr );
CPPUNIT_CHECK( v1.nDisposeCount == 0 );
CPPUNIT_CHECK( v2.nDisposeCount == 0 );
CPPUNIT_CHECK( v3.nDisposeCount == 0 );
else
pVal = q.dequeue();
- CPPUNIT_ASSERT( pVal != NULL );
+ CPPUNIT_ASSERT( pVal != nullptr );
int nSegment = int( nCount / q.quasi_factor() );
int nMin = nSegment * int(q.quasi_factor());
CPPUNIT_CHECK( misc::check_size( q, 0 ));
// pop from empty queue
- CPPUNIT_ASSERT( q.pop() == NULL );
+ CPPUNIT_ASSERT( q.pop() == nullptr );
CPPUNIT_CHECK( q.empty() );
CPPUNIT_CHECK( misc::check_size( q, 0 ));
CPPUNIT_ASSERT( s.unlink( e1 ) );
CPPUNIT_ASSERT( s.erase_with( k2, typename std::conditional<Set::c_isSorted, less2, equal_to2>::type() ) == &e2 );
- CPPUNIT_ASSERT( s.erase( e2 ) == NULL );
+ CPPUNIT_ASSERT( s.erase( e2 ) == nullptr );
CPPUNIT_ASSERT( e3.nEraseCount == 0 );
CPPUNIT_ASSERT( s.erase_with( k3, typename std::conditional<Set::c_isSorted, less2, equal_to2>::type(), erase_functor()) == &e3 );
CPPUNIT_ASSERT( e3.nEraseCount == 1 );
- CPPUNIT_ASSERT( s.erase( k3, erase_functor()) == NULL );
+ CPPUNIT_ASSERT( s.erase( k3, erase_functor() ) == nullptr );
CPPUNIT_ASSERT( e3.nEraseCount == 1 );
CPPUNIT_ASSERT( s.insert( e3 ) );
CPPUNIT_ASSERT( check_size( s, 0 ));
// insert/find test
- CPPUNIT_ASSERT( s.find( v1.key() ) == NULL );
+ CPPUNIT_ASSERT( s.find( v1.key() ) == nullptr );
CPPUNIT_ASSERT( s.insert( v1 ));
CPPUNIT_ASSERT( s.find_with( v1.key(), less<value_type>() ) == &v1 );
CPPUNIT_ASSERT( check_size( s, 1 ));
CPPUNIT_ASSERT( !s.empty() );
- CPPUNIT_ASSERT( s.find( v2.key() ) == NULL );
+ CPPUNIT_ASSERT( s.find( v2.key() ) == nullptr );
CPPUNIT_ASSERT( s.insert( v2 ));
CPPUNIT_ASSERT( v2.nFindCount == 0 );
CPPUNIT_ASSERT( s.find( key = v2.key(), find_functor() ));
{
find_functor ff;
- CPPUNIT_ASSERT( s.find( v3 ) == NULL );
+ CPPUNIT_ASSERT( s.find( v3 ) == nullptr );
CPPUNIT_ASSERT( s.insert( v3 ));
CPPUNIT_ASSERT( v3.nFindCount == 0 );
CPPUNIT_ASSERT( s.find_with( v3, less<value_type>(), cds::ref(ff) ));
{
rcu_lock l;
pVal = s.get( nKey );
- CPPUNIT_ASSERT( pVal != NULL );
+ CPPUNIT_ASSERT( pVal != nullptr );
CPPUNIT_CHECK( pVal->nKey == nKey );
CPPUNIT_CHECK( pVal->nVal == nKey * 2 );
ep.release();
{
rcu_lock l;
- CPPUNIT_CHECK( s.get( nKey ) == NULL );
+ CPPUNIT_CHECK( s.get( nKey ) == nullptr );
CPPUNIT_CHECK( !s.extract( ep, nKey ));
CPPUNIT_CHECK( ep.empty() );
nKey = arr[i+1];
pVal = s.get_with( nKey, less<value_type>() );
- CPPUNIT_ASSERT( pVal != NULL );
+ CPPUNIT_ASSERT( pVal != nullptr );
CPPUNIT_CHECK( pVal->nKey == nKey );
CPPUNIT_CHECK( pVal->nVal == nKey * 2 );
ep.release();
{
rcu_lock l;
- CPPUNIT_CHECK( s.get_with( nKey, less<value_type>() ) == NULL );
+ CPPUNIT_CHECK( s.get_with( nKey, less<value_type>() ) == nullptr );
CPPUNIT_CHECK( !s.extract_with( ep, nKey, less<value_type>() ));
CPPUNIT_CHECK( ep.empty() );
}
CPPUNIT_CHECK( check_size( s, 0 ));
{
rcu_lock l;
- CPPUNIT_CHECK( s.get( 100 ) == NULL );
+ CPPUNIT_CHECK( s.get( 100 ) == nullptr );
CPPUNIT_CHECK( !s.extract( ep, 100 ));
CPPUNIT_CHECK( ep.empty() );
}
Set::gc::force_dispose();
}
- CPPUNIT_MSG( PrintStat()( s, NULL ));
+ CPPUNIT_MSG( PrintStat()(s, nullptr) );
}
template <typename Set>
CPPUNIT_ASSERT( check_size( s, 0 ));
// insert/find test
- CPPUNIT_ASSERT( s.find( v1.key() ) == NULL );
+ CPPUNIT_ASSERT( s.find( v1.key() ) == nullptr );
CPPUNIT_ASSERT( s.insert( v1 ));
CPPUNIT_ASSERT( s.find( v1.key() ) == &v1 );
CPPUNIT_ASSERT( check_size( s, 1 ));
CPPUNIT_ASSERT( !s.empty() );
- CPPUNIT_ASSERT( s.find_with( v2.key(), less() ) == NULL );
+ CPPUNIT_ASSERT( s.find_with( v2.key(), less() ) == nullptr );
CPPUNIT_ASSERT( s.insert( v2 ));
CPPUNIT_ASSERT( v2.nFindCount == 0 );
CPPUNIT_ASSERT( s.find_with( key = v2.key(), less(), find_functor() ));
{
find_functor ff;
- CPPUNIT_ASSERT( s.find( v3 ) == NULL );
+ CPPUNIT_ASSERT( s.find( v3 ) == nullptr );
CPPUNIT_ASSERT( s.insert( v3 ));
CPPUNIT_ASSERT( v3.nFindCount == 0 );
CPPUNIT_ASSERT( s.find( v3, cds::ref(ff) ));
//CPPUNIT_MSG( PrintStat()(s, "Ensure test") );
// get_min test
- CPPUNIT_CHECK( s.get_min() == NULL );
- CPPUNIT_CHECK( s.get_max() == NULL );
+ CPPUNIT_CHECK( s.get_min() == nullptr );
+ CPPUNIT_CHECK( s.get_max() == nullptr );
{
value_type v[1000];
CPPUNIT_ASSERT( s.insert( v[i] ));
value_type * pVal = s.get_min();
- CPPUNIT_ASSERT( pVal != NULL );
+ CPPUNIT_ASSERT( pVal != nullptr );
CPPUNIT_CHECK( pVal->nKey == i );
CPPUNIT_CHECK( pVal->nVal == i * 2 );
}
CPPUNIT_ASSERT( s.insert( v[i] ));
value_type * pVal = s.get_max();
- CPPUNIT_ASSERT( pVal != NULL );
+ CPPUNIT_ASSERT( pVal != nullptr );
CPPUNIT_CHECK( pVal->nKey == i );
CPPUNIT_CHECK( pVal->nVal == i * 2 );
}
CPPUNIT_ASSERT( s.begin() == s.end() );
CPPUNIT_ASSERT( s.cbegin() == s.cend() );
- CPPUNIT_MSG( PrintStat()(s, NULL) );
+ CPPUNIT_MSG( PrintStat()(s, nullptr) );
}
public:
{
rcu_lock l;
pVal = s.get( i );
- CPPUNIT_ASSERT( pVal != NULL );
+ CPPUNIT_ASSERT( pVal != nullptr );
CPPUNIT_CHECK( pVal->nKey == i );
CPPUNIT_CHECK( pVal->nVal == i * 2 );
pVal->nVal *= 2;
{
rcu_lock l;
- CPPUNIT_CHECK( s.get( i ) == NULL );
+ CPPUNIT_CHECK( s.get( i ) == nullptr );
}
CPPUNIT_CHECK( !s.extract( ep, i ) );
CPPUNIT_ASSERT( ep.empty() );
{
rcu_lock l;
value_type * pVal = s.get_with( other_key(i), other_key_less<typename Set::value_type>() );
- CPPUNIT_ASSERT( pVal != NULL );
+ CPPUNIT_ASSERT( pVal != nullptr );
CPPUNIT_CHECK( pVal->nKey == i );
CPPUNIT_CHECK( pVal->nVal == i * 2 );
pVal->nVal *= 2;
{
rcu_lock l;
- CPPUNIT_CHECK( s.get_with( other_key(i), other_key_less<typename Set::value_type>() ) == NULL );
+ CPPUNIT_CHECK( s.get_with( other_key( i ), other_key_less<typename Set::value_type>() ) == nullptr );
}
CPPUNIT_CHECK( !s.extract_with( ep, other_key(i), other_key_less<typename Set::value_type>() ));
}
CPPUNIT_CHECK( !s.extract_max(ep) );
}
- CPPUNIT_MSG( PrintStat()(s, NULL) );
+ CPPUNIT_MSG( PrintStat()(s, nullptr) );
}
template <typename Set>
CPPUNIT_ASSERT( s.unlink( e1 ) );
CPPUNIT_ASSERT( s.erase_with( k2, less2() ) == &e2 );
- CPPUNIT_ASSERT( s.erase( e2 ) == NULL );
+ CPPUNIT_ASSERT( s.erase( e2 ) == nullptr );
CPPUNIT_ASSERT( e3.nEraseCount == 0 );
CPPUNIT_ASSERT( s.erase_with( k3, less2(), erase_functor()) == &e3 );
CPPUNIT_ASSERT( e3.nEraseCount == 1 );
- CPPUNIT_ASSERT( s.erase( k3, erase_functor()) == NULL );
+ CPPUNIT_ASSERT( s.erase( k3, erase_functor() ) == nullptr );
CPPUNIT_ASSERT( e3.nEraseCount == 1 );
CPPUNIT_ASSERT( s.insert( e3 ) );
{
rcu_lock l;
pVal = s.get( nKey );
- CPPUNIT_ASSERT( pVal != NULL );
+ CPPUNIT_ASSERT( pVal != nullptr );
CPPUNIT_CHECK( pVal->nKey == nKey );
CPPUNIT_CHECK( pVal->nVal == nKey );
ep.release();
{
rcu_lock l;
- CPPUNIT_CHECK( s.get( nKey ) == NULL );
+ CPPUNIT_CHECK( s.get( nKey ) == nullptr );
CPPUNIT_CHECK( !s.extract( ep, nKey ));
CPPUNIT_CHECK( ep.empty() );
nKey = arr[i+1];
pVal = s.get_with( other_item(nKey), other_less() );
- CPPUNIT_ASSERT( pVal != NULL );
+ CPPUNIT_ASSERT( pVal != nullptr );
CPPUNIT_CHECK( pVal->nKey == nKey );
CPPUNIT_CHECK( pVal->nVal == nKey );
ep.release();
{
rcu_lock l;
- CPPUNIT_CHECK( s.get_with( other_item(nKey), other_less() ) == NULL );
+ CPPUNIT_CHECK( s.get_with( other_item( nKey ), other_less() ) == nullptr );
CPPUNIT_CHECK( !s.extract_with( ep, other_item(nKey), other_less() ));
CPPUNIT_CHECK( ep.empty() );
}
CPPUNIT_CHECK( check_size( s, 0 ));
{
rcu_lock l;
- CPPUNIT_CHECK( s.get( int(nLimit / 2) ) == NULL );
+ CPPUNIT_CHECK( s.get( int( nLimit / 2 ) ) == nullptr );
CPPUNIT_CHECK( !s.extract( ep, int(nLimit / 2) ));
CPPUNIT_CHECK( ep.empty() );
}
CPPUNIT_ASSERT( gp.empty() );
}
- CPPUNIT_MSG( PrintStat()(s, NULL) );
+ CPPUNIT_MSG( PrintStat()(s, nullptr) );
}
template <class Set, typename PrintStat >
CPPUNIT_ASSERT( s.insert( std::make_pair( i, i * 2) ) != s.end() );
typename Set::value_type * pVal = s.get_min();
- CPPUNIT_ASSERT( pVal != NULL );
+ CPPUNIT_ASSERT( pVal != nullptr );
CPPUNIT_CHECK( pVal->nKey == i );
CPPUNIT_CHECK( pVal->nVal == i * 2 );
}
CPPUNIT_ASSERT( s.empty() );
CPPUNIT_ASSERT( check_size( s, 0 ));
- CPPUNIT_CHECK( s.get_min() == NULL );
- CPPUNIT_CHECK( s.get_max() == NULL );
+ CPPUNIT_CHECK( s.get_min() == nullptr );
+ CPPUNIT_CHECK( s.get_max() == nullptr );
// iterator test
for ( int i = 0; i < 500; ++i ) {
CPPUNIT_ASSERT( s.insert( std::make_pair( i, i * 2) ) != s.end() );
typename Set::value_type * pVal = s.get_max();
- CPPUNIT_ASSERT( pVal != NULL );
+ CPPUNIT_ASSERT( pVal != nullptr );
CPPUNIT_CHECK( pVal->nKey == i );
CPPUNIT_CHECK( pVal->nVal == i * 2 );
}
{
rcu_lock l;
pVal = s.get( nKey );
- CPPUNIT_ASSERT( pVal != NULL );
+ CPPUNIT_ASSERT( pVal != nullptr );
CPPUNIT_CHECK( pVal->nKey == nKey );
CPPUNIT_CHECK( pVal->nVal == nKey * 2 );
}
{
rcu_lock l;
- CPPUNIT_CHECK( s.get( nKey ) == NULL );
+ CPPUNIT_CHECK( s.get( nKey ) == nullptr );
}
CPPUNIT_CHECK( !s.extract( ep, nKey ));
}
{
rcu_lock l;
pVal = s.get_with( wrapped_item(nKey), wrapped_less() );
- CPPUNIT_ASSERT( pVal != NULL );
+ CPPUNIT_ASSERT( pVal != nullptr );
CPPUNIT_CHECK( pVal->nKey == nKey );
CPPUNIT_CHECK( pVal->nVal == nKey );
}
{
rcu_lock l;
- CPPUNIT_CHECK( s.get_with( wrapped_item(nKey), wrapped_less() ) == NULL );
+ CPPUNIT_CHECK( s.get_with( wrapped_item( nKey ), wrapped_less() ) == nullptr );
}
CPPUNIT_CHECK( !s.extract_with( ep, wrapped_item(nKey), wrapped_less() ));
}
CPPUNIT_CHECK( !s.extract_max(ep) );
}
- CPPUNIT_MSG( PrintStat()(s, NULL) );
+ CPPUNIT_MSG( PrintStat()(s, nullptr) );
}
public:
value_type * pv;
pv = stack.pop();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 3 );
CPPUNIT_ASSERT( !stack.empty() );
pv = stack.pop();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 2 );
CPPUNIT_ASSERT( !stack.empty() );
pv = stack.pop();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 1 );
CPPUNIT_ASSERT( stack.empty() );
pv = stack.pop();
- CPPUNIT_ASSERT( pv == NULL );
+ CPPUNIT_ASSERT( pv == nullptr );
CPPUNIT_ASSERT( v1.nDisposeCount == 0 );
CPPUNIT_ASSERT( v2.nDisposeCount == 0 );
value_type * pv;
pv = stack.pop();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 3 );
CPPUNIT_ASSERT( !stack.empty() );
pv = stack.pop();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 2 );
CPPUNIT_ASSERT( !stack.empty() );
pv = stack.pop();
- CPPUNIT_ASSERT( pv != NULL );
+ CPPUNIT_ASSERT( pv != nullptr );
CPPUNIT_ASSERT( pv->nVal == 1 );
CPPUNIT_ASSERT( stack.empty() );
pv = stack.pop();
- CPPUNIT_ASSERT( pv == NULL );
+ CPPUNIT_ASSERT( pv == nullptr );
CPPUNIT_ASSERT( v1.nDisposeCount == 0 );
CPPUNIT_ASSERT( v2.nDisposeCount == 0 );
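The stack tests above exercise a pointer-returning pop(): elements come back in LIFO order, an empty container yields nullptr, and disposal of the unlinked nodes is deferred until the garbage collector runs (hence the nDisposeCount == 0 checks). The same contract can be restated with a deliberately simplified, single-threaded sketch; toy_stack and its members are invented here and are not the library's lock-free implementation, and the disposal aspect is ignored:

    #include <cassert>

    // Toy intrusive node; real code would embed a hook provided by the container.
    struct node {
        int    nVal;
        node * pNext = nullptr;
        explicit node( int v ) : nVal( v ) {}
    };

    class toy_stack {
        node * m_pTop = nullptr;
    public:
        void push( node & n ) { n.pNext = m_pTop; m_pTop = &n; }

        // LIFO pop; returns nullptr when the stack is empty.
        node * pop()
        {
            node * p = m_pTop;
            if ( p )
                m_pTop = p->pNext;
            return p;
        }
        bool empty() const { return m_pTop == nullptr; }
    };

    int main()
    {
        node v1( 1 ), v2( 2 ), v3( 3 );
        toy_stack stack;
        stack.push( v1 ); stack.push( v2 ); stack.push( v3 );

        assert( stack.pop()->nVal == 3 );
        assert( stack.pop()->nVal == 2 );
        assert( stack.pop()->nVal == 1 );
        assert( stack.pop() == nullptr );   // empty stack reports nullptr, as the tests expect
        return 0;
    }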
{
typename map_type::rcu_lock l;
typename map_type::value_type * pVal = m.get(nKey);
- CPPUNIT_ASSERT( pVal != NULL );
+ CPPUNIT_ASSERT( pVal != nullptr );
CPPUNIT_CHECK( pVal->first == nKey);
}
CPPUNIT_ASSERT( m.extract( ep, nKey ));
{
typename map_type::rcu_lock l;
typename map_type::value_type * pVal = m.get_with(wrapped_int(nKey), wrapped_less());
- CPPUNIT_ASSERT( pVal != NULL );
+ CPPUNIT_ASSERT( pVal != nullptr );
CPPUNIT_CHECK( pVal->first == nKey);
}
CPPUNIT_ASSERT( m.extract_with( ep, wrapped_int(nKey), wrapped_less() ));
{
typename set_type::rcu_lock l;
value_type * p = s.get( nKey );
- CPPUNIT_ASSERT( p != NULL );
+ CPPUNIT_ASSERT( p != nullptr );
CPPUNIT_CHECK( p->nKey == nKey );
}
CPPUNIT_ASSERT( s.extract( ep, nKey ));
{
typename set_type::rcu_lock l;
- CPPUNIT_CHECK( s.get( nKey ) == NULL );
+ CPPUNIT_CHECK( s.get( nKey ) == nullptr );
}
CPPUNIT_CHECK( !s.extract( ep, nKey ));
}
{
typename set_type::rcu_lock l;
value_type * p = s.get_with( wrapped_int(nKey), wrapped_less() );
- CPPUNIT_ASSERT( p != NULL );
+ CPPUNIT_ASSERT( p != nullptr );
CPPUNIT_CHECK( p->nKey == nKey );
}
CPPUNIT_ASSERT( s.extract_with( ep, wrapped_int(nKey), wrapped_less() ));
{
typename set_type::rcu_lock l;
- CPPUNIT_CHECK( s.get_with( wrapped_int(nKey), wrapped_less() ) == NULL );
+ CPPUNIT_CHECK( s.get_with( wrapped_int( nKey ), wrapped_less() ) == nullptr );
}
CPPUNIT_CHECK( !s.extract_with( ep, wrapped_int(nKey), wrapped_less() ));
}
{
typename tree_type::rcu_lock l;
pVal = t.get( v1.nKey );
- CPPUNIT_ASSERT( pVal != NULL );
+ CPPUNIT_ASSERT( pVal != nullptr );
CPPUNIT_CHECK( pVal == &v1 );
}
CPPUNIT_ASSERT( t.extract( ep, v1.nKey ));
CPPUNIT_CHECK( ep->nKey == v1.nKey );
{
typename tree_type::rcu_lock l;
- CPPUNIT_CHECK( t.get( v1.nKey ) == NULL );
+ CPPUNIT_CHECK( t.get( v1.nKey ) == nullptr );
}
ep.release();
CPPUNIT_ASSERT( !t.extract( ep, v1.nKey ));
CPPUNIT_CHECK( ep->nKey == v5.nKey );
{
typename tree_type::rcu_lock l;
- CPPUNIT_CHECK( t.get( v5.nKey ) == NULL );
+ CPPUNIT_CHECK( t.get( v5.nKey ) == nullptr );
}
ep.release();
CPPUNIT_ASSERT( !t.extract( ep, v5.nKey ));
CPPUNIT_CHECK( ep->nKey == v3.nKey );
{
typename tree_type::rcu_lock l;
- CPPUNIT_CHECK( t.get( v3.nKey ) == NULL );
+ CPPUNIT_CHECK( t.get( v3.nKey ) == nullptr );
}
ep.release();
{
typename tree_type::rcu_lock l;
pVal = t.get_with( wrapped_int(v2.nKey), wrapped_less<value_type>() );
- CPPUNIT_ASSERT( pVal != NULL );
+ CPPUNIT_ASSERT( pVal != nullptr );
CPPUNIT_CHECK( pVal == &v2 );
}
CPPUNIT_ASSERT( t.extract_with( ep, wrapped_int(v2.nKey), wrapped_less<value_type>() ));
CPPUNIT_CHECK( ep->nKey == v2.nKey );
{
typename tree_type::rcu_lock l;
- CPPUNIT_CHECK( t.get_with( wrapped_int(v2.nKey), wrapped_less<value_type>() ) == NULL );
+ CPPUNIT_CHECK( t.get_with( wrapped_int( v2.nKey ), wrapped_less<value_type>() ) == nullptr );
}
ep.release();
CPPUNIT_CHECK( !t.extract_with( ep, wrapped_int(v2.nKey), wrapped_less<value_type>() ));
{
typename tree_type::rcu_lock l;
- CPPUNIT_CHECK( t.get( v1.nKey ) == NULL );
- CPPUNIT_CHECK( t.get( v2.nKey ) == NULL );
- CPPUNIT_CHECK( t.get( v3.nKey ) == NULL );
- CPPUNIT_CHECK( t.get( v4.nKey ) == NULL );
- CPPUNIT_CHECK( t.get( v5.nKey ) == NULL );
+ CPPUNIT_CHECK( t.get( v1.nKey ) == nullptr );
+ CPPUNIT_CHECK( t.get( v2.nKey ) == nullptr );
+ CPPUNIT_CHECK( t.get( v3.nKey ) == nullptr );
+ CPPUNIT_CHECK( t.get( v4.nKey ) == nullptr );
+ CPPUNIT_CHECK( t.get( v5.nKey ) == nullptr );
}
CPPUNIT_CHECK( !t.extract(ep, v1.nKey));
for ( size_t nPass = 0; nPass < s_nPassPerThread; ++nPass ) {
value_type ** pCell = m_arr;
for ( size_t i = 0; i < s_nBlockCount; ++i, ++pCell ) {
- *pCell = m_Alloc.allocate( nSize, NULL );
- CPPUNIT_ASSERT( *pCell != NULL );
+ *pCell = m_Alloc.allocate( nSize, nullptr );
+ CPPUNIT_ASSERT( *pCell != nullptr );
if ( nSize < 32 )
memset( *pCell, 0, nSize );
for ( size_t nPass = 0; nPass < s_nPassPerThread; ++nPass ) {
size_t nItem = m_rndGen( size_t(1), s_nBlocksPerThread ) - 1;
m_Alloc.deallocate( reinterpret_cast<value_type *>(m_arr[nItem]), 1 );
- m_arr[nItem] = reinterpret_cast<char *>( m_Alloc.allocate( m_rndGen(s_nMinBlockSize, s_nMaxBlockSize), NULL ));
+ m_arr[nItem] = reinterpret_cast<char *>(m_Alloc.allocate( m_rndGen( s_nMinBlockSize, s_nMaxBlockSize ), nullptr ));
CPPUNIT_ASSERT( (reinterpret_cast<cds::uptr_atomic_t>(m_arr[nItem]) & (ALLOC::alignment - 1)) == 0 );
}
}
= m_aThreadData[nThread]
= new char *[ s_nBlocksPerThread ];
for ( size_t i = 0; i < s_nBlocksPerThread; ++i ) {
- thData[i] = reinterpret_cast<char *>( alloc.allocate( rndGen(s_nMinBlockSize, s_nMaxBlockSize), NULL ));
+ thData[i] = reinterpret_cast<char *>(alloc.allocate( rndGen( s_nMinBlockSize, s_nMaxBlockSize ), nullptr ));
CPPUNIT_ASSERT( (reinterpret_cast<cds::uptr_atomic_t>(thData[i]) & (ALLOC::alignment - 1)) == 0 );
}
}
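The allocator stress tests above verify that every returned block respects the allocator's alignment by masking the low address bits of the pointer. The same check, restated in a standalone form with standard types; is_aligned() is an illustrative helper, and the tests themselves use the allocator's own alignment constant rather than alignof(std::max_align_t):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    // A pointer is aligned to `align` (a power of two) exactly when its
    // low-order address bits are zero.
    bool is_aligned( void const * p, std::size_t align )
    {
        return ( reinterpret_cast<std::uintptr_t>( p ) & ( align - 1 )) == 0;
    }

    int main()
    {
        void * p = std::malloc( 64 );
        assert( is_aligned( p, alignof( std::max_align_t )));   // guaranteed by malloc
        std::free( p );
        return 0;
    }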
virtual void test()
{
for ( size_t i = 0; i < s_nPassPerThread; ++i ) {
- typename ALLOC::value_type * p = m_Alloc.allocate( m_nSize / sizeof(typename ALLOC::value_type), NULL );
- CPPUNIT_ASSERT( p != NULL );
+ typename ALLOC::value_type * p = m_Alloc.allocate( m_nSize / sizeof( typename ALLOC::value_type ), nullptr );
+ CPPUNIT_ASSERT( p != nullptr );
if ( m_nSize < 32 )
memset( p, 0, m_nSize );
else {
Item()
: m_access( false )
- , m_pszBlock( NULL )
+ , m_pszBlock( nullptr )
{}
Item& operator =(Item const& i)
if ( item.m_access.tryLock() ) {
if ( item.m_pszBlock ) {
m_Alloc.deallocate( item.m_pszBlock, 1 );
- item.m_pszBlock = NULL;
+ item.m_pszBlock = nullptr;
}
else {
size_t nSize;
- item.m_pszBlock = m_Alloc.allocate( nSize = m_rndGen(s_nMinBlockSize, s_nMaxBlockSize ), NULL );
+ item.m_pszBlock = m_Alloc.allocate( nSize = m_rndGen( s_nMinBlockSize, s_nMaxBlockSize ), nullptr );
if ( nSize < 32 )
memset( item.m_pszBlock, 0, nSize );
for ( size_t i = 0; i < m_Data.size(); ++i ) {
if ( m_Data[i].m_pszBlock ) {
alloc.deallocate( m_Data[i].m_pszBlock, 1 );
- m_Data[i].m_pszBlock = NULL;
+ m_Data[i].m_pszBlock = nullptr;
}
}
}
typedef internal_node_allocator< Other, Alloc > other;
};
- T * allocate( size_t n, void const * pHint = NULL )
+ T * allocate( size_t n, void const * pHint = nullptr )
{
internal_node_counter::onAlloc();
return base_class::allocate( n, pHint );
HANDLE m_hMutex;
public:
- Mutex() { m_hMutex = ::CreateMutex( NULL, false, NULL ) ; }
+ Mutex() { m_hMutex = ::CreateMutex( nullptr, false, nullptr ); }
~Mutex() { ::CloseHandle( m_hMutex ) ; }
void lock() { ::WaitForSingleObject( m_hMutex, INFINITE ); }
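In the Win32 wrapper above, the optional security-attributes and name arguments of CreateMutex are now passed as nullptr as well. A compilable sketch of such a wrapper, assuming a Windows build environment; the unlock() member is not shown in the hunk and is filled in here with the standard ReleaseMutex call:

    #include <windows.h>

    class Mutex {
        HANDLE m_hMutex;
    public:
        // nullptr for LPSECURITY_ATTRIBUTES and for the optional mutex name.
        Mutex()  { m_hMutex = ::CreateMutex( nullptr, FALSE, nullptr ); }
        ~Mutex() { ::CloseHandle( m_hMutex ); }

        void lock()   { ::WaitForSingleObject( m_hMutex, INFINITE ); }
        void unlock() { ::ReleaseMutex( m_hMutex ); }
    };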
typedef typename std_allocator::value_type value_type;
// Allocation function
- pointer allocate( size_type _Count, const void* _Hint = NULL )
+ pointer allocate( size_type _Count, const void* _Hint = nullptr )
{
return reinterpret_cast<pointer>( s_MichaelHeap.alloc( sizeof(T) * _Count ));
}
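Both allocator shims above keep the classic allocate( n, hint ) signature and only change the hint's default argument from NULL to nullptr. A self-contained allocator of the same shape that works with a standard container; demo_allocator is invented for this sketch and is backed by ::operator new rather than the library's Michael heap:

    #include <cstddef>
    #include <new>
    #include <vector>

    template <typename T>
    struct demo_allocator {
        typedef T           value_type;
        typedef T *         pointer;
        typedef std::size_t size_type;

        template <typename U> struct rebind { typedef demo_allocator<U> other; };

        demo_allocator() {}
        template <typename U> demo_allocator( demo_allocator<U> const& ) {}

        // Classic (pre-C++17) allocate signature: the hint defaults to nullptr.
        pointer allocate( size_type n, void const * /*hint*/ = nullptr )
        {
            return static_cast<pointer>( ::operator new( n * sizeof( T )));
        }
        void deallocate( pointer p, size_type )
        {
            ::operator delete( p );
        }
    };

    template <typename T, typename U>
    bool operator==( demo_allocator<T> const&, demo_allocator<U> const& ) { return true; }
    template <typename T, typename U>
    bool operator!=( demo_allocator<T> const&, demo_allocator<U> const& ) { return false; }

    int main()
    {
        std::vector<int, demo_allocator<int> > v;
        v.push_back( 42 );   // the container calls allocate() without an explicit hint
        return 0;
    }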
}
else {
Writer * pWriter = dynamic_cast<Writer *>( *it );
- CPPUNIT_ASSERT( pWriter != NULL );
+ CPPUNIT_ASSERT( pWriter != nullptr );
fTimeWriter += pWriter->m_fTime;
nPushFailed += pWriter->m_nPushFailed;
if ( !boost::is_base_of<cds::bounded_container, Queue>::value ) {
{
lock_guard l( m_Lock );
if ( m_List.empty() )
- return cds::nullptr;
+ return nullptr;
value_type& v = m_List.front();
m_List.pop_front();
return &v;
}
else {
Writer * pWriter = dynamic_cast<Writer *>( *it );
- CPPUNIT_ASSERT( pWriter != NULL );
+ CPPUNIT_ASSERT( pWriter != nullptr );
fTimeWriter += pWriter->m_fTime;
nPushFailed += pWriter->m_nPushFailed;
if ( !boost::is_base_of<cds::bounded_container, Queue>::value ) {
size_t nFindFailed = 0;
for ( CppUnitMini::ThreadPool::iterator it = pool.begin(); it != pool.end(); ++it ) {
work_thread * pThread = static_cast<work_thread *>( *it );
- assert( pThread != NULL );
+ assert( pThread != nullptr );
nInsertSuccess += pThread->m_nInsertSuccess;
nInsertFailed += pThread->m_nInsertFailed;
nDeleteSuccess += pThread->m_nDeleteSuccess;
m_Impl.pop();
return v;
}
- return NULL;
+ return nullptr;
}
bool empty() const