/// Releases elimination record for the current thread
static inline void clear_record()
{
- cds::threading::elimination_record().pOp = null_ptr<operation_desc*>();
+ cds::threading::elimination_record().pOp = nullptr;
}
} // namespace elimination
}} // namespace cds::algo
/// Initialization
record()
- : pOp( null_ptr<operation_desc *>() )
+ : pOp( nullptr )
{}
/// Checks if the record is free
bool is_free() const
{
- return pOp == null_ptr<operation_desc *>();
+ return pOp == nullptr;
}
};
: nRequest( req_EmptyRecord )
, nState( inactive )
, nAge(0)
- , pNext( null_ptr<publication_record *>() )
- , pOwner( null_ptr<void *>() )
+ , pNext( nullptr )
+ , pOwner( nullptr )
{}
/// Returns the value of \p nRequest field
*/
kernel()
: m_nCount(0)
- , m_pHead( null_ptr< publication_record_type *>())
+ , m_pHead( nullptr )
, m_pThreadRec( tls_cleanup )
, m_nCompactFactor( 64 - 1 ) // binary mask
, m_nCombinePassCount( 8 )
,unsigned int nCombinePassCount ///< Number of combining passes for combiner thread
)
: m_nCount(0)
- , m_pHead( null_ptr< publication_record_type *>())
+ , m_pHead( nullptr )
, m_pThreadRec( tls_cleanup )
, m_nCompactFactor( (unsigned int)( cds::beans::ceil2( nCompactFactor ) - 1 )) // binary mask
, m_nCombinePassCount( nCombinePassCount )
{
// mark all publication record as detached
for ( publication_record * p = m_pHead; p; p = p->pNext.load( memory_model::memory_order_relaxed ))
- p->pOwner = null_ptr<void *>();
+ p->pOwner = nullptr;
}
/// Gets publication list record for the current thread
public:
/// Initializes an empty iterator object
iterator()
- : m_pRec( null_ptr<publication_record_type *>())
+ : m_pRec( nullptr )
{}
/// Copy ctor
void init()
{
- assert( m_pThreadRec.get() == null_ptr<publication_record_type *>() );
+ assert( m_pThreadRec.get() == nullptr );
publication_record_type * pRec = cxx11_allocator().New();
m_pHead = pRec;
pRec->pOwner = this;
template <class Container>
bool combining_pass( Container& owner, unsigned int nCurAge )
{
- publication_record * pPrev = null_ptr<publication_record *>();
+ publication_record * pPrev = nullptr;
publication_record * p = m_pHead;
bool bOpDone = false;
while ( p ) {
void compact_list( unsigned int const nCurAge )
{
// Thinning publication list
- publication_record * pPrev = null_ptr<publication_record *>();
+ publication_record * pPrev = nullptr;
for ( publication_record * p = m_pHead; p; ) {
if ( p->nState.load( memory_model::memory_order_acquire ) == active && p->nAge + m_nCompactFactor < nCurAge ) {
if ( pPrev ) {
unsigned char * pMem = base_class::alloc_space( nHeight );
return new( pMem )
node_type( nHeight,
- nHeight > 1 ? reinterpret_cast<typename base_class::node_tower_item *>( pMem + base_class::c_nNodeSize )
- : null_ptr<typename base_class::node_tower_item *>(),
- key, val );
+ nHeight > 1 ? reinterpret_cast<typename base_class::node_tower_item *>( pMem + base_class::c_nNodeSize ) : nullptr,
+ key, val
+ );
}
# ifdef CDS_EMPLACE_SUPPORT
template <typename... Args>
{
unsigned char * pMem = base_class::alloc_space( nHeight );
return new( pMem )
- node_type( nHeight, nHeight > 1 ? reinterpret_cast<typename base_class::node_tower_item *>( pMem + base_class::c_nNodeSize )
- : null_ptr<typename base_class::node_tower_item *>(),
- std::forward<Args>(args)... );
+ node_type( nHeight,
+ nHeight > 1 ? reinterpret_cast<typename base_class::node_tower_item *>( pMem + base_class::c_nNodeSize ) : nullptr,
+ std::forward<Args>(args)...
+ );
}
# endif
};
value_type * get( Q const& key ) const
{
leaf_node * pNode = base_class::get( key );
- return pNode ? &pNode->m_Value : null_ptr<value_type *>();
+ return pNode ? &pNode->m_Value : nullptr;
}
/// Finds \p key with \p pred predicate and return the item found
{
leaf_node * pNode = base_class::get_with( key,
cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >());
- return pNode ? &pNode->m_Value : null_ptr<value_type *>();
+ return pNode ? &pNode->m_Value : nullptr;
}
/// Clears the map
value_type * get( Q const& key ) const
{
leaf_node * pNode = base_class::get( key );
- return pNode ? &pNode->m_Value : null_ptr<value_type *>();
+ return pNode ? &pNode->m_Value : nullptr;
}
/// Finds \p key with \p pred predicate and return the item found
{
leaf_node * pNode = base_class::get_with( key,
cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >());
- return pNode ? &pNode->m_Value : null_ptr<value_type *>();
+ return pNode ? &pNode->m_Value : nullptr;
}
/// Clears the set (non-atomic)
key_type const& key() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- assert( p != null_ptr<typename iterator_base::value_ptr>() );
+ assert( p != nullptr );
return p->m_Data.first;
}
value_ref val() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- assert( p != null_ptr<typename iterator_base::value_ptr>() );
+ assert( p != nullptr );
return p->m_Data.second;
}
pair_ptr operator ->() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- return p ? &(p->m_Data) : null_ptr<pair_ptr>();
+ return p ? &(p->m_Data) : nullptr;
}
pair_ref operator *() const
//@cond
bool insert_node_at( head_type& refHead, node_type * pNode )
{
- assert( pNode != null_ptr<node_type *>() );
+ assert( pNode != nullptr );
scoped_node_ptr p( pNode );
if ( base_class::insert_at( &refHead, *p )) {
node_type * m_pItemFound;
ensure_functor()
- : m_pItemFound( null_ptr<node_type *>() )
+ : m_pItemFound( nullptr )
{}
void operator ()(bool, node_type& item, node_type& )
key_type const& key() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- assert( p != null_ptr<typename iterator_base::value_ptr>() );
+ assert( p != nullptr );
return p->m_Data.first;
}
value_ref val() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- assert( p != null_ptr<typename iterator_base::value_ptr>() );
+ assert( p != nullptr );
return p->m_Data.second;
}
pair_ptr operator ->() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- return p ? &(p->m_Data) : null_ptr<pair_ptr>();
+ return p ? &(p->m_Data) : nullptr;
}
pair_ref operator *() const
//@cond
node_type * insert_node_at( head_type& refHead, node_type * pNode )
{
- assert( pNode != null_ptr<node_type *>() );
+ assert( pNode != nullptr );
scoped_node_ptr p( pNode );
if ( base_class::insert_at( &refHead, *p ))
return p.release();
- return null_ptr<node_type *>();
+ return nullptr;
}
template <typename K>
return pNode.release();
}
- return null_ptr<node_type *>();
+ return nullptr;
}
std::pair< node_type *, bool > ensure_at( head_type& refHead, const K& key )
{
scoped_node_ptr pNode( alloc_node( key ));
- node_type * pItemFound = null_ptr<node_type *>();
+ node_type * pItemFound = nullptr;
# ifdef CDS_CXX11_LAMBDA_SUPPORT
std::pair<bool, bool> ret = base_class::ensure_at( &refHead, *pNode, [&pItemFound](bool, node_type& item, node_type&){ pItemFound = &item; } );
if ( ret.first && ret.second )
pNode.release();
- assert( pItemFound != null_ptr<node_type *>() );
+ assert( pItemFound != nullptr );
return std::make_pair( pItemFound, ret.second );
}
key_type const& key() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- assert( p != null_ptr<typename iterator_base::value_ptr>() );
+ assert( p != nullptr );
return p->m_Data.first;
}
value_ref val() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- assert( p != null_ptr<typename iterator_base::value_ptr>() );
+ assert( p != nullptr );
return p->m_Data.second;
}
pair_ptr operator ->() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- return p ? &(p->m_Data) : null_ptr<pair_ptr>();
+ return p ? &(p->m_Data) : nullptr;
}
pair_ref operator *() const
//@cond
bool insert_node_at( head_type& refHead, node_type * pNode )
{
- assert( pNode != null_ptr<node_type *>() );
+ assert( pNode != nullptr );
scoped_node_ptr p( pNode );
if ( base_class::insert_at( &refHead, *p )) {
value_type * get_at( head_type& refHead, K const& val, Compare cmp ) const
{
node_type * pNode = base_class::get_at( &refHead, val, cmp );
- return pNode ? &pNode->m_Data : null_ptr<value_type *>();
+ return pNode ? &pNode->m_Data : nullptr;
}
//@endcond
value_ptr operator ->() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- return p ? &(p->m_Value) : null_ptr<value_ptr>();
+ return p ? &(p->m_Value) : nullptr;
}
value_ref operator *() const
//@cond
bool insert_node_at( head_type& refHead, node_type * pNode )
{
- assert( pNode != null_ptr<node_type *>() );
+ assert( pNode != nullptr );
scoped_node_ptr p( pNode );
if ( base_class::insert_at( &refHead, *pNode )) {
node_type * m_pItemFound;
ensure_functor()
- : m_pItemFound( null_ptr<node_type *>() )
+ : m_pItemFound( nullptr )
{}
void operator ()(bool, node_type& item, node_type& )
value_ptr operator ->() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- return p ? &(p->m_Value) : null_ptr<value_ptr>();
+ return p ? &(p->m_Value) : nullptr;
}
value_ref operator *() const
//@cond
node_type * insert_node_at( head_type& refHead, node_type * pNode )
{
- assert( pNode != null_ptr<node_type *>() );
+ assert( pNode != nullptr );
scoped_node_ptr p( pNode );
if ( base_class::insert_at( &refHead, *p ))
return p.release();
- return null_ptr<node_type *>();
+ return nullptr;
}
template <typename Q>
std::pair< node_type *, bool > ensure_at( head_type& refHead, Q const& val )
{
scoped_node_ptr pNode( alloc_node( val ));
- node_type * pItemFound = null_ptr<node_type *>();
+ node_type * pItemFound = nullptr;
# ifdef CDS_CXX11_LAMBDA_SUPPORT
std::pair<bool, bool> ret = base_class::ensure_at( &refHead, *pNode,
std::pair<bool, bool> ret = base_class::ensure_at( &refHead, *pNode, boost::ref(func) );
pItemFound = func.m_pItemFound;
# endif
- assert( pItemFound != null_ptr<node_type *>() );
+ assert( pItemFound != nullptr );
if ( ret.first && ret.second )
pNode.release();
value_ptr operator ->() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- return p ? &(p->m_Value) : null_ptr<value_ptr>();
+ return p ? &(p->m_Value) : nullptr;
}
value_ref operator *() const
//@cond
bool insert_node_at( head_type& refHead, node_type * pNode )
{
- assert( pNode != null_ptr<node_type *>() );
+ assert( pNode != nullptr );
scoped_node_ptr p( pNode );
if ( base_class::insert_at( &refHead, *pNode )) {
value_type * get_at( head_type& refHead, Q const& val, Compare cmp ) const
{
node_type * pNode = base_class::get_at( &refHead, val, cmp );
- return pNode ? &pNode->m_Value : null_ptr<value_type *>();
+ return pNode ? &pNode->m_Value : nullptr;
}
//@endcond
bool push_node_back( node_type * pNode )
{
- assert( pNode != null_ptr<node_type *>());
+ assert( pNode != nullptr );
scoped_node_ptr p(pNode);
if ( base_class::push_back( *pNode ) ) {
bool push_node_front( node_type * pNode )
{
- assert( pNode != null_ptr<node_type *>());
+ assert( pNode != nullptr );
scoped_node_ptr p(pNode);
if ( base_class::push_front( *pNode ) ) {
*/
bool pop_back()
{
- return base_class::pop_back() != null_ptr<node_type *>();
+ return base_class::pop_back() != nullptr;
}
/// Pops back side a value using copy functor
*/
bool pop_front()
{
- return base_class::pop_front() != null_ptr<node_type *>();
+ return base_class::pop_front() != nullptr;
}
/// Pops front side a value using copy functor
key_type const& key() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- assert( p != null_ptr<typename iterator_base::value_ptr>() );
+ assert( p != nullptr );
return p->m_Data.first;
}
pair_ptr operator ->() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- return p ? &(p->m_Data) : null_ptr<pair_ptr>();
+ return p ? &(p->m_Data) : nullptr;
}
pair_ref operator *() const
value_ref val() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- assert( p != null_ptr<typename iterator_base::value_ptr>() );
+ assert( p != nullptr );
return p->m_Data.second;
}
//@cond
bool insert_node_at( head_type& refHead, node_type * pNode )
{
- assert( pNode != null_ptr<node_type *>() );
+ assert( pNode != nullptr );
scoped_node_ptr p( pNode );
if ( base_class::insert_at( refHead, *pNode )) {
p.release();
node_type * m_pItemFound;
ensure_functor()
- : m_pItemFound( null_ptr<node_type *>() )
+ : m_pItemFound( nullptr )
{}
void operator ()(bool, node_type& item, node_type& )
key_type const& key() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- assert( p != null_ptr<typename iterator_base::value_ptr>() );
+ assert( p != nullptr );
return p->m_Data.first;
}
value_ref val() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- assert( p != null_ptr<typename iterator_base::value_ptr>() );
+ assert( p != nullptr );
return p->m_Data.second;
}
pair_ptr operator ->() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- return p ? &(p->m_Data) : null_ptr<pair_ptr>();
+ return p ? &(p->m_Data) : nullptr;
}
pair_ref operator *() const
//@cond
node_type * insert_node_at( head_type& refHead, node_type * pNode )
{
- assert( pNode != null_ptr<node_type *>() );
+ assert( pNode != nullptr );
scoped_node_ptr p( pNode );
if ( base_class::insert_at( refHead, *pNode ))
return p.release();
- return null_ptr<node_type *>();
+ return nullptr;
}
template <typename K>
cds::unref(f)( pNode->m_Data );
return pNode.release();
}
- return null_ptr<node_type *>();
+ return nullptr;
}
template <typename K>
std::pair< node_type *, bool > ensure_at( head_type& refHead, const K& key )
{
scoped_node_ptr pNode( alloc_node( key ));
- node_type * pItemFound = null_ptr<node_type *>();
+ node_type * pItemFound = nullptr;
# ifdef CDS_CXX11_LAMBDA_SUPPORT
std::pair<bool, bool> ret = base_class::ensure_at( refHead, *pNode, [&pItemFound](bool, node_type& item, node_type&){ pItemFound = &item; });
std::pair<bool, bool> ret = base_class::ensure_at( refHead, *pNode, boost::ref(func) );
pItemFound = func.m_pItemFound;
# endif
- assert( pItemFound != null_ptr<node_type *>() );
+ assert( pItemFound != nullptr );
if ( ret.first && ret.second )
pNode.release();
key_type const& key() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- assert( p != null_ptr<typename iterator_base::value_ptr>() );
+ assert( p != nullptr );
return p->m_Data.first;
}
pair_ptr operator ->() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- return p ? &(p->m_Data) : null_ptr<pair_ptr>();
+ return p ? &(p->m_Data) : nullptr;
}
pair_ref operator *() const
value_ref val() const
{
typename iterator_base::value_ptr p = iterator_base::operator ->();
- assert( p != null_ptr<typename iterator_base::value_ptr>() );
+ assert( p != nullptr );
return p->m_Data.second;
}
//@cond
bool insert_node_at( head_type& refHead, node_type * pNode )
{
- assert( pNode != null_ptr<node_type *>() );
+ assert( pNode != nullptr );
scoped_node_ptr p( pNode );
if ( base_class::insert_at( refHead, *pNode )) {
p.release();
value_type * get_at( head_type& refHead, K const& val, Compare cmp ) const
{
node_type * pNode = base_class::get_at( refHead, val, cmp );
- return pNode ? &pNode->m_Data : null_ptr<value_type *>();
+ return pNode ? &pNode->m_Data : nullptr;
}
//@endcond
//@cond
node_type * insert_node_at( head_type& refHead, node_type * pNode )
{
- assert( pNode != null_ptr<node_type *>() );
+ assert( pNode != nullptr );
scoped_node_ptr p(pNode);
if ( base_class::insert_at( refHead, *pNode ))
return p.release();
- return null_ptr<node_type *>();
+ return nullptr;
}
template <typename Q>
std::pair< node_type *, bool > ensure_at( head_type& refHead, const Q& val )
{
scoped_node_ptr pNode( alloc_node( val ));
- node_type * pItemFound = null_ptr<node_type *>();
+ node_type * pItemFound = nullptr;
# ifdef CDS_CXX11_LAMBDA_SUPPORT
std::pair<bool, bool> ret = base_class::ensure_at( refHead, *pNode, [&pItemFound](bool, node_type& item, node_type&) { pItemFound = &item; });
std::pair<bool, bool> ret = base_class::ensure_at( refHead, *pNode, boost::ref(func) );
pItemFound = func.m_pItemFound;
# endif
- assert( pItemFound != null_ptr<node_type *>() );
+ assert( pItemFound != nullptr );
if ( ret.first && ret.second )
pNode.release();
value_type * get_at( head_type& refHead, Q const& val, Compare cmp ) const
{
node_type * pNode = base_class::get_at( refHead, val, cmp );
- return pNode ? &pNode->m_Value : null_ptr<value_type *>();
+ return pNode ? &pNode->m_Value : nullptr;
}
//@endcond
/// Dereference operator
pair_ptr operator ->() const
{
- assert( base_class::m_pCurBucket != null_ptr<bucket_ptr>() );
+ assert( base_class::m_pCurBucket != nullptr );
return base_class::m_itList.operator ->();
}
/// Dereference operator
pair_ref operator *() const
{
- assert( base_class::m_pCurBucket != null_ptr<bucket_ptr>() );
+ assert( base_class::m_pCurBucket != nullptr );
return base_class::m_itList.operator *();
}
/// Dereference operator
pair_ptr operator ->() const
{
- assert( base_class::m_pCurBucket != null_ptr<bucket_ptr>() );
+ assert( base_class::m_pCurBucket != nullptr );
return base_class::m_itList.operator ->();
}
/// Dereference operator
pair_ref operator *() const
{
- assert( base_class::m_pCurBucket != null_ptr<bucket_ptr>() );
+ assert( base_class::m_pCurBucket != nullptr );
return base_class::m_itList.operator *();
}
/// Dereference operator
pair_ptr operator ->() const
{
- assert( base_class::m_pCurBucket != null_ptr<bucket_ptr>() );
+ assert( base_class::m_pCurBucket != nullptr );
return base_class::m_itList.operator ->();
}
/// Dereference operator
pair_ref operator *() const
{
- assert( base_class::m_pCurBucket != null_ptr<bucket_ptr>() );
+ assert( base_class::m_pCurBucket != nullptr );
return base_class::m_itList.operator *();
}
value_type m_value ; ///< Value stored in the node
node_type( value_type const& v )
- : m_pNext(null_ptr<node_type *>())
+ : m_pNext( nullptr )
, m_value(v)
{}
node_type()
- : m_pNext( null_ptr<node_type *>() )
+ : m_pNext( nullptr )
{}
# ifdef CDS_EMPLACE_SUPPORT
template <typename... Args>
node_type( Args&&... args )
- : m_pNext(null_ptr<node_type *>())
+ : m_pNext( nullptr )
, m_value( std::forward<Args>(args)...)
{}
# endif
bool enqueue_node( node_type * p )
{
- assert( p != null_ptr<node_type *>());
+ assert( p != nullptr );
{
auto_lock lock( m_TailLock );
m_pTail =
auto_lock lock( m_HeadLock );
pNode = m_pHead;
node_type * pNewHead = pNode->m_pNext;
- if ( pNewHead == null_ptr<node_type *>() )
+ if ( pNewHead == nullptr )
return false;
unref(f)( dest, pNewHead->m_value );
m_pHead = pNewHead;
bool empty() const
{
auto_lock lock( m_HeadLock );
- return m_pHead->m_pNext == null_ptr<node_type *>();
+ return m_pHead->m_pNext == nullptr;
}
/// Clears queue
{
auto_lock lockR( m_HeadLock );
auto_lock lockW( m_TailLock );
- while ( m_pHead->m_pNext != null_ptr<node_type *>() ) {
+ while ( m_pHead->m_pNext != nullptr ) {
node_type * pHead = m_pHead;
m_pHead = m_pHead->m_pNext;
free_node( pHead );
static void free_space( unsigned char * p, unsigned int nHeight )
{
- assert( p != null_ptr<unsigned char *>() );
+ assert( p != nullptr );
if ( nHeight == 1 )
node_allocator_type().deallocate( reinterpret_cast<node_type *>(p), 1 );
else
{
unsigned char * pMem = alloc_space( nHeight );
return new( pMem )
- node_type( nHeight, nHeight > 1 ? reinterpret_cast<node_tower_item *>( pMem + c_nNodeSize ) : null_ptr<node_tower_item *>(), v );
+ node_type( nHeight, nHeight > 1 ? reinterpret_cast<node_tower_item *>(pMem + c_nNodeSize) : nullptr, v );
}
# ifdef CDS_EMPLACE_SUPPORT
{
unsigned char * pMem = alloc_space( nHeight );
return new( pMem )
- node_type( nHeight, nHeight > 1 ? reinterpret_cast<node_tower_item *>( pMem + c_nNodeSize ): null_ptr<node_tower_item *>(),
+ node_type( nHeight, nHeight > 1 ? reinterpret_cast<node_tower_item *>(pMem + c_nNodeSize) : nullptr,
std::forward<Args>(args)... );
}
# endif
void Delete( node_type * p )
{
- assert( p != null_ptr<node_type *>() );
+ assert( p != nullptr );
unsigned int nHeight = p->height();
node_allocator_type().destroy( p );
value_type * to_value_ptr( node_type * pNode ) const CDS_NOEXCEPT
{
- return pNode ? &pNode->m_Value : null_ptr<value_type *>();
+ return pNode ? &pNode->m_Value : nullptr;
}
//@endcond
value_type * get_min() const
{
node_type * pNode = base_class::get_min();
- return pNode ? &pNode->m_Value : null_ptr<value_type *>();
+ return pNode ? &pNode->m_Value : nullptr;
}
/// Gets maximum key from the set
value_type * get_max() const
{
node_type * pNode = base_class::get_max();
- return pNode ? &pNode->m_Value : null_ptr<value_type *>();
+ return pNode ? &pNode->m_Value : nullptr;
}
/// Clears the set (non-atomic)
value_type * to_value_ptr( node_type * pNode ) const CDS_NOEXCEPT
{
- return pNode ? &pNode->m_Value : null_ptr<value_type *>();
+ return pNode ? &pNode->m_Value : nullptr;
}
//@endcond
bool insert_node( node_type * pNode )
{
- assert( pNode != null_ptr<node_type *>() );
+ assert( pNode != nullptr );
scoped_node_ptr p(pNode);
if ( base_class::insert( *pNode ) ) {
//@cond
iterator insert_node( node_type * pNode )
{
- assert( pNode != null_ptr<node_type *>() );
+ assert( pNode != nullptr );
scoped_node_ptr p(pNode);
iterator it( base_class::insert_( *pNode ));
bool insert_node( node_type * pNode )
{
- assert( pNode != null_ptr<node_type *>() );
+ assert( pNode != nullptr );
scoped_node_ptr p(pNode);
if ( base_class::insert( *pNode ) ) {
value_type * get( Q const& val )
{
node_type * pNode = base_class::get( val );
- return pNode ? &pNode->m_Value : null_ptr<value_type *>();
+ return pNode ? &pNode->m_Value : nullptr;
}
/// Finds the key \p val and return the item found
value_type * get_with( Q const& val, Less pred )
{
node_type * pNode = base_class::get_with( val, typename maker::template predicate_wrapper<Less>::type());
- return pNode ? &pNode->m_Value : null_ptr<value_type *>();
+ return pNode ? &pNode->m_Value : nullptr;
}
/// Clears the set (non-atomic)
virtual const char * what( ) const throw() { return _msg; } \
}
-
- //@cond
- // This template function should be replaced with nullptr keyword when all compilers will support it
- template <typename T>
- static inline CDS_CONSTEXPR T null_ptr() CDS_NOEXCEPT
- {
- return reinterpret_cast<T>( NULL );
- }
- //@endcond
-
} // namespace cds
public:
/// Constructs null marked pointer. The flag is cleared.
CDS_CONSTEXPR marked_ptr() CDS_NOEXCEPT
- : m_ptr( null_ptr<pointer_type>() )
+ : m_ptr( nullptr )
{}
/// Constructs marked pointer with \p ptr value. The least bit(s) of \p ptr is the flag.
}
CDS_CONSTEXPR atomic() CDS_NOEXCEPT
- : m_atomic( cds::null_ptr<T *>() )
+ : m_atomic( nullptr )
{}
CDS_CONSTEXPR explicit atomic(marked_ptr val) CDS_NOEXCEPT
assert( m_p != NULL );
m_funcFree( m_p );
- CDS_STRICT_DO( m_p = null_ptr<pointer>() );
- CDS_STRICT_DO( m_funcFree = null_ptr<free_retired_ptr_func>());
+ CDS_STRICT_DO( m_p = nullptr );
+ CDS_STRICT_DO( m_funcFree = nullptr );
}
};
/// Checks if the guarded pointer is \p NULL
bool empty() const CDS_NOEXCEPT
{
- return m_guard.template get<guarded_type>() == null_ptr<guarded_type *>();
+ return m_guard.template get<guarded_type>() == nullptr;
}
/// Clears guarded pointer
bool empty() const CDS_NOEXCEPT
{
- return m_guard.template get<guarded_type>() == null_ptr<guarded_type *>();
+ return m_guard.template get<guarded_type>() == nullptr;
}
void release() CDS_NOEXCEPT
/// Default ctor
retired_node()
- : m_pNode( null_ptr<ContainerNode *>() )
- , m_funcFree( null_ptr<free_retired_ptr_func>() )
+ : m_pNode( nullptr )
+ , m_funcFree( nullptr )
, m_nNextFree(0)
, m_nClaim(0)
, m_bDone( false )
/// Invokes destructor function for the pointer
void free()
{
- assert( m_funcFree != null_ptr<free_retired_ptr_func>() );
+ assert( m_funcFree != nullptr );
m_funcFree( m_pNode.load( CDS_ATOMIC::memory_order_relaxed ));
}
};
size_t nCount = 0;
const size_t nCapacity = capacity();
for ( size_t i = 0; i < nCapacity; ++i ) {
- if ( m_arr[i].m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) != null_ptr<ContainerNode *>() )
+ if ( m_arr[i].m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) != nullptr )
++nCount;
}
return nCount;
assert( !isFull());
size_t n = m_nFreeList;
- assert( m_arr[n].m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) == null_ptr<ContainerNode *>() );
+ assert( m_arr[n].m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr );
m_nFreeList = m_arr[n].m_nNextFree;
CDS_DEBUG_DO( m_arr[n].m_nNextFree = m_nEndFreeList ; )
m_arr[n].set( p, pFunc );
void pop( size_t n )
{
assert( n < capacity() );
- m_arr[n].m_pNode.store( null_ptr<ContainerNode *>(), CDS_ATOMIC::memory_order_release );
+ m_arr[n].m_pNode.store( nullptr, CDS_ATOMIC::memory_order_release );
m_arr[n].m_nNextFree = m_nFreeList;
m_nFreeList = n;
}
//@cond
thread_list_node( const GarbageCollector& HzpMgr )
: thread_descriptor( HzpMgr ),
- m_pNext(null_ptr<thread_list_node *>()),
- m_pOwner( null_ptr<ThreadGC *>() ),
+ m_pNext( nullptr ),
+ m_pOwner( nullptr ),
m_idOwner( cds::OS::nullThreadId() ),
m_bFree( false )
{}
~thread_list_node()
{
- assert( m_pOwner == null_ptr<ThreadGC *>() );
+ assert( m_pOwner == nullptr );
assert( m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) == cds::OS::nullThreadId() );
}
//@endcond
/// Checks if global GC object is constructed and may be used
static bool isUsed()
{
- return m_pGC != null_ptr<GarbageCollector *>();
+ return m_pGC != nullptr;
}
/// Get max count of hazard pointers as defined in @ref Construct call
//@cond
ThreadGC()
: m_gc( GarbageCollector::instance() )
- , m_pDesc( null_ptr<details::thread_descriptor *>() )
+ , m_pDesc( nullptr )
{}
~ThreadGC()
{
//@endcond
/// Checks if thread GC is initialized
- bool isInitialized() const { return m_pDesc != null_ptr<details::thread_descriptor *>() ; }
+ bool isInitialized() const { return m_pDesc != nullptr; }
/// Initialization. Multiple calls is allowed
void init()
cleanUpLocal();
m_gc.Scan( this );
details::thread_descriptor * pRec = m_pDesc;
- m_pDesc = null_ptr<details::thread_descriptor *>();
+ m_pDesc = nullptr;
if ( pRec )
m_gc.retireHRCThreadDesc( pRec );
}
/// Initializes HP guard \p guard
details::HPGuard& allocGuard()
{
- assert( m_pDesc != null_ptr<details::thread_descriptor *>() );
+ assert( m_pDesc != nullptr );
return m_pDesc->m_hzp.alloc();
}
/// Frees HP guard \p guard
void freeGuard( details::HPGuard& guard )
{
- assert( m_pDesc != null_ptr<details::thread_descriptor *>() );
+ assert( m_pDesc != nullptr );
m_pDesc->m_hzp.free( guard );
}
template <size_t Count>
void allocGuard( details::HPArray<Count>& arr )
{
- assert( m_pDesc != null_ptr<details::thread_descriptor *>() );
+ assert( m_pDesc != nullptr );
m_pDesc->m_hzp.alloc( arr );
}
template <size_t Count>
void freeGuard( details::HPArray<Count>& arr )
{
- assert( m_pDesc != null_ptr<details::thread_descriptor *>() );
+ assert( m_pDesc != nullptr );
m_pDesc->m_hzp.free( arr );
}
public:
HPGuardT() CDS_NOEXCEPT
- : base_class( null_ptr<hazard_ptr>() )
+ : base_class( nullptr )
{}
~HPGuardT() CDS_NOEXCEPT
{}
void clear() CDS_NOEXCEPT
{
// memory order is not necessary here
- base_class::store( null_ptr<hazard_ptr>(), CDS_ATOMIC::memory_order_relaxed );
+ base_class::store( nullptr, CDS_ATOMIC::memory_order_relaxed );
//CDS_COMPILER_RW_BARRIER;
}
};
//@cond
guard_data()
- : pPost( null_ptr<guarded_ptr>())
+ : pPost( nullptr )
#if 0
- , pHandOff( null_ptr<handoff_ptr>() )
+ , pHandOff( nullptr )
#endif
- , pGlobalNext( null_ptr<guard_data *>() )
- , pNextFree( null_ptr<guard_data *>() )
- , pThreadNext( null_ptr<guard_data *>() )
+ , pGlobalNext( nullptr )
+ , pNextFree( nullptr )
+ , pThreadNext( nullptr )
{}
void init()
{
- pPost.store( null_ptr<guarded_ptr>(), CDS_ATOMIC::memory_order_relaxed );
+ pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
}
//@endcond
/// Checks if the guard is free, that is, it does not contain any pointer guarded
bool isFree() const
{
- return pPost.load( CDS_ATOMIC::memory_order_acquire ) == null_ptr<guarded_ptr>();
+ return pPost.load( CDS_ATOMIC::memory_order_acquire ) == nullptr;
}
};
public:
// Default ctor
guard_allocator()
- : m_GuardList( null_ptr<guard_data *>() )
- , m_FreeGuardList( null_ptr<guard_data *>() )
+ : m_GuardList( nullptr )
+ , m_FreeGuardList( nullptr )
{}
// Destructor
~guard_allocator()
{
guard_data * pNext;
- for ( guard_data * pData = m_GuardList.load( CDS_ATOMIC::memory_order_relaxed ); pData != null_ptr<guard_data *>(); pData = pNext ) {
+ for ( guard_data * pData = m_GuardList.load( CDS_ATOMIC::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
pNext = pData->pGlobalNext.load( CDS_ATOMIC::memory_order_relaxed );
m_GuardAllocator.Delete( pData );
}
*/
void free( guard_data * pGuard )
{
- pGuard->pPost.store( null_ptr<void *>(), CDS_ATOMIC::memory_order_relaxed );
+ pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
cds::lock::scoped_lock<SpinLock> al( m_freeListLock );
pGuard->pNextFree.store( m_FreeGuardList.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed );
pLast = p;
}
- pLast->pNextFree.store( pLast->pThreadNext = null_ptr<guard_data *>(), CDS_ATOMIC::memory_order_relaxed );
+ pLast->pNextFree.store( pLast->pThreadNext = nullptr, CDS_ATOMIC::memory_order_relaxed );
return pHead;
}
*/
void freeList( guard_data * pList )
{
- assert( pList != null_ptr<guard_data *>() );
+ assert( pList != nullptr );
guard_data * pLast = pList;
while ( pLast->pThreadNext ) {
- pLast->pPost.store( null_ptr<void *>(), CDS_ATOMIC::memory_order_relaxed );
+ pLast->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
guard_data * p;
pLast->pNextFree.store( p = pLast->pThreadNext, CDS_ATOMIC::memory_order_relaxed );
pLast = p;
public:
//@cond
retired_ptr_buffer()
- : m_pHead( null_ptr<retired_ptr_node *>() )
+ : m_pHead( nullptr )
, m_nItemCount(0)
{}
~retired_ptr_buffer()
{
- assert( m_pHead.load(CDS_ATOMIC::memory_order_relaxed) == null_ptr<retired_ptr_node *>());
+ assert( m_pHead.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr );
}
//@endcond
privatize_result privatize()
{
privatize_result res;
- res.first = m_pHead.exchange( null_ptr<retired_ptr_node *>(), CDS_ATOMIC::memory_order_acq_rel );
+ res.first = m_pHead.exchange( nullptr, CDS_ATOMIC::memory_order_acq_rel );
// Item counter is needed only as a threshold for liberate function
// So, we may clear the item counter without synchronization with m_pHead
item * pLastItem = pNew->items + m_nItemPerBlock - 1;
for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
pItem->m_pNextFree = pItem + 1;
- CDS_STRICT_DO( pItem->m_pNext = null_ptr<item *>() );
+ CDS_STRICT_DO( pItem->m_pNext = nullptr );
}
// link new block to block list
public:
//@cond
retired_ptr_pool()
- : m_pBlockListHead(null_ptr<block *>())
+ : m_pBlockListHead( nullptr )
, m_nCurEpoch(0)
- , m_pGlobalFreeHead( null_ptr<item *>())
+ , m_pGlobalFreeHead( nullptr )
{
for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i )
- m_pEpochFree[i].store( null_ptr<item *>(), CDS_ATOMIC::memory_order_relaxed );
+ m_pEpochFree[i].store( nullptr, CDS_ATOMIC::memory_order_relaxed );
allocNewBlock();
}
} while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
success:
- CDS_STRICT_DO( pItem->m_pNextFree = null_ptr<item *>() );
+ CDS_STRICT_DO( pItem->m_pNextFree = nullptr );
return *pItem;
}
*/
void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail )
{
- assert( pHead != null_ptr<retired_ptr_node *>() );
- assert( pTail != null_ptr<retired_ptr_node *>() );
+ assert( pHead != nullptr );
+ assert( pTail != nullptr );
unsigned int nEpoch;
item * pCurHead;
public:
/// Initialize empty guard.
guard()
- : m_pGuard(null_ptr<details::guard_data *>())
+ : m_pGuard( nullptr )
{}
/// Object destructor, does nothing
/// Guards pointer \p p
void set( void * p )
{
- assert( m_pGuard != null_ptr<details::guard_data *>() );
+ assert( m_pGuard != nullptr );
m_pGuard->pPost.store( p, CDS_ATOMIC::memory_order_release );
//CDS_COMPILER_RW_BARRIER;
}
/// Clears the guard
void clear()
{
- assert( m_pGuard != null_ptr<details::guard_data *>() );
- m_pGuard->pPost.store( null_ptr<void *>(), CDS_ATOMIC::memory_order_relaxed );
+ assert( m_pGuard != nullptr );
+ m_pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
CDS_STRICT_DO( CDS_COMPILER_RW_BARRIER );
}
/// Set guard data
void set_guard( details::guard_data * pGuard )
{
- assert( m_pGuard == null_ptr<details::guard_data *>() );
+ assert( m_pGuard == nullptr );
m_pGuard = pGuard;
}
*/
static GarbageCollector& instance()
{
- if ( m_pManager == null_ptr<GarbageCollector *>() )
+ if ( m_pManager == nullptr )
throw PTBManagerEmpty();
return *m_pManager;
}
/// Checks if global GC object is constructed and may be used
static bool isUsed() CDS_NOEXCEPT
{
- return m_pManager != null_ptr<GarbageCollector *>();
+ return m_pManager != nullptr;
}
public:
public:
ThreadGC()
: m_gc( GarbageCollector::instance() )
- , m_pList( null_ptr<details::guard_data *>() )
- , m_pFree( null_ptr<details::guard_data *>() )
+ , m_pList( nullptr )
+ , m_pFree( nullptr )
{}
/// Dtor calls fini()
if ( m_pList ) {
m_gc.freeGuardList( m_pList );
m_pList =
- m_pFree = null_ptr<details::guard_data *>();
+ m_pFree = nullptr;
}
}
/// Initializes guard \p g
void allocGuard( Guard& g )
{
- assert( m_pList != null_ptr<details::guard_data *>() );
+ assert( m_pList != nullptr );
if ( m_pFree ) {
g.m_pGuard = m_pFree;
m_pFree = m_pFree->pNextFree.load(CDS_ATOMIC::memory_order_relaxed);
/// Frees guard \p g
void freeGuard( Guard& g )
{
- assert( m_pList != null_ptr<details::guard_data *>() );
- g.m_pGuard->pPost.store( null_ptr<void *>(), CDS_ATOMIC::memory_order_relaxed );
+ assert( m_pList != nullptr );
+ g.m_pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
g.m_pGuard->pNextFree.store( m_pFree, CDS_ATOMIC::memory_order_relaxed );
m_pFree = g.m_pGuard;
}
template <size_t Count>
void allocGuard( GuardArray<Count>& arr )
{
- assert( m_pList != null_ptr<details::guard_data *>() );
+ assert( m_pList != nullptr );
size_t nCount = 0;
while ( m_pFree && nCount < Count ) {
template <size_t Count>
void freeGuard( GuardArray<Count>& arr )
{
- assert( m_pList != null_ptr<details::guard_data *>() );
+ assert( m_pList != nullptr );
details::guard_data * pGuard;
for ( size_t i = 0; i < Count - 1; ++i ) {
pGuard = arr[i].get_guard();
- pGuard->pPost.store( null_ptr<void *>(), CDS_ATOMIC::memory_order_relaxed );
+ pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
pGuard->pNextFree.store( arr[i+1].get_guard(), CDS_ATOMIC::memory_order_relaxed );
}
pGuard = arr[Count-1].get_guard();
- pGuard->pPost.store( null_ptr<void *>(), CDS_ATOMIC::memory_order_relaxed );
+ pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
pGuard->pNextFree.store( m_pFree, CDS_ATOMIC::memory_order_relaxed );
m_pFree = arr[0].get_guard();
}
atomic_marked_ptr m_pNext ; ///< pointer to the next node in the container
node()
- : m_pNext( null_ptr<node *>() )
+ : m_pNext( nullptr )
{}
};
atomic_marked_ptr m_pNext ; ///< pointer to the next node in the container
node()
- : m_pNext(null_ptr<node *>())
+ : m_pNext( nullptr )
{}
protected:
virtual void cleanUp( cds::gc::hrc::ThreadGC * pGC )
{
- assert( pGC != null_ptr<cds::gc::hrc::ThreadGC *>() );
+ assert( pGC != nullptr );
typename gc::template GuardArray<2> aGuards( *pGC );
while ( true ) {
{
void operator()( value_type * p )
{
- assert( p != null_ptr<value_type *>());
+ assert( p != nullptr );
BasketQueue::clear_links( node_traits::to_node_ptr(p) );
disposer()( p );
static void clear_links( node_type * pNode )
{
- pNode->m_pNext.store( marked_ptr( null_ptr<node_type *>()), memory_model::memory_order_release );
+ pNode->m_pNext.store( marked_ptr( nullptr ), memory_model::memory_order_release );
}
void dispose_node( node_type * p )
public:
/// Initializes empty queue
BasketQueue()
- : m_pHead( null_ptr<node_type *>() )
- , m_pTail( null_ptr<node_type *>() )
+ : m_pHead( nullptr )
+ , m_pTail( nullptr )
, m_nMaxHops( 3 )
{
// GC and node_type::gc must be the same
clear();
node_type * pHead = m_pHead.load(memory_model::memory_order_relaxed).ptr();
- assert( pHead != null_ptr<node_type *>() );
+ assert( pHead != nullptr );
{
node_type * pNext = pHead->m_pNext.load( memory_model::memory_order_relaxed ).ptr();
//m_pTail.store( marked_ptr( pHead ), memory_model::memory_order_relaxed );
}
- m_pHead.store( marked_ptr( null_ptr<node_type *>()), memory_model::memory_order_relaxed );
- m_pTail.store( marked_ptr( null_ptr<node_type *>()), memory_model::memory_order_relaxed );
+ m_pHead.store( marked_ptr( nullptr ), memory_model::memory_order_relaxed );
+ m_pTail.store( marked_ptr( nullptr ), memory_model::memory_order_relaxed );
dispose_node( pHead );
}
marked_ptr pNext = t->m_pNext.load(memory_model::memory_order_acquire );
- if ( pNext.ptr() == null_ptr<node_type *>() ) {
+ if ( pNext.ptr() == nullptr ) {
pNew->m_pNext.store( marked_ptr(), memory_model::memory_order_release );
if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr(pNew), memory_model::memory_order_release, memory_model::memory_order_relaxed ) ) {
if ( !m_pTail.compare_exchange_strong( t, marked_ptr(pNew), memory_model::memory_order_release, memory_model::memory_order_relaxed ))
marked_ptr p;
bool bTailOk = true;
- while ( ( p = pNext->m_pNext.load(memory_model::memory_order_relaxed) ).ptr() != null_ptr<node_type *>() )
+ while ( (p = pNext->m_pNext.load( memory_model::memory_order_relaxed )).ptr() != nullptr )
{
bTailOk = m_pTail.load( memory_model::memory_order_relaxed ) == t;
if ( !bTailOk )
if ( do_dequeue( res, true ))
return node_traits::to_value_ptr( *res.pNext );
- return null_ptr<value_type *>();
+ return nullptr;
}
/// Synonym for \ref cds_intrusive_BasketQueue_enqueue "enqueue" function
node * m_pNext;
CDS_CONSTEXPR node() CDS_NOEXCEPT
- : m_pNext( null_ptr<node *>() )
+ : m_pNext( nullptr )
{}
void store_hash( size_t * )
{
// This node type does not store hash values!!!
assert(false);
- return null_ptr<size_t *>();
+ return nullptr;
}
void clear()
{
- m_pNext = null_ptr<node *>();
+ m_pNext = nullptr;
}
};
size_t m_arrHash[ hash_array_size ];
node() CDS_NOEXCEPT
- : m_pNext( null_ptr<node *>() )
+ : m_pNext( nullptr )
{
memset( m_arrHash, 0, sizeof(m_arrHash));
}
void clear()
{
- m_pNext = null_ptr<node *>();
+ m_pNext = nullptr;
}
};
{
// This node type does not store hash values!!!
assert(false);
- return null_ptr<size_t *>();
+ return nullptr;
}
void clear()
}
}
else {
- std::fill( m_guard, m_guard + c_nArity, null_ptr<lock_type *>() );
+ std::fill( m_guard, m_guard + c_nArity, nullptr );
}
policy.m_Stat.onCellTryLock();
}
public:
iterator()
- : pNode( null_ptr<node_type *>())
+ : pNode( nullptr )
{}
iterator( node_type * p )
: pNode( p )
}
node_type& operator*()
{
- assert( pNode != null_ptr<node_type *>());
+ assert( pNode != nullptr );
return *pNode;
}
public:
bucket_entry()
- : pHead( null_ptr<node_type *>())
+ : pHead( nullptr )
, nSize(0)
{
static_assert(( std::is_same<typename node_type::probeset_type, probeset_type>::value ), "Incompatible node type" );
}
nSize = 0;
- pHead = null_ptr<node_type *>();
+ pHead = nullptr;
}
template <typename Disposer>
}
nSize = 0;
- pHead = null_ptr<node_type *>();
+ pHead = nullptr;
}
unsigned int size() const
public:
iterator()
- : pArr( null_ptr<node_type **>() )
+ : pArr( nullptr )
{}
iterator( node_type ** p )
: pArr(p)
node_type * operator->()
{
- assert( pArr != null_ptr<node_type **>());
+ assert( pArr != nullptr );
return *pArr;
}
node_type& operator*()
{
- assert( pArr != null_ptr<node_type **>());
- assert( *pArr != null_ptr<node_type *>());
+ assert( pArr != nullptr );
+ assert( *pArr != nullptr );
return *(*pArr);
}
bucket_table_allocator alloc;
for ( unsigned int i = 0; i < c_nArity; ++i ) {
alloc.Delete( pTable[i], nCapacity );
- pTable[i] = null_ptr<bucket_entry *>();
+ pTable[i] = nullptr;
}
}
void free_bucket_tables()
}
m_Stat.onEraseFailed();
- return null_ptr<value_type *>();
+ return nullptr;
}
template <typename Q, typename Predicate, typename Func>
struct dummy_node_disposer {
void operator()( node_type * p )
{
- assert( p != null_ptr<node_type *>());
+ assert( p != nullptr );
- p->m_pNext.store( null_ptr<node_type *>(), CDS_ATOMIC::memory_order_release );
+ p->m_pNext.store( nullptr, CDS_ATOMIC::memory_order_release );
allocator_type().Delete( p );
}
};
update_desc * pNextRetire ; // for local retired list (RCU)
update_desc()
- : pNextRetire( null_ptr<update_desc *>() )
+ : pNextRetire( nullptr )
{}
//@endcond
};
/// Default ctor
internal_node()
: base_class( true )
- , m_pLeft( null_ptr<base_class *>() )
- , m_pRight( null_ptr<base_class *>() )
+ , m_pLeft( nullptr )
+ , m_pRight( nullptr )
, m_pUpdate( update_ptr() )
, m_nEmptyUpdate(0)
{}
bool bRightParent ; // true if pParent is right child of pGrandParent, false otherwise
search_result()
- :pGrandParent( null_ptr<internal_node *>() )
- ,pParent( null_ptr<internal_node *>() )
- ,pLeaf( null_ptr<leaf_node *>() )
+ :pGrandParent( nullptr )
+ , pParent( nullptr )
+ , pLeaf( nullptr )
,bRightLeaf( false )
,bRightParent( false )
{}
void unsafe_clear()
{
while ( true ) {
- internal_node * pParent = null_ptr< internal_node *>();
- internal_node * pGrandParent = null_ptr<internal_node *>();
+ internal_node * pParent = nullptr;
+ internal_node * pGrandParent = nullptr;
tree_node * pLeaf = const_cast<internal_node *>( &m_Root );
// Get leftmost leaf
// See whether pParent->m_pUpdate has not been changed
if ( pParent->m_pUpdate.load( memory_model::memory_order_acquire ) != updParent ) {
// update has been changed - return nullptr as a flag to retry the search
- return null_ptr<tree_node *>();
+ return nullptr;
}
if ( p && p->is_leaf() )
bool search( search_result& res, KeyValue const& key, Compare cmp ) const
{
internal_node * pParent;
- internal_node * pGrandParent = null_ptr<internal_node *>();
+ internal_node * pGrandParent = nullptr;
update_ptr updParent;
update_ptr updGrandParent;
bool bRightLeaf;
int nCmp = 0;
retry:
- pParent = null_ptr< internal_node *>();
- //pGrandParent = null_ptr<internal_node *>();
- updParent = null_ptr<update_desc *>();
+ pParent = nullptr;
+ //pGrandParent = nullptr;
+ updParent = nullptr;
bRightLeaf = false;
tree_node * pLeaf = const_cast<internal_node *>( &m_Root );
while ( pLeaf->is_internal() ) {
update_ptr updGrandParent;
retry:
- pParent = null_ptr< internal_node *>();
- pGrandParent = null_ptr<internal_node *>();
- updParent = null_ptr<update_desc *>();
+ pParent = nullptr;
+ pGrandParent = nullptr;
+ updParent = nullptr;
tree_node * pLeaf = const_cast<internal_node *>( &m_Root );
while ( pLeaf->is_internal() ) {
res.guards.copy( search_result::Guard_GrandParent, search_result::Guard_Parent );
bool bRightParent = false;
retry:
- pParent = null_ptr< internal_node *>();
- pGrandParent = null_ptr<internal_node *>();
- updParent = null_ptr<update_desc *>();
+ pParent = nullptr;
+ pGrandParent = nullptr;
+ updParent = nullptr;
bRightLeaf = false;
tree_node * pLeaf = const_cast<internal_node *>( &m_Root );
while ( pLeaf->is_internal() ) {
{
// precondition: all member of res must be guarded
- assert( res.pGrandParent != null_ptr<internal_node *>() );
+ assert( res.pGrandParent != nullptr );
return
static_cast<internal_node *>(
template <typename Q, typename Compare, typename Equal, typename Func>
bool erase_( Q const& val, Compare cmp, Equal eq, Func f )
{
- update_desc * pOp = null_ptr<update_desc *>();
+ update_desc * pOp = nullptr;
search_result res;
for ( ;; ) {
cds::unref(f)( *node_traits::to_value_ptr( res.pLeaf ));
break;
}
- pOp = null_ptr<update_desc *>();
+ pOp = nullptr;
}
}
}
bool extract_max_( typename gc::Guard& guard )
{
- update_desc * pOp = null_ptr<update_desc *>();
+ update_desc * pOp = nullptr;
search_result res;
for ( ;; ) {
{
if ( help_delete( pOp ))
break;
- pOp = null_ptr<update_desc *>();
+ pOp = nullptr;
}
}
}
bool extract_min_( typename gc::Guard& guard )
{
- update_desc * pOp = null_ptr<update_desc *>();
+ update_desc * pOp = nullptr;
search_result res;
for ( ;; ) {
{
if ( help_delete( pOp ))
break;
- pOp = null_ptr<update_desc *>();
+ pOp = nullptr;
}
}
}
/// Constructs leaf (bInternal == false) or internal (bInternal == true) node
explicit base_node( bool bInternal )
: basic_node( bInternal ? internal : 0 )
- , m_pNextRetired( null_ptr<base_node *>() )
+ , m_pNextRetired( nullptr )
{}
};
bool bRightParent ; // true if pParent is right child of pGrandParent, false otherwise
search_result()
- :pGrandParent( null_ptr<internal_node *>() )
- ,pParent( null_ptr<internal_node *>() )
- ,pLeaf( null_ptr<leaf_node *>() )
+ :pGrandParent( nullptr )
+ , pParent( nullptr )
+ , pLeaf( nullptr )
,bRightLeaf( false )
,bRightParent( false )
{}
{}
forward_iterator()
- : m_pUpdate( null_ptr<update_desc *>() )
- , m_pNode( null_ptr< tree_node *>() )
+ : m_pUpdate( nullptr )
+ , m_pNode( nullptr )
{}
cds::urcu::retired_ptr operator *()
reinterpret_cast<cds::urcu::free_retired_ptr_func>( free_internal_node ) );
}
}
- return cds::urcu::retired_ptr( null_ptr<void *>(),
+ return cds::urcu::retired_ptr( nullptr,
reinterpret_cast<cds::urcu::free_retired_ptr_func>( free_update_desc ) );
}
public:
retired_list()
- : pUpdateHead( null_ptr<update_desc *>() )
- , pNodeHead( null_ptr<tree_node *>() )
+ : pUpdateHead( nullptr )
+ , pNodeHead( nullptr )
{}
~retired_list()
rcu_lock l;
while ( true ) {
- internal_node * pParent = null_ptr< internal_node *>();
- internal_node * pGrandParent = null_ptr<internal_node *>();
+ internal_node * pParent = nullptr;
+ internal_node * pGrandParent = nullptr;
tree_node * pLeaf = const_cast<internal_node *>( &m_Root );
// Get leftmost leaf
bool check_delete_precondition( search_result& res )
{
- assert( res.pGrandParent != null_ptr<internal_node *>() );
+ assert( res.pGrandParent != nullptr );
return
static_cast<internal_node *>( res.bRightParent
assert( gc::is_locked() );
internal_node * pParent;
- internal_node * pGrandParent = null_ptr<internal_node *>();
+ internal_node * pGrandParent = nullptr;
tree_node * pLeaf;
update_ptr updParent;
update_ptr updGrandParent;
int nCmp = 0;
retry:
- pParent = null_ptr<internal_node *>();
+ pParent = nullptr;
pLeaf = const_cast<internal_node *>( &m_Root );
- updParent = null_ptr<update_desc *>();
+ updParent = nullptr;
bRightLeaf = false;
while ( pLeaf->is_internal() ) {
pGrandParent = pParent;
assert( gc::is_locked() );
internal_node * pParent;
- internal_node * pGrandParent = null_ptr<internal_node *>();
+ internal_node * pGrandParent = nullptr;
tree_node * pLeaf;
update_ptr updParent;
update_ptr updGrandParent;
retry:
- pParent = null_ptr< internal_node *>();
+ pParent = nullptr;
pLeaf = const_cast<internal_node *>( &m_Root );
while ( pLeaf->is_internal() ) {
pGrandParent = pParent;
assert( gc::is_locked() );
internal_node * pParent;
- internal_node * pGrandParent = null_ptr<internal_node *>();
+ internal_node * pGrandParent = nullptr;
tree_node * pLeaf;
update_ptr updParent;
update_ptr updGrandParent;
bool bRightParent = false;
retry:
- pParent = null_ptr< internal_node *>();
+ pParent = nullptr;
pLeaf = const_cast<internal_node *>( &m_Root );
bRightLeaf = false;
while ( pLeaf->is_internal() ) {
check_deadlock_policy::check();
retired_list updRetire;
- update_desc * pOp = null_ptr<update_desc *>();
+ update_desc * pOp = nullptr;
search_result res;
{
cds::unref(f)( *node_traits::to_value_ptr( res.pLeaf ));
break;
}
- pOp = null_ptr<update_desc *>();
+ pOp = nullptr;
}
else {
// updGP has been changed by CAS
check_deadlock_policy::check();
retired_list updRetire;
- update_desc * pOp = null_ptr<update_desc *>();
+ update_desc * pOp = nullptr;
search_result res;
{
ptr = node_traits::to_value_ptr( res.pLeaf );
break;
}
- pOp = null_ptr<update_desc *>();
+ pOp = nullptr;
}
else {
// updGP has been changed by CAS
check_deadlock_policy::check();
retired_list updRetire;
- update_desc * pOp = null_ptr<update_desc *>();
+ update_desc * pOp = nullptr;
search_result res;
{
result = node_traits::to_value_ptr( res.pLeaf );
break;
}
- pOp = null_ptr<update_desc *>();
+ pOp = nullptr;
}
else {
// updGP has been changed by CAS
check_deadlock_policy::check();
retired_list updRetire;
- update_desc * pOp = null_ptr<update_desc *>();
+ update_desc * pOp = nullptr;
search_result res;
{
result = node_traits::to_value_ptr( res.pLeaf );
break;
}
- pOp = null_ptr<update_desc *>();
+ pOp = nullptr;
}
else {
// updGP has been changed by CAS
}
m_Stat.onFindFailed();
- return null_ptr<value_type *>();
+ return nullptr;
}
value_type * dequeue()
{
fc_record * pRec = m_FlatCombining.acquire_record();
- pRec->pVal = null_ptr<value_type *>();
+ pRec->pVal = nullptr;
if ( c_bEliminationEnabled )
m_FlatCombining.batch_combine( op_deq, pRec, *this );
value_type * pop()
{
fc_record * pRec = m_FlatCombining.acquire_record();
- pRec->pVal = null_ptr<value_type *>();
+ pRec->pVal = nullptr;
if ( c_bEliminationEnabled )
m_FlatCombining.batch_combine( op_pop, pRec, *this );
/// Default ctor
node()
- : m_pNext( null_ptr<node *>())
+ : m_pNext( nullptr )
{}
};
*/
static void is_empty( node_type const * pNode )
{
- assert( pNode->m_pNext.load(CDS_ATOMIC::memory_order_relaxed) == null_ptr<node_type const *>());
+ assert( pNode->m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr );
}
};
}
node()
- : m_pNext( null_ptr<node *>() )
+ : m_pNext( nullptr )
{}
protected:
virtual void cleanUp( cds::gc::hrc::ThreadGC * pGC )
{
- assert( pGC != null_ptr<cds::gc::hrc::ThreadGC *>() );
+ assert( pGC != nullptr );
typename gc::GuardArray<2> aGuards( *pGC );
while ( true ) {
marked_ptr pNextMarked( aGuards.protect( 0, m_pNext ));
node * pNext = pNextMarked.ptr();
- if ( pNext != null_ptr<node *>() && pNext->m_bDeleted.load(CDS_ATOMIC::memory_order_acquire) ) {
+ if ( pNext != nullptr && pNext->m_bDeleted.load( CDS_ATOMIC::memory_order_acquire ) ) {
marked_ptr p = aGuards.protect( 1, pNext->m_pNext );
m_pNext.compare_exchange_weak( pNextMarked, p, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed );
continue;
void retire_node( node_type * pNode )
{
- assert( pNode != null_ptr<node_type *>() );
+ assert( pNode != nullptr );
gc::template retire<clean_disposer>( node_traits::to_value_ptr( *pNode ) );
}
//@endcond
void next()
{
- assert( m_pNode != null_ptr<value_type *>() );
+ assert( m_pNode != nullptr );
if ( m_pNode ) {
typename gc::Guard g;
node_type * pCur = node_traits::to_node_ptr( m_pNode );
- if ( pCur->m_pNext.load(memory_model::memory_order_relaxed).ptr() != null_ptr<node_type *>() ) { // if pCur is not tail node
+ if ( pCur->m_pNext.load( memory_model::memory_order_relaxed ).ptr() != nullptr ) { // if pCur is not tail node
node_type * pNext;
do {
pNext = pCur->m_pNext.load(memory_model::memory_order_relaxed).ptr();
void skip_deleted()
{
- if ( m_pNode != null_ptr<value_type *>() ) {
+ if ( m_pNode != nullptr ) {
typename gc::Guard g;
node_type * pNode = node_traits::to_node_ptr( m_pNode );
typedef typename cds::details::make_const_type<value_type, IsConst>::reference value_ref;
iterator_type()
- : m_pNode(null_ptr<value_type *>())
+ : m_pNode( nullptr )
{}
iterator_type( iterator_type const& src )
m_pNode = m_Guard.assign( src.m_pNode );
}
else
- m_pNode = null_ptr<value_type *>();
+ m_pNode = nullptr;
}
value_ptr operator ->() const
value_ref operator *() const
{
- assert( m_pNode != null_ptr<value_type *>() );
+ assert( m_pNode != nullptr );
return *m_pNode;
}
// split-list support
bool insert_aux_node( node_type * pHead, node_type * pNode )
{
- assert( pNode != null_ptr<node_type *>() );
+ assert( pNode != nullptr );
// Hack: convert node_type to value_type.
// In principle, auxiliary node can be non-reducible to value_type
break;
bkoff();
}
- assert( pCur.ptr() != null_ptr<node_type *>() );
+ assert( pCur.ptr() != nullptr );
}
pos.pCur = pCur.ptr();
mutable lock_type m_Lock ; ///< Node lock
node()
- : m_pNext( null_ptr<node *>())
+ : m_pNext( nullptr )
{}
};
} // namespace lazy_list
//@cond
void clear_links( node_type * pNode )
{
- pNode->m_pNext.store( null_ptr<node_type *>(), memory_model::memory_order_relaxed );
+ pNode->m_pNext.store( nullptr, memory_model::memory_order_relaxed );
}
template <class Disposer>
void next()
{
- assert( m_pNode != null_ptr<value_type *>() );
+ assert( m_pNode != nullptr );
node_type * pNode = node_traits::to_node_ptr( m_pNode );
node_type * pNext = pNode->m_pNext.load(memory_model::memory_order_relaxed);
- if ( pNext != null_ptr<node_type *>() )
+ if ( pNext != nullptr )
m_pNode = node_traits::to_value_ptr( pNext );
}
typedef typename cds::details::make_const_type<value_type, IsConst>::reference value_ref;
iterator_type()
- : m_pNode(null_ptr<value_type *>())
+ : m_pNode( nullptr )
{}
iterator_type( const iterator_type& src )
value_ref operator *() const
{
- assert( m_pNode != null_ptr<value_type *>() );
+ assert( m_pNode != nullptr );
return *m_pNode;
}
clear();
assert( m_Head.m_pNext.load(memory_model::memory_order_relaxed) == &m_Tail );
- m_Head.m_pNext.store( null_ptr<node_type *>(), memory_model::memory_order_relaxed );
+ m_Head.m_pNext.store( nullptr, memory_model::memory_order_relaxed );
}
/// Inserts new node
// split-list support
bool insert_aux_node( node_type * pHead, node_type * pNode )
{
- assert( pHead != null_ptr<node_type *>() );
- assert( pNode != null_ptr<node_type *>() );
+ assert( pHead != nullptr );
+ assert( pNode != nullptr );
// Hack: convert node_type to value_type.
// In principle, auxiliary node can be non-reducible to value_type
iterator it = find_at_( pHead, val, cmp );
if ( it != end() )
return &*it;
- return null_ptr<value_type *>();
+ return nullptr;
}
template <typename Q, typename Compare>
/// Default ctor
node()
- : m_pNext( null_ptr<node *>())
+ : m_pNext( nullptr )
{}
/// Clears internal fields
value_type * pFound;
get_functor()
- : pFound(null_ptr<value_type *>())
+ : pFound( nullptr )
{}
template <typename Q>
struct clear_and_dispose {
void operator()( value_type * p )
{
- assert( p != null_ptr<value_type *>() );
+ assert( p != nullptr );
clear_links( node_traits::to_node_ptr(p));
disposer()( p );
}
void next()
{
- assert( m_pNode != null_ptr<value_type *>() );
+ assert( m_pNode != nullptr );
node_type * pNode = node_traits::to_node_ptr( m_pNode );
node_type * pNext = pNode->m_pNext.load(memory_model::memory_order_relaxed).ptr();
- if ( pNext != null_ptr<node_type *>() )
+ if ( pNext != nullptr )
m_pNode = node_traits::to_value_ptr( pNext );
}
void skip_deleted()
{
- if ( m_pNode != null_ptr<value_type *>() ) {
+ if ( m_pNode != nullptr ) {
node_type * pNode = node_traits::to_node_ptr( m_pNode );
// Dummy tail node could not be marked
typedef typename cds::details::make_const_type<value_type, IsConst>::reference value_ref;
iterator_type()
- : m_pNode(null_ptr<value_type *>())
+ : m_pNode( nullptr )
{}
iterator_type( iterator_type const& src )
value_ref operator *() const
{
- assert( m_pNode != null_ptr<value_type *>() );
+ assert( m_pNode != nullptr );
return *m_pNode;
}
// split-list support
bool insert_aux_node( node_type * pHead, node_type * pNode )
{
- assert( pHead != null_ptr<node_type *>() );
- assert( pNode != null_ptr<node_type *>() );
+ assert( pHead != nullptr );
+ assert( pNode != nullptr );
// Hack: convert node_type to value_type.
// In principle, auxiliary node can be non-reducible to value_type
if ( nResult ) {
if ( nResult > 0 )
return node_traits::to_value_ptr( pos.pCur );
- return null_ptr<value_type *>();
+ return nullptr;
}
}
}
value_type * get_at( node_type * pHead, Q const& val, Compare cmp ) const
{
# ifdef CDS_CXX11_LAMBDA_SUPPORT
- value_type * pFound = null_ptr<value_type *>();
+ value_type * pFound = nullptr;
return find_at( pHead, val, cmp, [&pFound](value_type& found, Q const& ) { pFound = &found; } )
- ? pFound : null_ptr<value_type *>();
+ ? pFound : nullptr;
# else
get_functor gf;
- return find_at( pHead , val, cmp, cds::ref(gf) ) ? gf.pFound : null_ptr<value_type *>();
+ return find_at( pHead, val, cmp, cds::ref( gf ) ) ? gf.pFound : nullptr;
# endif
}
{
void operator()( value_type * p )
{
- assert( p != null_ptr<value_type *>());
+ assert( p != nullptr );
MichaelDeque::clear_links( node_traits::to_node_ptr(p) );
disposer()( p );
node_type * pNode;
at_functor()
- : pNode( null_ptr<node_type *>())
+ : pNode( nullptr )
{}
void operator()( value_type& v, unsigned int nIdx )
# if defined(CDS_CXX11_LAMBDA_SUPPORT) && !((CDS_COMPILER == CDS_COMPILER_MSVC ||CDS_COMPILER == CDS_COMPILER_INTEL) && _MSC_VER < 1700)
// MS VC++2010 bug: error C2955: 'cds::intrusive::node_traits' : use of class template requires template argument list
// see declaration of 'cds::intrusive::node_traits'
- node_type * pNode = null_ptr<node_type *>();
+ node_type * pNode = nullptr;
if ( m_set.find( nIdx,
[&pNode](value_type& v, unsigned int nIdx) {
pNode = node_traits::to_node_ptr(v);
if ( m_set.find( nIdx, cds::ref(f) ))
return f.pNode;
# endif
- return null_ptr<node_type *>();
+ return nullptr;
}
};
//@endcond
return res.pPopped;
}
- return null_ptr<value_type *>();
+ return nullptr;
}
/// Pop front
return res.pPopped;
}
- return null_ptr<value_type *>();
+ return nullptr;
}
/// Returns deque's item count
*/
void clear()
{
- while ( pop_back() != null_ptr<value_type *>() );
+ while ( pop_back() != nullptr );
}
/// Returns reference to internal statistics
atomic_marked_ptr m_pNext ; ///< pointer to the next node in the container
CDS_CONSTEXPR node() CDS_NOEXCEPT
- : m_pNext( null_ptr<node *>() )
+ : m_pNext( nullptr )
{}
};
*/
static void is_empty( const node_type * pNode )
{
- assert( pNode->m_pNext.load(CDS_ATOMIC::memory_order_relaxed) == null_ptr<node_type *>() );
+ assert( pNode->m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr );
}
};
atomic_marked_ptr m_pNext ; ///< pointer to the next node in the stack
node()
- : m_pNext( null_ptr<node *>() )
+ : m_pNext( nullptr )
{}
protected:
//@cond
void retire_node( node_type * pNode )
{
- assert( pNode != null_ptr<node_type *>() );
+ assert( pNode != nullptr );
gc::template retire<clean_disposer>( node_traits::to_value_ptr( *pNode ) );
}
bool link_node( node_type * pNode, position& pos )
{
- assert( pNode != null_ptr<node_type *>() );
+ assert( pNode != nullptr );
link_checker::is_empty( pNode );
marked_node_ptr cur(pos.pCur);
bool unlink_node( position& pos )
{
- assert( pos.pPrev != null_ptr<atomic_node_ptr *>() );
- assert( pos.pCur != null_ptr<node_type *>() );
+ assert( pos.pPrev != nullptr );
+ assert( pos.pCur != nullptr );
// Mark the node (logical deleting)
marked_node_ptr next(pos.pNext, 0);
m_pNode = m_Guard.assign( g.template get<value_type>() );
}
else {
- m_pNode = null_ptr<value_type *>();
+ m_pNode = nullptr;
m_Guard.clear();
}
}
m_pNode = m_Guard.assign( node_traits::to_value_ptr( p.ptr() ) );
}
else {
- m_pNode = null_ptr<value_type *>();
+ m_pNode = nullptr;
m_Guard.clear();
}
if ( p == pNode.load(memory_model::memory_order_acquire) )
typedef typename cds::details::make_const_type<value_type, IsConst>::reference value_ref;
iterator_type()
- : m_pNode( null_ptr<value_type *>() )
+ : m_pNode( nullptr )
{}
iterator_type( iterator_type const& src )
m_pNode = m_Guard.assign( src.m_pNode );
}
else
- m_pNode = null_ptr<value_type *>();
+ m_pNode = nullptr;
}
value_ptr operator ->() const
value_ref operator *() const
{
- assert( m_pNode != null_ptr<value_type *>() );
+ assert( m_pNode != nullptr );
return *m_pNode;
}
public:
/// Default constructor initializes empty list
MichaelList()
- : m_pHead(null_ptr<node_type *>())
+ : m_pHead( nullptr )
{
static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
}
if ( head.ptr() )
guard.assign( node_traits::to_value_ptr( *head.ptr() ));
if ( m_pHead.load(memory_model::memory_order_acquire) == head ) {
- if ( head.ptr() == null_ptr<node_type *>() )
+ if ( head.ptr() == nullptr )
break;
value_type& val = *node_traits::to_value_ptr( *head.ptr() );
unlink( val );
/// Checks if the list is empty
bool empty() const
{
- return m_pHead.load(memory_model::memory_order_relaxed).all() == null_ptr<node_type *>();
+ return m_pHead.load( memory_model::memory_order_relaxed ).all() == nullptr;
}
/// Returns list's item count
// split-list support
bool insert_aux_node( atomic_node_ptr& refHead, node_type * pNode )
{
- assert( pNode != null_ptr<node_type *>() );
+ assert( pNode != nullptr );
// Hack: convert node_type to value_type.
// In principle, auxiliary node can be non-reducible to value_type
try_again:
pPrev = &refHead;
- pNext = null_ptr<node_type *>();
+ pNext = nullptr;
pCur = pPrev->load(memory_model::memory_order_relaxed);
pos.guards.assign( position::guard_current_item, node_traits::to_value_ptr( pCur.ptr() ) );
goto try_again;
while ( true ) {
- if ( pCur.ptr() == null_ptr<node_type *>() ) {
+ if ( pCur.ptr() == nullptr ) {
pos.pPrev = pPrev;
pos.pCur = pCur.ptr();
pos.pNext = pNext.ptr();
}
}
else {
- assert( pCur.ptr() != null_ptr<node_type *>() );
+ assert( pCur.ptr() != nullptr );
int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
if ( nCmp >= 0 ) {
pos.pPrev = pPrev;
atomic_ptr m_pNext ; ///< pointer to the next node in the container
node()
- : m_pNext( null_ptr<node *>())
+ : m_pNext( nullptr )
{}
};
//@cond
void clear_links( node_type * pNode )
{
- pNode->m_pNext.store( null_ptr<node_type *>(), memory_model::memory_order_release );
+ pNode->m_pNext.store( nullptr, memory_model::memory_order_release );
}
template <class Disposer>
bool link_node( node_type * pNode, position& pos )
{
- assert( pNode != null_ptr<node_type *>() );
+ assert( pNode != nullptr );
link_checker::is_empty( pNode );
pNode->m_pNext.store( pos.pCur, memory_model::memory_order_relaxed );
if ( pNode )
m_pNode = node_traits::to_value_ptr( *pNode );
else
- m_pNode = null_ptr<value_type *>();
+ m_pNode = nullptr;
}
}
if ( pNode )
m_pNode = node_traits::to_value_ptr( *pNode );
else
- m_pNode = null_ptr<value_type *>();
+ m_pNode = nullptr;
}
explicit iterator_type( atomic_node_ptr const& refNode)
{
if ( pNode )
m_pNode = node_traits::to_value_ptr( *pNode );
else
- m_pNode = null_ptr<value_type *>();
+ m_pNode = nullptr;
}
public:
typedef typename cds::details::make_const_type<value_type, IS_CONST>::reference value_ref;
iterator_type()
- : m_pNode(null_ptr<value_type *>())
+ : m_pNode( nullptr )
{}
iterator_type( const iterator_type& src )
value_ref operator *() const
{
- assert( m_pNode != null_ptr<value_type *>() );
+ assert( m_pNode != nullptr );
return *m_pNode;
}
public:
/// Default constructor initializes empty list
MichaelList()
- : m_pHead( null_ptr<node_type *>())
+ : m_pHead( nullptr )
{
static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
}
void clear( Disposer disp )
{
node_type * pHead = m_pHead.load(memory_model::memory_order_relaxed);
- do {} while ( !m_pHead.compare_exchange_weak( pHead, null_ptr<node_type *>(), memory_model::memory_order_relaxed ));
+ do {} while ( !m_pHead.compare_exchange_weak( pHead, nullptr, memory_model::memory_order_relaxed ) );
while ( pHead ) {
node_type * p = pHead->m_pNext.load(memory_model::memory_order_relaxed);
/// Checks if the list is empty
bool empty() const
{
- return m_pHead.load(memory_model::memory_order_relaxed) == null_ptr<node_type *>();
+ return m_pHead.load( memory_model::memory_order_relaxed ) == nullptr;
}
/// Returns list's item count
// split-list support
bool insert_aux_node( atomic_node_ptr& refHead, node_type * pNode )
{
- assert( pNode != null_ptr<node_type *>() );
+ assert( pNode != nullptr );
// Hack: convert node_type to value_type.
// In principle, auxiliary node can be non-reducible to value_type
position pos;
if ( search( refHead, val, cmp, pos ) ) {
- assert( pos.pCur != null_ptr<node_type *>() );
+ assert( pos.pCur != nullptr );
unref(f)( *node_traits::to_value_ptr( *pos.pCur ), val );
return true;
}
iterator it = find_at_( refHead, val, cmp );
if ( it != end() )
return &*it;
- return null_ptr<value_type *>();
+ return nullptr;
}
template <typename Q, typename Compare>
position pos;
if ( search( refHead, val, cmp, pos ) ) {
- assert( pos.pCur != null_ptr<node_type *>() );
+ assert( pos.pCur != nullptr );
return iterator( pos.pCur );
}
return end();
try_again:
pPrev = &refHead;
pCur = pPrev->load(memory_model::memory_order_acquire);
- pNext = null_ptr<node_type *>();
+ pNext = nullptr;
while ( true ) {
if ( !pCur ) {
goto try_again;
}
- assert( pCur != null_ptr<node_type *>() );
+ assert( pCur != nullptr );
int nCmp = cmp( *node_traits::to_value_ptr( *pCur ), val );
if ( nCmp >= 0 ) {
pos.pPrev = pPrev;
value_type * pFound;
get_functor()
- : pFound(null_ptr<value_type *>())
+ : pFound( nullptr )
{}
template <typename Q>
struct clear_and_dispose {
void operator()( value_type * p )
{
- assert( p != null_ptr<value_type *>() );
+ assert( p != nullptr );
clear_links( node_traits::to_node_ptr(p));
disposer()( p );
}
bool link_node( node_type * pNode, position& pos )
{
- assert( pNode != null_ptr<node_type *>() );
+ assert( pNode != nullptr );
link_checker::is_empty( pNode );
marked_node_ptr p( pos.pCur );
{
if ( m_pNode ) {
node_type * p = node_traits::to_node_ptr( *m_pNode )->m_pNext.load(memory_model::memory_order_relaxed).ptr();
- m_pNode = p ? node_traits::to_value_ptr(p) : null_ptr<value_type *>();
+ m_pNode = p ? node_traits::to_value_ptr( p ) : nullptr;
}
}
if ( pNode )
m_pNode = node_traits::to_value_ptr( *pNode );
else
- m_pNode = null_ptr<value_type *>();
+ m_pNode = nullptr;
}
explicit iterator_type( atomic_node_ptr const& refNode)
{
node_type * pNode = refNode.load(memory_model::memory_order_relaxed).ptr();
- m_pNode = pNode ? node_traits::to_value_ptr( *pNode ) : null_ptr<value_type *>();
+ m_pNode = pNode ? node_traits::to_value_ptr( *pNode ) : nullptr;
}
public:
typedef typename cds::details::make_const_type<value_type, IsConst>::reference value_ref;
iterator_type()
- : m_pNode(null_ptr<value_type *>())
+ : m_pNode( nullptr )
{}
iterator_type( const iterator_type& src )
value_ref operator *() const
{
- assert( m_pNode != null_ptr<value_type *>() );
+ assert( m_pNode != nullptr );
return *m_pNode;
}
public:
/// Default constructor initializes empty list
MichaelList()
- : m_pHead( null_ptr<node_type *>())
+ : m_pHead( nullptr )
{
static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
}
/// Check if the list is empty
bool empty() const
{
- return m_pHead.load(memory_model::memory_order_relaxed).all() == null_ptr<node_type *>();
+ return m_pHead.load( memory_model::memory_order_relaxed ).all() == nullptr;
}
/// Returns list's item count
// split-list support
bool insert_aux_node( atomic_node_ptr& refHead, node_type * pNode )
{
- assert( pNode != null_ptr<node_type *>() );
+ assert( pNode != nullptr );
// Hack: convert node_type to value_type.
// In principle, auxiliary node can be non-reducible to value_type
for (;;) {
if ( !search( refHead, val, pos, cmp ) )
- return null_ptr<value_type *>();
+ return nullptr;
if ( !unlink_node( pos )) {
bkoff();
continue;
rcu_lock l( bLock );
if ( search( refHead, val, pos, cmp ) ) {
- assert( pos.pCur != null_ptr<node_type *>() );
+ assert( pos.pCur != nullptr );
unref(f)( *node_traits::to_value_ptr( *pos.pCur ), val );
return true;
}
value_type * get_at( atomic_node_ptr& refHead, Q const& val, Compare cmp ) const
{
# ifdef CDS_CXX11_LAMBDA_SUPPORT
- value_type * pFound = null_ptr<value_type *>();
+ value_type * pFound = nullptr;
return find_at( refHead, val, cmp,
[&pFound](value_type& found, Q const& ) { pFound = &found; } )
- ? pFound : null_ptr<value_type *>();
+ ? pFound : nullptr;
# else
get_functor gf;
return find_at( refHead, val, cmp, cds::ref(gf) )
- ? gf.pFound : null_ptr<value_type *>();
+ ? gf.pFound : nullptr;
# endif
}
position pos;
if ( search( refHead, val, pos, cmp ) ) {
- assert( pos.pCur != null_ptr<node_type *>() );
+ assert( pos.pCur != nullptr );
return const_iterator( pos.pCur );
}
return end();
try_again:
pPrev = &refHead;
pCur = pPrev->load(memory_model::memory_order_acquire);
- pNext = null_ptr<node_type *>();
+ pNext = nullptr;
while ( true ) {
if ( !pCur.ptr() ) {
goto try_again;
}
- assert( pCur.ptr() != null_ptr<node_type *>() );
+ assert( pCur.ptr() != nullptr );
int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
if ( nCmp >= 0 ) {
pos.pPrev = pPrev;
public:
iterator()
- : m_pCurBucket( null_ptr<bucket_ptr>() )
+ : m_pCurBucket( nullptr )
, m_itList()
- , m_pEndBucket( null_ptr<bucket_ptr>() )
+ , m_pEndBucket( nullptr )
{}
iterator( list_iterator const& it, bucket_ptr pFirst, bucket_ptr pLast )
value_ptr operator ->() const
{
- assert( m_pCurBucket != null_ptr<bucket_ptr>() );
+ assert( m_pCurBucket != nullptr );
return m_itList.operator ->();
}
value_ref operator *() const
{
- assert( m_pCurBucket != null_ptr<bucket_ptr>() );
+ assert( m_pCurBucket != nullptr );
return m_itList.operator *();
}
bucket_ptr bucket() const
{
- return m_pCurBucket != m_pEndBucket ? m_pCurBucket : null_ptr<bucket_ptr>();
+ return m_pCurBucket != m_pEndBucket ? m_pCurBucket : nullptr;
}
template <bool C>
h = res.guards.protect( 0, base_class::m_pHead, node_to_value() );
pNext = res.guards.protect( 1, h->m_pNext, node_to_value() );
- if ( pNext == null_ptr<node_type *>() )
+ if ( pNext == nullptr )
return false ; // queue is empty
if ( base_class::m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
base_class::dispose_result( res );
return node_traits::to_value_ptr( *res.pNext );
}
- return null_ptr<value_type *>();
+ return nullptr;
}
/// Synonym for \ref cds_intrusive_MoirQueue_dequeue "dequeue" function
/// Creates empty node
node()
- : m_pVal( null_ptr<value_type *>() )
+ : m_pVal( nullptr )
, m_nTag( tag_type(Empty) )
{}
m_Lock.unlock();
refBottom.m_nTag = tag_type(Empty);
value_type * pVal = refBottom.m_pVal;
- refBottom.m_pVal = null_ptr<value_type *>();
+ refBottom.m_pVal = nullptr;
refBottom.unlock();
node& refTop = m_Heap[ 1 ];
{
void operator()( value_type * p )
{
- assert( p != null_ptr<value_type *>());
+ assert( p != nullptr );
MSQueue::clear_links( node_traits::to_node_ptr(p) );
disposer()( p );
if ( m_pHead.load(memory_model::memory_order_acquire) != h )
continue;
- if ( pNext == null_ptr<node_type *>() )
+ if ( pNext == nullptr )
return false ; // empty queue
node_type * t = m_pTail.load(memory_model::memory_order_acquire);
static void clear_links( node_type * pNode )
{
- pNode->m_pNext.store( null_ptr<node_type *>(), memory_model::memory_order_release );
+ pNode->m_pNext.store( nullptr, memory_model::memory_order_release );
}
void dispose_result( dequeue_result& res )
public:
/// Initializes empty queue
MSQueue()
- : m_pHead( null_ptr<node_type *>() )
- , m_pTail( null_ptr<node_type *>() )
+ : m_pHead( nullptr )
+ , m_pTail( nullptr )
{
// GC and node_type::gc must be the same
static_assert(( std::is_same<gc, typename node_type::gc>::value ), "GC and node_type::gc must be the same");
node_type * pHead = m_pHead.load(memory_model::memory_order_relaxed);
- assert( pHead != null_ptr<node_type *>() );
+ assert( pHead != nullptr );
assert( pHead == m_pTail.load(memory_model::memory_order_relaxed) );
- m_pHead.store( null_ptr<node_type *>(), memory_model::memory_order_relaxed );
- m_pTail.store( null_ptr<node_type *>(), memory_model::memory_order_relaxed );
+ m_pHead.store( nullptr, memory_model::memory_order_relaxed );
+ m_pTail.store( nullptr, memory_model::memory_order_relaxed );
dispose_node( pHead );
}
t = guard.protect( m_pTail, node_to_value() );
node_type * pNext = t->m_pNext.load(memory_model::memory_order_acquire);
- if ( pNext != null_ptr<node_type *>() ) {
+ if ( pNext != nullptr ) {
// Tail is misplaced, advance it
m_pTail.compare_exchange_weak( t, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
m_Stat.onBadTail();
continue;
}
- node_type * tmp = null_ptr<node_type *>();
+ node_type * tmp = nullptr;
if ( t->m_pNext.compare_exchange_strong( tmp, pNew, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
break;
return node_traits::to_value_ptr( *res.pNext );
}
- return null_ptr<value_type *>();
+ return nullptr;
}
/// Synonym for \ref cds_intrusive_MSQueue_enqueue "enqueue" function
bool empty() const
{
typename gc::Guard guard;
- return guard.protect( m_pHead, node_to_value() )->m_pNext.load(memory_model::memory_order_relaxed) == null_ptr<node_type *>();
+ return guard.protect( m_pHead, node_to_value() )->m_pNext.load( memory_model::memory_order_relaxed ) == nullptr;
}
/// Clear the queue
atomic_node_ptr m_pNext ; ///< Pointer to next node
CDS_CONSTEXPR node() CDS_NOEXCEPT
- : m_pPrev( null_ptr<node *>() )
- , m_pNext( null_ptr<node *>() )
+ : m_pPrev( nullptr )
+ , m_pNext( nullptr )
{}
};
*/
static void is_empty( const node_type * pNode )
{
- assert( pNode->m_pNext.load(CDS_ATOMIC::memory_order_relaxed) == null_ptr<node_type *>() );
- assert( pNode->m_pPrev.load(CDS_ATOMIC::memory_order_relaxed) == null_ptr<node_type *>() );
+ assert( pNode->m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr );
+ assert( pNode->m_pPrev.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr );
}
};
{
void operator ()( value_type * p )
{
- assert( p != null_ptr<value_type *>());
+ assert( p != nullptr );
OptimisticQueue::clear_links( node_traits::to_node_ptr(*p) );
disposer()( p );
//@cond
static void clear_links( node_type * pNode )
{
- pNode->m_pNext.store( null_ptr<node_type *>(), memory_model::memory_order_release );
- pNode->m_pPrev.store( null_ptr<node_type *>(), memory_model::memory_order_release );
+ pNode->m_pNext.store( nullptr, memory_model::memory_order_release );
+ pNode->m_pPrev.store( nullptr, memory_model::memory_order_release );
}
struct dequeue_result {
while ( true ) { // Try till success or empty
pHead = res.guards.protect( 0, m_pHead, node_to_value() );
pTail = res.guards.protect( 1, m_pTail, node_to_value() );
- assert( pHead != null_ptr<node_type *>() );
+ assert( pHead != nullptr );
pFirstNodePrev = res.guards.protect( 2, pHead->m_pPrev, node_to_value() );
if ( pHead == m_pHead.load(memory_model::memory_order_relaxed)) {
if ( pTail != pHead ) {
- if ( pFirstNodePrev == null_ptr<node_type *>()
+ if ( pFirstNodePrev == nullptr
|| pFirstNodePrev->m_pNext.load(memory_model::memory_order_relaxed) != pHead )
{
fix_list( pTail, pHead );
void dispose_node( node_type * p )
{
- assert( p != null_ptr<node_type *>());
+ assert( p != nullptr );
if ( p != &m_Dummy ) {
gc::template retire<internal_disposer>( node_traits::to_value_ptr(p) );
public:
/// Constructor creates empty queue
OptimisticQueue()
- : m_pTail( null_ptr<node_type *>() )
- , m_pHead( null_ptr<node_type *>() )
+ : m_pTail( nullptr )
+ , m_pHead( nullptr )
{
// GC and node_type::gc must be the same
static_assert(( std::is_same<gc, typename node_type::gc>::value ), "GC and node_type::gc must be the same");
node_type * pHead = m_pHead.load(memory_model::memory_order_relaxed);
CDS_DEBUG_DO( node_type * pTail = m_pTail.load(memory_model::memory_order_relaxed); )
CDS_DEBUG_DO( assert( pHead == pTail ); )
- assert( pHead != null_ptr<node_type *>() );
+ assert( pHead != nullptr );
- m_pHead.store( null_ptr<node_type *>(), memory_model::memory_order_relaxed );
- m_pTail.store( null_ptr<node_type *>(), memory_model::memory_order_relaxed );
+ m_pHead.store( nullptr, memory_model::memory_order_relaxed );
+ m_pTail.store( nullptr, memory_model::memory_order_relaxed );
dispose_node( pHead );
}
return node_traits::to_value_ptr( *res.pNext );
}
- return null_ptr<value_type *>();
+ return nullptr;
}
/// Synonym for @ref cds_intrusive_OptimisticQueue_enqueue "enqueue"
void clear()
{
value_type * pv;
- while ( (pv = dequeue()) != null_ptr<value_type *>() );
+ while ( (pv = dequeue()) != nullptr );
}
/// Returns queue's item count
{
void operator()( segment * pSegment )
{
- assert( pSegment != null_ptr<segment *>());
+ assert( pSegment != nullptr );
free_segment( pSegment );
}
};
{
void operator()( segment * pSegment )
{
- assert( pSegment != null_ptr<segment *>());
+ assert( pSegment != nullptr );
retire_segment( pSegment );
}
};
public:
segment_list( size_t nQuasiFactor, stat& st )
- : m_pHead( null_ptr<segment *>() )
- , m_pTail( null_ptr<segment *>() )
+ : m_pHead( nullptr )
+ , m_pTail( nullptr )
, m_nQuasiFactor( nQuasiFactor )
, m_Stat( st )
{
scoped_lock l( m_Lock );
if ( m_List.empty() ) {
- m_pTail.store( null_ptr<segment *>(), memory_model::memory_order_relaxed );
- m_pHead.store( null_ptr<segment *>(), memory_model::memory_order_relaxed );
- return guard.assign( null_ptr<segment *>() );
+ m_pTail.store( nullptr, memory_model::memory_order_relaxed );
+ m_pHead.store( nullptr, memory_model::memory_order_relaxed );
+ return guard.assign( nullptr );
}
if ( pHead != &m_List.front() || get_version(pHead) != m_List.front().version ) {
m_List.pop_front();
if ( m_List.empty() ) {
- pRet = guard.assign( null_ptr<segment *>() );
- m_pTail.store( null_ptr<segment *>(), memory_model::memory_order_relaxed );
+ pRet = guard.assign( nullptr );
+ m_pTail.store( nullptr, memory_model::memory_order_relaxed );
}
else
pRet = guard.assign( &m_List.front() );
assert( pVal );
return pVal;
}
- return null_ptr<value_type *>();
+ return nullptr;
}
atomic_node_ptr m_pNext ; ///< pointer to the next node in the container
node()
- : m_pNext( null_ptr<node *>() )
+ : m_pNext( nullptr )
{}
};
atomic_node_ptr m_pNext ; ///< pointer to the next node in the container
node()
- : m_pNext(null_ptr<node *>())
+ : m_pNext( nullptr )
{}
protected:
virtual void cleanUp( cds::gc::hrc::ThreadGC * pGC )
{
- assert( pGC != null_ptr<cds::gc::hrc::ThreadGC *>() );
+ assert( pGC != nullptr );
typename gc::GuardArray<2> aGuards( *pGC );
while ( true ) {
{
if ( bConcurrent ) {
node * pNext = m_pNext.load(CDS_ATOMIC::memory_order_relaxed);
- do {} while ( !m_pNext.compare_exchange_weak( pNext, null_ptr<node *>(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) );
+ do {} while ( !m_pNext.compare_exchange_weak( pNext, nullptr, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) );
}
else {
- m_pNext.store( null_ptr<node *>(), CDS_ATOMIC::memory_order_relaxed );
+ m_pNext.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
}
}
};
*/
static void is_empty( const node_type * pNode )
{
- assert( pNode->m_pNext.load(CDS_ATOMIC::memory_order_relaxed) == null_ptr<node_type *>() );
+ assert( pNode->m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr );
}
};
public:
/// Constructs a node of height 1 (a bottom-list node)
node()
- : m_pNext( null_ptr<node *>())
+ : m_pNext( nullptr )
, m_nHeight(1)
- , m_arrNext( null_ptr<atomic_marked_ptr *>())
+ , m_arrNext( nullptr )
{}
/// Constructs a node of height \p nHeight
void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower )
{
assert( nHeight > 0 );
- assert( ( nHeight == 1 && nextTower == null_ptr<atomic_marked_ptr *>() ) // bottom-list node
- || ( nHeight > 1 && nextTower != null_ptr<atomic_marked_ptr *>() ) // node at level of more than 0
+ assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node
+ || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0
);
m_arrNext = nextTower;
atomic_marked_ptr * release_tower()
{
atomic_marked_ptr * pTower = m_arrNext;
- m_arrNext = null_ptr<atomic_marked_ptr *>();
+ m_arrNext = nullptr;
m_nHeight = 1;
return pTower;
}
atomic_marked_ptr& next( unsigned int nLevel )
{
assert( nLevel < height() );
- assert( nLevel == 0 || (nLevel > 0 && m_arrNext != null_ptr<atomic_marked_ptr *>() ));
+ assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) );
return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
}
atomic_marked_ptr const& next( unsigned int nLevel ) const
{
assert( nLevel < height() );
- assert( nLevel == 0 || nLevel > 0 && m_arrNext != null_ptr<atomic_marked_ptr *>() );
+ assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) );
return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
}
/// Clears internal links
void clear()
{
- assert( m_arrNext == null_ptr<atomic_marked_ptr *>());
+ assert( m_arrNext == nullptr );
m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_release );
}
bool is_cleared() const
{
return m_pNext == atomic_marked_ptr()
- && m_arrNext == null_ptr<atomic_marked_ptr *>()
+ && m_arrNext == nullptr
&& m_nHeight <= 1
;
}
static node_type * make_tower( node_type * pNode, unsigned int nHeight )
{
if ( nHeight > 1 )
- pNode->make_tower( nHeight, tower_allocator().NewArray( nHeight - 1, null_ptr<node_type *>() ));
+ pNode->make_tower( nHeight, tower_allocator().NewArray( nHeight - 1, nullptr ) );
return pNode;
}
public:
/// Constructs a node of height 1 (a bottom-list node)
node()
- : m_pNext( null_ptr<node *>())
+ : m_pNext( nullptr )
, m_nHeight(1)
- , m_arrNext( null_ptr<atomic_marked_ptr *>())
+ , m_arrNext( nullptr )
, m_bDel( false )
{}
void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower )
{
assert( nHeight > 0 );
- assert( ( nHeight == 1 && nextTower == null_ptr<atomic_marked_ptr *>() ) // bottom-list node
- || ( nHeight > 1 && nextTower != null_ptr<atomic_marked_ptr *>() ) // node at level of more than 0
+ assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node
+ || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0
);
m_arrNext = nextTower;
unsigned int nHeight = m_nHeight - 1;
atomic_marked_ptr * pTower = m_arrNext;
if ( pTower ) {
- m_arrNext = null_ptr<atomic_marked_ptr *>();
+ m_arrNext = nullptr;
m_nHeight = 1;
for ( unsigned int i = 0; i < nHeight; ++i )
pTower[i].store( marked_ptr(), CDS_ATOMIC::memory_order_release );
atomic_marked_ptr& next( unsigned int nLevel )
{
assert( nLevel < height() );
- assert( nLevel == 0 || (nLevel > 0 && m_arrNext != null_ptr<atomic_marked_ptr *>() ));
+ assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) );
return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
}
atomic_marked_ptr const& next( unsigned int nLevel ) const
{
assert( nLevel < height() );
- assert( nLevel == 0 || (nLevel > 0 && m_arrNext != null_ptr<atomic_marked_ptr *>()) );
+ assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) );
return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
}
public: // for internal use only!!!
iterator( node_type& refHead )
- : m_pNode( null_ptr<node_type *>() )
+ : m_pNode( nullptr )
{
back_off bkoff;
public:
iterator()
- : m_pNode( null_ptr<node_type *>())
+ : m_pNode( nullptr )
{}
iterator( iterator const& s)
value_type * operator ->() const
{
- assert( m_pNode != null_ptr< node_type *>() );
- assert( node_traits::to_value_ptr( m_pNode ) != null_ptr<value_type *>() );
+ assert( m_pNode != nullptr );
+ assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
return node_traits::to_value_ptr( m_pNode );
}
value_ref operator *() const
{
- assert( m_pNode != null_ptr< node_type *>() );
- assert( node_traits::to_value_ptr( m_pNode ) != null_ptr<value_type *>() );
+ assert( m_pNode != nullptr );
+ assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
return *node_traits::to_value_ptr( m_pNode );
}
static void dispose_node( value_type * pVal )
{
- assert( pVal != null_ptr<value_type *>() );
+ assert( pVal != nullptr );
typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal) );
disposer()( pVal );
}
goto retry;
}
- if ( pCur.ptr() == null_ptr<node_type *>()) {
+ if ( pCur.ptr() == nullptr ) {
// end of the list at level nLevel - goto next level
break;
}
pos.pSucc[ nLevel ] = pCur.ptr();
}
- return (pos.pCur = pCur.ptr()) != null_ptr<node_type *>();
+ return (pos.pCur = pCur.ptr()) != nullptr;
}
bool find_max_position( position& pos )
goto retry;
}
- if ( pCur.ptr() == null_ptr<node_type *>()) {
+ if ( pCur.ptr() == nullptr ) {
// end of the list at level nLevel - goto next level
break;
}
pos.pSucc[ nLevel ] = pCur.ptr();
}
- return (pos.pCur = pCur.ptr()) != null_ptr<node_type *>();
+ return (pos.pCur = pCur.ptr()) != nullptr;
}
template <typename Func>
template <typename Func>
bool try_remove_at( node_type * pDel, position& pos, Func f )
{
- assert( pDel != null_ptr<node_type *>());
+ assert( pDel != nullptr );
marked_node_ptr pSucc;
typename gc::Guard gSucc;
node_type * pNode = node_traits::to_node_ptr( val );
scoped_node_ptr scp( pNode );
unsigned int nHeight = pNode->height();
- bool bTowerOk = nHeight > 1 && pNode->get_tower() != null_ptr<atomic_node_ptr *>();
+ bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
bool bTowerMade = false;
position pos;
node_type * pNode = node_traits::to_node_ptr( val );
scoped_node_ptr scp( pNode );
unsigned int nHeight = pNode->height();
- bool bTowerOk = nHeight > 1 && pNode->get_tower() != null_ptr<atomic_node_ptr *>();
+ bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
bool bTowerMade = false;
# ifndef CDS_CXX11_LAMBDA_SUPPORT
/// Checks if the set is empty
bool empty() const
{
- return m_Head.head()->next(0).load( memory_model::memory_order_relaxed ) == null_ptr<node_type *>();
+ return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr;
}
/// Clears the set (non-atomic)
public:
/// Constructs a node of height 1 (a bottom-list node)
node()
- : m_pNext( null_ptr<node *>())
+ : m_pNext( nullptr )
, m_nHeight(1)
- , m_arrNext( null_ptr<atomic_ptr *>())
+ , m_arrNext( nullptr )
{}
/// Constructs a node of height \p nHeight
void make_tower( unsigned int nHeight, atomic_ptr * nextTower )
{
assert( nHeight > 0 );
- assert( ( nHeight == 1 && nextTower == null_ptr<atomic_ptr *>() ) // bottom-list node
- || ( nHeight > 1 && nextTower != null_ptr<atomic_ptr *>() ) // node at level of more than 0
+ assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node
+ || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0
);
m_arrNext = nextTower;
atomic_ptr * release_tower()
{
atomic_ptr * pTower = m_arrNext;
- m_arrNext = null_ptr<atomic_ptr *>();
+ m_arrNext = nullptr;
m_nHeight = 1;
return pTower;
}
atomic_ptr& next( unsigned int nLevel )
{
assert( nLevel < height() );
- assert( nLevel == 0 || (nLevel > 0 && m_arrNext != null_ptr<atomic_ptr *>() ));
+ assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) );
return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
}
atomic_ptr const& next( unsigned int nLevel ) const
{
assert( nLevel < height() );
- assert( nLevel == 0 || nLevel > 0 && m_arrNext != null_ptr<atomic_ptr *>() );
+ assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) );
return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
}
/// Clears internal links
void clear()
{
- assert( m_arrNext == null_ptr<atomic_ptr *>());
- m_pNext.store( null_ptr<node *>(), CDS_ATOMIC::memory_order_release );
+ assert( m_arrNext == nullptr );
+ m_pNext.store( nullptr, CDS_ATOMIC::memory_order_release );
}
bool is_cleared() const
{
- return m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == null_ptr<atomic_ptr *>()
- && m_arrNext == null_ptr<atomic_ptr *>()
+ return m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr
+ && m_arrNext == nullptr
&& m_nHeight <= 1
;
}
public:
iterator()
- : m_pNode( null_ptr<node_type *>())
+ : m_pNode( nullptr )
{}
iterator( iterator const& s)
value_type * operator ->() const
{
- assert( m_pNode != null_ptr< node_type *>() );
- assert( node_traits::to_value_ptr( m_pNode ) != null_ptr<value_type *>() );
+ assert( m_pNode != nullptr );
+ assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
return node_traits::to_value_ptr( m_pNode );
}
value_ref operator *() const
{
- assert( m_pNode != null_ptr< node_type *>() );
- assert( node_traits::to_value_ptr( m_pNode ) != null_ptr<value_type *>() );
+ assert( m_pNode != nullptr );
+ assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
return *node_traits::to_value_ptr( m_pNode );
}
head_node( unsigned int nHeight )
{
for ( size_t i = 0; i < sizeof(m_Tower) / sizeof(m_Tower[0]); ++i )
- m_Tower[i].store( null_ptr<node_type *>(), CDS_ATOMIC::memory_order_relaxed );
+ m_Tower[i].store( nullptr, CDS_ATOMIC::memory_order_relaxed );
node_type::make_tower( nHeight, m_Tower );
}
void clear()
{
for (unsigned int i = 0; i < sizeof(m_Tower) / sizeof(m_Tower[0]); ++i )
- m_Tower[i].store( null_ptr<node_type *>(), CDS_ATOMIC::memory_order_relaxed );
- node_type::m_pNext.store( null_ptr<node_type *>(), CDS_ATOMIC::memory_order_relaxed );
+ m_Tower[i].store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+ node_type::m_pNext.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
}
};
//@endcond
static void dispose_node( node_type * pNode )
{
- assert( pNode != null_ptr<node_type *>() );
+ assert( pNode != nullptr );
typename node_builder::node_disposer()( pNode );
disposer()( node_traits::to_value_ptr( pNode ));
}
{
node_type * pPred;
node_type * pSucc;
- node_type * pCur = null_ptr<node_type *>();
+ node_type * pCur = nullptr;
int nCmp = 1;
unsigned int nHeight = pNode->height();
for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel )
- pNode->next(nLevel).store( null_ptr<node_type *>(), memory_model::memory_order_relaxed );
+ pNode->next( nLevel ).store( nullptr, memory_model::memory_order_relaxed );
{
node_type * p = pos.pSucc[0];
}
for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) {
- node_type * p = null_ptr<node_type *>();
+ node_type * p = nullptr;
while ( true ) {
node_type * q = pos.pSucc[ nLevel ];
}
else {
m_Stat.onFindFastFailed();
- return null_ptr<node_type *>();
+ return nullptr;
}
}
node_type * pNode = node_traits::to_node_ptr( val );
scoped_node_ptr scp( pNode );
unsigned int nHeight = pNode->height();
- bool bTowerOk = nHeight > 1 && pNode->get_tower() != null_ptr<atomic_node_ptr *>();
+ bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
bool bTowerMade = false;
position pos;
node_type * pNode = node_traits::to_node_ptr( val );
scoped_node_ptr scp( pNode );
unsigned int nHeight = pNode->height();
- bool bTowerOk = nHeight > 1 && pNode->get_tower() != null_ptr<atomic_node_ptr *>();
+ bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
bool bTowerMade = false;
# ifndef CDS_CXX11_LAMBDA_SUPPORT
template <typename Q, typename Func>
bool find( Q& val, Func f ) const
{
- return find_with_( val, key_comparator(), f ) != null_ptr<node_type *>();
+ return find_with_( val, key_comparator(), f ) != nullptr;
}
/// Finds the key \p val using \p pred predicate for comparing
template <typename Q, typename Less, typename Func>
bool find_with( Q& val, Less pred, Func f ) const
{
- return find_with_( val, cds::opt::details::make_comparator_from_less<Less>(), f ) != null_ptr<node_type *>();
+ return find_with_( val, cds::opt::details::make_comparator_from_less<Less>(), f ) != nullptr;
}
/// Finds the key \p val
template <typename Q, typename Func>
bool find( Q const& val, Func f ) const
{
- return find_with_( val, key_comparator(), f ) != null_ptr<node_type *>();
+ return find_with_( val, key_comparator(), f ) != nullptr;
}
/// Finds the key \p val using \p pred predicate for comparing
template <typename Q, typename Less, typename Func>
bool find_with( Q const& val, Less pred, Func f ) const
{
- return find_with_( val, cds::opt::details::make_comparator_from_less<Less>(), f ) != null_ptr<node_type *>();
+ return find_with_( val, cds::opt::details::make_comparator_from_less<Less>(), f ) != nullptr;
}
/// Finds the key \p val
# endif
if ( pNode )
return node_traits::to_value_ptr( pNode );
- return null_ptr<value_type *>();
+ return nullptr;
}
/// Finds the key \p val using \p pred predicate for comparing
# endif
if ( pNode )
return node_traits::to_value_ptr( pNode );
- return null_ptr<value_type *>();
+ return nullptr;
}
/// Gets minimum key from the set
pPred = pCur;
}
}
- return pPred && pPred != m_Head.head() ? node_traits::to_value_ptr( pPred ) : null_ptr<value_type *>();
+ return pPred && pPred != m_Head.head() ? node_traits::to_value_ptr( pPred ) : nullptr;
}
/// Clears the set (non-atomic)
/// Checks if the set is empty
bool empty() const
{
- return m_Head.head()->next(0).load( memory_model::memory_order_relaxed ) == null_ptr<node_type *>();
+ return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr;
}
/// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32.
public:
/// Constructs a node of height 1 (a bottom-list node)
node()
- : m_pNext( null_ptr<node *>())
- , m_pDelChain( null_ptr<node *>())
+ : m_pNext( nullptr )
+ , m_pDelChain( nullptr )
# ifdef _DEBUG
, m_bLinked( false )
, m_bUnlinked( false )
# endif
, m_nHeight(1)
- , m_arrNext( null_ptr<atomic_marked_ptr *>())
+ , m_arrNext( nullptr )
{}
# ifdef _DEBUG
void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower )
{
assert( nHeight > 0 );
- assert( ( nHeight == 1 && nextTower == null_ptr<atomic_marked_ptr *>() ) // bottom-list node
- || ( nHeight > 1 && nextTower != null_ptr<atomic_marked_ptr *>() ) // node at level of more than 0
+ assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node
+ || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0
);
m_arrNext = nextTower;
atomic_marked_ptr * release_tower()
{
atomic_marked_ptr * pTower = m_arrNext;
- m_arrNext = null_ptr<atomic_marked_ptr *>();
+ m_arrNext = nullptr;
m_nHeight = 1;
return pTower;
}
atomic_marked_ptr& next( unsigned int nLevel )
{
assert( nLevel < height() );
- assert( nLevel == 0 || (nLevel > 0 && m_arrNext != null_ptr<atomic_marked_ptr *>() ));
+ assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) );
return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
}
atomic_marked_ptr const& next( unsigned int nLevel ) const
{
assert( nLevel < height() );
- assert( nLevel == 0 || nLevel > 0 && m_arrNext != null_ptr<atomic_marked_ptr *>() );
+ assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) );
return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
}
/// Clears internal links
void clear()
{
- assert( m_arrNext == null_ptr<atomic_marked_ptr *>());
+ assert( m_arrNext == nullptr );
m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_release );
- m_pDelChain = null_ptr<node *>();
+ m_pDelChain = nullptr;
}
bool is_cleared() const
{
return m_pNext == atomic_marked_ptr()
- && m_arrNext == null_ptr<atomic_marked_ptr *>()
+ && m_arrNext == nullptr
&& m_nHeight <= 1;
}
};
public: // for internal use only!!!
iterator( node_type& refHead )
- : m_pNode( null_ptr<node_type *>() )
+ : m_pNode( nullptr )
{
// RCU should be locked before iterating!!!
assert( gc::is_locked() );
public:
iterator()
- : m_pNode( null_ptr<node_type *>())
+ : m_pNode( nullptr )
{
// RCU should be locked before iterating!!!
assert( gc::is_locked() );
value_type * operator ->() const
{
- assert( m_pNode != null_ptr< node_type *>() );
- assert( node_traits::to_value_ptr( m_pNode ) != null_ptr<value_type *>() );
+ assert( m_pNode != nullptr );
+ assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
return node_traits::to_value_ptr( m_pNode );
}
value_ref operator *() const
{
- assert( m_pNode != null_ptr< node_type *>() );
- assert( node_traits::to_value_ptr( m_pNode ) != null_ptr<value_type *>() );
+ assert( m_pNode != nullptr );
+ assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
return *node_traits::to_value_ptr( m_pNode );
}
node_type * pDelChain;
position()
- : pDelChain( null_ptr<node_type *>())
+ : pDelChain( nullptr )
{}
# ifdef _DEBUG
~position()
{
- assert( pDelChain == null_ptr<node_type *>());
+ assert( pDelChain == nullptr );
}
# endif
};
goto retry;
}
- if ( pCur.ptr() == null_ptr<node_type *>()) {
+ if ( pCur.ptr() == nullptr ) {
// end of the list at level nLevel - goto next level
break;
}
pos.pPrev[ nLevel ] = pPred;
pos.pSucc[ nLevel ] = pCur.ptr();
}
- return (pos.pCur = pCur.ptr()) != null_ptr<node_type *>();
+ return (pos.pCur = pCur.ptr()) != nullptr;
}
bool find_max_position( position& pos )
goto retry;
}
- if ( pCur.ptr() == null_ptr<node_type *>()) {
+ if ( pCur.ptr() == nullptr ) {
// end of the list at level nLevel - goto next level
break;
}
pos.pSucc[ nLevel ] = pCur.ptr();
}
- return (pos.pCur = pCur.ptr()) != null_ptr<node_type *>();
+ return (pos.pCur = pCur.ptr()) != nullptr;
}
template <typename Func>
static void link_for_remove( position& pos, node_type * pDel )
{
- assert( pDel->m_pDelChain == null_ptr<node_type *>() );
+ assert( pDel->m_pDelChain == nullptr );
pDel->m_pDelChain = pos.pDelChain;
pos.pDelChain = pDel;
template <typename Func>
bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract )
{
- assert( pDel != null_ptr<node_type *>());
+ assert( pDel != nullptr );
assert( gc::is_locked() );
marked_node_ptr pSucc;
if ( !find_position( key, pos, cmp, false ) ) {
m_Stat.onExtractFailed();
- pDel = null_ptr<node_type *>();
+ pDel = nullptr;
}
else {
pDel = pos.pCur;
}
else {
m_Stat.onExtractFailed();
- pDel = null_ptr<node_type *>();
+ pDel = nullptr;
}
}
defer_chain( pos );
- return pDel ? node_traits::to_value_ptr(pDel) : null_ptr<value_type *>();
+ return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
}
template <typename ExemptPtr, typename Q>
{
rcu_lock l;
value_type * pDel = do_extract_key( key, key_comparator() );
- bReturn = pDel != null_ptr<value_type *>();
+ bReturn = pDel != nullptr;
if ( bReturn )
result = pDel;
}
{
rcu_lock l;
value_type * pDel = do_extract_key( key, cds::opt::details::make_comparator_from_less<Less>() );
- bReturn = pDel != null_ptr<value_type *>();
+ bReturn = pDel != nullptr;
if ( bReturn )
result = pDel;
}
if ( !find_min_position( pos ) ) {
m_Stat.onExtractMinFailed();
- pDel = null_ptr<node_type *>();
+ pDel = nullptr;
}
else {
pDel = pos.pCur;
}
else {
m_Stat.onExtractMinFailed();
- pDel = null_ptr<node_type *>();
+ pDel = nullptr;
}
}
{
rcu_lock l;
node_type * pDel = do_extract_min();
- bReturn = pDel != null_ptr<node_type *>();
+ bReturn = pDel != nullptr;
if ( bReturn )
result = node_traits::to_value_ptr(pDel);
}
if ( !find_max_position( pos ) ) {
m_Stat.onExtractMaxFailed();
- pDel = null_ptr<node_type *>();
+ pDel = nullptr;
}
else {
pDel = pos.pCur;
}
else {
m_Stat.onExtractMaxFailed();
- pDel = null_ptr<node_type *>();
+ pDel = nullptr;
}
}
{
rcu_lock l;
node_type * pDel = do_extract_max();
- bReturn = pDel != null_ptr<node_type *>();
+ bReturn = pDel != nullptr;
if ( bReturn )
result = node_traits::to_value_ptr(pDel);
}
: pCur(p)
{}
deferred_list_iterator()
- : pCur( null_ptr<node_type *>())
+ : pCur( nullptr )
{}
cds::urcu::retired_ptr operator *() const
// Delete local chain
if ( pos.pDelChain ) {
dispose_chain( pos.pDelChain );
- pos.pDelChain = null_ptr<node_type *>();
+ pos.pDelChain = nullptr;
}
// Delete deferred chain
void dispose_deferred()
{
- dispose_chain( m_pDeferredDelChain.exchange( null_ptr<node_type *>(), memory_model::memory_order_acq_rel ));
+ dispose_chain( m_pDeferredDelChain.exchange( nullptr, memory_model::memory_order_acq_rel ) );
}
void defer_chain( position& pos )
pTail->m_pDelChain = pDeferList;
} while ( !m_pDeferredDelChain.compare_exchange_weak( pDeferList, pHead, memory_model::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed ));
- pos.pDelChain = null_ptr<node_type *>();
+ pos.pDelChain = nullptr;
}
}
SkipListSet()
: m_Head( c_nMaxHeight )
, m_nHeight( c_nMinHeight )
- , m_pDeferredDelChain( null_ptr<node_type *>() )
+ , m_pDeferredDelChain( nullptr )
{
static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
node_type * pNode = node_traits::to_node_ptr( val );
scoped_node_ptr scp( pNode );
unsigned int nHeight = pNode->height();
- bool bTowerOk = nHeight > 1 && pNode->get_tower() != null_ptr<atomic_node_ptr *>();
+ bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
bool bTowerMade = false;
rcu_lock rcuLock;
node_type * pNode = node_traits::to_node_ptr( val );
scoped_node_ptr scp( pNode );
unsigned int nHeight = pNode->height();
- bool bTowerOk = nHeight > 1 && pNode->get_tower() != null_ptr<atomic_node_ptr *>();
+ bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
bool bTowerMade = false;
# ifndef CDS_CXX11_LAMBDA_SUPPORT
# ifdef CDS_CXX11_LAMBDA_SUPPORT
value_type * pFound;
return do_find_with( val, key_comparator(), [&pFound](value_type& found, Q const& ) { pFound = &found; } )
- ? pFound : null_ptr<value_type *>();
+ ? pFound : nullptr;
# else
get_functor gf;
return do_find_with( val, key_comparator(), cds::ref(gf) )
- ? gf.pFound : null_ptr<value_type *>();
+ ? gf.pFound : nullptr;
# endif
}
value_type * pFound;
return do_find_with( val, cds::opt::details::make_comparator_from_less<Less>(),
[&pFound](value_type& found, Q const& ) { pFound = &found; } )
- ? pFound : null_ptr<value_type *>();
+ ? pFound : nullptr;
# else
get_functor gf;
return do_find_with( val, cds::opt::details::make_comparator_from_less<Less>(), cds::ref(gf) )
- ? gf.pFound : null_ptr<value_type *>();
+ ? gf.pFound : nullptr;
# endif
}
/// Checks if the set is empty
bool empty() const
{
- return m_Head.head()->next(0).load( memory_model::memory_order_relaxed ) == null_ptr<node_type *>();
+ return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr;
}
/// Clears the set (non-atomic)
public:
bool insert_at( dummy_node_type * pHead, value_type& val )
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(pHead);
return base_class::insert_at( h, val );
}
template <typename Func>
bool insert_at( dummy_node_type * pHead, value_type& val, Func f )
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(pHead);
return base_class::insert_at( h, val, f );
}
template <typename Func>
std::pair<bool, bool> ensure_at( dummy_node_type * pHead, value_type& val, Func func )
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(pHead);
return base_class::ensure_at( h, val, func );
}
bool unlink_at( dummy_node_type * pHead, value_type& val )
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(pHead);
return base_class::unlink_at( h, val );
}
template <typename Q, typename Compare, typename Func>
bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp, Func f )
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(pHead);
return base_class::erase_at( h, val, cmp, f );
}
template <typename Q, typename Compare>
bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp )
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(pHead);
return base_class::erase_at( h, val, cmp );
}
template <typename Q, typename Compare>
bool extract_at( dummy_node_type * pHead, typename gc::Guard& guard, split_list::details::search_value_type<Q> const& val, Compare cmp )
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(pHead);
return base_class::extract_at( h, guard, val, cmp );
}
template <typename Q, typename Compare, typename Func>
bool find_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp, Func f )
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(pHead);
return base_class::find_at( h, val, cmp, f );
}
template <typename Q, typename Compare>
bool find_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp )
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(pHead);
return base_class::find_at( h, val, cmp );
}
template <typename Q, typename Compare>
bool get_at( dummy_node_type * pHead, typename gc::Guard& guard, split_list::details::search_value_type<Q> const& val, Compare cmp )
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(pHead);
return base_class::get_at( h, guard, val, cmp );
}
size_t nParent = parent_bucket( nBucket );
dummy_node_type * pParentBucket = m_Buckets.bucket( nParent );
- if ( pParentBucket == null_ptr<dummy_node_type *>() ) {
+ if ( pParentBucket == nullptr ) {
pParentBucket = init_bucket( nParent );
}
- assert( pParentBucket != null_ptr<dummy_node_type *>() );
+ assert( pParentBucket != nullptr );
// Allocate a dummy node for new bucket
{
back_off bkoff;
while ( true ) {
dummy_node_type volatile * p = m_Buckets.bucket( nBucket );
- if ( p != null_ptr<dummy_node_type volatile *>() )
+ if ( p != nullptr )
return const_cast<dummy_node_type *>( p );
bkoff();
}
size_t nBucket = bucket_no( nHash );
dummy_node_type * pHead = m_Buckets.bucket( nBucket );
- if ( pHead == null_ptr<dummy_node_type *>() )
+ if ( pHead == nullptr )
pHead = init_bucket( nBucket );
assert( pHead->is_dummy() );
size_t nHash = hash_value( val );
split_list::details::search_value_type<Q> sv( val, split_list::regular_hash( nHash ));
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
# ifdef CDS_CXX11_LAMBDA_SUPPORT
return m_List.find_at( pHead, sv, cmp,
size_t nHash = hash_value( val );
split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
return m_List.find_at( pHead, sv, cmp );
}
size_t nHash = hash_value( val );
split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
return m_List.get_at( pHead, guard, sv, cmp );
}
size_t nHash = hash_value( val );
split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
if ( m_List.erase_at( pHead, sv, cmp, f )) {
--m_ItemCounter;
size_t nHash = hash_value( val );
split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
if ( m_List.erase_at( pHead, sv, cmp ) ) {
--m_ItemCounter;
size_t nHash = hash_value( val );
split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
if ( m_List.extract_at( pHead, guard, sv, cmp ) ) {
--m_ItemCounter;
{
size_t nHash = hash_value( val );
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
{
size_t nHash = hash_value( val );
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
{
size_t nHash = hash_value( val );
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
{
size_t nHash = hash_value( val );
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
if ( m_List.unlink_at( pHead, val ) ) {
--m_ItemCounter;
//@cond
void allocate_table()
{
- m_Table = bucket_table_allocator().NewArray( m_nCapacity, null_ptr<node_type *>() );
+ m_Table = bucket_table_allocator().NewArray( m_nCapacity, nullptr );
}
void destroy_table()
void bucket( size_t nBucket, node_type * pNode )
{
assert( nBucket < capacity() );
- assert( bucket(nBucket) == null_ptr<node_type *>() );
+ assert( bucket( nBucket ) == nullptr );
m_Table[ nBucket ].store( pNode, memory_model::memory_order_release );
}
segment_type * allocate_table()
{
- return bucket_table_allocator().NewArray( m_metrics.nSegmentCount, null_ptr<table_entry *>() );
+ return bucket_table_allocator().NewArray( m_metrics.nSegmentCount, nullptr );
}
void destroy_table( segment_type * pTable )
table_entry * allocate_segment()
{
- return segment_allocator().NewArray( m_metrics.nSegmentSize, null_ptr<node_type *>() );
+ return segment_allocator().NewArray( m_metrics.nSegmentSize, nullptr );
}
void destroy_segment( table_entry * pSegment )
segment_type * pSegments = m_Segments;
for ( size_t i = 0; i < m_metrics.nSegmentCount; ++i ) {
table_entry * pEntry = pSegments[i].load(memory_model::memory_order_relaxed);
- if ( pEntry != null_ptr<table_entry *>() )
+ if ( pEntry != nullptr )
destroy_segment( pEntry );
}
destroy_table( pSegments );
assert( nSegment < m_metrics.nSegmentCount );
table_entry * pSegment = m_Segments[ nSegment ].load(memory_model::memory_order_acquire);
- if ( pSegment == null_ptr<table_entry *>() )
- return null_ptr<node_type *>() ; // uninitialized bucket
+ if ( pSegment == nullptr )
+ return nullptr; // uninitialized bucket
return pSegment[ nBucket & (m_metrics.nSegmentSize - 1) ].load(memory_model::memory_order_acquire);
}
assert( nSegment < m_metrics.nSegmentCount );
segment_type& segment = m_Segments[nSegment];
- if ( segment.load(memory_model::memory_order_relaxed) == null_ptr<table_entry *>() ) {
+ if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) {
table_entry * pNewSegment = allocate_segment();
- table_entry * pNull = null_ptr<table_entry *>();
+ table_entry * pNull = nullptr;
if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
destroy_segment( pNewSegment );
}
public:
list_iterator insert_at_( dummy_node_type * pHead, value_type& val )
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(static_cast<list_node_type *>(pHead));
return base_class::insert_at_( h, val );
}
template <typename Func>
std::pair<list_iterator, bool> ensure_at_( dummy_node_type * pHead, value_type& val, Func func )
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(static_cast<list_node_type *>(pHead));
return base_class::ensure_at_( h, val, func );
}
template <typename Q, typename Compare, typename Func>
bool find_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp, Func f )
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(static_cast<list_node_type *>(pHead));
return base_class::find_at( h, val, cmp, f );
}
template <typename Q, typename Compare>
list_iterator find_at_( dummy_node_type * pHead, split_list::details::search_value_type<Q> const & val, Compare cmp )
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(static_cast<list_node_type *>(pHead));
return base_class::find_at_( h, val, cmp );
}
size_t nParent = parent_bucket( nBucket );
dummy_node_type * pParentBucket = m_Buckets.bucket( nParent );
- if ( pParentBucket == null_ptr<dummy_node_type *>() ) {
+ if ( pParentBucket == nullptr ) {
pParentBucket = init_bucket( nParent );
}
- assert( pParentBucket != null_ptr<dummy_node_type *>() );
+ assert( pParentBucket != nullptr );
// Allocate a dummy node for new bucket
{
back_off bkoff;
while ( true ) {
dummy_node_type volatile * p = m_Buckets.bucket( nBucket );
- if ( p && p != null_ptr<dummy_node_type volatile *>() )
+ if ( p != nullptr )
return const_cast<dummy_node_type *>( p );
bkoff();
}
size_t nBucket = bucket_no( nHash );
dummy_node_type * pHead = m_Buckets.bucket( nBucket );
- if ( pHead == null_ptr<dummy_node_type *>() )
+ if ( pHead == nullptr )
pHead = init_bucket( nBucket );
assert( pHead->is_dummy() );
{
iterator it = find_( val );
if ( it == end() )
- return null_ptr<value_type *>();
+ return nullptr;
return &*it;
}
{
iterator it = find_with_( val, pred );
if ( it == end() )
- return null_ptr<value_type *>();
+ return nullptr;
return &*it;
}
{
size_t nHash = hash_value( val );
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
{
size_t nHash = hash_value( val );
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
size_t nHash = hash_value( val );
split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
return iterator( m_List.find_at_( pHead, sv, typename wrapped_ordered_list::template make_compare_from_less<Less>() ), m_List.end() );
}
size_t nHash = hash_value( val );
split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
return iterator( m_List.find_at_( pHead, sv, key_comparator() ), m_List.end() );
size_t nHash = hash_value( val );
split_list::details::search_value_type<Q> sv( val, split_list::regular_hash( nHash ));
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
# ifdef CDS_CXX11_LAMBDA_SUPPORT
return m_List.find_at( pHead, sv, cmp,
[&f](value_type& item, split_list::details::search_value_type<Q>& val){ cds::unref(f)(item, val.val ); });
public:
bool insert_at( dummy_node_type * pHead, value_type& val )
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(pHead);
return base_class::insert_at( h, val );
}
template <typename Func>
bool insert_at( dummy_node_type * pHead, value_type& val, Func f )
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(pHead);
return base_class::insert_at( h, val, f );
}
template <typename Func>
std::pair<bool, bool> ensure_at( dummy_node_type * pHead, value_type& val, Func func )
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(pHead);
return base_class::ensure_at( h, val, func );
}
bool unlink_at( dummy_node_type * pHead, value_type& val )
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(pHead);
return base_class::unlink_at( h, val );
}
template <typename Q, typename Compare, typename Func>
bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp, Func f )
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(pHead);
return base_class::erase_at( h, val, cmp, f );
}
template <typename Q, typename Compare>
bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp )
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(pHead);
return base_class::erase_at( h, val, cmp );
}
template <typename Q, typename Compare>
value_type * extract_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp )
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(pHead);
return base_class::extract_at( h, val, cmp );
}
template <typename Q, typename Compare, typename Func>
bool find_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp, Func f ) const
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(pHead);
return base_class::find_at( h, val, cmp, f );
}
template <typename Q, typename Compare>
bool find_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const & val, Compare cmp ) const
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(pHead);
return base_class::find_at( h, val, cmp );
}
template <typename Q, typename Compare>
value_type * get_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp ) const
{
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
bucket_head_type h(pHead);
return base_class::get_at( h, val, cmp );
}
size_t nParent = parent_bucket( nBucket );
dummy_node_type * pParentBucket = m_Buckets.bucket( nParent );
- if ( pParentBucket == null_ptr<dummy_node_type *>() ) {
+ if ( pParentBucket == nullptr ) {
pParentBucket = init_bucket( nParent );
}
- assert( pParentBucket != null_ptr<dummy_node_type *>() );
+ assert( pParentBucket != nullptr );
// Allocate a dummy node for new bucket
{
back_off bkoff;
while ( true ) {
dummy_node_type volatile * p = m_Buckets.bucket( nBucket );
- if ( p != null_ptr<dummy_node_type volatile *>() )
+ if ( p != nullptr )
return const_cast<dummy_node_type *>( p );
bkoff();
}
size_t nBucket = bucket_no( nHash );
dummy_node_type * pHead = m_Buckets.bucket( nBucket );
- if ( pHead == null_ptr<dummy_node_type *>() )
+ if ( pHead == nullptr )
pHead = init_bucket( nBucket );
assert( pHead->is_dummy() );
size_t nHash = hash_value( val );
split_list::details::search_value_type<Q> sv( val, split_list::regular_hash( nHash ));
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
# ifdef CDS_CXX11_LAMBDA_SUPPORT
return m_List.find_at( pHead, sv, cmp,
size_t nHash = hash_value( val );
split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
return m_List.find_at( pHead, sv, cmp );
}
size_t nHash = hash_value( val );
split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
return m_List.get_at( pHead, sv, cmp );
}
size_t nHash = hash_value( val );
split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
value_type * pNode = m_List.extract_at( pHead, sv, cmp );
if ( pNode )
size_t nHash = hash_value( val );
split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
if ( m_List.erase_at( pHead, sv, cmp ) ) {
--m_ItemCounter;
size_t nHash = hash_value( val );
split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
if ( m_List.erase_at( pHead, sv, cmp, f )) {
--m_ItemCounter;
{
size_t nHash = hash_value( val );
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
{
size_t nHash = hash_value( val );
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
{
size_t nHash = hash_value( val );
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
{
size_t nHash = hash_value( val );
dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != null_ptr<dummy_node_type *>() );
+ assert( pHead != nullptr );
if ( m_List.unlink_at( pHead, val ) ) {
--m_ItemCounter;
public:
/// Default ctor. The initial capacity is 16.
StripedSet()
- : m_Buckets( null_ptr<bucket_type *>() )
+ : m_Buckets( nullptr )
, m_nBucketMask( c_nMinimalCapacity - 1 )
, m_MutexPolicy( c_nMinimalCapacity )
{
StripedSet(
size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16.
)
- : m_Buckets( null_ptr<bucket_type *>() )
+ : m_Buckets( nullptr )
, m_nBucketMask( calc_init_capacity(nCapacity) - 1 )
, m_MutexPolicy( m_nBucketMask + 1 )
{
size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16.
,resizing_policy const& resizingPolicy ///< Resizing policy
)
- : m_Buckets( null_ptr<bucket_type *>() )
+ : m_Buckets( nullptr )
, m_nBucketMask( ( nCapacity ? calc_init_capacity(nCapacity) : c_nMinimalCapacity ) - 1 )
, m_MutexPolicy( m_nBucketMask + 1 )
, m_ResizingPolicy( resizingPolicy )
size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16.
,resizing_policy&& resizingPolicy ///< Resizing policy
)
- : m_Buckets( null_ptr<bucket_type *>() )
+ : m_Buckets( nullptr )
, m_nBucketMask( ( nCapacity ? calc_init_capacity(nCapacity) : c_nMinimalCapacity ) - 1 )
, m_MutexPolicy( m_nBucketMask + 1 )
, m_ResizingPolicy( resizingPolicy )
value_type * erase( Q const& key, Func f )
{
iterator it = m_Set.find( key, key_comparator() );
- if ( it == m_Set.end() )
- return null_ptr<value_type *>();
+ if ( it == m_Set.end() )
+ return nullptr;
value_type& val = *it;
cds::unref(f)( val );
m_Set.erase( it );
value_type * erase( Q const& key, Less pred, Func f )
{
iterator it = m_Set.find( key, pred );
- if ( it == m_Set.end() )
- return null_ptr<value_type *>();
+ if ( it == m_Set.end() )
+ return nullptr;
value_type& val = *it;
cds::unref(f)( val );
m_Set.erase( it );
{
iterator it = find_key( key, find_predicate() );
if ( it == m_List.end() || key_comparator()( key, *it ) != 0 )
- return null_ptr<value_type *>();
+ return nullptr;
// key exists
value_type& val = *it;
{
iterator it = find_key( key, pred );
if ( it == m_List.end() || pred( key, *it ) || pred( *it, key ) )
- return null_ptr<value_type *>();
+ return nullptr;
// key exists
value_type& val = *it;
{
std::pair< iterator, bool > pos = find_prev_item_cmp( key, cmp );
if ( !pos.second )
- return null_ptr<value_type *>();
+ return nullptr;
// key exists
iterator it = pos.first;
{
iterator it = m_Set.find( key, typename container_type::hasher(), typename container_type::key_equal() );
if ( it == m_Set.end() )
- return null_ptr<value_type *>();
+ return nullptr;
value_type& val = *it;
cds::unref(f)( val );
m_Set.erase( it );
{
iterator it = m_Set.find( key, typename container_type::hasher(), equal_from_compare<Less>(pred) );
if ( it == m_Set.end() )
- return null_ptr<value_type *>();
+ return nullptr;
value_type& val = *it;
cds::unref(f)( val );
m_Set.erase( it );
CDS_ATOMIC::atomic<unsigned int> nStatus; ///< Internal elimination status
operation()
- : pVal( null_ptr<T *>() )
+ : pVal( nullptr )
, nStatus(0)
{}
};
himOp->pVal = op.pVal;
else
op.pVal = himOp->pVal;
- slot.pRec = null_ptr<elimination_rec *>();
+ slot.pRec = nullptr;
slot.lock.unlock();
himOp->nStatus.store( op_collided, CDS_ATOMIC::memory_order_release );
{
slot_scoped_lock l( slot.lock );
if ( slot.pRec == myRec )
- slot.pRec = null_ptr<elimination_rec *>();
+ slot.pRec = nullptr;
}
bool bCollided = op.nStatus.load( CDS_ATOMIC::memory_order_acquire ) == op_collided;
//@cond
void clear_links( node_type * pNode ) CDS_NOEXCEPT
{
- pNode->m_pNext.store( null_ptr<node_type *>(), memory_model::memory_order_relaxed );
+ pNode->m_pNext.store( nullptr, memory_model::memory_order_relaxed );
}
template <bool EnableElimination>
public:
/// Constructs empty stack
TreiberStack()
- : m_Top(null_ptr<node_type *>())
+ : m_Top( nullptr )
{
init();
}
\p nCollisionCapacity parameter specifies the capacity of collision array.
*/
TreiberStack( size_t nCollisionCapacity )
- : m_Top(null_ptr<node_type *>())
+ : m_Top( nullptr )
, m_Backoff( nCollisionCapacity )
{
init();
while ( true ) {
node_type * t = guard.protect( m_Top, node_to_value() );
- if ( t == null_ptr<node_type *>() )
- return null_ptr<value_type *>() ; // stack is empty
+ if ( t == nullptr )
+ return nullptr; // stack is empty
node_type * pNext = t->m_pNext.load(memory_model::memory_order_relaxed);
if ( m_Top.compare_exchange_weak( t, pNext, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) { // #2
bool empty() const
{
// http://www.manning-sandbox.com/thread.jspa?threadID=46245&tstart=0
- return m_Top.load(memory_model::memory_order_relaxed) == null_ptr<node_type *>();
+ return m_Top.load( memory_model::memory_order_relaxed ) == nullptr;
}
/// Clear the stack
node_type * pTop;
while ( true ) {
pTop = m_Top.load( memory_model::memory_order_relaxed );
- if ( pTop == null_ptr<node_type *>() )
+ if ( pTop == nullptr )
return;
- if ( m_Top.compare_exchange_weak( pTop, null_ptr<node_type *>(), memory_model::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed )) { // sync-with #1 and #2
+ if ( m_Top.compare_exchange_weak( pTop, nullptr, memory_model::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed ) ) { // sync-with #1 and #2
m_ItemCounter.reset();
break;
}
//@cond
static CDS_CONSTEXPR value_type * free0() CDS_NOEXCEPT
{
- return null_ptr<value_type *>();
+ return nullptr;
}
static CDS_CONSTEXPR value_type * free1() CDS_NOEXCEPT
{
} while ( bkoff(), true );
// No control path reaches this line!
- return null_ptr<value_type *>();
+ return nullptr;
}
/// Synonym of \ref cds_intrusive_TsigasQueue_enqueue "enqueue"
void clear( Disposer f )
{
value_type * pv;
- while ( (pv = pop()) != null_ptr<value_type *>() ) {
+ while ( (pv = pop()) != nullptr ) {
unref(f)( pv );
}
}
*/
value_type * dequeue()
{
- value_type * p = null_ptr<value_type *>();
- return base_class::dequeue( p ) ? p : null_ptr<value_type *>();
+ value_type * p = nullptr;
+ return base_class::dequeue( p ) ? p : nullptr;
}
/// Synonym of \ref enqueue
void clear( Disposer f )
{
value_type * pv;
- while ( (pv = pop()) != null_ptr<value_type *>() ) {
+ while ( (pv = pop()) != nullptr ) {
unref(f)( pv );
}
}
// Only for internal use!!!
array()
- : m_arrLocks( null_ptr<lock_type *>() )
+ : m_arrLocks( nullptr )
, m_nCapacity(0)
{}
array( select_cell_policy const& policy )
- : m_arrLocks( null_ptr<lock_type *>() )
+ : m_arrLocks( nullptr )
, m_nCapacity(0)
, m_SelectCellPolicy( policy )
{}
array(
size_t nCapacity ///< [in] Array size
)
- : m_arrLocks( null_ptr<lock_type *>() )
+ : m_arrLocks( nullptr )
, m_nCapacity( nCapacity )
{
m_arrLocks = create_lock_array( nCapacity );
size_t nCapacity, ///< [in] Array size
select_cell_policy const& policy ///< Cell selection policy (copy-constructible)
)
- : m_arrLocks( null_ptr<lock_type *>() )
+ : m_arrLocks( nullptr )
, m_nCapacity( nCapacity )
, m_SelectCellPolicy( policy )
{
size_t nCapacity, ///< [in] Array size
select_cell_policy&& policy ///< Cell selection policy (move-constructible)
)
- : m_arrLocks( null_ptr<lock_type *>() )
+ : m_arrLocks( nullptr )
, m_nCapacity( nCapacity )
, m_SelectCellPolicy( std::forward<select_cell_policy>( policy ))
{
struct make_null_ptr {
void operator ()(void *& p)
{
- p = null_ptr<void *>();
+ p = nullptr;
}
};
#endif
{
auto_lock al(m_access);
if ( base_class::empty() )
- return null_ptr<T *>();
+ return nullptr;
T& rDesc = base_class::front();
base_class::pop_front();
assert( base_class::node_algorithms::inited( static_cast<item_hook *>(&rDesc) ) );
{
auto_lock al( m_access );
if ( base_class::empty() )
- return null_ptr<T *>();
+ return nullptr;
T& rDesc = base_class::front();
base_class::pop_front();
assert( base_class::node_algorithms::inited( static_cast<item_hook *>(&rDesc) ) );
/// Removes \p pDesc descriptor from the free-list
bool unlink( T * pDesc )
{
- assert( pDesc != null_ptr<T *>() );
+            assert( pDesc != nullptr );
auto_lock al( m_access );
// !inited(pDesc) is equal to "pDesc is being linked to partial list"
if ( !base_class::node_algorithms::inited( static_cast<item_hook *>(pDesc) ) ) {
//@cond
superblock_desc()
- : pSB( null_ptr<byte *>() )
- , pProcHeap( null_ptr<processor_heap_base *>() )
+                : pSB( nullptr )
+ , pProcHeap( nullptr )
{}
//@endcond
};
public:
CDS_CONSTEXPR active_tag() CDS_NOEXCEPT
- : pDesc(null_ptr<superblock_desc *>())
+ : pDesc( nullptr )
, nCredits(0)
{}
void clear()
{
- pDesc = null_ptr<superblock_desc *>();
+ pDesc = nullptr;
nCredits = 0;
}
public:
active_tag() CDS_NOEXCEPT
- : pDesc( null_ptr<superblock_desc *>() )
+ : pDesc( nullptr )
{}
# ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT
// Clang 3.1: error: first argument to atomic operation must be a pointer to a trivially-copyable type
//@cond
processor_heap_base() CDS_NOEXCEPT
- : pProcDesc( null_ptr<processor_desc *>() )
- , pSizeClass( null_ptr<size_class *>() )
- , pPartial( null_ptr<superblock_desc *>() )
+ : pProcDesc( nullptr )
+ , pSizeClass( nullptr )
+ , pPartial( nullptr )
{
assert( (reinterpret_cast<uptr_atomic_t>(this) & (c_nAlignment - 1)) == 0 );
}
pDesc = partialList.pop();
break;
}
- } while ( !pPartial.compare_exchange_weak( pDesc, null_ptr<superblock_desc *>(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed));
+ } while ( !pPartial.compare_exchange_weak( pDesc, nullptr, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) );
//assert( pDesc == NULL || free_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_free_list_hook *>(pDesc) ));
//assert( pDesc == NULL || partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc) ) );
assert( pPartial != pDesc );
//assert( partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc) ) );
- superblock_desc * pCur = null_ptr<superblock_desc *>();
+ superblock_desc * pCur = nullptr;
if ( !pPartial.compare_exchange_strong(pCur, pDesc, CDS_ATOMIC::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed) )
partialList.push( pDesc );
}
//@cond
processor_desc()
- : arrProcHeap( null_ptr<processor_heap *>() )
- , pageHeaps( null_ptr<page_heap *>() )
+ : arrProcHeap( nullptr )
+ , pageHeaps( nullptr )
{}
//@endcond
};
++nCollision;
oldActive = pProcHeap->active.load(CDS_ATOMIC::memory_order_acquire);
if ( !oldActive.ptr() )
- return null_ptr<block_header *>();
+ return nullptr;
unsigned int nCredits = oldActive.credits();
active_tag newActive ; // default = 0
if ( nCredits != 0 ) {
retry:
superblock_desc * pDesc = pProcHeap->get_partial();
if ( !pDesc )
- return null_ptr<block_header *>();
+ return nullptr;
// reserve blocks
anchor_tag oldAnchor;
block_header * alloc_from_new_superblock( processor_heap * pProcHeap )
{
superblock_desc * pDesc = new_superblock_desc( pProcHeap );
- assert( pDesc != null_ptr<superblock_desc *>() );
+ assert( pDesc != nullptr );
pDesc->pSB = new_superblock_buffer( pProcHeap );
anchor_tag anchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_relaxed);
}
free_superblock( pDesc );
- return null_ptr<block_header *>();
+ return nullptr;
}
/// Find appropriate processor heap based on size-class selected
(pDesc->pageHeaps + i)->page_heap::~page_heap();
//m_IntHeap.free( pDesc->pageHeaps );
- pDesc->pageHeaps = null_ptr<page_heap *>();
+ pDesc->pageHeaps = nullptr;
pDesc->processor_desc::~processor_desc();
m_AlignedHeap.free( pDesc );
{
anchor_tag anchor;
superblock_desc * pDesc = pProcHeap->pProcDesc->listSBDescFree.pop();
- if ( pDesc == null_ptr<superblock_desc *>() ) {
+ if ( pDesc == nullptr ) {
pDesc = new( m_AlignedHeap.alloc(sizeof(superblock_desc), c_nAlignment ) ) superblock_desc;
assert( (uptr_atomic_t(pDesc) & (c_nAlignment - 1)) == 0 );
if ( !pProcHeap )
return alloc_from_OS( nSize );
- if ( (pBlock = alloc_from_active( pProcHeap )) != null_ptr<block_header *>() )
+ if ( (pBlock = alloc_from_active( pProcHeap )) != nullptr )
break;
- if ( (pBlock = alloc_from_partial( pProcHeap )) != null_ptr<block_header *>() )
+ if ( (pBlock = alloc_from_partial( pProcHeap )) != nullptr )
break;
- if ( (pBlock = alloc_from_new_superblock( pProcHeap )) != null_ptr<block_header *>() )
+ if ( (pBlock = alloc_from_new_superblock( pProcHeap )) != nullptr )
break;
}
pProcHeap->stat.incAllocatedBytes( pProcHeap->pSizeClass->nBlockSize );
- assert( pBlock != null_ptr<block_header *>() );
+ assert( pBlock != nullptr );
return pBlock;
}
free_superblock( pDesc );
}
else if (oldAnchor.state == SBSTATE_FULL ) {
- assert( pProcHeap != null_ptr<processor_heap_base *>() );
+ assert( pProcHeap != nullptr );
pProcHeap->stat.decDescFull();
pProcHeap->add_partial( pDesc );
}
{
if ( nNewSize == 0 ) {
free( pMemory );
- return null_ptr<void *>();
+ return nullptr;
}
const size_t nOrigSize = nNewSize;
// Reallocation of aligned block is not possible
if ( pBlock->isAligned() ) {
assert( false );
- return null_ptr<void *>();
+ return nullptr;
}
if ( pBlock->isOSAllocated() ) {
return pNew;
}
- return null_ptr<void *>();
+ return nullptr;
}
/// Allocate aligned memory block
public:
/// Initializes the generator of arbitrary length \p nLength
random_shuffle_permutation( size_t nLength )
- : m_pCur( null_ptr<integer_type *>() )
+ : m_pCur( nullptr )
{
m_pFirst = new integer_type[nLength];
m_pLast = m_pFirst + nLength;
if (cds::gc::HP::isUsed() )
m_hpManager = new (m_hpManagerPlaceholder) cds::gc::HP::thread_gc_impl;
else
- m_hpManager = null_ptr<cds::gc::HP::thread_gc_impl *>();
+ m_hpManager = nullptr;
if ( cds::gc::HRC::isUsed() )
m_hrcManager = new (m_hrcManagerPlaceholder) cds::gc::HRC::thread_gc_impl;
else
- m_hrcManager = null_ptr<cds::gc::HRC::thread_gc_impl *>();
+ m_hrcManager = nullptr;
if ( cds::gc::PTB::isUsed() )
m_ptbManager = new (m_ptbManagerPlaceholder) cds::gc::PTB::thread_gc_impl;
else
- m_ptbManager = null_ptr<cds::gc::PTB::thread_gc_impl *>();
+ m_ptbManager = nullptr;
}
~ThreadData()
if ( m_hpManager ) {
typedef cds::gc::HP::thread_gc_impl hp_thread_gc_impl;
m_hpManager->~hp_thread_gc_impl();
- m_hpManager = null_ptr<cds::gc::HP::thread_gc_impl *>();
+ m_hpManager = nullptr;
}
if ( m_hrcManager ) {
typedef cds::gc::HRC::thread_gc_impl hrc_thread_gc_impl;
m_hrcManager->~hrc_thread_gc_impl();
- m_hrcManager = null_ptr<cds::gc::HRC::thread_gc_impl *>();
+ m_hrcManager = nullptr;
}
if ( m_ptbManager ) {
typedef cds::gc::PTB::thread_gc_impl ptb_thread_gc_impl;
m_ptbManager->~ptb_thread_gc_impl();
- m_ptbManager = null_ptr<cds::gc::PTB::thread_gc_impl *>();
+ m_ptbManager = nullptr;
}
assert( m_pGPIRCU == NULL );
CDS_ATOMIC::atomic<OS::ThreadId> m_idOwner ; ///< Owner thread id; 0 - the record is free (not owned)
thread_list_record()
- : m_pNext( null_ptr<ThreadData *>() )
+ : m_pNext( nullptr )
, m_idOwner( cds::OS::nullThreadId() )
{}
public:
thread_list()
- : m_pHead( null_ptr<thread_record *>())
+ : m_pHead( nullptr )
{}
~thread_list()
void retire( thread_record * pRec )
{
- assert( pRec != null_ptr<thread_record *>() );
+ assert( pRec != nullptr );
pRec->m_list.m_idOwner.store( cds::OS::nullThreadId(), CDS_ATOMIC::memory_order_release );
}
void detach_all()
{
- thread_record * pNext = null_ptr<thread_record *>();
+ thread_record * pNext = nullptr;
cds::OS::ThreadId const nullThreadId = cds::OS::nullThreadId();
for ( thread_record * pRec = m_pHead.load(CDS_ATOMIC::memory_order_acquire); pRec; pRec = pNext ) {
CDS_DEBUG_DO( cds::OS::ThreadId const nullThreadId = cds::OS::nullThreadId() ;)
CDS_DEBUG_DO( cds::OS::ThreadId const mainThreadId = cds::OS::getCurrentThreadId() ;)
- thread_record * p = m_pHead.exchange( null_ptr<thread_record *>(), CDS_ATOMIC::memory_order_seq_cst );
+ thread_record * p = m_pHead.exchange( nullptr, CDS_ATOMIC::memory_order_seq_cst );
while ( p ) {
thread_record * pNext = p->m_list.m_pNext;
inline void gp_thread_gc<RCUtag>::access_lock()
{
thread_record * pRec = get_thread_record();
- assert( pRec != null_ptr<thread_record *>());
+ assert( pRec != nullptr );
uint32_t tmp = pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed );
if ( (tmp & rcu_class::c_nNestMask) == 0 ) {
inline void gp_thread_gc<RCUtag>::access_unlock()
{
thread_record * pRec = get_thread_record();
- assert( pRec != null_ptr<thread_record *>());
+ assert( pRec != nullptr );
//CDS_COMPILER_RW_BARRIER;
pRec->m_nAccessControl.fetch_sub( 1, CDS_ATOMIC::memory_order_release );
inline bool gp_thread_gc<RCUtag>::is_locked()
{
thread_record * pRec = get_thread_record();
- assert( pRec != null_ptr<thread_record *>());
+ assert( pRec != nullptr );
return (pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0;
}
/// Checks if the singleton is created and ready to use
static bool isUsed()
{
- return singleton_ptr::s_pRCU != null_ptr<singleton_vtbl *>();
+ return singleton_ptr::s_pRCU != nullptr;
}
protected:
if ( bDetachAll )
instance()->m_ThreadList.detach_all();
delete instance();
- singleton_ptr::s_pRCU = null_ptr<singleton_vtbl *>();
+ singleton_ptr::s_pRCU = nullptr;
}
}
/// Checks if the singleton is created and ready to use
static bool isUsed()
{
- return singleton_ptr::s_pRCU != null_ptr<singleton_vtbl *>();
+ return singleton_ptr::s_pRCU != nullptr;
}
protected:
if ( bDetachAll )
instance()->m_ThreadList.detach_all();
delete instance();
- singleton_ptr::s_pRCU = null_ptr<singleton_vtbl *>();
+ singleton_ptr::s_pRCU = nullptr;
}
}
/// Checks if the singleton is created and ready to use
static bool isUsed()
{
- return singleton_ptr::s_pRCU != null_ptr<singleton_vtbl *>();
+ return singleton_ptr::s_pRCU != nullptr;
}
protected:
pThis->m_DisposerThread.stop( pThis->m_Buffer, pThis->m_nCurEpoch.load( CDS_ATOMIC::memory_order_acquire ));
delete pThis;
- singleton_ptr::s_pRCU = null_ptr<singleton_vtbl *>();
+ singleton_ptr::s_pRCU = nullptr;
}
}
inline void sh_thread_gc<RCUtag>::access_lock()
{
thread_record * pRec = get_thread_record();
- assert( pRec != null_ptr<thread_record *>());
+ assert( pRec != nullptr );
uint32_t tmp = pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed );
if ( (tmp & rcu_class::c_nNestMask) == 0 ) {
inline void sh_thread_gc<RCUtag>::access_unlock()
{
thread_record * pRec = get_thread_record();
- assert( pRec != null_ptr<thread_record *>());
+        assert( pRec != nullptr );
CDS_COMPILER_RW_BARRIER;
pRec->m_nAccessControl.fetch_sub( 1, CDS_ATOMIC::memory_order_release );
inline bool sh_thread_gc<RCUtag>::is_locked()
{
thread_record * pRec = get_thread_record();
- assert( pRec != null_ptr<thread_record *>());
+        assert( pRec != nullptr );
return (pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0;
}
sigact.sa_flags = SA_SIGINFO;
sigemptyset( &sigact.sa_mask );
//sigaddset( &sigact.sa_mask, m_nSigNo );
- sigaction( m_nSigNo, &sigact, null_ptr<struct sigaction *>() );
+ sigaction( m_nSigNo, &sigact, nullptr );
sigaddset( &sigact.sa_mask, m_nSigNo );
pthread_sigmask( SIG_UNBLOCK, &sigact.sa_mask, NULL );
/// Checks if the singleton is created and ready to use
static bool isUsed()
{
- return singleton_ptr::s_pRCU != null_ptr<singleton_vtbl *>();
+ return singleton_ptr::s_pRCU != nullptr;
}
protected:
if ( bDetachAll )
instance()->m_ThreadList.detach_all();
delete instance();
- singleton_ptr::s_pRCU = null_ptr<singleton_vtbl *>();
+ singleton_ptr::s_pRCU = nullptr;
}
}
/// Checks if the singleton is created and ready to use
static bool isUsed()
{
- return singleton_ptr::s_pRCU != null_ptr<singleton_vtbl *>();
+ return singleton_ptr::s_pRCU != nullptr;
}
protected:
pThis->m_DisposerThread.stop( pThis->m_Buffer, pThis->m_nCurEpoch.load( CDS_ATOMIC::memory_order_acquire ));
delete pThis;
- singleton_ptr::s_pRCU = null_ptr<singleton_vtbl *>();
+ singleton_ptr::s_pRCU = nullptr;
}
}
bQuit = m_bQuit;
nCurEpoch = m_nCurEpoch;
pBuffer = m_pBuffer;
- m_pBuffer = null_ptr<buffer_type *>();
+ m_pBuffer = nullptr;
}
if ( pBuffer )
public:
//@cond
dispose_thread()
- : m_pBuffer( null_ptr<buffer_type *>() )
+ : m_pBuffer( nullptr )
, m_nCurEpoch(0)
, m_bQuit( false )
, m_bReady( false )
public:
/// Constructs empty pointer
exempt_ptr() CDS_NOEXCEPT
- : m_pNode( null_ptr<node_type *>())
+ : m_pNode( nullptr )
{}
/// Releases the pointer
/// Checks if the pointer is \p NULL
bool empty() const CDS_NOEXCEPT
{
- return m_pNode == null_ptr<node_type *>();
+ return m_pNode == nullptr;
}
/// Dereference operator
value_type * operator->() const CDS_NOEXCEPT
{
- return !empty() ? node_to_value_cast()( m_pNode ) : null_ptr<value_type *>();
+                return !empty() ? node_to_value_cast()( m_pNode ) : nullptr;
}
/// Returns a reference to the value
assert( !rcu::is_locked() );
if ( !empty() ) {
rcu::template retire_ptr<disposer>( m_pNode );
- m_pNode = null_ptr<node_type *>();
+ m_pNode = nullptr;
}
}
};
public:
/// Constructs empty pointer
exempt_ptr() CDS_NOEXCEPT
- : m_pNode( null_ptr<node_type *>())
+ : m_pNode( nullptr )
{}
/// Releases the pointer
/// Checks if the pointer is \p NULL
bool empty() const CDS_NOEXCEPT
{
- return m_pNode == null_ptr<node_type *>();
+ return m_pNode == nullptr;
}
/// Dereference operator.
value_type * operator->() const CDS_NOEXCEPT
{
- return !empty() ? m_pNode : null_ptr<value_type *>();
+ return !empty() ? m_pNode : nullptr;
}
/// Returns a reference to the value
assert( !rcu::is_locked() );
if ( !empty() ) {
rcu::template retire_ptr<disposer>( m_pNode );
- m_pNode = null_ptr<node_type *>();
+ m_pNode = nullptr;
}
}
};
namespace cds { namespace gc {
namespace hrc {
- GarbageCollector * GarbageCollector::m_pGC = null_ptr<GarbageCollector *>();
+ GarbageCollector * GarbageCollector::m_pGC = nullptr;
GarbageCollector::GarbageCollector(
size_t nHazardPtrCount,
size_t nMaxThreadCount,
size_t nRetiredNodeArraySize
)
- : m_pListHead( null_ptr<thread_list_node *>()),
+ : m_pListHead( nullptr ),
m_bStatEnabled( true ),
m_nHazardPointerCount( nHazardPtrCount ),
m_nMaxThreadCount( nMaxThreadCount ),
}
delete m_pGC;
- m_pGC = null_ptr<GarbageCollector *>();
+ m_pGC = nullptr;
}
}
assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() );
ContainerNode * pItem;
for ( size_t n = 0; n < pNode->m_arrRetired.capacity(); ++n ) {
- if ( (pItem = pNode->m_arrRetired[n].m_pNode.load(CDS_ATOMIC::memory_order_relaxed)) != null_ptr<ContainerNode *>() ) {
+ if ( (pItem = pNode->m_arrRetired[n].m_pNode.load( CDS_ATOMIC::memory_order_relaxed )) != nullptr ) {
pNode->m_arrRetired[n].m_funcFree( pItem );
//pItem->destroy();
- pNode->m_arrRetired[n].m_pNode.store( null_ptr<ContainerNode *>(), CDS_ATOMIC::memory_order_relaxed );
+ pNode->m_arrRetired[n].m_pNode.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
}
}
assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() );
return hprec;
}
}
- return null_ptr<GarbageCollector::thread_list_node *>();
+ return nullptr;
}
details::thread_descriptor * GarbageCollector::allocateHRCThreadDesc( ThreadGC * pThreadGC )
after thread termination
*/
assert( pNode->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) != cds::OS::nullThreadId() );
- pNode->m_pOwner = null_ptr<ThreadGC *>();
+ pNode->m_pOwner = nullptr;
pNode->m_idOwner.store( cds::OS::nullThreadId(), CDS_ATOMIC::memory_order_release );
assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() );
}
}
// We own threadDesc.
- assert( pRec->m_pOwner == null_ptr<ThreadGC *>() );
+ assert( pRec->m_pOwner == nullptr );
if ( !pRec->m_bFree ) {
// All undeleted pointers is moved to pThis (it is private for the current thread)
details::retired_vector::iterator it = src.begin();
for ( size_t nRetired = 0; it != itEnd; ++nRetired, ++it ) {
- if ( it->m_pNode.load(CDS_ATOMIC::memory_order_relaxed) == null_ptr<ContainerNode *>() )
+ if ( it->m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr )
continue;
dest.push( it->m_pNode.load(CDS_ATOMIC::memory_order_relaxed), it->m_funcFree );
CDS_DEBUG_DO( const cds::OS::ThreadId mainThreadId = cds::OS::getCurrentThreadId() ;)
hplist_node * pHead = m_pListHead.load( CDS_ATOMIC::memory_order_relaxed );
- m_pListHead.store( null_ptr<hplist_node *>(), CDS_ATOMIC::memory_order_relaxed );
+ m_pListHead.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
hplist_node * pNext = NULL;
for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
assert( (nBucketCount & (nBucketCount - 1)) == 0 );
m_Buckets = allocator_type().NewArray( nBucketCount );
- std::fill( m_Buckets, m_Buckets + nBucketCount, null_ptr<item_type>());
+ std::fill( m_Buckets, m_Buckets + nBucketCount, nullptr );
}
~liberate_set()
void insert( retired_ptr_node& node )
{
- node.m_pNext = null_ptr<item_type>();
+ node.m_pNext = nullptr;
item_type& refBucket = bucket( node );
if ( refBucket ) {
item_type p = refBucket;
do {
if ( p->m_ptr.m_p == node.m_ptr.m_p ) {
- assert( node.m_pNextFree == null_ptr<details::retired_ptr_node *>() );
+ assert( node.m_pNextFree == nullptr );
node.m_pNextFree = p->m_pNextFree;
p->m_pNextFree = &node;
{
item_type& refBucket = bucket( ptr );
item_type p = refBucket;
- item_type pPrev = null_ptr<item_type>();
+ item_type pPrev = nullptr;
while ( p ) {
if ( p->m_ptr.m_p == ptr ) {
pPrev->m_pNext = p->m_pNext;
else
refBucket = p->m_pNext;
- p->m_pNext = null_ptr<item_type>();
+ p->m_pNext = nullptr;
return p;
}
pPrev = p;
p = p->m_pNext;
}
- return null_ptr<item_type>();
+ return nullptr;
}
typedef std::pair<item_type, item_type> list_range;
list_range free_all()
{
- item_type pTail = null_ptr<item_type>();
+ item_type pTail = nullptr;
list_range ret = std::make_pair( pTail, pTail );
item_type const * pEndBucket = m_Buckets + m_nBucketCount;
for (;;) {
item_type pNext = pTail->m_pNext;
pTail->m_ptr.free();
- pTail->m_pNext = null_ptr<item_type>();
+ pTail->m_pNext = nullptr;
while ( pTail->m_pNextFree ) {
pTail = pTail->m_pNextFree;
pTail->m_ptr.free();
- pTail->m_pNext = null_ptr<item_type>();
+ pTail->m_pNext = nullptr;
}
if ( pNext )
}
if ( pTail )
- pTail->m_pNextFree = null_ptr<item_type>();
+ pTail->m_pNextFree = nullptr;
ret.second = pTail;
return ret;
}
liberate();
#if 0
- details::retired_ptr_node * pHead = null_ptr<details::retired_ptr_node *>();
- details::retired_ptr_node * pTail = null_ptr<details::retired_ptr_node *>();
+ details::retired_ptr_node * pHead = nullptr;
+ details::retired_ptr_node * pTail = nullptr;
for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(CDS_ATOMIC::memory_order_relaxed)) {
details::guard_data::handoff_ptr h = pGuard->pHandOff;
- pGuard->pHandOff = null_ptr<details::guard_data::handoff_ptr>();
+ pGuard->pHandOff = nullptr;
while ( h ) {
details::guard_data::handoff_ptr pNext = h->m_pNextFree;
if ( h->m_ptr.m_p )
details::retired_ptr_node * pHead = retiredList.first;
while ( pHead ) {
details::retired_ptr_node * pNext = pHead->m_pNext;
- pHead->m_pNextFree = null_ptr<details::retired_ptr_node *>();
+ pHead->m_pNextFree = nullptr;
set.insert( *pHead );
pHead = pNext;
}
m_RetiredAllocator.inc_epoch();
if ( range.first ) {
- assert( range.second != null_ptr<details::retired_ptr_node *>() );
+ assert( range.second != nullptr );
m_RetiredAllocator.free_range( range.first, range.second );
}
else {
#if 0
void GarbageCollector::liberate( details::liberate_set& set )
{
- details::guard_data::handoff_ptr const nullHandOff = null_ptr<details::guard_data::handoff_ptr>();
+ details::guard_data::handoff_ptr const nullHandOff = nullptr;
for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(CDS_ATOMIC::memory_order_acquire) )
{
cds::lock::Auto<details::guard_data::handoff_spin> al( pGuard->spinHandOff );
if ( valGuarded == pGuard->pPost.load(CDS_ATOMIC::memory_order_acquire) ) {
if ( pGuard->pHandOff && pGuard->pHandOff->m_ptr.m_p == pRetired->m_ptr.m_p ) {
- h = nullHandOff ; //null_ptr<details::guard_data::handoff_ptr>();
+ h = nullHandOff ; //nullptr;
details::retired_ptr_node * pTail = pGuard->pHandOff;
while ( pTail->m_pNextFree )
pTail = pTail->m_pNextFree;
namespace cds { namespace urcu { namespace details {
- template<> CDS_EXPORT_API singleton_vtbl * gp_singleton_instance< general_instant_tag >::s_pRCU = null_ptr<singleton_vtbl *>();
- template<> CDS_EXPORT_API singleton_vtbl * gp_singleton_instance< general_buffered_tag >::s_pRCU = null_ptr<singleton_vtbl *>();
- template<> CDS_EXPORT_API singleton_vtbl * gp_singleton_instance< general_threaded_tag >::s_pRCU = null_ptr<singleton_vtbl *>();
+ template<> CDS_EXPORT_API singleton_vtbl * gp_singleton_instance< general_instant_tag >::s_pRCU = nullptr;
+ template<> CDS_EXPORT_API singleton_vtbl * gp_singleton_instance< general_buffered_tag >::s_pRCU = nullptr;
+ template<> CDS_EXPORT_API singleton_vtbl * gp_singleton_instance< general_threaded_tag >::s_pRCU = nullptr;
}}} // namespace cds::urcu::details
#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
namespace cds { namespace urcu { namespace details {
- template<> CDS_EXPORT_API singleton_vtbl * sh_singleton_instance< signal_buffered_tag >::s_pRCU = null_ptr<singleton_vtbl *>();
- template<> CDS_EXPORT_API singleton_vtbl * sh_singleton_instance< signal_threaded_tag >::s_pRCU = null_ptr<singleton_vtbl *>();
+ template<> CDS_EXPORT_API singleton_vtbl * sh_singleton_instance< signal_buffered_tag >::s_pRCU = nullptr;
+ template<> CDS_EXPORT_API singleton_vtbl * sh_singleton_instance< signal_threaded_tag >::s_pRCU = nullptr;
}}} // namespace cds::urcu::details
{
lock_guard l( m_Lock );
if ( m_List.empty() )
- return cds::null_ptr<T *>();
+        return nullptr;
value_type& v = m_List.front();
m_List.pop_front();
return &v;