From: khizmax Date: Thu, 18 Sep 2014 14:48:43 +0000 (+0400) Subject: replace null_ptr<>() with nullptr X-Git-Tag: v2.0.0~348 X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=98aa954aa9d1b640f6f6d81018542eec1c2046bb;p=libcds.git replace null_ptr<>() with nullptr --- diff --git a/cds/algo/elimination.h b/cds/algo/elimination.h index 52595d90..e85207b6 100644 --- a/cds/algo/elimination.h +++ b/cds/algo/elimination.h @@ -50,7 +50,7 @@ namespace cds { namespace algo { /// Releases elimination record for the current thread static inline void clear_record() { - cds::threading::elimination_record().pOp = null_ptr(); + cds::threading::elimination_record().pOp = nullptr; } } // namespace elimination }} // namespace cds::algo diff --git a/cds/algo/elimination_tls.h b/cds/algo/elimination_tls.h index 2b0091f4..545e0a1c 100644 --- a/cds/algo/elimination_tls.h +++ b/cds/algo/elimination_tls.h @@ -19,13 +19,13 @@ namespace cds { namespace algo { namespace elimination { /// Initialization record() - : pOp( null_ptr() ) + : pOp( nullptr ) {} /// Checks if the record is free bool is_free() const { - return pOp == null_ptr(); + return pOp == nullptr; } }; diff --git a/cds/algo/flat_combining.h b/cds/algo/flat_combining.h index 30a8961a..d5b780b6 100644 --- a/cds/algo/flat_combining.h +++ b/cds/algo/flat_combining.h @@ -104,8 +104,8 @@ namespace cds { namespace algo { : nRequest( req_EmptyRecord ) , nState( inactive ) , nAge(0) - , pNext( null_ptr() ) - , pOwner( null_ptr() ) + , pNext( nullptr ) + , pOwner( nullptr ) {} /// Returns the value of \p nRequest field @@ -280,7 +280,7 @@ namespace cds { namespace algo { */ kernel() : m_nCount(0) - , m_pHead( null_ptr< publication_record_type *>()) + , m_pHead( nullptr ) , m_pThreadRec( tls_cleanup ) , m_nCompactFactor( 64 - 1 ) // binary mask , m_nCombinePassCount( 8 ) @@ -294,7 +294,7 @@ namespace cds { namespace algo { ,unsigned int nCombinePassCount ///< Number of combining passes for combiner thread ) : m_nCount(0) - , m_pHead( null_ptr< publication_record_type *>()) + , m_pHead( nullptr ) , m_pThreadRec( tls_cleanup ) , m_nCompactFactor( (unsigned int)( cds::beans::ceil2( nCompactFactor ) - 1 )) // binary mask , m_nCombinePassCount( nCombinePassCount ) @@ -307,7 +307,7 @@ namespace cds { namespace algo { { // mark all publication record as detached for ( publication_record * p = m_pHead; p; p = p->pNext.load( memory_model::memory_order_relaxed )) - p->pOwner = null_ptr(); + p->pOwner = nullptr; } /// Gets publication list record for the current thread @@ -475,7 +475,7 @@ namespace cds { namespace algo { public: /// Initializes an empty iterator object iterator() - : m_pRec( null_ptr()) + : m_pRec( nullptr ) {} /// Copy ctor @@ -554,7 +554,7 @@ namespace cds { namespace algo { void init() { - assert( m_pThreadRec.get() == null_ptr() ); + assert( m_pThreadRec.get() == nullptr ); publication_record_type * pRec = cxx11_allocator().New(); m_pHead = pRec; pRec->pOwner = this; @@ -667,7 +667,7 @@ namespace cds { namespace algo { template bool combining_pass( Container& owner, unsigned int nCurAge ) { - publication_record * pPrev = null_ptr(); + publication_record * pPrev = nullptr; publication_record * p = m_pHead; bool bOpDone = false; while ( p ) { @@ -740,7 +740,7 @@ namespace cds { namespace algo { void compact_list( unsigned int const nCurAge ) { // Thinning publication list - publication_record * pPrev = null_ptr(); + publication_record * pPrev = nullptr; for ( publication_record * p = m_pHead; p; ) { if ( p->nState.load( 
memory_model::memory_order_acquire ) == active && p->nAge + m_nCompactFactor < nCurAge ) { if ( pPrev ) { diff --git a/cds/container/details/make_skip_list_map.h b/cds/container/details/make_skip_list_map.h index b91c9983..a8b5dbde 100644 --- a/cds/container/details/make_skip_list_map.h +++ b/cds/container/details/make_skip_list_map.h @@ -78,9 +78,9 @@ namespace cds { namespace container { namespace details { unsigned char * pMem = base_class::alloc_space( nHeight ); return new( pMem ) node_type( nHeight, - nHeight > 1 ? reinterpret_cast( pMem + base_class::c_nNodeSize ) - : null_ptr(), - key, val ); + nHeight > 1 ? reinterpret_cast( pMem + base_class::c_nNodeSize ) : nullptr, + key, val + ); } # ifdef CDS_EMPLACE_SUPPORT template @@ -88,9 +88,10 @@ namespace cds { namespace container { namespace details { { unsigned char * pMem = base_class::alloc_space( nHeight ); return new( pMem ) - node_type( nHeight, nHeight > 1 ? reinterpret_cast( pMem + base_class::c_nNodeSize ) - : null_ptr(), - std::forward(args)... ); + node_type( nHeight, + nHeight > 1 ? reinterpret_cast( pMem + base_class::c_nNodeSize ) : nullptr, + std::forward(args)... + ); } # endif }; diff --git a/cds/container/ellen_bintree_map_rcu.h b/cds/container/ellen_bintree_map_rcu.h index 6634814f..42aa769d 100644 --- a/cds/container/ellen_bintree_map_rcu.h +++ b/cds/container/ellen_bintree_map_rcu.h @@ -642,7 +642,7 @@ namespace cds { namespace container { value_type * get( Q const& key ) const { leaf_node * pNode = base_class::get( key ); - return pNode ? &pNode->m_Value : null_ptr(); + return pNode ? &pNode->m_Value : nullptr; } /// Finds \p key with \p pred predicate and return the item found @@ -659,7 +659,7 @@ namespace cds { namespace container { { leaf_node * pNode = base_class::get_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >()); - return pNode ? &pNode->m_Value : null_ptr(); + return pNode ? &pNode->m_Value : nullptr; } /// Clears the map diff --git a/cds/container/ellen_bintree_set_rcu.h b/cds/container/ellen_bintree_set_rcu.h index 7ca083e1..e606ec3b 100644 --- a/cds/container/ellen_bintree_set_rcu.h +++ b/cds/container/ellen_bintree_set_rcu.h @@ -703,7 +703,7 @@ namespace cds { namespace container { value_type * get( Q const& key ) const { leaf_node * pNode = base_class::get( key ); - return pNode ? &pNode->m_Value : null_ptr(); + return pNode ? &pNode->m_Value : nullptr; } /// Finds \p key with \p pred predicate and return the item found @@ -720,7 +720,7 @@ namespace cds { namespace container { { leaf_node * pNode = base_class::get_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >()); - return pNode ? &pNode->m_Value : null_ptr(); + return pNode ? &pNode->m_Value : nullptr; } /// Clears the set (non-atomic) diff --git a/cds/container/lazy_kvlist_impl.h b/cds/container/lazy_kvlist_impl.h index b6a9b868..4b09260f 100644 --- a/cds/container/lazy_kvlist_impl.h +++ b/cds/container/lazy_kvlist_impl.h @@ -292,21 +292,21 @@ namespace cds { namespace container { key_type const& key() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); - assert( p != null_ptr() ); + assert( p != nullptr ); return p->m_Data.first; } value_ref val() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); - assert( p != null_ptr() ); + assert( p != nullptr ); return p->m_Data.second; } pair_ptr operator ->() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); - return p ? 
&(p->m_Data) : null_ptr(); + return p ? &(p->m_Data) : nullptr; } pair_ref operator *() const @@ -805,7 +805,7 @@ namespace cds { namespace container { //@cond bool insert_node_at( head_type& refHead, node_type * pNode ) { - assert( pNode != null_ptr() ); + assert( pNode != nullptr ); scoped_node_ptr p( pNode ); if ( base_class::insert_at( &refHead, *p )) { diff --git a/cds/container/lazy_kvlist_nogc.h b/cds/container/lazy_kvlist_nogc.h index 73f572d2..5b8b5c4f 100644 --- a/cds/container/lazy_kvlist_nogc.h +++ b/cds/container/lazy_kvlist_nogc.h @@ -100,7 +100,7 @@ namespace cds { namespace container { node_type * m_pItemFound; ensure_functor() - : m_pItemFound( null_ptr() ) + : m_pItemFound( nullptr ) {} void operator ()(bool, node_type& item, node_type& ) @@ -223,21 +223,21 @@ namespace cds { namespace container { key_type const& key() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); - assert( p != null_ptr() ); + assert( p != nullptr ); return p->m_Data.first; } value_ref val() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); - assert( p != null_ptr() ); + assert( p != nullptr ); return p->m_Data.second; } pair_ptr operator ->() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); - return p ? &(p->m_Data) : null_ptr(); + return p ? &(p->m_Data) : nullptr; } pair_ref operator *() const @@ -533,12 +533,12 @@ namespace cds { namespace container { //@cond node_type * insert_node_at( head_type& refHead, node_type * pNode ) { - assert( pNode != null_ptr() ); + assert( pNode != nullptr ); scoped_node_ptr p( pNode ); if ( base_class::insert_at( &refHead, *p )) return p.release(); - return null_ptr(); + return nullptr; } template @@ -563,7 +563,7 @@ namespace cds { namespace container { return pNode.release(); } - return null_ptr(); + return nullptr; } @@ -571,7 +571,7 @@ namespace cds { namespace container { std::pair< node_type *, bool > ensure_at( head_type& refHead, const K& key ) { scoped_node_ptr pNode( alloc_node( key )); - node_type * pItemFound = null_ptr(); + node_type * pItemFound = nullptr; # ifdef CDS_CXX11_LAMBDA_SUPPORT std::pair ret = base_class::ensure_at( &refHead, *pNode, [&pItemFound](bool, node_type& item, node_type&){ pItemFound = &item; } ); @@ -583,7 +583,7 @@ namespace cds { namespace container { if ( ret.first && ret.second ) pNode.release(); - assert( pItemFound != null_ptr() ); + assert( pItemFound != nullptr ); return std::make_pair( pItemFound, ret.second ); } diff --git a/cds/container/lazy_kvlist_rcu.h b/cds/container/lazy_kvlist_rcu.h index bbb592f9..dcc9de86 100644 --- a/cds/container/lazy_kvlist_rcu.h +++ b/cds/container/lazy_kvlist_rcu.h @@ -303,21 +303,21 @@ namespace cds { namespace container { key_type const& key() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); - assert( p != null_ptr() ); + assert( p != nullptr ); return p->m_Data.first; } value_ref val() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); - assert( p != null_ptr() ); + assert( p != nullptr ); return p->m_Data.second; } pair_ptr operator ->() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); - return p ? &(p->m_Data) : null_ptr(); + return p ? 
&(p->m_Data) : nullptr; } pair_ref operator *() const @@ -821,7 +821,7 @@ namespace cds { namespace container { //@cond bool insert_node_at( head_type& refHead, node_type * pNode ) { - assert( pNode != null_ptr() ); + assert( pNode != nullptr ); scoped_node_ptr p( pNode ); if ( base_class::insert_at( &refHead, *p )) { @@ -936,7 +936,7 @@ namespace cds { namespace container { value_type * get_at( head_type& refHead, K const& val, Compare cmp ) const { node_type * pNode = base_class::get_at( &refHead, val, cmp ); - return pNode ? &pNode->m_Data : null_ptr(); + return pNode ? &pNode->m_Data : nullptr; } //@endcond diff --git a/cds/container/lazy_list_impl.h b/cds/container/lazy_list_impl.h index 62793198..8b675db3 100644 --- a/cds/container/lazy_list_impl.h +++ b/cds/container/lazy_list_impl.h @@ -302,7 +302,7 @@ namespace cds { namespace container { value_ptr operator ->() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); - return p ? &(p->m_Value) : null_ptr(); + return p ? &(p->m_Value) : nullptr; } value_ref operator *() const @@ -825,7 +825,7 @@ namespace cds { namespace container { //@cond bool insert_node_at( head_type& refHead, node_type * pNode ) { - assert( pNode != null_ptr() ); + assert( pNode != nullptr ); scoped_node_ptr p( pNode ); if ( base_class::insert_at( &refHead, *pNode )) { diff --git a/cds/container/lazy_list_nogc.h b/cds/container/lazy_list_nogc.h index b10f44c7..642931ab 100644 --- a/cds/container/lazy_list_nogc.h +++ b/cds/container/lazy_list_nogc.h @@ -84,7 +84,7 @@ namespace cds { namespace container { node_type * m_pItemFound; ensure_functor() - : m_pItemFound( null_ptr() ) + : m_pItemFound( nullptr ) {} void operator ()(bool, node_type& item, node_type& ) @@ -185,7 +185,7 @@ namespace cds { namespace container { value_ptr operator ->() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); - return p ? &(p->m_Value) : null_ptr(); + return p ? &(p->m_Value) : nullptr; } value_ref operator *() const @@ -411,12 +411,12 @@ namespace cds { namespace container { //@cond node_type * insert_node_at( head_type& refHead, node_type * pNode ) { - assert( pNode != null_ptr() ); + assert( pNode != nullptr ); scoped_node_ptr p( pNode ); if ( base_class::insert_at( &refHead, *p )) return p.release(); - return null_ptr(); + return nullptr; } template @@ -436,7 +436,7 @@ namespace cds { namespace container { std::pair< node_type *, bool > ensure_at( head_type& refHead, Q const& val ) { scoped_node_ptr pNode( alloc_node( val )); - node_type * pItemFound = null_ptr(); + node_type * pItemFound = nullptr; # ifdef CDS_CXX11_LAMBDA_SUPPORT std::pair ret = base_class::ensure_at( &refHead, *pNode, @@ -446,7 +446,7 @@ namespace cds { namespace container { std::pair ret = base_class::ensure_at( &refHead, *pNode, boost::ref(func) ); pItemFound = func.m_pItemFound; # endif - assert( pItemFound != null_ptr() ); + assert( pItemFound != nullptr ); if ( ret.first && ret.second ) pNode.release(); diff --git a/cds/container/lazy_list_rcu.h b/cds/container/lazy_list_rcu.h index 4332a8f1..f93d2d5c 100644 --- a/cds/container/lazy_list_rcu.h +++ b/cds/container/lazy_list_rcu.h @@ -313,7 +313,7 @@ namespace cds { namespace container { value_ptr operator ->() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); - return p ? &(p->m_Value) : null_ptr(); + return p ? 
&(p->m_Value) : nullptr; } value_ref operator *() const @@ -854,7 +854,7 @@ namespace cds { namespace container { //@cond bool insert_node_at( head_type& refHead, node_type * pNode ) { - assert( pNode != null_ptr() ); + assert( pNode != nullptr ); scoped_node_ptr p( pNode ); if ( base_class::insert_at( &refHead, *pNode )) { @@ -986,7 +986,7 @@ namespace cds { namespace container { value_type * get_at( head_type& refHead, Q const& val, Compare cmp ) const { node_type * pNode = base_class::get_at( &refHead, val, cmp ); - return pNode ? &pNode->m_Value : null_ptr(); + return pNode ? &pNode->m_Value : nullptr; } //@endcond diff --git a/cds/container/michael_deque.h b/cds/container/michael_deque.h index 1d80e502..da9b7cb8 100644 --- a/cds/container/michael_deque.h +++ b/cds/container/michael_deque.h @@ -192,7 +192,7 @@ namespace cds { namespace container { bool push_node_back( node_type * pNode ) { - assert( pNode != null_ptr()); + assert( pNode != nullptr ); scoped_node_ptr p(pNode); if ( base_class::push_back( *pNode ) ) { @@ -204,7 +204,7 @@ namespace cds { namespace container { bool push_node_front( node_type * pNode ) { - assert( pNode != null_ptr()); + assert( pNode != nullptr ); scoped_node_ptr p(pNode); if ( base_class::push_front( *pNode ) ) { @@ -353,7 +353,7 @@ namespace cds { namespace container { */ bool pop_back() { - return base_class::pop_back() != null_ptr(); + return base_class::pop_back() != nullptr; } /// Pops back side a value using copy functor @@ -406,7 +406,7 @@ namespace cds { namespace container { */ bool pop_front() { - return base_class::pop_front() != null_ptr(); + return base_class::pop_front() != nullptr; } /// Pops front side a value using copy functor diff --git a/cds/container/michael_kvlist_impl.h b/cds/container/michael_kvlist_impl.h index 02cd37fc..f67727a6 100644 --- a/cds/container/michael_kvlist_impl.h +++ b/cds/container/michael_kvlist_impl.h @@ -278,14 +278,14 @@ namespace cds { namespace container { key_type const& key() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); - assert( p != null_ptr() ); + assert( p != nullptr ); return p->m_Data.first; } pair_ptr operator ->() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); - return p ? &(p->m_Data) : null_ptr(); + return p ? 
&(p->m_Data) : nullptr; } pair_ref operator *() const @@ -297,7 +297,7 @@ namespace cds { namespace container { value_ref val() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); - assert( p != null_ptr() ); + assert( p != nullptr ); return p->m_Data.second; } @@ -793,7 +793,7 @@ namespace cds { namespace container { //@cond bool insert_node_at( head_type& refHead, node_type * pNode ) { - assert( pNode != null_ptr() ); + assert( pNode != nullptr ); scoped_node_ptr p( pNode ); if ( base_class::insert_at( refHead, *pNode )) { p.release(); diff --git a/cds/container/michael_kvlist_nogc.h b/cds/container/michael_kvlist_nogc.h index e814a3b7..33a5017c 100644 --- a/cds/container/michael_kvlist_nogc.h +++ b/cds/container/michael_kvlist_nogc.h @@ -101,7 +101,7 @@ namespace cds { namespace container { node_type * m_pItemFound; ensure_functor() - : m_pItemFound( null_ptr() ) + : m_pItemFound( nullptr ) {} void operator ()(bool, node_type& item, node_type& ) @@ -214,21 +214,21 @@ namespace cds { namespace container { key_type const& key() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); - assert( p != null_ptr() ); + assert( p != nullptr ); return p->m_Data.first; } value_ref val() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); - assert( p != null_ptr() ); + assert( p != nullptr ); return p->m_Data.second; } pair_ptr operator ->() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); - return p ? &(p->m_Data) : null_ptr(); + return p ? &(p->m_Data) : nullptr; } pair_ref operator *() const @@ -517,11 +517,11 @@ namespace cds { namespace container { //@cond node_type * insert_node_at( head_type& refHead, node_type * pNode ) { - assert( pNode != null_ptr() ); + assert( pNode != nullptr ); scoped_node_ptr p( pNode ); if ( base_class::insert_at( refHead, *pNode )) return p.release(); - return null_ptr(); + return nullptr; } template @@ -545,14 +545,14 @@ namespace cds { namespace container { cds::unref(f)( pNode->m_Data ); return pNode.release(); } - return null_ptr(); + return nullptr; } template std::pair< node_type *, bool > ensure_at( head_type& refHead, const K& key ) { scoped_node_ptr pNode( alloc_node( key )); - node_type * pItemFound = null_ptr(); + node_type * pItemFound = nullptr; # ifdef CDS_CXX11_LAMBDA_SUPPORT std::pair ret = base_class::ensure_at( refHead, *pNode, [&pItemFound](bool, node_type& item, node_type&){ pItemFound = &item; }); @@ -561,7 +561,7 @@ namespace cds { namespace container { std::pair ret = base_class::ensure_at( refHead, *pNode, boost::ref(func) ); pItemFound = func.m_pItemFound; # endif - assert( pItemFound != null_ptr() ); + assert( pItemFound != nullptr ); if ( ret.first && ret.second ) pNode.release(); diff --git a/cds/container/michael_kvlist_rcu.h b/cds/container/michael_kvlist_rcu.h index 175df490..1092ed16 100644 --- a/cds/container/michael_kvlist_rcu.h +++ b/cds/container/michael_kvlist_rcu.h @@ -290,14 +290,14 @@ namespace cds { namespace container { key_type const& key() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); - assert( p != null_ptr() ); + assert( p != nullptr ); return p->m_Data.first; } pair_ptr operator ->() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); - return p ? &(p->m_Data) : null_ptr(); + return p ? 
&(p->m_Data) : nullptr; } pair_ref operator *() const @@ -309,7 +309,7 @@ namespace cds { namespace container { value_ref val() const { typename iterator_base::value_ptr p = iterator_base::operator ->(); - assert( p != null_ptr() ); + assert( p != nullptr ); return p->m_Data.second; } @@ -805,7 +805,7 @@ namespace cds { namespace container { //@cond bool insert_node_at( head_type& refHead, node_type * pNode ) { - assert( pNode != null_ptr() ); + assert( pNode != nullptr ); scoped_node_ptr p( pNode ); if ( base_class::insert_at( refHead, *pNode )) { p.release(); @@ -918,7 +918,7 @@ namespace cds { namespace container { value_type * get_at( head_type& refHead, K const& val, Compare cmp ) const { node_type * pNode = base_class::get_at( refHead, val, cmp ); - return pNode ? &pNode->m_Data : null_ptr(); + return pNode ? &pNode->m_Data : nullptr; } //@endcond diff --git a/cds/container/michael_list_nogc.h b/cds/container/michael_list_nogc.h index bd07cff4..ed02c4e6 100644 --- a/cds/container/michael_list_nogc.h +++ b/cds/container/michael_list_nogc.h @@ -395,12 +395,12 @@ namespace cds { namespace container { //@cond node_type * insert_node_at( head_type& refHead, node_type * pNode ) { - assert( pNode != null_ptr() ); + assert( pNode != nullptr ); scoped_node_ptr p(pNode); if ( base_class::insert_at( refHead, *pNode )) return p.release(); - return null_ptr(); + return nullptr; } template @@ -413,7 +413,7 @@ namespace cds { namespace container { std::pair< node_type *, bool > ensure_at( head_type& refHead, const Q& val ) { scoped_node_ptr pNode( alloc_node( val )); - node_type * pItemFound = null_ptr(); + node_type * pItemFound = nullptr; # ifdef CDS_CXX11_LAMBDA_SUPPORT std::pair ret = base_class::ensure_at( refHead, *pNode, [&pItemFound](bool, node_type& item, node_type&) { pItemFound = &item; }); @@ -422,7 +422,7 @@ namespace cds { namespace container { std::pair ret = base_class::ensure_at( refHead, *pNode, boost::ref(func) ); pItemFound = func.m_pItemFound; # endif - assert( pItemFound != null_ptr() ); + assert( pItemFound != nullptr ); if ( ret.first && ret.second ) pNode.release(); diff --git a/cds/container/michael_list_rcu.h b/cds/container/michael_list_rcu.h index f16a487f..91295e1a 100644 --- a/cds/container/michael_list_rcu.h +++ b/cds/container/michael_list_rcu.h @@ -956,7 +956,7 @@ namespace cds { namespace container { value_type * get_at( head_type& refHead, Q const& val, Compare cmp ) const { node_type * pNode = base_class::get_at( refHead, val, cmp ); - return pNode ? &pNode->m_Value : null_ptr(); + return pNode ? 
&pNode->m_Value : nullptr; } //@endcond diff --git a/cds/container/michael_map.h b/cds/container/michael_map.h index a6c5d761..102aaf31 100644 --- a/cds/container/michael_map.h +++ b/cds/container/michael_map.h @@ -253,14 +253,14 @@ namespace cds { namespace container { /// Dereference operator pair_ptr operator ->() const { - assert( base_class::m_pCurBucket != null_ptr() ); + assert( base_class::m_pCurBucket != nullptr ); return base_class::m_itList.operator ->(); } /// Dereference operator pair_ref operator *() const { - assert( base_class::m_pCurBucket != null_ptr() ); + assert( base_class::m_pCurBucket != nullptr ); return base_class::m_itList.operator *(); } diff --git a/cds/container/michael_map_nogc.h b/cds/container/michael_map_nogc.h index 7cb0f2d5..90880666 100644 --- a/cds/container/michael_map_nogc.h +++ b/cds/container/michael_map_nogc.h @@ -137,14 +137,14 @@ namespace cds { namespace container { /// Dereference operator pair_ptr operator ->() const { - assert( base_class::m_pCurBucket != null_ptr() ); + assert( base_class::m_pCurBucket != nullptr ); return base_class::m_itList.operator ->(); } /// Dereference operator pair_ref operator *() const { - assert( base_class::m_pCurBucket != null_ptr() ); + assert( base_class::m_pCurBucket != nullptr ); return base_class::m_itList.operator *(); } diff --git a/cds/container/michael_map_rcu.h b/cds/container/michael_map_rcu.h index eaabe397..9e829e65 100644 --- a/cds/container/michael_map_rcu.h +++ b/cds/container/michael_map_rcu.h @@ -178,14 +178,14 @@ namespace cds { namespace container { /// Dereference operator pair_ptr operator ->() const { - assert( base_class::m_pCurBucket != null_ptr() ); + assert( base_class::m_pCurBucket != nullptr ); return base_class::m_itList.operator ->(); } /// Dereference operator pair_ref operator *() const { - assert( base_class::m_pCurBucket != null_ptr() ); + assert( base_class::m_pCurBucket != nullptr ); return base_class::m_itList.operator *(); } diff --git a/cds/container/rwqueue.h b/cds/container/rwqueue.h index 7533682e..e2c66b8c 100644 --- a/cds/container/rwqueue.h +++ b/cds/container/rwqueue.h @@ -82,18 +82,18 @@ namespace cds { namespace container { value_type m_value ; ///< Value stored in the node node_type( value_type const& v ) - : m_pNext(null_ptr()) + : m_pNext( nullptr ) , m_value(v) {} node_type() - : m_pNext( null_ptr() ) + : m_pNext( nullptr ) {} # ifdef CDS_EMPLACE_SUPPORT template node_type( Args&&... args ) - : m_pNext(null_ptr()) + : m_pNext( nullptr ) , m_value( std::forward(args)...) 
{} # endif @@ -147,7 +147,7 @@ namespace cds { namespace container { bool enqueue_node( node_type * p ) { - assert( p != null_ptr()); + assert( p != nullptr ); { auto_lock lock( m_TailLock ); m_pTail = @@ -269,7 +269,7 @@ namespace cds { namespace container { auto_lock lock( m_HeadLock ); pNode = m_pHead; node_type * pNewHead = pNode->m_pNext; - if ( pNewHead == null_ptr() ) + if ( pNewHead == nullptr ) return false; unref(f)( dest, pNewHead->m_value ); m_pHead = pNewHead; @@ -321,7 +321,7 @@ namespace cds { namespace container { bool empty() const { auto_lock lock( m_HeadLock ); - return m_pHead->m_pNext == null_ptr(); + return m_pHead->m_pNext == nullptr; } /// Clears queue @@ -329,7 +329,7 @@ namespace cds { namespace container { { auto_lock lockR( m_HeadLock ); auto_lock lockW( m_TailLock ); - while ( m_pHead->m_pNext != null_ptr() ) { + while ( m_pHead->m_pNext != nullptr ) { node_type * pHead = m_pHead; m_pHead = m_pHead->m_pNext; free_node( pHead ); diff --git a/cds/container/skip_list_base.h b/cds/container/skip_list_base.h index c52f0155..c753e1ce 100644 --- a/cds/container/skip_list_base.h +++ b/cds/container/skip_list_base.h @@ -166,7 +166,7 @@ namespace cds { namespace container { static void free_space( unsigned char * p, unsigned int nHeight ) { - assert( p != null_ptr() ); + assert( p != nullptr ); if ( nHeight == 1 ) node_allocator_type().deallocate( reinterpret_cast(p), 1 ); else @@ -179,7 +179,7 @@ namespace cds { namespace container { { unsigned char * pMem = alloc_space( nHeight ); return new( pMem ) - node_type( nHeight, nHeight > 1 ? reinterpret_cast( pMem + c_nNodeSize ) : null_ptr(), v ); + node_type( nHeight, nHeight > 1 ? reinterpret_cast(pMem + c_nNodeSize) : nullptr, v ); } # ifdef CDS_EMPLACE_SUPPORT @@ -188,14 +188,14 @@ namespace cds { namespace container { { unsigned char * pMem = alloc_space( nHeight ); return new( pMem ) - node_type( nHeight, nHeight > 1 ? reinterpret_cast( pMem + c_nNodeSize ): null_ptr(), + node_type( nHeight, nHeight > 1 ? reinterpret_cast(pMem + c_nNodeSize) : nullptr, std::forward(args)... ); } # endif void Delete( node_type * p ) { - assert( p != null_ptr() ); + assert( p != nullptr ); unsigned int nHeight = p->height(); node_allocator_type().destroy( p ); diff --git a/cds/container/skip_list_map_rcu.h b/cds/container/skip_list_map_rcu.h index f15309ae..68336c77 100644 --- a/cds/container/skip_list_map_rcu.h +++ b/cds/container/skip_list_map_rcu.h @@ -165,7 +165,7 @@ namespace cds { namespace container { value_type * to_value_ptr( node_type * pNode ) const CDS_NOEXCEPT { - return pNode ? &pNode->m_Value : null_ptr(); + return pNode ? &pNode->m_Value : nullptr; } //@endcond diff --git a/cds/container/skip_list_set_nogc.h b/cds/container/skip_list_set_nogc.h index 0dcee9e0..4c669577 100644 --- a/cds/container/skip_list_set_nogc.h +++ b/cds/container/skip_list_set_nogc.h @@ -362,7 +362,7 @@ namespace cds { namespace container { value_type * get_min() const { node_type * pNode = base_class::get_min(); - return pNode ? &pNode->m_Value : null_ptr(); + return pNode ? &pNode->m_Value : nullptr; } /// Gets maximum key from the set @@ -372,7 +372,7 @@ namespace cds { namespace container { value_type * get_max() const { node_type * pNode = base_class::get_max(); - return pNode ? &pNode->m_Value : null_ptr(); + return pNode ? 
&pNode->m_Value : nullptr; } /// Clears the set (non-atomic) diff --git a/cds/container/skip_list_set_rcu.h b/cds/container/skip_list_set_rcu.h index a94aba44..a00db0de 100644 --- a/cds/container/skip_list_set_rcu.h +++ b/cds/container/skip_list_set_rcu.h @@ -196,7 +196,7 @@ namespace cds { namespace container { value_type * to_value_ptr( node_type * pNode ) const CDS_NOEXCEPT { - return pNode ? &pNode->m_Value : null_ptr(); + return pNode ? &pNode->m_Value : nullptr; } //@endcond diff --git a/cds/container/split_list_set.h b/cds/container/split_list_set.h index ffdb9a57..ff176fd8 100644 --- a/cds/container/split_list_set.h +++ b/cds/container/split_list_set.h @@ -215,7 +215,7 @@ namespace cds { namespace container { bool insert_node( node_type * pNode ) { - assert( pNode != null_ptr() ); + assert( pNode != nullptr ); scoped_node_ptr p(pNode); if ( base_class::insert( *pNode ) ) { diff --git a/cds/container/split_list_set_nogc.h b/cds/container/split_list_set_nogc.h index 2dfaa74a..ccee6600 100644 --- a/cds/container/split_list_set_nogc.h +++ b/cds/container/split_list_set_nogc.h @@ -241,7 +241,7 @@ namespace cds { namespace container { //@cond iterator insert_node( node_type * pNode ) { - assert( pNode != null_ptr() ); + assert( pNode != nullptr ); scoped_node_ptr p(pNode); iterator it( base_class::insert_( *pNode )); diff --git a/cds/container/split_list_set_rcu.h b/cds/container/split_list_set_rcu.h index 1080adc5..7ca37017 100644 --- a/cds/container/split_list_set_rcu.h +++ b/cds/container/split_list_set_rcu.h @@ -261,7 +261,7 @@ namespace cds { namespace container { bool insert_node( node_type * pNode ) { - assert( pNode != null_ptr() ); + assert( pNode != nullptr ); scoped_node_ptr p(pNode); if ( base_class::insert( *pNode ) ) { @@ -912,7 +912,7 @@ namespace cds { namespace container { value_type * get( Q const& val ) { node_type * pNode = base_class::get( val ); - return pNode ? &pNode->m_Value : null_ptr(); + return pNode ? &pNode->m_Value : nullptr; } /// Finds the key \p val and return the item found @@ -928,7 +928,7 @@ namespace cds { namespace container { value_type * get_with( Q const& val, Less pred ) { node_type * pNode = base_class::get_with( val, typename maker::template predicate_wrapper::type()); - return pNode ? &pNode->m_Value : null_ptr(); + return pNode ? &pNode->m_Value : nullptr; } /// Clears the set (non-atomic) diff --git a/cds/details/defs.h b/cds/details/defs.h index f8a7e5f4..813c7ab8 100644 --- a/cds/details/defs.h +++ b/cds/details/defs.h @@ -469,16 +469,6 @@ namespace cds { virtual const char * what( ) const throw() { return _msg; } \ } - - //@cond - // This template function should be replaced with nullptr keyword when all compilers will support it - template - static inline CDS_CONSTEXPR T null_ptr() CDS_NOEXCEPT - { - return reinterpret_cast( NULL ); - } - //@endcond - } // namespace cds diff --git a/cds/details/marked_ptr.h b/cds/details/marked_ptr.h index 3645e052..24eed5d3 100644 --- a/cds/details/marked_ptr.h +++ b/cds/details/marked_ptr.h @@ -33,7 +33,7 @@ namespace cds { public: /// Constructs null marked pointer. The flag is cleared. CDS_CONSTEXPR marked_ptr() CDS_NOEXCEPT - : m_ptr( null_ptr() ) + : m_ptr( nullptr ) {} /// Constructs marked pointer with \p ptr value. The least bit(s) of \p ptr is the flag. 
@@ -340,7 +340,7 @@ CDS_CXX11_ATOMIC_BEGIN_NAMESPACE } CDS_CONSTEXPR atomic() CDS_NOEXCEPT - : m_atomic( cds::null_ptr() ) + : m_atomic( nullptr ) {} CDS_CONSTEXPR explicit atomic(marked_ptr val) CDS_NOEXCEPT diff --git a/cds/gc/details/retired_ptr.h b/cds/gc/details/retired_ptr.h index 92d84d30..da19d1e9 100644 --- a/cds/gc/details/retired_ptr.h +++ b/cds/gc/details/retired_ptr.h @@ -63,8 +63,8 @@ namespace cds { namespace gc { assert( m_p != NULL ); m_funcFree( m_p ); - CDS_STRICT_DO( m_p = null_ptr() ); - CDS_STRICT_DO( m_funcFree = null_ptr()); + CDS_STRICT_DO( m_p = nullptr ); + CDS_STRICT_DO( m_funcFree = nullptr ); } }; diff --git a/cds/gc/guarded_ptr.h b/cds/gc/guarded_ptr.h index be93c565..d59baeed 100644 --- a/cds/gc/guarded_ptr.h +++ b/cds/gc/guarded_ptr.h @@ -118,7 +118,7 @@ namespace cds { namespace gc { /// Checks if the guarded pointer is \p NULL bool empty() const CDS_NOEXCEPT { - return m_guard.template get() == null_ptr(); + return m_guard.template get() == nullptr; } /// Clears guarded pointer @@ -198,7 +198,7 @@ namespace cds { namespace gc { bool empty() const CDS_NOEXCEPT { - return m_guard.template get() == null_ptr(); + return m_guard.template get() == nullptr; } void release() CDS_NOEXCEPT diff --git a/cds/gc/hrc/details/hrc_retired.h b/cds/gc/hrc/details/hrc_retired.h index 79262891..cee87104 100644 --- a/cds/gc/hrc/details/hrc_retired.h +++ b/cds/gc/hrc/details/hrc_retired.h @@ -24,8 +24,8 @@ namespace cds { namespace gc { namespace hrc { /// Default ctor retired_node() - : m_pNode( null_ptr() ) - , m_funcFree( null_ptr() ) + : m_pNode( nullptr ) + , m_funcFree( nullptr ) , m_nNextFree(0) , m_nClaim(0) , m_bDone( false ) @@ -62,7 +62,7 @@ namespace cds { namespace gc { namespace hrc { /// Invokes destructor function for the pointer void free() { - assert( m_funcFree != null_ptr() ); + assert( m_funcFree != nullptr ); m_funcFree( m_pNode.load( CDS_ATOMIC::memory_order_relaxed )); } }; @@ -116,7 +116,7 @@ namespace cds { namespace gc { namespace hrc { size_t nCount = 0; const size_t nCapacity = capacity(); for ( size_t i = 0; i < nCapacity; ++i ) { - if ( m_arr[i].m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) != null_ptr() ) + if ( m_arr[i].m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) != nullptr ) ++nCount; } return nCount; @@ -128,7 +128,7 @@ namespace cds { namespace gc { namespace hrc { assert( !isFull()); size_t n = m_nFreeList; - assert( m_arr[n].m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) == null_ptr() ); + assert( m_arr[n].m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr ); m_nFreeList = m_arr[n].m_nNextFree; CDS_DEBUG_DO( m_arr[n].m_nNextFree = m_nEndFreeList ; ) m_arr[n].set( p, pFunc ); @@ -138,7 +138,7 @@ namespace cds { namespace gc { namespace hrc { void pop( size_t n ) { assert( n < capacity() ); - m_arr[n].m_pNode.store( null_ptr(), CDS_ATOMIC::memory_order_release ); + m_arr[n].m_pNode.store( nullptr, CDS_ATOMIC::memory_order_release ); m_arr[n].m_nNextFree = m_nFreeList; m_nFreeList = n; } diff --git a/cds/gc/hrc/hrc.h b/cds/gc/hrc/hrc.h index 9fe760f7..09844d25 100644 --- a/cds/gc/hrc/hrc.h +++ b/cds/gc/hrc/hrc.h @@ -300,15 +300,15 @@ namespace cds { namespace gc { //@cond thread_list_node( const GarbageCollector& HzpMgr ) : thread_descriptor( HzpMgr ), - m_pNext(null_ptr()), - m_pOwner( null_ptr() ), + m_pNext( nullptr ), + m_pOwner( nullptr ), m_idOwner( cds::OS::nullThreadId() ), m_bFree( false ) {} ~thread_list_node() { - assert( m_pOwner == null_ptr() ); + assert( m_pOwner == nullptr ); assert( 
m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) == cds::OS::nullThreadId() ); } //@endcond @@ -371,7 +371,7 @@ namespace cds { namespace gc { /// Checks if global GC object is constructed and may be used static bool isUsed() { - return m_pGC != null_ptr(); + return m_pGC != nullptr; } /// Get max count of hazard pointers as defined in @ref Construct call @@ -480,7 +480,7 @@ namespace cds { namespace gc { //@cond ThreadGC() : m_gc( GarbageCollector::instance() ) - , m_pDesc( null_ptr() ) + , m_pDesc( nullptr ) {} ~ThreadGC() { @@ -489,7 +489,7 @@ namespace cds { namespace gc { //@endcond /// Checks if thread GC is initialized - bool isInitialized() const { return m_pDesc != null_ptr() ; } + bool isInitialized() const { return m_pDesc != nullptr; } /// Initialization. Multiple calls is allowed void init() @@ -505,7 +505,7 @@ namespace cds { namespace gc { cleanUpLocal(); m_gc.Scan( this ); details::thread_descriptor * pRec = m_pDesc; - m_pDesc = null_ptr(); + m_pDesc = nullptr; if ( pRec ) m_gc.retireHRCThreadDesc( pRec ); } @@ -515,14 +515,14 @@ namespace cds { namespace gc { /// Initializes HP guard \p guard details::HPGuard& allocGuard() { - assert( m_pDesc != null_ptr() ); + assert( m_pDesc != nullptr ); return m_pDesc->m_hzp.alloc(); } /// Frees HP guard \p guard void freeGuard( details::HPGuard& guard ) { - assert( m_pDesc != null_ptr() ); + assert( m_pDesc != nullptr ); m_pDesc->m_hzp.free( guard ); } @@ -530,7 +530,7 @@ namespace cds { namespace gc { template void allocGuard( details::HPArray& arr ) { - assert( m_pDesc != null_ptr() ); + assert( m_pDesc != nullptr ); m_pDesc->m_hzp.alloc( arr ); } @@ -538,7 +538,7 @@ namespace cds { namespace gc { template void freeGuard( details::HPArray& arr ) { - assert( m_pDesc != null_ptr() ); + assert( m_pDesc != nullptr ); m_pDesc->m_hzp.free( arr ); } diff --git a/cds/gc/hzp/details/hp_alloc.h b/cds/gc/hzp/details/hp_alloc.h index b6e14201..7d9d5f15 100644 --- a/cds/gc/hzp/details/hp_alloc.h +++ b/cds/gc/hzp/details/hp_alloc.h @@ -39,7 +39,7 @@ namespace cds { public: HPGuardT() CDS_NOEXCEPT - : base_class( null_ptr() ) + : base_class( nullptr ) {} ~HPGuardT() CDS_NOEXCEPT {} @@ -81,7 +81,7 @@ namespace cds { void clear() CDS_NOEXCEPT { // memory order is not necessary here - base_class::store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + base_class::store( nullptr, CDS_ATOMIC::memory_order_relaxed ); //CDS_COMPILER_RW_BARRIER; } }; diff --git a/cds/gc/ptb/ptb.h b/cds/gc/ptb/ptb.h index a3e01684..075deafe 100644 --- a/cds/gc/ptb/ptb.h +++ b/cds/gc/ptb/ptb.h @@ -90,25 +90,25 @@ namespace cds { namespace gc { //@cond guard_data() - : pPost( null_ptr()) + : pPost( nullptr ) #if 0 - , pHandOff( null_ptr() ) + , pHandOff( nullptr ) #endif - , pGlobalNext( null_ptr() ) - , pNextFree( null_ptr() ) - , pThreadNext( null_ptr() ) + , pGlobalNext( nullptr ) + , pNextFree( nullptr ) + , pThreadNext( nullptr ) {} void init() { - pPost.store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed ); } //@endcond /// Checks if the guard is free, that is, it does not contain any pointer guarded bool isFree() const { - return pPost.load( CDS_ATOMIC::memory_order_acquire ) == null_ptr(); + return pPost.load( CDS_ATOMIC::memory_order_acquire ) == nullptr; } }; @@ -152,15 +152,15 @@ namespace cds { namespace gc { public: // Default ctor guard_allocator() - : m_GuardList( null_ptr() ) - , m_FreeGuardList( null_ptr() ) + : m_GuardList( nullptr ) + , m_FreeGuardList( nullptr ) {} // Destructor 
~guard_allocator() { guard_data * pNext; - for ( guard_data * pData = m_GuardList.load( CDS_ATOMIC::memory_order_relaxed ); pData != null_ptr(); pData = pNext ) { + for ( guard_data * pData = m_GuardList.load( CDS_ATOMIC::memory_order_relaxed ); pData != nullptr; pData = pNext ) { pNext = pData->pGlobalNext.load( CDS_ATOMIC::memory_order_relaxed ); m_GuardAllocator.Delete( pData ); } @@ -191,7 +191,7 @@ namespace cds { namespace gc { */ void free( guard_data * pGuard ) { - pGuard->pPost.store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed ); cds::lock::scoped_lock al( m_freeListLock ); pGuard->pNextFree.store( m_FreeGuardList.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed ); @@ -222,7 +222,7 @@ namespace cds { namespace gc { pLast = p; } - pLast->pNextFree.store( pLast->pThreadNext = null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + pLast->pNextFree.store( pLast->pThreadNext = nullptr, CDS_ATOMIC::memory_order_relaxed ); return pHead; } @@ -235,11 +235,11 @@ namespace cds { namespace gc { */ void freeList( guard_data * pList ) { - assert( pList != null_ptr() ); + assert( pList != nullptr ); guard_data * pLast = pList; while ( pLast->pThreadNext ) { - pLast->pPost.store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + pLast->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed ); guard_data * p; pLast->pNextFree.store( p = pLast->pThreadNext, CDS_ATOMIC::memory_order_relaxed ); pLast = p; @@ -271,13 +271,13 @@ namespace cds { namespace gc { public: //@cond retired_ptr_buffer() - : m_pHead( null_ptr() ) + : m_pHead( nullptr ) , m_nItemCount(0) {} ~retired_ptr_buffer() { - assert( m_pHead.load(CDS_ATOMIC::memory_order_relaxed) == null_ptr()); + assert( m_pHead.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr ); } //@endcond @@ -305,7 +305,7 @@ namespace cds { namespace gc { privatize_result privatize() { privatize_result res; - res.first = m_pHead.exchange( null_ptr(), CDS_ATOMIC::memory_order_acq_rel ); + res.first = m_pHead.exchange( nullptr, CDS_ATOMIC::memory_order_acq_rel ); // Item counter is needed only as a threshold for liberate function // So, we may clear the item counter without synchronization with m_pHead @@ -360,7 +360,7 @@ namespace cds { namespace gc { item * pLastItem = pNew->items + m_nItemPerBlock - 1; for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) { pItem->m_pNextFree = pItem + 1; - CDS_STRICT_DO( pItem->m_pNext = null_ptr() ); + CDS_STRICT_DO( pItem->m_pNext = nullptr ); } // link new block to block list @@ -395,12 +395,12 @@ namespace cds { namespace gc { public: //@cond retired_ptr_pool() - : m_pBlockListHead(null_ptr()) + : m_pBlockListHead( nullptr ) , m_nCurEpoch(0) - , m_pGlobalFreeHead( null_ptr()) + , m_pGlobalFreeHead( nullptr ) { for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i ) - m_pEpochFree[i].store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + m_pEpochFree[i].store( nullptr, CDS_ATOMIC::memory_order_relaxed ); allocNewBlock(); } @@ -456,7 +456,7 @@ namespace cds { namespace gc { } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); success: - CDS_STRICT_DO( pItem->m_pNextFree = null_ptr() ); + CDS_STRICT_DO( pItem->m_pNextFree = nullptr ); return *pItem; } @@ -474,8 +474,8 @@ namespace cds { namespace gc { */ void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail ) { - assert( pHead != null_ptr() ); 
- assert( pTail != null_ptr() ); + assert( pHead != nullptr ); + assert( pTail != nullptr ); unsigned int nEpoch; item * pCurHead; @@ -495,7 +495,7 @@ namespace cds { namespace gc { public: /// Initialize empty guard. guard() - : m_pGuard(null_ptr()) + : m_pGuard( nullptr ) {} /// Object destructor, does nothing @@ -505,7 +505,7 @@ namespace cds { namespace gc { /// Guards pointer \p p void set( void * p ) { - assert( m_pGuard != null_ptr() ); + assert( m_pGuard != nullptr ); m_pGuard->pPost.store( p, CDS_ATOMIC::memory_order_release ); //CDS_COMPILER_RW_BARRIER; } @@ -513,8 +513,8 @@ namespace cds { namespace gc { /// Clears the guard void clear() { - assert( m_pGuard != null_ptr() ); - m_pGuard->pPost.store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + assert( m_pGuard != nullptr ); + m_pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed ); CDS_STRICT_DO( CDS_COMPILER_RW_BARRIER ); } @@ -537,7 +537,7 @@ namespace cds { namespace gc { /// Set guard data void set_guard( details::guard_data * pGuard ) { - assert( m_pGuard == null_ptr() ); + assert( m_pGuard == nullptr ); m_pGuard = pGuard; } @@ -761,7 +761,7 @@ namespace cds { namespace gc { */ static GarbageCollector& instance() { - if ( m_pManager == null_ptr() ) + if ( m_pManager == nullptr ) throw PTBManagerEmpty(); return *m_pManager; } @@ -769,7 +769,7 @@ namespace cds { namespace gc { /// Checks if global GC object is constructed and may be used static bool isUsed() CDS_NOEXCEPT { - return m_pManager != null_ptr(); + return m_pManager != nullptr; } public: @@ -882,8 +882,8 @@ namespace cds { namespace gc { public: ThreadGC() : m_gc( GarbageCollector::instance() ) - , m_pList( null_ptr() ) - , m_pFree( null_ptr() ) + , m_pList( nullptr ) + , m_pFree( nullptr ) {} /// Dtor calls fini() @@ -907,7 +907,7 @@ namespace cds { namespace gc { if ( m_pList ) { m_gc.freeGuardList( m_pList ); m_pList = - m_pFree = null_ptr(); + m_pFree = nullptr; } } @@ -915,7 +915,7 @@ namespace cds { namespace gc { /// Initializes guard \p g void allocGuard( Guard& g ) { - assert( m_pList != null_ptr() ); + assert( m_pList != nullptr ); if ( m_pFree ) { g.m_pGuard = m_pFree; m_pFree = m_pFree->pNextFree.load(CDS_ATOMIC::memory_order_relaxed); @@ -930,8 +930,8 @@ namespace cds { namespace gc { /// Frees guard \p g void freeGuard( Guard& g ) { - assert( m_pList != null_ptr() ); - g.m_pGuard->pPost.store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + assert( m_pList != nullptr ); + g.m_pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed ); g.m_pGuard->pNextFree.store( m_pFree, CDS_ATOMIC::memory_order_relaxed ); m_pFree = g.m_pGuard; } @@ -940,7 +940,7 @@ namespace cds { namespace gc { template void allocGuard( GuardArray& arr ) { - assert( m_pList != null_ptr() ); + assert( m_pList != nullptr ); size_t nCount = 0; while ( m_pFree && nCount < Count ) { @@ -961,16 +961,16 @@ namespace cds { namespace gc { template void freeGuard( GuardArray& arr ) { - assert( m_pList != null_ptr() ); + assert( m_pList != nullptr ); details::guard_data * pGuard; for ( size_t i = 0; i < Count - 1; ++i ) { pGuard = arr[i].get_guard(); - pGuard->pPost.store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed ); pGuard->pNextFree.store( arr[i+1].get_guard(), CDS_ATOMIC::memory_order_relaxed ); } pGuard = arr[Count-1].get_guard(); - pGuard->pPost.store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed ); pGuard->pNextFree.store( 
m_pFree, CDS_ATOMIC::memory_order_relaxed ); m_pFree = arr[0].get_guard(); } diff --git a/cds/intrusive/basket_queue.h b/cds/intrusive/basket_queue.h index 2d4f204d..4ee0f5b8 100644 --- a/cds/intrusive/basket_queue.h +++ b/cds/intrusive/basket_queue.h @@ -42,7 +42,7 @@ namespace cds { namespace intrusive { atomic_marked_ptr m_pNext ; ///< pointer to the next node in the container node() - : m_pNext( null_ptr() ) + : m_pNext( nullptr ) {} }; @@ -60,13 +60,13 @@ namespace cds { namespace intrusive { atomic_marked_ptr m_pNext ; ///< pointer to the next node in the container node() - : m_pNext(null_ptr()) + : m_pNext( nullptr ) {} protected: virtual void cleanUp( cds::gc::hrc::ThreadGC * pGC ) { - assert( pGC != null_ptr() ); + assert( pGC != nullptr ); typename gc::template GuardArray<2> aGuards( *pGC ); while ( true ) { @@ -424,7 +424,7 @@ namespace cds { namespace intrusive { { void operator()( value_type * p ) { - assert( p != null_ptr()); + assert( p != nullptr ); BasketQueue::clear_links( node_traits::to_node_ptr(p) ); disposer()( p ); @@ -581,7 +581,7 @@ namespace cds { namespace intrusive { static void clear_links( node_type * pNode ) { - pNode->m_pNext.store( marked_ptr( null_ptr()), memory_model::memory_order_release ); + pNode->m_pNext.store( marked_ptr( nullptr ), memory_model::memory_order_release ); } void dispose_node( node_type * p ) @@ -597,8 +597,8 @@ namespace cds { namespace intrusive { public: /// Initializes empty queue BasketQueue() - : m_pHead( null_ptr() ) - , m_pTail( null_ptr() ) + : m_pHead( nullptr ) + , m_pTail( nullptr ) , m_nMaxHops( 3 ) { // GC and node_type::gc must be the same @@ -629,7 +629,7 @@ namespace cds { namespace intrusive { clear(); node_type * pHead = m_pHead.load(memory_model::memory_order_relaxed).ptr(); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); { node_type * pNext = pHead->m_pNext.load( memory_model::memory_order_relaxed ).ptr(); @@ -643,8 +643,8 @@ namespace cds { namespace intrusive { //m_pTail.store( marked_ptr( pHead ), memory_model::memory_order_relaxed ); } - m_pHead.store( marked_ptr( null_ptr()), memory_model::memory_order_relaxed ); - m_pTail.store( marked_ptr( null_ptr()), memory_model::memory_order_relaxed ); + m_pHead.store( marked_ptr( nullptr ), memory_model::memory_order_relaxed ); + m_pTail.store( marked_ptr( nullptr ), memory_model::memory_order_relaxed ); dispose_node( pHead ); } @@ -686,7 +686,7 @@ namespace cds { namespace intrusive { marked_ptr pNext = t->m_pNext.load(memory_model::memory_order_acquire ); - if ( pNext.ptr() == null_ptr() ) { + if ( pNext.ptr() == nullptr ) { pNew->m_pNext.store( marked_ptr(), memory_model::memory_order_release ); if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr(pNew), memory_model::memory_order_release, memory_model::memory_order_relaxed ) ) { if ( !m_pTail.compare_exchange_strong( t, marked_ptr(pNew), memory_model::memory_order_release, memory_model::memory_order_relaxed )) @@ -730,7 +730,7 @@ namespace cds { namespace intrusive { marked_ptr p; bool bTailOk = true; - while ( ( p = pNext->m_pNext.load(memory_model::memory_order_relaxed) ).ptr() != null_ptr() ) + while ( (p = pNext->m_pNext.load( memory_model::memory_order_relaxed )).ptr() != nullptr ) { bTailOk = m_pTail.load( memory_model::memory_order_relaxed ) == t; if ( !bTailOk ) @@ -769,7 +769,7 @@ namespace cds { namespace intrusive { if ( do_dequeue( res, true )) return node_traits::to_value_ptr( *res.pNext ); - return null_ptr(); + return nullptr; } /// Synonym for \ref cds_intrusive_BasketQueue_enqueue 
"enqueue" function diff --git a/cds/intrusive/cuckoo_set.h b/cds/intrusive/cuckoo_set.h index 99d1c4be..59333fe6 100644 --- a/cds/intrusive/cuckoo_set.h +++ b/cds/intrusive/cuckoo_set.h @@ -121,7 +121,7 @@ namespace cds { namespace intrusive { node * m_pNext; CDS_CONSTEXPR node() CDS_NOEXCEPT - : m_pNext( null_ptr() ) + : m_pNext( nullptr ) {} void store_hash( size_t * ) @@ -131,12 +131,12 @@ namespace cds { namespace intrusive { { // This node type does not store hash values!!! assert(false); - return null_ptr(); + return nullptr; } void clear() { - m_pNext = null_ptr(); + m_pNext = nullptr; } }; @@ -153,7 +153,7 @@ namespace cds { namespace intrusive { size_t m_arrHash[ hash_array_size ]; node() CDS_NOEXCEPT - : m_pNext( null_ptr() ) + : m_pNext( nullptr ) { memset( m_arrHash, 0, sizeof(m_arrHash)); } @@ -170,7 +170,7 @@ namespace cds { namespace intrusive { void clear() { - m_pNext = null_ptr(); + m_pNext = nullptr; } }; @@ -194,7 +194,7 @@ namespace cds { namespace intrusive { { // This node type does not store hash values!!! assert(false); - return null_ptr(); + return nullptr; } void clear() @@ -447,7 +447,7 @@ namespace cds { namespace intrusive { } } else { - std::fill( m_guard, m_guard + c_nArity, null_ptr() ); + std::fill( m_guard, m_guard + c_nArity, nullptr ); } policy.m_Stat.onCellTryLock(); } @@ -1201,7 +1201,7 @@ namespace cds { namespace intrusive { public: iterator() - : pNode( null_ptr()) + : pNode( nullptr ) {} iterator( node_type * p ) : pNode( p ) @@ -1228,7 +1228,7 @@ namespace cds { namespace intrusive { } node_type& operator*() { - assert( pNode != null_ptr()); + assert( pNode != nullptr ); return *pNode; } @@ -1252,7 +1252,7 @@ namespace cds { namespace intrusive { public: bucket_entry() - : pHead( null_ptr()) + : pHead( nullptr ) , nSize(0) { static_assert(( std::is_same::value ), "Incompatible node type" ); @@ -1307,7 +1307,7 @@ namespace cds { namespace intrusive { } nSize = 0; - pHead = null_ptr(); + pHead = nullptr; } template @@ -1321,7 +1321,7 @@ namespace cds { namespace intrusive { } nSize = 0; - pHead = null_ptr(); + pHead = nullptr; } unsigned int size() const @@ -1373,7 +1373,7 @@ namespace cds { namespace intrusive { public: iterator() - : pArr( null_ptr() ) + : pArr( nullptr ) {} iterator( node_type ** p ) : pArr(p) @@ -1390,13 +1390,13 @@ namespace cds { namespace intrusive { node_type * operator->() { - assert( pArr != null_ptr()); + assert( pArr != nullptr ); return *pArr; } node_type& operator*() { - assert( pArr != null_ptr()); - assert( *pArr != null_ptr()); + assert( pArr != nullptr ); + assert( *pArr != nullptr ); return *(*pArr); } @@ -2034,7 +2034,7 @@ namespace cds { namespace intrusive { bucket_table_allocator alloc; for ( unsigned int i = 0; i < c_nArity; ++i ) { alloc.Delete( pTable[i], nCapacity ); - pTable[i] = null_ptr(); + pTable[i] = nullptr; } } void free_bucket_tables() @@ -2078,7 +2078,7 @@ namespace cds { namespace intrusive { } m_Stat.onEraseFailed(); - return null_ptr(); + return nullptr; } template diff --git a/cds/intrusive/details/dummy_node_holder.h b/cds/intrusive/details/dummy_node_holder.h index 5d4120a0..af21db42 100644 --- a/cds/intrusive/details/dummy_node_holder.h +++ b/cds/intrusive/details/dummy_node_holder.h @@ -37,9 +37,9 @@ namespace cds { namespace intrusive { namespace details { struct dummy_node_disposer { void operator()( node_type * p ) { - assert( p != null_ptr()); + assert( p != nullptr ); - p->m_pNext.store( null_ptr(), CDS_ATOMIC::memory_order_release ); + p->m_pNext.store( nullptr, 
CDS_ATOMIC::memory_order_release ); allocator_type().Delete( p ); } }; diff --git a/cds/intrusive/details/ellen_bintree_base.h b/cds/intrusive/details/ellen_bintree_base.h index d24cb7b7..8c6e615d 100644 --- a/cds/intrusive/details/ellen_bintree_base.h +++ b/cds/intrusive/details/ellen_bintree_base.h @@ -73,7 +73,7 @@ namespace cds { namespace intrusive { update_desc * pNextRetire ; // for local retired list (RCU) update_desc() - : pNextRetire( null_ptr() ) + : pNextRetire( nullptr ) {} //@endcond }; @@ -210,8 +210,8 @@ namespace cds { namespace intrusive { /// Default ctor internal_node() : base_class( true ) - , m_pLeft( null_ptr() ) - , m_pRight( null_ptr() ) + , m_pLeft( nullptr ) + , m_pRight( nullptr ) , m_pUpdate( update_ptr() ) , m_nEmptyUpdate(0) {} diff --git a/cds/intrusive/ellen_bintree_impl.h b/cds/intrusive/ellen_bintree_impl.h index 30f36175..5bfdd0a0 100644 --- a/cds/intrusive/ellen_bintree_impl.h +++ b/cds/intrusive/ellen_bintree_impl.h @@ -225,9 +225,9 @@ namespace cds { namespace intrusive { bool bRightParent ; // true if pParent is right child of pGrandParent, false otherwise search_result() - :pGrandParent( null_ptr() ) - ,pParent( null_ptr() ) - ,pLeaf( null_ptr() ) + :pGrandParent( nullptr ) + , pParent( nullptr ) + , pLeaf( nullptr ) ,bRightLeaf( false ) ,bRightParent( false ) {} @@ -909,8 +909,8 @@ namespace cds { namespace intrusive { void unsafe_clear() { while ( true ) { - internal_node * pParent = null_ptr< internal_node *>(); - internal_node * pGrandParent = null_ptr(); + internal_node * pParent = nullptr; + internal_node * pGrandParent = nullptr; tree_node * pLeaf = const_cast( &m_Root ); // Get leftmost leaf @@ -1008,7 +1008,7 @@ namespace cds { namespace intrusive { // See whether pParent->m_pUpdate has not been changed if ( pParent->m_pUpdate.load( memory_model::memory_order_acquire ) != updParent ) { // update has been changed - returns nullptr as a flag to search retry - return null_ptr(); + return nullptr; } if ( p && p->is_leaf() ) @@ -1032,7 +1032,7 @@ namespace cds { namespace intrusive { bool search( search_result& res, KeyValue const& key, Compare cmp ) const { internal_node * pParent; - internal_node * pGrandParent = null_ptr(); + internal_node * pGrandParent = nullptr; update_ptr updParent; update_ptr updGrandParent; bool bRightLeaf; @@ -1041,9 +1041,9 @@ namespace cds { namespace intrusive { int nCmp = 0; retry: - pParent = null_ptr< internal_node *>(); - //pGrandParent = null_ptr(); - updParent = null_ptr(); + pParent = nullptr; + //pGrandParent = nullptr; + updParent = nullptr; bRightLeaf = false; tree_node * pLeaf = const_cast( &m_Root ); while ( pLeaf->is_internal() ) { @@ -1096,9 +1096,9 @@ namespace cds { namespace intrusive { update_ptr updGrandParent; retry: - pParent = null_ptr< internal_node *>(); - pGrandParent = null_ptr(); - updParent = null_ptr(); + pParent = nullptr; + pGrandParent = nullptr; + updParent = nullptr; tree_node * pLeaf = const_cast( &m_Root ); while ( pLeaf->is_internal() ) { res.guards.copy( search_result::Guard_GrandParent, search_result::Guard_Parent ); @@ -1149,9 +1149,9 @@ namespace cds { namespace intrusive { bool bRightParent = false; retry: - pParent = null_ptr< internal_node *>(); - pGrandParent = null_ptr(); - updParent = null_ptr(); + pParent = nullptr; + pGrandParent = nullptr; + updParent = nullptr; bRightLeaf = false; tree_node * pLeaf = const_cast( &m_Root ); while ( pLeaf->is_internal() ) { @@ -1238,7 +1238,7 @@ namespace cds { namespace intrusive { { // precondition: all member of res must be guarded 
- assert( res.pGrandParent != null_ptr() ); + assert( res.pGrandParent != nullptr ); return static_cast( @@ -1394,7 +1394,7 @@ namespace cds { namespace intrusive { template bool erase_( Q const& val, Compare cmp, Equal eq, Func f ) { - update_desc * pOp = null_ptr(); + update_desc * pOp = nullptr; search_result res; for ( ;; ) { @@ -1428,7 +1428,7 @@ namespace cds { namespace intrusive { cds::unref(f)( *node_traits::to_value_ptr( res.pLeaf )); break; } - pOp = null_ptr(); + pOp = nullptr; } } } @@ -1476,7 +1476,7 @@ namespace cds { namespace intrusive { bool extract_max_( typename gc::Guard& guard ) { - update_desc * pOp = null_ptr(); + update_desc * pOp = nullptr; search_result res; for ( ;; ) { @@ -1508,7 +1508,7 @@ namespace cds { namespace intrusive { { if ( help_delete( pOp )) break; - pOp = null_ptr(); + pOp = nullptr; } } } @@ -1524,7 +1524,7 @@ namespace cds { namespace intrusive { bool extract_min_( typename gc::Guard& guard ) { - update_desc * pOp = null_ptr(); + update_desc * pOp = nullptr; search_result res; for ( ;; ) { @@ -1556,7 +1556,7 @@ namespace cds { namespace intrusive { { if ( help_delete( pOp )) break; - pOp = null_ptr(); + pOp = nullptr; } } } diff --git a/cds/intrusive/ellen_bintree_rcu.h b/cds/intrusive/ellen_bintree_rcu.h index 52ba3ae2..eac4ee06 100644 --- a/cds/intrusive/ellen_bintree_rcu.h +++ b/cds/intrusive/ellen_bintree_rcu.h @@ -27,7 +27,7 @@ namespace cds { namespace intrusive { /// Constructs leaf (bIntrenal == false) or internal (bInternal == true) node explicit base_node( bool bInternal ) : basic_node( bInternal ? internal : 0 ) - , m_pNextRetired( null_ptr() ) + , m_pNextRetired( nullptr ) {} }; @@ -533,9 +533,9 @@ namespace cds { namespace intrusive { bool bRightParent ; // true if pParent is right child of pGrandParent, false otherwise search_result() - :pGrandParent( null_ptr() ) - ,pParent( null_ptr() ) - ,pLeaf( null_ptr() ) + :pGrandParent( nullptr ) + , pParent( nullptr ) + , pLeaf( nullptr ) ,bRightLeaf( false ) ,bRightParent( false ) {} @@ -610,8 +610,8 @@ namespace cds { namespace intrusive { {} forward_iterator() - : m_pUpdate( null_ptr() ) - , m_pNode( null_ptr< tree_node *>() ) + : m_pUpdate( nullptr ) + , m_pNode( nullptr ) {} cds::urcu::retired_ptr operator *() @@ -630,7 +630,7 @@ namespace cds { namespace intrusive { reinterpret_cast( free_internal_node ) ); } } - return cds::urcu::retired_ptr( null_ptr(), + return cds::urcu::retired_ptr( nullptr, reinterpret_cast( free_update_desc ) ); } @@ -656,8 +656,8 @@ namespace cds { namespace intrusive { public: retired_list() - : pUpdateHead( null_ptr() ) - , pNodeHead( null_ptr() ) + : pUpdateHead( nullptr ) + , pNodeHead( nullptr ) {} ~retired_list() @@ -1329,8 +1329,8 @@ namespace cds { namespace intrusive { rcu_lock l; while ( true ) { - internal_node * pParent = null_ptr< internal_node *>(); - internal_node * pGrandParent = null_ptr(); + internal_node * pParent = nullptr; + internal_node * pGrandParent = nullptr; tree_node * pLeaf = const_cast( &m_Root ); // Get leftmost leaf @@ -1454,7 +1454,7 @@ namespace cds { namespace intrusive { bool check_delete_precondition( search_result& res ) { - assert( res.pGrandParent != null_ptr() ); + assert( res.pGrandParent != nullptr ); return static_cast( res.bRightParent @@ -1538,7 +1538,7 @@ namespace cds { namespace intrusive { assert( gc::is_locked() ); internal_node * pParent; - internal_node * pGrandParent = null_ptr(); + internal_node * pGrandParent = nullptr; tree_node * pLeaf; update_ptr updParent; update_ptr updGrandParent; @@ -1548,9 
+1548,9 @@ namespace cds { namespace intrusive { int nCmp = 0; retry: - pParent = null_ptr(); + pParent = nullptr; pLeaf = const_cast( &m_Root ); - updParent = null_ptr(); + updParent = nullptr; bRightLeaf = false; while ( pLeaf->is_internal() ) { pGrandParent = pParent; @@ -1591,13 +1591,13 @@ namespace cds { namespace intrusive { assert( gc::is_locked() ); internal_node * pParent; - internal_node * pGrandParent = null_ptr(); + internal_node * pGrandParent = nullptr; tree_node * pLeaf; update_ptr updParent; update_ptr updGrandParent; retry: - pParent = null_ptr< internal_node *>(); + pParent = nullptr; pLeaf = const_cast( &m_Root ); while ( pLeaf->is_internal() ) { pGrandParent = pParent; @@ -1635,7 +1635,7 @@ namespace cds { namespace intrusive { assert( gc::is_locked() ); internal_node * pParent; - internal_node * pGrandParent = null_ptr(); + internal_node * pGrandParent = nullptr; tree_node * pLeaf; update_ptr updParent; update_ptr updGrandParent; @@ -1643,7 +1643,7 @@ namespace cds { namespace intrusive { bool bRightParent = false; retry: - pParent = null_ptr< internal_node *>(); + pParent = nullptr; pLeaf = const_cast( &m_Root ); bRightLeaf = false; while ( pLeaf->is_internal() ) { @@ -1691,7 +1691,7 @@ namespace cds { namespace intrusive { check_deadlock_policy::check(); retired_list updRetire; - update_desc * pOp = null_ptr(); + update_desc * pOp = nullptr; search_result res; { @@ -1729,7 +1729,7 @@ namespace cds { namespace intrusive { cds::unref(f)( *node_traits::to_value_ptr( res.pLeaf )); break; } - pOp = null_ptr(); + pOp = nullptr; } else { // updGP has been changed by CAS @@ -1766,7 +1766,7 @@ namespace cds { namespace intrusive { check_deadlock_policy::check(); retired_list updRetire; - update_desc * pOp = null_ptr(); + update_desc * pOp = nullptr; search_result res; { @@ -1803,7 +1803,7 @@ namespace cds { namespace intrusive { ptr = node_traits::to_value_ptr( res.pLeaf ); break; } - pOp = null_ptr(); + pOp = nullptr; } else { // updGP has been changed by CAS @@ -1828,7 +1828,7 @@ namespace cds { namespace intrusive { check_deadlock_policy::check(); retired_list updRetire; - update_desc * pOp = null_ptr(); + update_desc * pOp = nullptr; search_result res; { @@ -1866,7 +1866,7 @@ namespace cds { namespace intrusive { result = node_traits::to_value_ptr( res.pLeaf ); break; } - pOp = null_ptr(); + pOp = nullptr; } else { // updGP has been changed by CAS @@ -1889,7 +1889,7 @@ namespace cds { namespace intrusive { check_deadlock_policy::check(); retired_list updRetire; - update_desc * pOp = null_ptr(); + update_desc * pOp = nullptr; search_result res; { @@ -1927,7 +1927,7 @@ namespace cds { namespace intrusive { result = node_traits::to_value_ptr( res.pLeaf ); break; } - pOp = null_ptr(); + pOp = nullptr; } else { // updGP has been changed by CAS @@ -1998,7 +1998,7 @@ namespace cds { namespace intrusive { } m_Stat.onFindFailed(); - return null_ptr(); + return nullptr; } diff --git a/cds/intrusive/fcqueue.h b/cds/intrusive/fcqueue.h index ac56e021..87a1436d 100644 --- a/cds/intrusive/fcqueue.h +++ b/cds/intrusive/fcqueue.h @@ -184,7 +184,7 @@ namespace cds { namespace intrusive { value_type * dequeue() { fc_record * pRec = m_FlatCombining.acquire_record(); - pRec->pVal = null_ptr(); + pRec->pVal = nullptr; if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_deq, pRec, *this ); diff --git a/cds/intrusive/fcstack.h b/cds/intrusive/fcstack.h index 517b0208..2cd01940 100644 --- a/cds/intrusive/fcstack.h +++ b/cds/intrusive/fcstack.h @@ -176,7 +176,7 @@ namespace cds { 
namespace intrusive { value_type * pop() { fc_record * pRec = m_FlatCombining.acquire_record(); - pRec->pVal = null_ptr(); + pRec->pVal = nullptr; if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_pop, pRec, *this ); diff --git a/cds/intrusive/lazy_list_base.h b/cds/intrusive/lazy_list_base.h index ce6ab5a3..c6558cbb 100644 --- a/cds/intrusive/lazy_list_base.h +++ b/cds/intrusive/lazy_list_base.h @@ -50,7 +50,7 @@ namespace cds { namespace intrusive { /// Default ctor node() - : m_pNext( null_ptr()) + : m_pNext( nullptr ) {} }; @@ -177,7 +177,7 @@ namespace cds { namespace intrusive { */ static void is_empty( node_type const * pNode ) { - assert( pNode->m_pNext.load(CDS_ATOMIC::memory_order_relaxed) == null_ptr()); + assert( pNode->m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr ); } }; diff --git a/cds/intrusive/lazy_list_hrc.h b/cds/intrusive/lazy_list_hrc.h index dc0c8eb8..9d397ed0 100644 --- a/cds/intrusive/lazy_list_hrc.h +++ b/cds/intrusive/lazy_list_hrc.h @@ -30,19 +30,19 @@ namespace cds { namespace intrusive { namespace lazy_list { } node() - : m_pNext( null_ptr() ) + : m_pNext( nullptr ) {} protected: virtual void cleanUp( cds::gc::hrc::ThreadGC * pGC ) { - assert( pGC != null_ptr() ); + assert( pGC != nullptr ); typename gc::GuardArray<2> aGuards( *pGC ); while ( true ) { marked_ptr pNextMarked( aGuards.protect( 0, m_pNext )); node * pNext = pNextMarked.ptr(); - if ( pNext != null_ptr() && pNext->m_bDeleted.load(CDS_ATOMIC::memory_order_acquire) ) { + if ( pNext != nullptr && pNext->m_bDeleted.load( CDS_ATOMIC::memory_order_acquire ) ) { marked_ptr p = aGuards.protect( 1, pNext->m_pNext ); m_pNext.compare_exchange_weak( pNextMarked, p, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ); continue; diff --git a/cds/intrusive/lazy_list_impl.h b/cds/intrusive/lazy_list_impl.h index e97d4ebd..22923819 100644 --- a/cds/intrusive/lazy_list_impl.h +++ b/cds/intrusive/lazy_list_impl.h @@ -326,7 +326,7 @@ namespace cds { namespace intrusive { void retire_node( node_type * pNode ) { - assert( pNode != null_ptr() ); + assert( pNode != nullptr ); gc::template retire( node_traits::to_value_ptr( *pNode ) ); } //@endcond @@ -344,12 +344,12 @@ namespace cds { namespace intrusive { void next() { - assert( m_pNode != null_ptr() ); + assert( m_pNode != nullptr ); if ( m_pNode ) { typename gc::Guard g; node_type * pCur = node_traits::to_node_ptr( m_pNode ); - if ( pCur->m_pNext.load(memory_model::memory_order_relaxed).ptr() != null_ptr() ) { // if pCur is not tail node + if ( pCur->m_pNext.load( memory_model::memory_order_relaxed ).ptr() != nullptr ) { // if pCur is not tail node node_type * pNext; do { pNext = pCur->m_pNext.load(memory_model::memory_order_relaxed).ptr(); @@ -363,7 +363,7 @@ namespace cds { namespace intrusive { void skip_deleted() { - if ( m_pNode != null_ptr() ) { + if ( m_pNode != nullptr ) { typename gc::Guard g; node_type * pNode = node_traits::to_node_ptr( m_pNode ); @@ -390,7 +390,7 @@ namespace cds { namespace intrusive { typedef typename cds::details::make_const_type::reference value_ref; iterator_type() - : m_pNode(null_ptr()) + : m_pNode( nullptr ) {} iterator_type( iterator_type const& src ) @@ -399,7 +399,7 @@ namespace cds { namespace intrusive { m_pNode = m_Guard.assign( src.m_pNode ); } else - m_pNode = null_ptr(); + m_pNode = nullptr; } value_ptr operator ->() const @@ -409,7 +409,7 @@ namespace cds { namespace intrusive { value_ref operator *() const { - assert( m_pNode != null_ptr() ); + assert( m_pNode != nullptr ); 
return *m_pNode; } @@ -941,7 +941,7 @@ namespace cds { namespace intrusive { // split-list support bool insert_aux_node( node_type * pHead, node_type * pNode ) { - assert( pNode != null_ptr() ); + assert( pNode != nullptr ); // Hack: convert node_type to value_type. // In principle, auxiliary node can be non-reducible to value_type @@ -1213,7 +1213,7 @@ namespace cds { namespace intrusive { break; bkoff(); } - assert( pCur.ptr() != null_ptr() ); + assert( pCur.ptr() != nullptr ); } pos.pCur = pCur.ptr(); diff --git a/cds/intrusive/lazy_list_nogc.h b/cds/intrusive/lazy_list_nogc.h index d0e9b5f5..da544691 100644 --- a/cds/intrusive/lazy_list_nogc.h +++ b/cds/intrusive/lazy_list_nogc.h @@ -24,7 +24,7 @@ namespace cds { namespace intrusive { mutable lock_type m_Lock ; ///< Node lock node() - : m_pNext( null_ptr()) + : m_pNext( nullptr ) {} }; } // namespace lazy_list @@ -152,7 +152,7 @@ namespace cds { namespace intrusive { //@cond void clear_links( node_type * pNode ) { - pNode->m_pNext.store( null_ptr(), memory_model::memory_order_relaxed ); + pNode->m_pNext.store( nullptr, memory_model::memory_order_relaxed ); } template @@ -189,11 +189,11 @@ namespace cds { namespace intrusive { void next() { - assert( m_pNode != null_ptr() ); + assert( m_pNode != nullptr ); node_type * pNode = node_traits::to_node_ptr( m_pNode ); node_type * pNext = pNode->m_pNext.load(memory_model::memory_order_relaxed); - if ( pNext != null_ptr() ) + if ( pNext != nullptr ) m_pNode = node_traits::to_value_ptr( pNext ); } @@ -207,7 +207,7 @@ namespace cds { namespace intrusive { typedef typename cds::details::make_const_type::reference value_ref; iterator_type() - : m_pNode(null_ptr()) + : m_pNode( nullptr ) {} iterator_type( const iterator_type& src ) @@ -221,7 +221,7 @@ namespace cds { namespace intrusive { value_ref operator *() const { - assert( m_pNode != null_ptr() ); + assert( m_pNode != nullptr ); return *m_pNode; } @@ -317,7 +317,7 @@ namespace cds { namespace intrusive { clear(); assert( m_Head.m_pNext.load(memory_model::memory_order_relaxed) == &m_Tail ); - m_Head.m_pNext.store( null_ptr(), memory_model::memory_order_relaxed ); + m_Head.m_pNext.store( nullptr, memory_model::memory_order_relaxed ); } /// Inserts new node @@ -525,8 +525,8 @@ namespace cds { namespace intrusive { // split-list support bool insert_aux_node( node_type * pHead, node_type * pNode ) { - assert( pHead != null_ptr() ); - assert( pNode != null_ptr() ); + assert( pHead != nullptr ); + assert( pNode != nullptr ); // Hack: convert node_type to value_type. 
// In principle, auxiliary node can be non-reducible to value_type @@ -628,7 +628,7 @@ namespace cds { namespace intrusive { iterator it = find_at_( pHead, val, cmp ); if ( it != end() ) return &*it; - return null_ptr(); + return nullptr; } template diff --git a/cds/intrusive/lazy_list_rcu.h b/cds/intrusive/lazy_list_rcu.h index 77d8b1d4..32e41b1d 100644 --- a/cds/intrusive/lazy_list_rcu.h +++ b/cds/intrusive/lazy_list_rcu.h @@ -36,7 +36,7 @@ namespace cds { namespace intrusive { /// Default ctor node() - : m_pNext( null_ptr()) + : m_pNext( nullptr ) {} /// Clears internal fields @@ -193,7 +193,7 @@ namespace cds { namespace intrusive { value_type * pFound; get_functor() - : pFound(null_ptr()) + : pFound( nullptr ) {} template @@ -216,7 +216,7 @@ namespace cds { namespace intrusive { struct clear_and_dispose { void operator()( value_type * p ) { - assert( p != null_ptr() ); + assert( p != nullptr ); clear_links( node_traits::to_node_ptr(p)); disposer()( p ); } @@ -267,17 +267,17 @@ namespace cds { namespace intrusive { void next() { - assert( m_pNode != null_ptr() ); + assert( m_pNode != nullptr ); node_type * pNode = node_traits::to_node_ptr( m_pNode ); node_type * pNext = pNode->m_pNext.load(memory_model::memory_order_relaxed).ptr(); - if ( pNext != null_ptr() ) + if ( pNext != nullptr ) m_pNode = node_traits::to_value_ptr( pNext ); } void skip_deleted() { - if ( m_pNode != null_ptr() ) { + if ( m_pNode != nullptr ) { node_type * pNode = node_traits::to_node_ptr( m_pNode ); // Dummy tail node could not be marked @@ -300,7 +300,7 @@ namespace cds { namespace intrusive { typedef typename cds::details::make_const_type::reference value_ref; iterator_type() - : m_pNode(null_ptr()) + : m_pNode( nullptr ) {} iterator_type( iterator_type const& src ) @@ -314,7 +314,7 @@ namespace cds { namespace intrusive { value_ref operator *() const { - assert( m_pNode != null_ptr() ); + assert( m_pNode != nullptr ); return *m_pNode; } @@ -885,8 +885,8 @@ namespace cds { namespace intrusive { // split-list support bool insert_aux_node( node_type * pHead, node_type * pNode ) { - assert( pHead != null_ptr() ); - assert( pNode != null_ptr() ); + assert( pHead != nullptr ); + assert( pNode != nullptr ); // Hack: convert node_type to value_type. // In principle, auxiliary node can be non-reducible to value_type @@ -1118,7 +1118,7 @@ namespace cds { namespace intrusive { if ( nResult ) { if ( nResult > 0 ) return node_traits::to_value_ptr( pos.pCur ); - return null_ptr(); + return nullptr; } } } @@ -1170,12 +1170,12 @@ namespace cds { namespace intrusive { value_type * get_at( node_type * pHead, Q const& val, Compare cmp ) const { # ifdef CDS_CXX11_LAMBDA_SUPPORT - value_type * pFound = null_ptr(); + value_type * pFound = nullptr; return find_at( pHead, val, cmp, [&pFound](value_type& found, Q const& ) { pFound = &found; } ) - ? pFound : null_ptr(); + ? pFound : nullptr; # else get_functor gf; - return find_at( pHead , val, cmp, cds::ref(gf) ) ? gf.pFound : null_ptr(); + return find_at( pHead, val, cmp, cds::ref( gf ) ) ? 
gf.pFound : nullptr; # endif } diff --git a/cds/intrusive/michael_deque.h b/cds/intrusive/michael_deque.h index 8989034c..a4e8df61 100644 --- a/cds/intrusive/michael_deque.h +++ b/cds/intrusive/michael_deque.h @@ -390,7 +390,7 @@ namespace cds { namespace intrusive { { void operator()( value_type * p ) { - assert( p != null_ptr()); + assert( p != nullptr ); MichaelDeque::clear_links( node_traits::to_node_ptr(p) ); disposer()( p ); @@ -479,7 +479,7 @@ namespace cds { namespace intrusive { node_type * pNode; at_functor() - : pNode( null_ptr()) + : pNode( nullptr ) {} void operator()( value_type& v, unsigned int nIdx ) @@ -520,7 +520,7 @@ namespace cds { namespace intrusive { # if defined(CDS_CXX11_LAMBDA_SUPPORT) && !((CDS_COMPILER == CDS_COMPILER_MSVC ||CDS_COMPILER == CDS_COMPILER_INTEL) && _MSC_VER < 1700) // MS VC++2010 bug: error C2955: 'cds::intrusive::node_traits' : use of class template requires template argument list // see declaration of 'cds::intrusive::node_traits' - node_type * pNode = null_ptr(); + node_type * pNode = nullptr; if ( m_set.find( nIdx, [&pNode](value_type& v, unsigned int nIdx) { pNode = node_traits::to_node_ptr(v); @@ -533,7 +533,7 @@ namespace cds { namespace intrusive { if ( m_set.find( nIdx, cds::ref(f) )) return f.pNode; # endif - return null_ptr(); + return nullptr; } }; //@endcond @@ -928,7 +928,7 @@ namespace cds { namespace intrusive { return res.pPopped; } - return null_ptr(); + return nullptr; } /// Pop front @@ -945,7 +945,7 @@ namespace cds { namespace intrusive { return res.pPopped; } - return null_ptr(); + return nullptr; } /// Returns deque's item count @@ -976,7 +976,7 @@ namespace cds { namespace intrusive { */ void clear() { - while ( pop_back() != null_ptr() ); + while ( pop_back() != nullptr ); } /// Returns reference to internal statistics diff --git a/cds/intrusive/michael_list_base.h b/cds/intrusive/michael_list_base.h index 96fff3f3..eccb1d49 100644 --- a/cds/intrusive/michael_list_base.h +++ b/cds/intrusive/michael_list_base.h @@ -36,7 +36,7 @@ namespace cds { namespace intrusive { atomic_marked_ptr m_pNext ; ///< pointer to the next node in the container CDS_CONSTEXPR node() CDS_NOEXCEPT - : m_pNext( null_ptr() ) + : m_pNext( nullptr ) {} }; @@ -129,7 +129,7 @@ namespace cds { namespace intrusive { */ static void is_empty( const node_type * pNode ) { - assert( pNode->m_pNext.load(CDS_ATOMIC::memory_order_relaxed) == null_ptr() ); + assert( pNode->m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr ); } }; diff --git a/cds/intrusive/michael_list_hrc.h b/cds/intrusive/michael_list_hrc.h index bded07f9..b2aea04f 100644 --- a/cds/intrusive/michael_list_hrc.h +++ b/cds/intrusive/michael_list_hrc.h @@ -20,7 +20,7 @@ namespace cds { namespace intrusive { namespace michael_list { atomic_marked_ptr m_pNext ; ///< pointer to the next node in the stack node() - : m_pNext( null_ptr() ) + : m_pNext( nullptr ) {} protected: diff --git a/cds/intrusive/michael_list_impl.h b/cds/intrusive/michael_list_impl.h index bf6a1b9a..479690f1 100644 --- a/cds/intrusive/michael_list_impl.h +++ b/cds/intrusive/michael_list_impl.h @@ -260,13 +260,13 @@ namespace cds { namespace intrusive { //@cond void retire_node( node_type * pNode ) { - assert( pNode != null_ptr() ); + assert( pNode != nullptr ); gc::template retire( node_traits::to_value_ptr( *pNode ) ); } bool link_node( node_type * pNode, position& pos ) { - assert( pNode != null_ptr() ); + assert( pNode != nullptr ); link_checker::is_empty( pNode ); marked_node_ptr cur(pos.pCur); @@ -276,8 +276,8 @@ 
namespace cds { namespace intrusive { bool unlink_node( position& pos ) { - assert( pos.pPrev != null_ptr() ); - assert( pos.pCur != null_ptr() ); + assert( pos.pPrev != nullptr ); + assert( pos.pCur != nullptr ); // Mark the node (logical deleting) marked_node_ptr next(pos.pNext, 0); @@ -320,7 +320,7 @@ namespace cds { namespace intrusive { m_pNode = m_Guard.assign( g.template get() ); } else { - m_pNode = null_ptr(); + m_pNode = nullptr; m_Guard.clear(); } } @@ -334,7 +334,7 @@ namespace cds { namespace intrusive { m_pNode = m_Guard.assign( node_traits::to_value_ptr( p.ptr() ) ); } else { - m_pNode = null_ptr(); + m_pNode = nullptr; m_Guard.clear(); } if ( p == pNode.load(memory_model::memory_order_acquire) ) @@ -347,7 +347,7 @@ namespace cds { namespace intrusive { typedef typename cds::details::make_const_type::reference value_ref; iterator_type() - : m_pNode( null_ptr() ) + : m_pNode( nullptr ) {} iterator_type( iterator_type const& src ) @@ -356,7 +356,7 @@ namespace cds { namespace intrusive { m_pNode = m_Guard.assign( src.m_pNode ); } else - m_pNode = null_ptr(); + m_pNode = nullptr; } value_ptr operator ->() const @@ -366,7 +366,7 @@ namespace cds { namespace intrusive { value_ref operator *() const { - assert( m_pNode != null_ptr() ); + assert( m_pNode != nullptr ); return *m_pNode; } @@ -504,7 +504,7 @@ namespace cds { namespace intrusive { public: /// Default constructor initializes empty list MichaelList() - : m_pHead(null_ptr()) + : m_pHead( nullptr ) { static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); } @@ -875,7 +875,7 @@ namespace cds { namespace intrusive { if ( head.ptr() ) guard.assign( node_traits::to_value_ptr( *head.ptr() )); if ( m_pHead.load(memory_model::memory_order_acquire) == head ) { - if ( head.ptr() == null_ptr() ) + if ( head.ptr() == nullptr ) break; value_type& val = *node_traits::to_value_ptr( *head.ptr() ); unlink( val ); @@ -886,7 +886,7 @@ namespace cds { namespace intrusive { /// Checks if the list is empty bool empty() const { - return m_pHead.load(memory_model::memory_order_relaxed).all() == null_ptr(); + return m_pHead.load( memory_model::memory_order_relaxed ).all() == nullptr; } /// Returns list's item count @@ -913,7 +913,7 @@ namespace cds { namespace intrusive { // split-list support bool insert_aux_node( atomic_node_ptr& refHead, node_type * pNode ) { - assert( pNode != null_ptr() ); + assert( pNode != nullptr ); // Hack: convert node_type to value_type. 
// In principle, auxiliary node can be non-reducible to value_type @@ -1112,7 +1112,7 @@ namespace cds { namespace intrusive { try_again: pPrev = &refHead; - pNext = null_ptr(); + pNext = nullptr; pCur = pPrev->load(memory_model::memory_order_relaxed); pos.guards.assign( position::guard_current_item, node_traits::to_value_ptr( pCur.ptr() ) ); @@ -1120,7 +1120,7 @@ try_again: goto try_again; while ( true ) { - if ( pCur.ptr() == null_ptr() ) { + if ( pCur.ptr() == nullptr ) { pos.pPrev = pPrev; pos.pCur = pCur.ptr(); pos.pNext = pNext.ptr(); @@ -1152,7 +1152,7 @@ try_again: } } else { - assert( pCur.ptr() != null_ptr() ); + assert( pCur.ptr() != nullptr ); int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val ); if ( nCmp >= 0 ) { pos.pPrev = pPrev; diff --git a/cds/intrusive/michael_list_nogc.h b/cds/intrusive/michael_list_nogc.h index 107c04b5..7b8b7990 100644 --- a/cds/intrusive/michael_list_nogc.h +++ b/cds/intrusive/michael_list_nogc.h @@ -25,7 +25,7 @@ namespace cds { namespace intrusive { atomic_ptr m_pNext ; ///< pointer to the next node in the container node() - : m_pNext( null_ptr()) + : m_pNext( nullptr ) {} }; @@ -99,7 +99,7 @@ namespace cds { namespace intrusive { //@cond void clear_links( node_type * pNode ) { - pNode->m_pNext.store( null_ptr(), memory_model::memory_order_release ); + pNode->m_pNext.store( nullptr, memory_model::memory_order_release ); } template @@ -117,7 +117,7 @@ namespace cds { namespace intrusive { bool link_node( node_type * pNode, position& pos ) { - assert( pNode != null_ptr() ); + assert( pNode != nullptr ); link_checker::is_empty( pNode ); pNode->m_pNext.store( pos.pCur, memory_model::memory_order_relaxed ); @@ -140,7 +140,7 @@ namespace cds { namespace intrusive { if ( pNode ) m_pNode = node_traits::to_value_ptr( *pNode ); else - m_pNode = null_ptr(); + m_pNode = nullptr; } } @@ -150,7 +150,7 @@ namespace cds { namespace intrusive { if ( pNode ) m_pNode = node_traits::to_value_ptr( *pNode ); else - m_pNode = null_ptr(); + m_pNode = nullptr; } explicit iterator_type( atomic_node_ptr const& refNode) { @@ -158,7 +158,7 @@ namespace cds { namespace intrusive { if ( pNode ) m_pNode = node_traits::to_value_ptr( *pNode ); else - m_pNode = null_ptr(); + m_pNode = nullptr; } public: @@ -166,7 +166,7 @@ namespace cds { namespace intrusive { typedef typename cds::details::make_const_type::reference value_ref; iterator_type() - : m_pNode(null_ptr()) + : m_pNode( nullptr ) {} iterator_type( const iterator_type& src ) @@ -180,7 +180,7 @@ namespace cds { namespace intrusive { value_ref operator *() const { - assert( m_pNode != null_ptr() ); + assert( m_pNode != nullptr ); return *m_pNode; } @@ -271,7 +271,7 @@ namespace cds { namespace intrusive { public: /// Default constructor initializes empty list MichaelList() - : m_pHead( null_ptr()) + : m_pHead( nullptr ) { static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); } @@ -441,7 +441,7 @@ namespace cds { namespace intrusive { void clear( Disposer disp ) { node_type * pHead = m_pHead.load(memory_model::memory_order_relaxed); - do {} while ( !m_pHead.compare_exchange_weak( pHead, null_ptr(), memory_model::memory_order_relaxed )); + do {} while ( !m_pHead.compare_exchange_weak( pHead, nullptr, memory_model::memory_order_relaxed ) ); while ( pHead ) { node_type * p = pHead->m_pNext.load(memory_model::memory_order_relaxed); @@ -463,7 +463,7 @@ namespace cds { namespace intrusive { /// Checks if the list is empty bool empty() const { - return 
m_pHead.load(memory_model::memory_order_relaxed) == null_ptr(); + return m_pHead.load( memory_model::memory_order_relaxed ) == nullptr; } /// Returns list's item count @@ -490,7 +490,7 @@ namespace cds { namespace intrusive { // split-list support bool insert_aux_node( atomic_node_ptr& refHead, node_type * pNode ) { - assert( pNode != null_ptr() ); + assert( pNode != nullptr ); // Hack: convert node_type to value_type. // In principle, auxiliary node can be non-reducible to value_type @@ -558,7 +558,7 @@ namespace cds { namespace intrusive { position pos; if ( search( refHead, val, cmp, pos ) ) { - assert( pos.pCur != null_ptr() ); + assert( pos.pCur != nullptr ); unref(f)( *node_traits::to_value_ptr( *pos.pCur ), val ); return true; } @@ -571,7 +571,7 @@ namespace cds { namespace intrusive { iterator it = find_at_( refHead, val, cmp ); if ( it != end() ) return &*it; - return null_ptr(); + return nullptr; } template @@ -580,7 +580,7 @@ namespace cds { namespace intrusive { position pos; if ( search( refHead, val, cmp, pos ) ) { - assert( pos.pCur != null_ptr() ); + assert( pos.pCur != nullptr ); return iterator( pos.pCur ); } return end(); @@ -603,7 +603,7 @@ namespace cds { namespace intrusive { try_again: pPrev = &refHead; pCur = pPrev->load(memory_model::memory_order_acquire); - pNext = null_ptr(); + pNext = nullptr; while ( true ) { if ( !pCur ) { @@ -624,7 +624,7 @@ namespace cds { namespace intrusive { goto try_again; } - assert( pCur != null_ptr() ); + assert( pCur != nullptr ); int nCmp = cmp( *node_traits::to_value_ptr( *pCur ), val ); if ( nCmp >= 0 ) { pos.pPrev = pPrev; diff --git a/cds/intrusive/michael_list_rcu.h b/cds/intrusive/michael_list_rcu.h index 61258440..f54c1f21 100644 --- a/cds/intrusive/michael_list_rcu.h +++ b/cds/intrusive/michael_list_rcu.h @@ -117,7 +117,7 @@ namespace cds { namespace intrusive { value_type * pFound; get_functor() - : pFound(null_ptr()) + : pFound( nullptr ) {} template @@ -137,7 +137,7 @@ namespace cds { namespace intrusive { struct clear_and_dispose { void operator()( value_type * p ) { - assert( p != null_ptr() ); + assert( p != nullptr ); clear_links( node_traits::to_node_ptr(p)); disposer()( p ); } @@ -160,7 +160,7 @@ namespace cds { namespace intrusive { bool link_node( node_type * pNode, position& pos ) { - assert( pNode != null_ptr() ); + assert( pNode != nullptr ); link_checker::is_empty( pNode ); marked_node_ptr p( pos.pCur ); @@ -195,7 +195,7 @@ namespace cds { namespace intrusive { { if ( m_pNode ) { node_type * p = node_traits::to_node_ptr( *m_pNode )->m_pNext.load(memory_model::memory_order_relaxed).ptr(); - m_pNode = p ? node_traits::to_value_ptr(p) : null_ptr(); + m_pNode = p ? node_traits::to_value_ptr( p ) : nullptr; } } @@ -205,12 +205,12 @@ namespace cds { namespace intrusive { if ( pNode ) m_pNode = node_traits::to_value_ptr( *pNode ); else - m_pNode = null_ptr(); + m_pNode = nullptr; } explicit iterator_type( atomic_node_ptr const& refNode) { node_type * pNode = refNode.load(memory_model::memory_order_relaxed).ptr(); - m_pNode = pNode ? node_traits::to_value_ptr( *pNode ) : null_ptr(); + m_pNode = pNode ? 
node_traits::to_value_ptr( *pNode ) : nullptr; } public: @@ -218,7 +218,7 @@ namespace cds { namespace intrusive { typedef typename cds::details::make_const_type::reference value_ref; iterator_type() - : m_pNode(null_ptr()) + : m_pNode( nullptr ) {} iterator_type( const iterator_type& src ) @@ -232,7 +232,7 @@ namespace cds { namespace intrusive { value_ref operator *() const { - assert( m_pNode != null_ptr() ); + assert( m_pNode != nullptr ); return *m_pNode; } @@ -323,7 +323,7 @@ namespace cds { namespace intrusive { public: /// Default constructor initializes empty list MichaelList() - : m_pHead( null_ptr()) + : m_pHead( nullptr ) { static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); } @@ -761,7 +761,7 @@ namespace cds { namespace intrusive { /// Check if the list is empty bool empty() const { - return m_pHead.load(memory_model::memory_order_relaxed).all() == null_ptr(); + return m_pHead.load( memory_model::memory_order_relaxed ).all() == nullptr; } /// Returns list's item count @@ -788,7 +788,7 @@ namespace cds { namespace intrusive { // split-list support bool insert_aux_node( atomic_node_ptr& refHead, node_type * pNode ) { - assert( pNode != null_ptr() ); + assert( pNode != nullptr ); // Hack: convert node_type to value_type. // In principle, auxiliary node can be non-reducible to value_type @@ -956,7 +956,7 @@ namespace cds { namespace intrusive { for (;;) { if ( !search( refHead, val, pos, cmp ) ) - return null_ptr(); + return nullptr; if ( !unlink_node( pos )) { bkoff(); continue; @@ -974,7 +974,7 @@ namespace cds { namespace intrusive { rcu_lock l( bLock ); if ( search( refHead, val, pos, cmp ) ) { - assert( pos.pCur != null_ptr() ); + assert( pos.pCur != nullptr ); unref(f)( *node_traits::to_value_ptr( *pos.pCur ), val ); return true; } @@ -992,14 +992,14 @@ namespace cds { namespace intrusive { value_type * get_at( atomic_node_ptr& refHead, Q const& val, Compare cmp ) const { # ifdef CDS_CXX11_LAMBDA_SUPPORT - value_type * pFound = null_ptr(); + value_type * pFound = nullptr; return find_at( refHead, val, cmp, [&pFound](value_type& found, Q const& ) { pFound = &found; } ) - ? pFound : null_ptr(); + ? pFound : nullptr; # else get_functor gf; return find_at( refHead, val, cmp, cds::ref(gf) ) - ? gf.pFound : null_ptr(); + ? 
gf.pFound : nullptr; # endif } @@ -1010,7 +1010,7 @@ namespace cds { namespace intrusive { position pos; if ( search( refHead, val, pos, cmp ) ) { - assert( pos.pCur != null_ptr() ); + assert( pos.pCur != nullptr ); return const_iterator( pos.pCur ); } return end(); @@ -1036,7 +1036,7 @@ namespace cds { namespace intrusive { try_again: pPrev = &refHead; pCur = pPrev->load(memory_model::memory_order_acquire); - pNext = null_ptr(); + pNext = nullptr; while ( true ) { if ( !pCur.ptr() ) { @@ -1060,7 +1060,7 @@ namespace cds { namespace intrusive { goto try_again; } - assert( pCur.ptr() != null_ptr() ); + assert( pCur.ptr() != nullptr ); int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val ); if ( nCmp >= 0 ) { pos.pPrev = pPrev; diff --git a/cds/intrusive/michael_set_base.h b/cds/intrusive/michael_set_base.h index 61f9e78a..abd6169c 100644 --- a/cds/intrusive/michael_set_base.h +++ b/cds/intrusive/michael_set_base.h @@ -128,9 +128,9 @@ namespace cds { namespace intrusive { public: iterator() - : m_pCurBucket( null_ptr() ) + : m_pCurBucket( nullptr ) , m_itList() - , m_pEndBucket( null_ptr() ) + , m_pEndBucket( nullptr ) {} iterator( list_iterator const& it, bucket_ptr pFirst, bucket_ptr pLast ) @@ -150,13 +150,13 @@ namespace cds { namespace intrusive { value_ptr operator ->() const { - assert( m_pCurBucket != null_ptr() ); + assert( m_pCurBucket != nullptr ); return m_itList.operator ->(); } value_ref operator *() const { - assert( m_pCurBucket != null_ptr() ); + assert( m_pCurBucket != nullptr ); return m_itList.operator *(); } @@ -177,7 +177,7 @@ namespace cds { namespace intrusive { bucket_ptr bucket() const { - return m_pCurBucket != m_pEndBucket ? m_pCurBucket : null_ptr(); + return m_pCurBucket != m_pEndBucket ? m_pCurBucket : nullptr; } template diff --git a/cds/intrusive/moir_queue.h b/cds/intrusive/moir_queue.h index 29e08121..0ccbbdc3 100644 --- a/cds/intrusive/moir_queue.h +++ b/cds/intrusive/moir_queue.h @@ -123,7 +123,7 @@ namespace cds { namespace intrusive { h = res.guards.protect( 0, base_class::m_pHead, node_to_value() ); pNext = res.guards.protect( 1, h->m_pNext, node_to_value() ); - if ( pNext == null_ptr() ) + if ( pNext == nullptr ) return false ; // queue is empty if ( base_class::m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) { @@ -158,7 +158,7 @@ namespace cds { namespace intrusive { base_class::dispose_result( res ); return node_traits::to_value_ptr( *res.pNext ); } - return null_ptr(); + return nullptr; } /// Synonym for \ref cds_intrusive_MoirQueue_dequeue "dequeue" function diff --git a/cds/intrusive/mspriority_queue.h b/cds/intrusive/mspriority_queue.h index fa651cc7..f287f7bc 100644 --- a/cds/intrusive/mspriority_queue.h +++ b/cds/intrusive/mspriority_queue.h @@ -186,7 +186,7 @@ namespace cds { namespace intrusive { /// Creates empty node node() - : m_pVal( null_ptr() ) + : m_pVal( nullptr ) , m_nTag( tag_type(Empty) ) {} @@ -311,7 +311,7 @@ namespace cds { namespace intrusive { m_Lock.unlock(); refBottom.m_nTag = tag_type(Empty); value_type * pVal = refBottom.m_pVal; - refBottom.m_pVal = null_ptr(); + refBottom.m_pVal = nullptr; refBottom.unlock(); node& refTop = m_Heap[ 1 ]; diff --git a/cds/intrusive/msqueue.h b/cds/intrusive/msqueue.h index aa37ba5b..c27514f3 100644 --- a/cds/intrusive/msqueue.h +++ b/cds/intrusive/msqueue.h @@ -155,7 +155,7 @@ namespace cds { namespace intrusive { { void operator()( value_type * p ) { - assert( p != null_ptr()); + assert( p != nullptr ); 
MSQueue::clear_links( node_traits::to_node_ptr(p) ); disposer()( p ); @@ -199,7 +199,7 @@ namespace cds { namespace intrusive { if ( m_pHead.load(memory_model::memory_order_acquire) != h ) continue; - if ( pNext == null_ptr() ) + if ( pNext == nullptr ) return false ; // empty queue node_type * t = m_pTail.load(memory_model::memory_order_acquire); @@ -227,7 +227,7 @@ namespace cds { namespace intrusive { static void clear_links( node_type * pNode ) { - pNode->m_pNext.store( null_ptr(), memory_model::memory_order_release ); + pNode->m_pNext.store( nullptr, memory_model::memory_order_release ); } void dispose_result( dequeue_result& res ) @@ -256,8 +256,8 @@ namespace cds { namespace intrusive { public: /// Initializes empty queue MSQueue() - : m_pHead( null_ptr() ) - , m_pTail( null_ptr() ) + : m_pHead( nullptr ) + , m_pTail( nullptr ) { // GC and node_type::gc must be the same static_assert(( std::is_same::value ), "GC and node_type::gc must be the same"); @@ -288,11 +288,11 @@ namespace cds { namespace intrusive { node_type * pHead = m_pHead.load(memory_model::memory_order_relaxed); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); assert( pHead == m_pTail.load(memory_model::memory_order_relaxed) ); - m_pHead.store( null_ptr(), memory_model::memory_order_relaxed ); - m_pTail.store( null_ptr(), memory_model::memory_order_relaxed ); + m_pHead.store( nullptr, memory_model::memory_order_relaxed ); + m_pTail.store( nullptr, memory_model::memory_order_relaxed ); dispose_node( pHead ); } @@ -333,14 +333,14 @@ namespace cds { namespace intrusive { t = guard.protect( m_pTail, node_to_value() ); node_type * pNext = t->m_pNext.load(memory_model::memory_order_acquire); - if ( pNext != null_ptr() ) { + if ( pNext != nullptr ) { // Tail is misplaced, advance it m_pTail.compare_exchange_weak( t, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); m_Stat.onBadTail(); continue; } - node_type * tmp = null_ptr(); + node_type * tmp = nullptr; if ( t->m_pNext.compare_exchange_strong( tmp, pNew, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) break; @@ -388,7 +388,7 @@ namespace cds { namespace intrusive { return node_traits::to_value_ptr( *res.pNext ); } - return null_ptr(); + return nullptr; } /// Synonym for \ref cds_intrusive_MSQueue_enqueue "enqueue" function @@ -407,7 +407,7 @@ namespace cds { namespace intrusive { bool empty() const { typename gc::Guard guard; - return guard.protect( m_pHead, node_to_value() )->m_pNext.load(memory_model::memory_order_relaxed) == null_ptr(); + return guard.protect( m_pHead, node_to_value() )->m_pNext.load( memory_model::memory_order_relaxed ) == nullptr; } /// Clear the queue diff --git a/cds/intrusive/optimistic_queue.h b/cds/intrusive/optimistic_queue.h index eed476bc..0bb895f3 100644 --- a/cds/intrusive/optimistic_queue.h +++ b/cds/intrusive/optimistic_queue.h @@ -37,8 +37,8 @@ namespace cds { namespace intrusive { atomic_node_ptr m_pNext ; ///< Pointer to next node CDS_CONSTEXPR node() CDS_NOEXCEPT - : m_pPrev( null_ptr() ) - , m_pNext( null_ptr() ) + : m_pPrev( nullptr ) + , m_pNext( nullptr ) {} }; @@ -118,8 +118,8 @@ namespace cds { namespace intrusive { */ static void is_empty( const node_type * pNode ) { - assert( pNode->m_pNext.load(CDS_ATOMIC::memory_order_relaxed) == null_ptr() ); - assert( pNode->m_pPrev.load(CDS_ATOMIC::memory_order_relaxed) == null_ptr() ); + assert( pNode->m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr ); + assert( pNode->m_pPrev.load( 
CDS_ATOMIC::memory_order_relaxed ) == nullptr ); } }; @@ -353,7 +353,7 @@ namespace cds { namespace intrusive { { void operator ()( value_type * p ) { - assert( p != null_ptr()); + assert( p != nullptr ); OptimisticQueue::clear_links( node_traits::to_node_ptr(*p) ); disposer()( p ); @@ -377,8 +377,8 @@ namespace cds { namespace intrusive { //@cond static void clear_links( node_type * pNode ) { - pNode->m_pNext.store( null_ptr(), memory_model::memory_order_release ); - pNode->m_pPrev.store( null_ptr(), memory_model::memory_order_release ); + pNode->m_pNext.store( nullptr, memory_model::memory_order_release ); + pNode->m_pPrev.store( nullptr, memory_model::memory_order_release ); } struct dequeue_result { @@ -398,12 +398,12 @@ namespace cds { namespace intrusive { while ( true ) { // Try till success or empty pHead = res.guards.protect( 0, m_pHead, node_to_value() ); pTail = res.guards.protect( 1, m_pTail, node_to_value() ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); pFirstNodePrev = res.guards.protect( 2, pHead->m_pPrev, node_to_value() ); if ( pHead == m_pHead.load(memory_model::memory_order_relaxed)) { if ( pTail != pHead ) { - if ( pFirstNodePrev == null_ptr() + if ( pFirstNodePrev == nullptr || pFirstNodePrev->m_pNext.load(memory_model::memory_order_relaxed) != pHead ) { fix_list( pTail, pHead ); @@ -462,7 +462,7 @@ namespace cds { namespace intrusive { void dispose_node( node_type * p ) { - assert( p != null_ptr()); + assert( p != nullptr ); if ( p != &m_Dummy ) { gc::template retire( node_traits::to_value_ptr(p) ); @@ -474,8 +474,8 @@ namespace cds { namespace intrusive { public: /// Constructor creates empty queue OptimisticQueue() - : m_pTail( null_ptr() ) - , m_pHead( null_ptr() ) + : m_pTail( nullptr ) + , m_pHead( nullptr ) { // GC and node_type::gc must be the same static_assert(( std::is_same::value ), "GC and node_type::gc must be the same"); @@ -493,10 +493,10 @@ namespace cds { namespace intrusive { node_type * pHead = m_pHead.load(memory_model::memory_order_relaxed); CDS_DEBUG_DO( node_type * pTail = m_pTail.load(memory_model::memory_order_relaxed); ) CDS_DEBUG_DO( assert( pHead == pTail ); ) - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); - m_pHead.store( null_ptr(), memory_model::memory_order_relaxed ); - m_pTail.store( null_ptr(), memory_model::memory_order_relaxed ); + m_pHead.store( nullptr, memory_model::memory_order_relaxed ); + m_pTail.store( nullptr, memory_model::memory_order_relaxed ); dispose_node( pHead ); } @@ -559,7 +559,7 @@ namespace cds { namespace intrusive { return node_traits::to_value_ptr( *res.pNext ); } - return null_ptr(); + return nullptr; } /// Synonym for @ref cds_intrusive_OptimisticQueue_enqueue "enqueue" @@ -589,7 +589,7 @@ namespace cds { namespace intrusive { void clear() { value_type * pv; - while ( (pv = dequeue()) != null_ptr() ); + while ( (pv = dequeue()) != nullptr ); } /// Returns queue's item count diff --git a/cds/intrusive/segmented_queue.h b/cds/intrusive/segmented_queue.h index 0900d9f1..2fb03952 100644 --- a/cds/intrusive/segmented_queue.h +++ b/cds/intrusive/segmented_queue.h @@ -257,7 +257,7 @@ namespace cds { namespace intrusive { { void operator()( segment * pSegment ) { - assert( pSegment != null_ptr()); + assert( pSegment != nullptr ); free_segment( pSegment ); } }; @@ -266,15 +266,15 @@ namespace cds { namespace intrusive { { void operator()( segment * pSegment ) { - assert( pSegment != null_ptr()); + assert( pSegment != nullptr ); retire_segment( pSegment ); } }; public: segment_list( 
size_t nQuasiFactor, stat& st ) - : m_pHead( null_ptr() ) - , m_pTail( null_ptr() ) + : m_pHead( nullptr ) + , m_pTail( nullptr ) , m_nQuasiFactor( nQuasiFactor ) , m_Stat( st ) { @@ -355,9 +355,9 @@ namespace cds { namespace intrusive { scoped_lock l( m_Lock ); if ( m_List.empty() ) { - m_pTail.store( null_ptr(), memory_model::memory_order_relaxed ); - m_pHead.store( null_ptr(), memory_model::memory_order_relaxed ); - return guard.assign( null_ptr() ); + m_pTail.store( nullptr, memory_model::memory_order_relaxed ); + m_pHead.store( nullptr, memory_model::memory_order_relaxed ); + return guard.assign( nullptr ); } if ( pHead != &m_List.front() || get_version(pHead) != m_List.front().version ) { @@ -369,8 +369,8 @@ namespace cds { namespace intrusive { m_List.pop_front(); if ( m_List.empty() ) { - pRet = guard.assign( null_ptr() ); - m_pTail.store( null_ptr(), memory_model::memory_order_relaxed ); + pRet = guard.assign( nullptr ); + m_pTail.store( nullptr, memory_model::memory_order_relaxed ); } else pRet = guard.assign( &m_List.front() ); @@ -529,7 +529,7 @@ namespace cds { namespace intrusive { assert( pVal ); return pVal; } - return null_ptr(); + return nullptr; } diff --git a/cds/intrusive/single_link_struct.h b/cds/intrusive/single_link_struct.h index c17f342e..eea61765 100644 --- a/cds/intrusive/single_link_struct.h +++ b/cds/intrusive/single_link_struct.h @@ -38,7 +38,7 @@ namespace cds { namespace intrusive { atomic_node_ptr m_pNext ; ///< pointer to the next node in the container node() - : m_pNext( null_ptr() ) + : m_pNext( nullptr ) {} }; @@ -54,13 +54,13 @@ namespace cds { namespace intrusive { atomic_node_ptr m_pNext ; ///< pointer to the next node in the container node() - : m_pNext(null_ptr()) + : m_pNext( nullptr ) {} protected: virtual void cleanUp( cds::gc::hrc::ThreadGC * pGC ) { - assert( pGC != null_ptr() ); + assert( pGC != nullptr ); typename gc::GuardArray<2> aGuards( *pGC ); while ( true ) { @@ -80,10 +80,10 @@ namespace cds { namespace intrusive { { if ( bConcurrent ) { node * pNext = m_pNext.load(CDS_ATOMIC::memory_order_relaxed); - do {} while ( !m_pNext.compare_exchange_weak( pNext, null_ptr(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ); + do {} while ( !m_pNext.compare_exchange_weak( pNext, nullptr, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ); } else { - m_pNext.store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + m_pNext.store( nullptr, CDS_ATOMIC::memory_order_relaxed ); } } }; @@ -166,7 +166,7 @@ namespace cds { namespace intrusive { */ static void is_empty( const node_type * pNode ) { - assert( pNode->m_pNext.load(CDS_ATOMIC::memory_order_relaxed) == null_ptr() ); + assert( pNode->m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr ); } }; diff --git a/cds/intrusive/skip_list_base.h b/cds/intrusive/skip_list_base.h index b715e619..9715c43a 100644 --- a/cds/intrusive/skip_list_base.h +++ b/cds/intrusive/skip_list_base.h @@ -45,17 +45,17 @@ namespace cds { namespace intrusive { public: /// Constructs a node of height 1 (a bottom-list node) node() - : m_pNext( null_ptr()) + : m_pNext( nullptr ) , m_nHeight(1) - , m_arrNext( null_ptr()) + , m_arrNext( nullptr ) {} /// Constructs a node of height \p nHeight void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower ) { assert( nHeight > 0 ); - assert( ( nHeight == 1 && nextTower == null_ptr() ) // bottom-list node - || ( nHeight > 1 && nextTower != null_ptr() ) // node at level of more than 0 + assert( (nHeight == 1 && nextTower == 
nullptr) // bottom-list node + || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0 ); m_arrNext = nextTower; @@ -66,7 +66,7 @@ namespace cds { namespace intrusive { atomic_marked_ptr * release_tower() { atomic_marked_ptr * pTower = m_arrNext; - m_arrNext = null_ptr(); + m_arrNext = nullptr; m_nHeight = 1; return pTower; } @@ -81,7 +81,7 @@ namespace cds { namespace intrusive { atomic_marked_ptr& next( unsigned int nLevel ) { assert( nLevel < height() ); - assert( nLevel == 0 || (nLevel > 0 && m_arrNext != null_ptr() )); + assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) ); return nLevel ? m_arrNext[ nLevel - 1] : m_pNext; } @@ -90,7 +90,7 @@ namespace cds { namespace intrusive { atomic_marked_ptr const& next( unsigned int nLevel ) const { assert( nLevel < height() ); - assert( nLevel == 0 || nLevel > 0 && m_arrNext != null_ptr() ); + assert( nLevel == 0 || nLevel > 0 && m_arrNext != nullptr ); return nLevel ? m_arrNext[ nLevel - 1] : m_pNext; } @@ -116,7 +116,7 @@ namespace cds { namespace intrusive { /// Clears internal links void clear() { - assert( m_arrNext == null_ptr()); + assert( m_arrNext == nullptr ); m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_release ); } @@ -124,7 +124,7 @@ namespace cds { namespace intrusive { bool is_cleared() const { return m_pNext == atomic_marked_ptr() - && m_arrNext == null_ptr() + && m_arrNext == nullptr && m_nHeight <= 1 ; } @@ -617,7 +617,7 @@ namespace cds { namespace intrusive { static node_type * make_tower( node_type * pNode, unsigned int nHeight ) { if ( nHeight > 1 ) - pNode->make_tower( nHeight, tower_allocator().NewArray( nHeight - 1, null_ptr() )); + pNode->make_tower( nHeight, tower_allocator().NewArray( nHeight - 1, nullptr ) ); return pNode; } diff --git a/cds/intrusive/skip_list_hrc.h b/cds/intrusive/skip_list_hrc.h index afd4df43..15973a94 100644 --- a/cds/intrusive/skip_list_hrc.h +++ b/cds/intrusive/skip_list_hrc.h @@ -31,9 +31,9 @@ namespace cds { namespace intrusive { namespace skip_list { public: /// Constructs a node of height 1 (a bottom-list node) node() - : m_pNext( null_ptr()) + : m_pNext( nullptr ) , m_nHeight(1) - , m_arrNext( null_ptr()) + , m_arrNext( nullptr ) , m_bDel( false ) {} @@ -47,8 +47,8 @@ namespace cds { namespace intrusive { namespace skip_list { void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower ) { assert( nHeight > 0 ); - assert( ( nHeight == 1 && nextTower == null_ptr() ) // bottom-list node - || ( nHeight > 1 && nextTower != null_ptr() ) // node at level of more than 0 + assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node + || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0 ); m_arrNext = nextTower; @@ -60,7 +60,7 @@ namespace cds { namespace intrusive { namespace skip_list { unsigned int nHeight = m_nHeight - 1; atomic_marked_ptr * pTower = m_arrNext; if ( pTower ) { - m_arrNext = null_ptr(); + m_arrNext = nullptr; m_nHeight = 1; for ( unsigned int i = 0; i < nHeight; ++i ) pTower[i].store( marked_ptr(), CDS_ATOMIC::memory_order_release ); @@ -77,7 +77,7 @@ namespace cds { namespace intrusive { namespace skip_list { atomic_marked_ptr& next( unsigned int nLevel ) { assert( nLevel < height() ); - assert( nLevel == 0 || (nLevel > 0 && m_arrNext != null_ptr() )); + assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) ); return nLevel ? 
m_arrNext[ nLevel - 1] : m_pNext; } @@ -86,7 +86,7 @@ namespace cds { namespace intrusive { namespace skip_list { atomic_marked_ptr const& next( unsigned int nLevel ) const { assert( nLevel < height() ); - assert( nLevel == 0 || (nLevel > 0 && m_arrNext != null_ptr()) ); + assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) ); return nLevel ? m_arrNext[ nLevel - 1] : m_pNext; } diff --git a/cds/intrusive/skip_list_impl.h b/cds/intrusive/skip_list_impl.h index bda9e0fa..386e3073 100644 --- a/cds/intrusive/skip_list_impl.h +++ b/cds/intrusive/skip_list_impl.h @@ -75,7 +75,7 @@ namespace cds { namespace intrusive { public: // for internal use only!!! iterator( node_type& refHead ) - : m_pNode( null_ptr() ) + : m_pNode( nullptr ) { back_off bkoff; @@ -100,7 +100,7 @@ namespace cds { namespace intrusive { public: iterator() - : m_pNode( null_ptr()) + : m_pNode( nullptr ) {} iterator( iterator const& s) @@ -111,16 +111,16 @@ namespace cds { namespace intrusive { value_type * operator ->() const { - assert( m_pNode != null_ptr< node_type *>() ); - assert( node_traits::to_value_ptr( m_pNode ) != null_ptr() ); + assert( m_pNode != nullptr ); + assert( node_traits::to_value_ptr( m_pNode ) != nullptr ); return node_traits::to_value_ptr( m_pNode ); } value_ref operator *() const { - assert( m_pNode != null_ptr< node_type *>() ); - assert( node_traits::to_value_ptr( m_pNode ) != null_ptr() ); + assert( m_pNode != nullptr ); + assert( node_traits::to_value_ptr( m_pNode ) != nullptr ); return *node_traits::to_value_ptr( m_pNode ); } @@ -506,7 +506,7 @@ namespace cds { namespace intrusive { static void dispose_node( value_type * pVal ) { - assert( pVal != null_ptr() ); + assert( pVal != nullptr ); typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal) ); disposer()( pVal ); } @@ -535,7 +535,7 @@ namespace cds { namespace intrusive { goto retry; } - if ( pCur.ptr() == null_ptr()) { + if ( pCur.ptr() == nullptr ) { // end of the list at level nLevel - goto next level break; } @@ -632,7 +632,7 @@ namespace cds { namespace intrusive { pos.pSucc[ nLevel ] = pCur.ptr(); } - return (pos.pCur = pCur.ptr()) != null_ptr(); + return (pos.pCur = pCur.ptr()) != nullptr; } bool find_max_position( position& pos ) @@ -657,7 +657,7 @@ namespace cds { namespace intrusive { goto retry; } - if ( pCur.ptr() == null_ptr()) { + if ( pCur.ptr() == nullptr ) { // end of the list at level nLevel - goto next level break; } @@ -694,7 +694,7 @@ namespace cds { namespace intrusive { pos.pSucc[ nLevel ] = pCur.ptr(); } - return (pos.pCur = pCur.ptr()) != null_ptr(); + return (pos.pCur = pCur.ptr()) != nullptr; } template @@ -744,7 +744,7 @@ namespace cds { namespace intrusive { template bool try_remove_at( node_type * pDel, position& pos, Func f ) { - assert( pDel != null_ptr()); + assert( pDel != nullptr ); marked_node_ptr pSucc; typename gc::Guard gSucc; @@ -1151,7 +1151,7 @@ namespace cds { namespace intrusive { node_type * pNode = node_traits::to_node_ptr( val ); scoped_node_ptr scp( pNode ); unsigned int nHeight = pNode->height(); - bool bTowerOk = nHeight > 1 && pNode->get_tower() != null_ptr(); + bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr; bool bTowerMade = false; position pos; @@ -1223,7 +1223,7 @@ namespace cds { namespace intrusive { node_type * pNode = node_traits::to_node_ptr( val ); scoped_node_ptr scp( pNode ); unsigned int nHeight = pNode->height(); - bool bTowerOk = nHeight > 1 && pNode->get_tower() != null_ptr(); + bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr; 
bool bTowerMade = false; # ifndef CDS_CXX11_LAMBDA_SUPPORT @@ -1707,7 +1707,7 @@ namespace cds { namespace intrusive { /// Checks if the set is empty bool empty() const { - return m_Head.head()->next(0).load( memory_model::memory_order_relaxed ) == null_ptr(); + return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr; } /// Clears the set (non-atomic) diff --git a/cds/intrusive/skip_list_nogc.h b/cds/intrusive/skip_list_nogc.h index b3d2ed41..98daa06e 100644 --- a/cds/intrusive/skip_list_nogc.h +++ b/cds/intrusive/skip_list_nogc.h @@ -33,17 +33,17 @@ namespace cds { namespace intrusive { public: /// Constructs a node of height 1 (a bottom-list node) node() - : m_pNext( null_ptr()) + : m_pNext( nullptr ) , m_nHeight(1) - , m_arrNext( null_ptr()) + , m_arrNext( nullptr ) {} /// Constructs a node of height \p nHeight void make_tower( unsigned int nHeight, atomic_ptr * nextTower ) { assert( nHeight > 0 ); - assert( ( nHeight == 1 && nextTower == null_ptr() ) // bottom-list node - || ( nHeight > 1 && nextTower != null_ptr() ) // node at level of more than 0 + assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node + || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0 ); m_arrNext = nextTower; @@ -53,7 +53,7 @@ namespace cds { namespace intrusive { atomic_ptr * release_tower() { atomic_ptr * pTower = m_arrNext; - m_arrNext = null_ptr(); + m_arrNext = nullptr; m_nHeight = 1; return pTower; } @@ -67,7 +67,7 @@ namespace cds { namespace intrusive { atomic_ptr& next( unsigned int nLevel ) { assert( nLevel < height() ); - assert( nLevel == 0 || (nLevel > 0 && m_arrNext != null_ptr() )); + assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) ); return nLevel ? m_arrNext[ nLevel - 1] : m_pNext; } @@ -76,7 +76,7 @@ namespace cds { namespace intrusive { atomic_ptr const& next( unsigned int nLevel ) const { assert( nLevel < height() ); - assert( nLevel == 0 || nLevel > 0 && m_arrNext != null_ptr() ); + assert( nLevel == 0 || nLevel > 0 && m_arrNext != nullptr ); return nLevel ? 
m_arrNext[ nLevel - 1] : m_pNext; } @@ -102,14 +102,14 @@ namespace cds { namespace intrusive { /// Clears internal links void clear() { - assert( m_arrNext == null_ptr()); - m_pNext.store( null_ptr(), CDS_ATOMIC::memory_order_release ); + assert( m_arrNext == nullptr ); + m_pNext.store( nullptr, CDS_ATOMIC::memory_order_release ); } bool is_cleared() const { - return m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == null_ptr() - && m_arrNext == null_ptr() + return m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr + && m_arrNext == nullptr && m_nHeight <= 1 ; } @@ -149,7 +149,7 @@ namespace cds { namespace intrusive { public: iterator() - : m_pNode( null_ptr()) + : m_pNode( nullptr ) {} iterator( iterator const& s) @@ -158,16 +158,16 @@ namespace cds { namespace intrusive { value_type * operator ->() const { - assert( m_pNode != null_ptr< node_type *>() ); - assert( node_traits::to_value_ptr( m_pNode ) != null_ptr() ); + assert( m_pNode != nullptr ); + assert( node_traits::to_value_ptr( m_pNode ) != nullptr ); return node_traits::to_value_ptr( m_pNode ); } value_ref operator *() const { - assert( m_pNode != null_ptr< node_type *>() ); - assert( node_traits::to_value_ptr( m_pNode ) != null_ptr() ); + assert( m_pNode != nullptr ); + assert( node_traits::to_value_ptr( m_pNode ) != nullptr ); return *node_traits::to_value_ptr( m_pNode ); } @@ -443,7 +443,7 @@ namespace cds { namespace intrusive { head_node( unsigned int nHeight ) { for ( size_t i = 0; i < sizeof(m_Tower) / sizeof(m_Tower[0]); ++i ) - m_Tower[i].store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + m_Tower[i].store( nullptr, CDS_ATOMIC::memory_order_relaxed ); node_type::make_tower( nHeight, m_Tower ); } @@ -456,8 +456,8 @@ namespace cds { namespace intrusive { void clear() { for (unsigned int i = 0; i < sizeof(m_Tower) / sizeof(m_Tower[0]); ++i ) - m_Tower[i].store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); - node_type::m_pNext.store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + m_Tower[i].store( nullptr, CDS_ATOMIC::memory_order_relaxed ); + node_type::m_pNext.store( nullptr, CDS_ATOMIC::memory_order_relaxed ); } }; //@endcond @@ -487,7 +487,7 @@ namespace cds { namespace intrusive { static void dispose_node( node_type * pNode ) { - assert( pNode != null_ptr() ); + assert( pNode != nullptr ); typename node_builder::node_disposer()( pNode ); disposer()( node_traits::to_value_ptr( pNode )); } @@ -497,7 +497,7 @@ namespace cds { namespace intrusive { { node_type * pPred; node_type * pSucc; - node_type * pCur = null_ptr(); + node_type * pCur = nullptr; int nCmp = 1; @@ -551,7 +551,7 @@ namespace cds { namespace intrusive { unsigned int nHeight = pNode->height(); for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) - pNode->next(nLevel).store( null_ptr(), memory_model::memory_order_relaxed ); + pNode->next( nLevel ).store( nullptr, memory_model::memory_order_relaxed ); { node_type * p = pos.pSucc[0]; @@ -563,7 +563,7 @@ namespace cds { namespace intrusive { } for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) { - node_type * p = null_ptr(); + node_type * p = nullptr; while ( true ) { node_type * q = pos.pSucc[ nLevel ]; @@ -594,7 +594,7 @@ namespace cds { namespace intrusive { } else { m_Stat.onFindFastFailed(); - return null_ptr(); + return nullptr; } } @@ -691,7 +691,7 @@ namespace cds { namespace intrusive { node_type * pNode = node_traits::to_node_ptr( val ); scoped_node_ptr scp( pNode ); unsigned int nHeight = pNode->height(); - bool bTowerOk = nHeight > 1 && pNode->get_tower() != 
null_ptr(); + bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr; bool bTowerMade = false; position pos; @@ -765,7 +765,7 @@ namespace cds { namespace intrusive { node_type * pNode = node_traits::to_node_ptr( val ); scoped_node_ptr scp( pNode ); unsigned int nHeight = pNode->height(); - bool bTowerOk = nHeight > 1 && pNode->get_tower() != null_ptr(); + bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr; bool bTowerMade = false; # ifndef CDS_CXX11_LAMBDA_SUPPORT @@ -841,7 +841,7 @@ namespace cds { namespace intrusive { template bool find( Q& val, Func f ) const { - return find_with_( val, key_comparator(), f ) != null_ptr(); + return find_with_( val, key_comparator(), f ) != nullptr; } /// Finds the key \p val using \p pred predicate for comparing @@ -854,7 +854,7 @@ namespace cds { namespace intrusive { template bool find_with( Q& val, Less pred, Func f ) const { - return find_with_( val, cds::opt::details::make_comparator_from_less(), f ) != null_ptr(); + return find_with_( val, cds::opt::details::make_comparator_from_less(), f ) != nullptr; } /// Finds the key \p val @@ -883,7 +883,7 @@ namespace cds { namespace intrusive { template bool find( Q const& val, Func f ) const { - return find_with_( val, key_comparator(), f ) != null_ptr(); + return find_with_( val, key_comparator(), f ) != nullptr; } /// Finds the key \p val using \p pred predicate for comparing @@ -896,7 +896,7 @@ namespace cds { namespace intrusive { template bool find_with( Q const& val, Less pred, Func f ) const { - return find_with_( val, cds::opt::details::make_comparator_from_less(), f ) != null_ptr(); + return find_with_( val, cds::opt::details::make_comparator_from_less(), f ) != nullptr; } /// Finds the key \p val @@ -918,7 +918,7 @@ namespace cds { namespace intrusive { # endif if ( pNode ) return node_traits::to_value_ptr( pNode ); - return null_ptr(); + return nullptr; } /// Finds the key \p val using \p pred predicate for comparing @@ -939,7 +939,7 @@ namespace cds { namespace intrusive { # endif if ( pNode ) return node_traits::to_value_ptr( pNode ); - return null_ptr(); + return nullptr; } /// Gets minimum key from the set @@ -973,7 +973,7 @@ namespace cds { namespace intrusive { pPred = pCur; } } - return pPred && pPred != m_Head.head() ? node_traits::to_value_ptr( pPred ) : null_ptr(); + return pPred && pPred != m_Head.head() ? node_traits::to_value_ptr( pPred ) : nullptr; } /// Clears the set (non-atomic) @@ -1012,7 +1012,7 @@ namespace cds { namespace intrusive { /// Checks if the set is empty bool empty() const { - return m_Head.head()->next(0).load( memory_model::memory_order_relaxed ) == null_ptr(); + return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr; } /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32. 
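
The pattern repeated in every hunk of this commit reduces to the small standalone sketch below. It is not taken from libcds: `typed_null<T>()` here is a hypothetical stand-in for the removed `null_ptr<>()` helper, whose real definition is not part of this patch; the point is only that `std::nullptr_t` converts implicitly to any object pointer type, so the typed helper becomes redundant in initializers, stores, and comparisons alike.

    #include <atomic>
    #include <cassert>

    // Hypothetical stand-in for the removed null_ptr<>() helper: a function
    // template returning a null pointer already typed as T. The real helper's
    // definition is not shown in this patch.
    template <typename T>
    T typed_null() { return T(); }

    struct node {
        std::atomic<node*> m_pNext;

        // Old style: the null had to carry its pointer type explicitly, e.g.
        //   node() : m_pNext( typed_null<node*>() ) {}
        // New style: nullptr converts to node* on its own, no template argument.
        node() : m_pNext( nullptr ) {}

        bool is_free() const
        {
            return m_pNext.load( std::memory_order_relaxed ) == nullptr;
        }
    };

    int main()
    {
        node n;
        assert( n.is_free() );

        node * p = typed_null<node*>();   // same value as nullptr, just noisier
        assert( p == nullptr );

        n.m_pNext.store( &n, std::memory_order_relaxed );
        assert( !n.is_free() );
        return 0;
    }

The same deduction argument covers the hunks above and below: constructor initializers, atomic stores, CAS expected values, asserts, and ternary results all accept `nullptr` directly, which is why the substitution can be made mechanically across the tree.
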
diff --git a/cds/intrusive/skip_list_rcu.h b/cds/intrusive/skip_list_rcu.h index e03162cc..61aaec66 100644 --- a/cds/intrusive/skip_list_rcu.h +++ b/cds/intrusive/skip_list_rcu.h @@ -47,14 +47,14 @@ namespace cds { namespace intrusive { public: /// Constructs a node of height 1 (a bottom-list node) node() - : m_pNext( null_ptr()) - , m_pDelChain( null_ptr()) + : m_pNext( nullptr ) + , m_pDelChain( nullptr ) # ifdef _DEBUG , m_bLinked( false ) , m_bUnlinked( false ) # endif , m_nHeight(1) - , m_arrNext( null_ptr()) + , m_arrNext( nullptr ) {} # ifdef _DEBUG @@ -68,8 +68,8 @@ namespace cds { namespace intrusive { void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower ) { assert( nHeight > 0 ); - assert( ( nHeight == 1 && nextTower == null_ptr() ) // bottom-list node - || ( nHeight > 1 && nextTower != null_ptr() ) // node at level of more than 0 + assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node + || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0 ); m_arrNext = nextTower; @@ -79,7 +79,7 @@ namespace cds { namespace intrusive { atomic_marked_ptr * release_tower() { atomic_marked_ptr * pTower = m_arrNext; - m_arrNext = null_ptr(); + m_arrNext = nullptr; m_nHeight = 1; return pTower; } @@ -99,7 +99,7 @@ namespace cds { namespace intrusive { atomic_marked_ptr& next( unsigned int nLevel ) { assert( nLevel < height() ); - assert( nLevel == 0 || (nLevel > 0 && m_arrNext != null_ptr() )); + assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) ); return nLevel ? m_arrNext[ nLevel - 1] : m_pNext; } @@ -108,7 +108,7 @@ namespace cds { namespace intrusive { atomic_marked_ptr const& next( unsigned int nLevel ) const { assert( nLevel < height() ); - assert( nLevel == 0 || nLevel > 0 && m_arrNext != null_ptr() ); + assert( nLevel == 0 || nLevel > 0 && m_arrNext != nullptr ); return nLevel ? m_arrNext[ nLevel - 1] : m_pNext; } @@ -134,15 +134,15 @@ namespace cds { namespace intrusive { /// Clears internal links void clear() { - assert( m_arrNext == null_ptr()); + assert( m_arrNext == nullptr ); m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_release ); - m_pDelChain = null_ptr(); + m_pDelChain = nullptr; } bool is_cleared() const { return m_pNext == atomic_marked_ptr() - && m_arrNext == null_ptr() + && m_arrNext == nullptr && m_nHeight <= 1; } }; @@ -207,7 +207,7 @@ namespace cds { namespace intrusive { public: // for internal use only!!! iterator( node_type& refHead ) - : m_pNode( null_ptr() ) + : m_pNode( nullptr ) { // RCU should be locked before iterating!!! assert( gc::is_locked() ); @@ -234,7 +234,7 @@ namespace cds { namespace intrusive { public: iterator() - : m_pNode( null_ptr()) + : m_pNode( nullptr ) { // RCU should be locked before iterating!!! 
assert( gc::is_locked() ); @@ -249,16 +249,16 @@ namespace cds { namespace intrusive { value_type * operator ->() const { - assert( m_pNode != null_ptr< node_type *>() ); - assert( node_traits::to_value_ptr( m_pNode ) != null_ptr() ); + assert( m_pNode != nullptr ); + assert( node_traits::to_value_ptr( m_pNode ) != nullptr ); return node_traits::to_value_ptr( m_pNode ); } value_ref operator *() const { - assert( m_pNode != null_ptr< node_type *>() ); - assert( node_traits::to_value_ptr( m_pNode ) != null_ptr() ); + assert( m_pNode != nullptr ); + assert( node_traits::to_value_ptr( m_pNode ) != nullptr ); return *node_traits::to_value_ptr( m_pNode ); } @@ -586,12 +586,12 @@ namespace cds { namespace intrusive { node_type * pDelChain; position() - : pDelChain( null_ptr()) + : pDelChain( nullptr ) {} # ifdef _DEBUG ~position() { - assert( pDelChain == null_ptr()); + assert( pDelChain == nullptr ); } # endif }; @@ -722,7 +722,7 @@ namespace cds { namespace intrusive { goto retry; } - if ( pCur.ptr() == null_ptr()) { + if ( pCur.ptr() == nullptr ) { // end of the list at level nLevel - goto next level break; } @@ -837,7 +837,7 @@ namespace cds { namespace intrusive { pos.pPrev[ nLevel ] = pPred; pos.pSucc[ nLevel ] = pCur.ptr(); } - return (pos.pCur = pCur.ptr()) != null_ptr(); + return (pos.pCur = pCur.ptr()) != nullptr; } bool find_max_position( position& pos ) @@ -860,7 +860,7 @@ retry: goto retry; } - if ( pCur.ptr() == null_ptr()) { + if ( pCur.ptr() == nullptr ) { // end of the list at level nLevel - goto next level break; } @@ -908,7 +908,7 @@ retry: pos.pSucc[ nLevel ] = pCur.ptr(); } - return (pos.pCur = pCur.ptr()) != null_ptr(); + return (pos.pCur = pCur.ptr()) != nullptr; } template @@ -960,7 +960,7 @@ retry: static void link_for_remove( position& pos, node_type * pDel ) { - assert( pDel->m_pDelChain == null_ptr() ); + assert( pDel->m_pDelChain == nullptr ); pDel->m_pDelChain = pos.pDelChain; pos.pDelChain = pDel; @@ -969,7 +969,7 @@ retry: template bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract ) { - assert( pDel != null_ptr()); + assert( pDel != nullptr ); assert( gc::is_locked() ); marked_node_ptr pSucc; @@ -1186,7 +1186,7 @@ retry: if ( !find_position( key, pos, cmp, false ) ) { m_Stat.onExtractFailed(); - pDel = null_ptr(); + pDel = nullptr; } else { pDel = pos.pCur; @@ -1208,12 +1208,12 @@ retry: } else { m_Stat.onExtractFailed(); - pDel = null_ptr(); + pDel = nullptr; } } defer_chain( pos ); - return pDel ? node_traits::to_value_ptr(pDel) : null_ptr(); + return pDel ? 
node_traits::to_value_ptr( pDel ) : nullptr; } template @@ -1225,7 +1225,7 @@ retry: { rcu_lock l; value_type * pDel = do_extract_key( key, key_comparator() ); - bReturn = pDel != null_ptr(); + bReturn = pDel != nullptr; if ( bReturn ) result = pDel; } @@ -1243,7 +1243,7 @@ retry: { rcu_lock l; value_type * pDel = do_extract_key( key, cds::opt::details::make_comparator_from_less() ); - bReturn = pDel != null_ptr(); + bReturn = pDel != nullptr; if ( bReturn ) result = pDel; } @@ -1261,7 +1261,7 @@ retry: if ( !find_min_position( pos ) ) { m_Stat.onExtractMinFailed(); - pDel = null_ptr(); + pDel = nullptr; } else { pDel = pos.pCur; @@ -1281,7 +1281,7 @@ retry: } else { m_Stat.onExtractMinFailed(); - pDel = null_ptr(); + pDel = nullptr; } } @@ -1298,7 +1298,7 @@ retry: { rcu_lock l; node_type * pDel = do_extract_min(); - bReturn = pDel != null_ptr(); + bReturn = pDel != nullptr; if ( bReturn ) result = node_traits::to_value_ptr(pDel); } @@ -1316,7 +1316,7 @@ retry: if ( !find_max_position( pos ) ) { m_Stat.onExtractMaxFailed(); - pDel = null_ptr(); + pDel = nullptr; } else { pDel = pos.pCur; @@ -1336,7 +1336,7 @@ retry: } else { m_Stat.onExtractMaxFailed(); - pDel = null_ptr(); + pDel = nullptr; } } @@ -1353,7 +1353,7 @@ retry: { rcu_lock l; node_type * pDel = do_extract_max(); - bReturn = pDel != null_ptr(); + bReturn = pDel != nullptr; if ( bReturn ) result = node_traits::to_value_ptr(pDel); } @@ -1377,7 +1377,7 @@ retry: : pCur(p) {} deferred_list_iterator() - : pCur( null_ptr()) + : pCur( nullptr ) {} cds::urcu::retired_ptr operator *() const @@ -1416,7 +1416,7 @@ retry: // Delete local chain if ( pos.pDelChain ) { dispose_chain( pos.pDelChain ); - pos.pDelChain = null_ptr(); + pos.pDelChain = nullptr; } // Delete deferred chain @@ -1425,7 +1425,7 @@ retry: void dispose_deferred() { - dispose_chain( m_pDeferredDelChain.exchange( null_ptr(), memory_model::memory_order_acq_rel )); + dispose_chain( m_pDeferredDelChain.exchange( nullptr, memory_model::memory_order_acq_rel ) ); } void defer_chain( position& pos ) @@ -1441,7 +1441,7 @@ retry: pTail->m_pDelChain = pDeferList; } while ( !m_pDeferredDelChain.compare_exchange_weak( pDeferList, pHead, memory_model::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed )); - pos.pDelChain = null_ptr(); + pos.pDelChain = nullptr; } } @@ -1452,7 +1452,7 @@ retry: SkipListSet() : m_Head( c_nMaxHeight ) , m_nHeight( c_nMinHeight ) - , m_pDeferredDelChain( null_ptr() ) + , m_pDeferredDelChain( nullptr ) { static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); @@ -1560,7 +1560,7 @@ retry: node_type * pNode = node_traits::to_node_ptr( val ); scoped_node_ptr scp( pNode ); unsigned int nHeight = pNode->height(); - bool bTowerOk = nHeight > 1 && pNode->get_tower() != null_ptr(); + bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr; bool bTowerMade = false; rcu_lock rcuLock; @@ -1645,7 +1645,7 @@ retry: node_type * pNode = node_traits::to_node_ptr( val ); scoped_node_ptr scp( pNode ); unsigned int nHeight = pNode->height(); - bool bTowerOk = nHeight > 1 && pNode->get_tower() != null_ptr(); + bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr; bool bTowerMade = false; # ifndef CDS_CXX11_LAMBDA_SUPPORT @@ -2115,11 +2115,11 @@ retry: # ifdef CDS_CXX11_LAMBDA_SUPPORT value_type * pFound; return do_find_with( val, key_comparator(), [&pFound](value_type& found, Q const& ) { pFound = &found; } ) - ? pFound : null_ptr(); + ? 
pFound : nullptr; # else get_functor gf; return do_find_with( val, key_comparator(), cds::ref(gf) ) - ? gf.pFound : null_ptr(); + ? gf.pFound : nullptr; # endif } @@ -2141,11 +2141,11 @@ retry: value_type * pFound; return do_find_with( val, cds::opt::details::make_comparator_from_less(), [&pFound](value_type& found, Q const& ) { pFound = &found; } ) - ? pFound : null_ptr(); + ? pFound : nullptr; # else get_functor gf; return do_find_with( val, cds::opt::details::make_comparator_from_less(), cds::ref(gf) ) - ? gf.pFound : null_ptr(); + ? gf.pFound : nullptr; # endif } @@ -2164,7 +2164,7 @@ retry: /// Checks if the set is empty bool empty() const { - return m_Head.head()->next(0).load( memory_model::memory_order_relaxed ) == null_ptr(); + return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr; } /// Clears the set (non-atomic) diff --git a/cds/intrusive/split_list.h b/cds/intrusive/split_list.h index e72f9f0f..98143326 100644 --- a/cds/intrusive/split_list.h +++ b/cds/intrusive/split_list.h @@ -255,7 +255,7 @@ namespace cds { namespace intrusive { public: bool insert_at( dummy_node_type * pHead, value_type& val ) { - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::insert_at( h, val ); } @@ -263,7 +263,7 @@ namespace cds { namespace intrusive { template bool insert_at( dummy_node_type * pHead, value_type& val, Func f ) { - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::insert_at( h, val, f ); } @@ -271,14 +271,14 @@ namespace cds { namespace intrusive { template std::pair ensure_at( dummy_node_type * pHead, value_type& val, Func func ) { - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::ensure_at( h, val, func ); } bool unlink_at( dummy_node_type * pHead, value_type& val ) { - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::unlink_at( h, val ); } @@ -286,7 +286,7 @@ namespace cds { namespace intrusive { template bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp, Func f ) { - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::erase_at( h, val, cmp, f ); } @@ -294,7 +294,7 @@ namespace cds { namespace intrusive { template bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp ) { - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::erase_at( h, val, cmp ); } @@ -302,7 +302,7 @@ namespace cds { namespace intrusive { template bool extract_at( dummy_node_type * pHead, typename gc::Guard& guard, split_list::details::search_value_type const& val, Compare cmp ) { - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::extract_at( h, guard, val, cmp ); } @@ -310,7 +310,7 @@ namespace cds { namespace intrusive { template bool find_at( dummy_node_type * pHead, split_list::details::search_value_type& val, Compare cmp, Func f ) { - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::find_at( h, val, cmp, f ); } @@ -318,7 +318,7 @@ namespace cds { namespace intrusive { template bool find_at( dummy_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp ) { - assert( pHead != null_ptr() ); + assert( 
pHead != nullptr ); bucket_head_type h(pHead); return base_class::find_at( h, val, cmp ); } @@ -326,7 +326,7 @@ namespace cds { namespace intrusive { template bool get_at( dummy_node_type * pHead, typename gc::Guard& guard, split_list::details::search_value_type const& val, Compare cmp ) { - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::get_at( h, guard, val, cmp ); } @@ -386,11 +386,11 @@ namespace cds { namespace intrusive { size_t nParent = parent_bucket( nBucket ); dummy_node_type * pParentBucket = m_Buckets.bucket( nParent ); - if ( pParentBucket == null_ptr() ) { + if ( pParentBucket == nullptr ) { pParentBucket = init_bucket( nParent ); } - assert( pParentBucket != null_ptr() ); + assert( pParentBucket != nullptr ); // Allocate a dummy node for new bucket { @@ -411,7 +411,7 @@ namespace cds { namespace intrusive { back_off bkoff; while ( true ) { dummy_node_type volatile * p = m_Buckets.bucket( nBucket ); - if ( p != null_ptr() ) + if ( p != nullptr ) return const_cast( p ); bkoff(); } @@ -422,7 +422,7 @@ namespace cds { namespace intrusive { size_t nBucket = bucket_no( nHash ); dummy_node_type * pHead = m_Buckets.bucket( nBucket ); - if ( pHead == null_ptr() ) + if ( pHead == nullptr ) pHead = init_bucket( nBucket ); assert( pHead->is_dummy() ); @@ -462,7 +462,7 @@ namespace cds { namespace intrusive { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); dummy_node_type * pHead = get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); # ifdef CDS_CXX11_LAMBDA_SUPPORT return m_List.find_at( pHead, sv, cmp, @@ -479,7 +479,7 @@ namespace cds { namespace intrusive { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); dummy_node_type * pHead = get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); return m_List.find_at( pHead, sv, cmp ); } @@ -490,7 +490,7 @@ namespace cds { namespace intrusive { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); dummy_node_type * pHead = get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); return m_List.get_at( pHead, guard, sv, cmp ); } @@ -513,7 +513,7 @@ namespace cds { namespace intrusive { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); dummy_node_type * pHead = get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); if ( m_List.erase_at( pHead, sv, cmp, f )) { --m_ItemCounter; @@ -528,7 +528,7 @@ namespace cds { namespace intrusive { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); dummy_node_type * pHead = get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); if ( m_List.erase_at( pHead, sv, cmp ) ) { --m_ItemCounter; @@ -543,7 +543,7 @@ namespace cds { namespace intrusive { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); dummy_node_type * pHead = get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); if ( m_List.extract_at( pHead, guard, sv, cmp ) ) { --m_ItemCounter; @@ -602,7 +602,7 @@ namespace cds { namespace intrusive { { size_t nHash = hash_value( val ); dummy_node_type * pHead = get_bucket( nHash ); - 
assert( pHead != null_ptr() ); + assert( pHead != nullptr ); node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); @@ -636,7 +636,7 @@ namespace cds { namespace intrusive { { size_t nHash = hash_value( val ); dummy_node_type * pHead = get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); @@ -678,7 +678,7 @@ namespace cds { namespace intrusive { { size_t nHash = hash_value( val ); dummy_node_type * pHead = get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); @@ -704,7 +704,7 @@ namespace cds { namespace intrusive { { size_t nHash = hash_value( val ); dummy_node_type * pHead = get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); if ( m_List.unlink_at( pHead, val ) ) { --m_ItemCounter; diff --git a/cds/intrusive/split_list_base.h b/cds/intrusive/split_list_base.h index 98609d0d..2b1b73a9 100644 --- a/cds/intrusive/split_list_base.h +++ b/cds/intrusive/split_list_base.h @@ -189,7 +189,7 @@ namespace cds { namespace intrusive { //@cond void allocate_table() { - m_Table = bucket_table_allocator().NewArray( m_nCapacity, null_ptr() ); + m_Table = bucket_table_allocator().NewArray( m_nCapacity, nullptr ); } void destroy_table() @@ -237,7 +237,7 @@ namespace cds { namespace intrusive { void bucket( size_t nBucket, node_type * pNode ) { assert( nBucket < capacity() ); - assert( bucket(nBucket) == null_ptr() ); + assert( bucket( nBucket ) == nullptr ); m_Table[ nBucket ].store( pNode, memory_model::memory_order_release ); } @@ -358,7 +358,7 @@ namespace cds { namespace intrusive { segment_type * allocate_table() { - return bucket_table_allocator().NewArray( m_metrics.nSegmentCount, null_ptr() ); + return bucket_table_allocator().NewArray( m_metrics.nSegmentCount, nullptr ); } void destroy_table( segment_type * pTable ) @@ -368,7 +368,7 @@ namespace cds { namespace intrusive { table_entry * allocate_segment() { - return segment_allocator().NewArray( m_metrics.nSegmentSize, null_ptr() ); + return segment_allocator().NewArray( m_metrics.nSegmentSize, nullptr ); } void destroy_segment( table_entry * pSegment ) @@ -414,7 +414,7 @@ namespace cds { namespace intrusive { segment_type * pSegments = m_Segments; for ( size_t i = 0; i < m_metrics.nSegmentCount; ++i ) { table_entry * pEntry = pSegments[i].load(memory_model::memory_order_relaxed); - if ( pEntry != null_ptr() ) + if ( pEntry != nullptr ) destroy_segment( pEntry ); } destroy_table( pSegments ); @@ -427,8 +427,8 @@ namespace cds { namespace intrusive { assert( nSegment < m_metrics.nSegmentCount ); table_entry * pSegment = m_Segments[ nSegment ].load(memory_model::memory_order_acquire); - if ( pSegment == null_ptr() ) - return null_ptr() ; // uninitialized bucket + if ( pSegment == nullptr ) + return nullptr; // uninitialized bucket return pSegment[ nBucket & (m_metrics.nSegmentSize - 1) ].load(memory_model::memory_order_acquire); } @@ -439,9 +439,9 @@ namespace cds { namespace intrusive { assert( nSegment < m_metrics.nSegmentCount ); segment_type& segment = m_Segments[nSegment]; - if ( segment.load(memory_model::memory_order_relaxed) == null_ptr() ) { + if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) { table_entry * pNewSegment = allocate_segment(); - table_entry * pNull = null_ptr(); + table_entry * pNull = nullptr; if ( !segment.compare_exchange_strong( pNull, 
pNewSegment, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) { destroy_segment( pNewSegment ); } diff --git a/cds/intrusive/split_list_nogc.h b/cds/intrusive/split_list_nogc.h index a5b54570..6ed3ffe7 100644 --- a/cds/intrusive/split_list_nogc.h +++ b/cds/intrusive/split_list_nogc.h @@ -95,7 +95,7 @@ namespace cds { namespace intrusive { public: list_iterator insert_at_( dummy_node_type * pHead, value_type& val ) { - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); bucket_head_type h(static_cast(pHead)); return base_class::insert_at_( h, val ); } @@ -103,7 +103,7 @@ namespace cds { namespace intrusive { template std::pair ensure_at_( dummy_node_type * pHead, value_type& val, Func func ) { - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); bucket_head_type h(static_cast(pHead)); return base_class::ensure_at_( h, val, func ); } @@ -111,7 +111,7 @@ namespace cds { namespace intrusive { template bool find_at( dummy_node_type * pHead, split_list::details::search_value_type& val, Compare cmp, Func f ) { - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); bucket_head_type h(static_cast(pHead)); return base_class::find_at( h, val, cmp, f ); } @@ -119,7 +119,7 @@ namespace cds { namespace intrusive { template list_iterator find_at_( dummy_node_type * pHead, split_list::details::search_value_type const & val, Compare cmp ) { - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); bucket_head_type h(static_cast(pHead)); return base_class::find_at_( h, val, cmp ); } @@ -180,11 +180,11 @@ namespace cds { namespace intrusive { size_t nParent = parent_bucket( nBucket ); dummy_node_type * pParentBucket = m_Buckets.bucket( nParent ); - if ( pParentBucket == null_ptr() ) { + if ( pParentBucket == nullptr ) { pParentBucket = init_bucket( nParent ); } - assert( pParentBucket != null_ptr() ); + assert( pParentBucket != nullptr ); // Allocate a dummy node for new bucket { @@ -205,7 +205,7 @@ namespace cds { namespace intrusive { back_off bkoff; while ( true ) { dummy_node_type volatile * p = m_Buckets.bucket( nBucket ); - if ( p && p != null_ptr() ) + if ( p && p != nullptr ) return const_cast( p ); bkoff(); } @@ -216,7 +216,7 @@ namespace cds { namespace intrusive { size_t nBucket = bucket_no( nHash ); dummy_node_type * pHead = m_Buckets.bucket( nBucket ); - if ( pHead == null_ptr() ) + if ( pHead == nullptr ) pHead = init_bucket( nBucket ); assert( pHead->is_dummy() ); @@ -337,7 +337,7 @@ namespace cds { namespace intrusive { { iterator it = find_( val ); if ( it == end() ) - return null_ptr(); + return nullptr; return &*it; } @@ -353,7 +353,7 @@ namespace cds { namespace intrusive { { iterator it = find_with_( val, pred ); if ( it == end() ) - return null_ptr(); + return nullptr; return &*it; } @@ -547,7 +547,7 @@ namespace cds { namespace intrusive { { size_t nHash = hash_value( val ); dummy_node_type * pHead = get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); @@ -564,7 +564,7 @@ namespace cds { namespace intrusive { { size_t nHash = hash_value( val ); dummy_node_type * pHead = get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); @@ -583,7 +583,7 @@ namespace cds { namespace intrusive { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); dummy_node_type * pHead 
= get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); return iterator( m_List.find_at_( pHead, sv, typename wrapped_ordered_list::template make_compare_from_less() ), m_List.end() ); } @@ -594,7 +594,7 @@ namespace cds { namespace intrusive { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); dummy_node_type * pHead = get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); return iterator( m_List.find_at_( pHead, sv, key_comparator() ), m_List.end() ); @@ -606,7 +606,7 @@ namespace cds { namespace intrusive { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); dummy_node_type * pHead = get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); # ifdef CDS_CXX11_LAMBDA_SUPPORT return m_List.find_at( pHead, sv, cmp, [&f](value_type& item, split_list::details::search_value_type& val){ cds::unref(f)(item, val.val ); }); diff --git a/cds/intrusive/split_list_rcu.h b/cds/intrusive/split_list_rcu.h index 2d655c1c..7401f18c 100644 --- a/cds/intrusive/split_list_rcu.h +++ b/cds/intrusive/split_list_rcu.h @@ -126,7 +126,7 @@ namespace cds { namespace intrusive { public: bool insert_at( dummy_node_type * pHead, value_type& val ) { - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::insert_at( h, val ); } @@ -134,7 +134,7 @@ namespace cds { namespace intrusive { template bool insert_at( dummy_node_type * pHead, value_type& val, Func f ) { - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::insert_at( h, val, f ); } @@ -142,14 +142,14 @@ namespace cds { namespace intrusive { template std::pair ensure_at( dummy_node_type * pHead, value_type& val, Func func ) { - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::ensure_at( h, val, func ); } bool unlink_at( dummy_node_type * pHead, value_type& val ) { - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::unlink_at( h, val ); } @@ -157,7 +157,7 @@ namespace cds { namespace intrusive { template bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp, Func f ) { - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::erase_at( h, val, cmp, f ); } @@ -165,7 +165,7 @@ namespace cds { namespace intrusive { template bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp ) { - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::erase_at( h, val, cmp ); } @@ -173,7 +173,7 @@ namespace cds { namespace intrusive { template value_type * extract_at( dummy_node_type * pHead, split_list::details::search_value_type& val, Compare cmp ) { - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::extract_at( h, val, cmp ); } @@ -181,7 +181,7 @@ namespace cds { namespace intrusive { template bool find_at( dummy_node_type * pHead, split_list::details::search_value_type& val, Compare cmp, Func f ) const { - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::find_at( h, val, cmp, f ); } @@ -189,7 +189,7 @@ namespace cds { namespace 
intrusive { template bool find_at( dummy_node_type * pHead, split_list::details::search_value_type const & val, Compare cmp ) const { - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::find_at( h, val, cmp ); } @@ -197,7 +197,7 @@ namespace cds { namespace intrusive { template value_type * get_at( dummy_node_type * pHead, split_list::details::search_value_type& val, Compare cmp ) const { - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); bucket_head_type h(pHead); return base_class::get_at( h, val, cmp ); } @@ -275,11 +275,11 @@ namespace cds { namespace intrusive { size_t nParent = parent_bucket( nBucket ); dummy_node_type * pParentBucket = m_Buckets.bucket( nParent ); - if ( pParentBucket == null_ptr() ) { + if ( pParentBucket == nullptr ) { pParentBucket = init_bucket( nParent ); } - assert( pParentBucket != null_ptr() ); + assert( pParentBucket != nullptr ); // Allocate a dummy node for new bucket { @@ -300,7 +300,7 @@ namespace cds { namespace intrusive { back_off bkoff; while ( true ) { dummy_node_type volatile * p = m_Buckets.bucket( nBucket ); - if ( p != null_ptr() ) + if ( p != nullptr ) return const_cast( p ); bkoff(); } @@ -311,7 +311,7 @@ namespace cds { namespace intrusive { size_t nBucket = bucket_no( nHash ); dummy_node_type * pHead = m_Buckets.bucket( nBucket ); - if ( pHead == null_ptr() ) + if ( pHead == nullptr ) pHead = init_bucket( nBucket ); assert( pHead->is_dummy() ); @@ -351,7 +351,7 @@ namespace cds { namespace intrusive { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); dummy_node_type * pHead = get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); # ifdef CDS_CXX11_LAMBDA_SUPPORT return m_List.find_at( pHead, sv, cmp, @@ -368,7 +368,7 @@ namespace cds { namespace intrusive { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); dummy_node_type * pHead = get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); return m_List.find_at( pHead, sv, cmp ); } @@ -379,7 +379,7 @@ namespace cds { namespace intrusive { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); dummy_node_type * pHead = get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); return m_List.get_at( pHead, sv, cmp ); } @@ -390,7 +390,7 @@ namespace cds { namespace intrusive { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); dummy_node_type * pHead = get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); value_type * pNode = m_List.extract_at( pHead, sv, cmp ); if ( pNode ) @@ -410,7 +410,7 @@ namespace cds { namespace intrusive { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); dummy_node_type * pHead = get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); if ( m_List.erase_at( pHead, sv, cmp ) ) { --m_ItemCounter; @@ -425,7 +425,7 @@ namespace cds { namespace intrusive { size_t nHash = hash_value( val ); split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); dummy_node_type * pHead = get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); if ( m_List.erase_at( pHead, sv, cmp, f )) { --m_ItemCounter; @@ 
-474,7 +474,7 @@ namespace cds { namespace intrusive { { size_t nHash = hash_value( val ); dummy_node_type * pHead = get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); @@ -510,7 +510,7 @@ namespace cds { namespace intrusive { { size_t nHash = hash_value( val ); dummy_node_type * pHead = get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); @@ -554,7 +554,7 @@ namespace cds { namespace intrusive { { size_t nHash = hash_value( val ); dummy_node_type * pHead = get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); @@ -582,7 +582,7 @@ namespace cds { namespace intrusive { { size_t nHash = hash_value( val ); dummy_node_type * pHead = get_bucket( nHash ); - assert( pHead != null_ptr() ); + assert( pHead != nullptr ); if ( m_List.unlink_at( pHead, val ) ) { --m_ItemCounter; diff --git a/cds/intrusive/striped_set.h b/cds/intrusive/striped_set.h index 0c1a1acc..6ad3231e 100644 --- a/cds/intrusive/striped_set.h +++ b/cds/intrusive/striped_set.h @@ -422,7 +422,7 @@ namespace cds { namespace intrusive { public: /// Default ctor. The initial capacity is 16. StripedSet() - : m_Buckets( null_ptr() ) + : m_Buckets( nullptr ) , m_nBucketMask( c_nMinimalCapacity - 1 ) , m_MutexPolicy( c_nMinimalCapacity ) { @@ -433,7 +433,7 @@ namespace cds { namespace intrusive { StripedSet( size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. ) - : m_Buckets( null_ptr() ) + : m_Buckets( nullptr ) , m_nBucketMask( calc_init_capacity(nCapacity) - 1 ) , m_MutexPolicy( m_nBucketMask + 1 ) { @@ -448,7 +448,7 @@ namespace cds { namespace intrusive { size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. ,resizing_policy const& resizingPolicy ///< Resizing policy ) - : m_Buckets( null_ptr() ) + : m_Buckets( nullptr ) , m_nBucketMask( ( nCapacity ? calc_init_capacity(nCapacity) : c_nMinimalCapacity ) - 1 ) , m_MutexPolicy( m_nBucketMask + 1 ) , m_ResizingPolicy( resizingPolicy ) @@ -466,7 +466,7 @@ namespace cds { namespace intrusive { size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. ,resizing_policy&& resizingPolicy ///< Resizing policy ) - : m_Buckets( null_ptr() ) + : m_Buckets( nullptr ) , m_nBucketMask( ( nCapacity ? 
calc_init_capacity(nCapacity) : c_nMinimalCapacity ) - 1 ) , m_MutexPolicy( m_nBucketMask + 1 ) , m_ResizingPolicy( resizingPolicy ) diff --git a/cds/intrusive/striped_set/adapter.h b/cds/intrusive/striped_set/adapter.h index 8655175a..f544731f 100644 --- a/cds/intrusive/striped_set/adapter.h +++ b/cds/intrusive/striped_set/adapter.h @@ -254,8 +254,8 @@ namespace cds { namespace intrusive { value_type * erase( Q const& key, Func f ) { iterator it = m_Set.find( key, key_comparator() ); - if ( it == m_Set.end() ) - return null_ptr(); + if (it == m_Set.end()) + return nullptr; value_type& val = *it; cds::unref(f)( val ); m_Set.erase( it ); @@ -266,8 +266,8 @@ namespace cds { namespace intrusive { value_type * erase( Q const& key, Less pred, Func f ) { iterator it = m_Set.find( key, pred ); - if ( it == m_Set.end() ) - return null_ptr(); + if (it == m_Set.end()) + return nullptr; value_type& val = *it; cds::unref(f)( val ); m_Set.erase( it ); diff --git a/cds/intrusive/striped_set/boost_list.h b/cds/intrusive/striped_set/boost_list.h index c5744b72..bea6d96b 100644 --- a/cds/intrusive/striped_set/boost_list.h +++ b/cds/intrusive/striped_set/boost_list.h @@ -124,7 +124,7 @@ namespace cds { namespace intrusive { namespace striped_set { { iterator it = find_key( key, find_predicate() ); if ( it == m_List.end() || key_comparator()( key, *it ) != 0 ) - return null_ptr(); + return nullptr; // key exists value_type& val = *it; @@ -139,7 +139,7 @@ namespace cds { namespace intrusive { namespace striped_set { { iterator it = find_key( key, pred ); if ( it == m_List.end() || pred( key, *it ) || pred( *it, key ) ) - return null_ptr(); + return nullptr; // key exists value_type& val = *it; diff --git a/cds/intrusive/striped_set/boost_slist.h b/cds/intrusive/striped_set/boost_slist.h index 0d861167..2257ff51 100644 --- a/cds/intrusive/striped_set/boost_slist.h +++ b/cds/intrusive/striped_set/boost_slist.h @@ -72,7 +72,7 @@ namespace cds { namespace intrusive { namespace striped_set { { std::pair< iterator, bool > pos = find_prev_item_cmp( key, cmp ); if ( !pos.second ) - return null_ptr(); + return nullptr; // key exists iterator it = pos.first; diff --git a/cds/intrusive/striped_set/boost_unordered_set.h b/cds/intrusive/striped_set/boost_unordered_set.h index d7d46a67..fdcfdc40 100644 --- a/cds/intrusive/striped_set/boost_unordered_set.h +++ b/cds/intrusive/striped_set/boost_unordered_set.h @@ -110,7 +110,7 @@ namespace cds { namespace intrusive { namespace striped_set { { iterator it = m_Set.find( key, typename container_type::hasher(), typename container_type::key_equal() ); if ( it == m_Set.end() ) - return null_ptr(); + return nullptr; value_type& val = *it; cds::unref(f)( val ); m_Set.erase( it ); @@ -122,7 +122,7 @@ namespace cds { namespace intrusive { namespace striped_set { { iterator it = m_Set.find( key, typename container_type::hasher(), equal_from_compare(pred) ); if ( it == m_Set.end() ) - return null_ptr(); + return nullptr; value_type& val = *it; cds::unref(f)( val ); m_Set.erase( it ); diff --git a/cds/intrusive/treiber_stack.h b/cds/intrusive/treiber_stack.h index 713790b4..c755f25e 100644 --- a/cds/intrusive/treiber_stack.h +++ b/cds/intrusive/treiber_stack.h @@ -35,7 +35,7 @@ namespace cds { namespace intrusive { CDS_ATOMIC::atomic nStatus; ///< Internal elimination status operation() - : pVal( null_ptr() ) + : pVal( nullptr ) , nStatus(0) {} }; @@ -228,7 +228,7 @@ namespace cds { namespace intrusive { himOp->pVal = op.pVal; else op.pVal = himOp->pVal; - slot.pRec = null_ptr(); + 
slot.pRec = nullptr; slot.lock.unlock(); himOp->nStatus.store( op_collided, CDS_ATOMIC::memory_order_release ); @@ -259,7 +259,7 @@ namespace cds { namespace intrusive { { slot_scoped_lock l( slot.lock ); if ( slot.pRec == myRec ) - slot.pRec = null_ptr(); + slot.pRec = nullptr; } bool bCollided = op.nStatus.load( CDS_ATOMIC::memory_order_acquire ) == op_collided; @@ -511,7 +511,7 @@ namespace cds { namespace intrusive { //@cond void clear_links( node_type * pNode ) CDS_NOEXCEPT { - pNode->m_pNext.store( null_ptr(), memory_model::memory_order_relaxed ); + pNode->m_pNext.store( nullptr, memory_model::memory_order_relaxed ); } template @@ -540,7 +540,7 @@ namespace cds { namespace intrusive { public: /// Constructs empty stack TreiberStack() - : m_Top(null_ptr()) + : m_Top( nullptr ) { init(); } @@ -552,7 +552,7 @@ namespace cds { namespace intrusive { \p nCollisionCapacity parameter specifies the capacity of collision array. */ TreiberStack( size_t nCollisionCapacity ) - : m_Top(null_ptr()) + : m_Top( nullptr ) , m_Backoff( nCollisionCapacity ) { init(); @@ -614,8 +614,8 @@ namespace cds { namespace intrusive { while ( true ) { node_type * t = guard.protect( m_Top, node_to_value() ); - if ( t == null_ptr() ) - return null_ptr() ; // stack is empty + if ( t == nullptr ) + return nullptr; // stack is empty node_type * pNext = t->m_pNext.load(memory_model::memory_order_relaxed); if ( m_Top.compare_exchange_weak( t, pNext, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) { // #2 @@ -637,7 +637,7 @@ namespace cds { namespace intrusive { bool empty() const { // http://www.manning-sandbox.com/thread.jspa?threadID=46245&tstart=0 - return m_Top.load(memory_model::memory_order_relaxed) == null_ptr(); + return m_Top.load( memory_model::memory_order_relaxed ) == nullptr; } /// Clear the stack @@ -654,9 +654,9 @@ namespace cds { namespace intrusive { node_type * pTop; while ( true ) { pTop = m_Top.load( memory_model::memory_order_relaxed ); - if ( pTop == null_ptr() ) + if ( pTop == nullptr ) return; - if ( m_Top.compare_exchange_weak( pTop, null_ptr(), memory_model::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed )) { // sync-with #1 and #2 + if ( m_Top.compare_exchange_weak( pTop, nullptr, memory_model::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed ) ) { // sync-with #1 and #2 m_ItemCounter.reset(); break; } diff --git a/cds/intrusive/tsigas_cycle_queue.h b/cds/intrusive/tsigas_cycle_queue.h index 73131d34..ac240517 100644 --- a/cds/intrusive/tsigas_cycle_queue.h +++ b/cds/intrusive/tsigas_cycle_queue.h @@ -116,7 +116,7 @@ namespace cds { namespace intrusive { //@cond static CDS_CONSTEXPR value_type * free0() CDS_NOEXCEPT { - return null_ptr(); + return nullptr; } static CDS_CONSTEXPR value_type * free1() CDS_NOEXCEPT { @@ -296,7 +296,7 @@ namespace cds { namespace intrusive { } while ( bkoff(), true ); // No control path reaches this line! 
- return null_ptr(); + return nullptr; } /// Synonym of \ref cds_intrusive_TsigasQueue_enqueue "enqueue" @@ -350,7 +350,7 @@ namespace cds { namespace intrusive { void clear( Disposer f ) { value_type * pv; - while ( (pv = pop()) != null_ptr() ) { + while ( (pv = pop()) != nullptr ) { unref(f)( pv ); } } diff --git a/cds/intrusive/vyukov_mpmc_cycle_queue.h b/cds/intrusive/vyukov_mpmc_cycle_queue.h index be6c48d5..1b049bf5 100644 --- a/cds/intrusive/vyukov_mpmc_cycle_queue.h +++ b/cds/intrusive/vyukov_mpmc_cycle_queue.h @@ -102,8 +102,8 @@ namespace cds { namespace intrusive { */ value_type * dequeue() { - value_type * p = null_ptr(); - return base_class::dequeue( p ) ? p : null_ptr(); + value_type * p = nullptr; + return base_class::dequeue( p ) ? p : nullptr; } /// Synonym of \ref enqueue @@ -134,7 +134,7 @@ namespace cds { namespace intrusive { void clear( Disposer f ) { value_type * pv; - while ( (pv = pop()) != null_ptr() ) { + while ( (pv = pop()) != nullptr ) { unref(f)( pv ); } } diff --git a/cds/lock/array.h b/cds/lock/array.h index ce96cc0b..0829a8fb 100644 --- a/cds/lock/array.h +++ b/cds/lock/array.h @@ -142,11 +142,11 @@ namespace cds { namespace lock { // Only for internal use!!! array() - : m_arrLocks( null_ptr() ) + : m_arrLocks( nullptr ) , m_nCapacity(0) {} array( select_cell_policy const& policy ) - : m_arrLocks( null_ptr() ) + : m_arrLocks( nullptr ) , m_nCapacity(0) , m_SelectCellPolicy( policy ) {} @@ -160,7 +160,7 @@ namespace cds { namespace lock { array( size_t nCapacity ///< [in] Array size ) - : m_arrLocks( null_ptr() ) + : m_arrLocks( nullptr ) , m_nCapacity( nCapacity ) { m_arrLocks = create_lock_array( nCapacity ); @@ -174,7 +174,7 @@ namespace cds { namespace lock { size_t nCapacity, ///< [in] Array size select_cell_policy const& policy ///< Cell selection policy (copy-constructible) ) - : m_arrLocks( null_ptr() ) + : m_arrLocks( nullptr ) , m_nCapacity( nCapacity ) , m_SelectCellPolicy( policy ) { @@ -190,7 +190,7 @@ namespace cds { namespace lock { size_t nCapacity, ///< [in] Array size select_cell_policy&& policy ///< Cell selection policy (move-constructible) ) - : m_arrLocks( null_ptr() ) + : m_arrLocks( nullptr ) , m_nCapacity( nCapacity ) , m_SelectCellPolicy( std::forward( policy )) { diff --git a/cds/memory/michael/allocator.h b/cds/memory/michael/allocator.h index f571ed10..abd3706d 100644 --- a/cds/memory/michael/allocator.h +++ b/cds/memory/michael/allocator.h @@ -140,7 +140,7 @@ namespace michael { struct make_null_ptr { void operator ()(void *& p) { - p = null_ptr(); + p = nullptr; } }; #endif @@ -332,7 +332,7 @@ namespace michael { { auto_lock al(m_access); if ( base_class::empty() ) - return null_ptr(); + return nullptr; T& rDesc = base_class::front(); base_class::pop_front(); assert( base_class::node_algorithms::inited( static_cast(&rDesc) ) ); @@ -386,7 +386,7 @@ namespace michael { { auto_lock al( m_access ); if ( base_class::empty() ) - return null_ptr(); + return nullptr; T& rDesc = base_class::front(); base_class::pop_front(); assert( base_class::node_algorithms::inited( static_cast(&rDesc) ) ); @@ -396,7 +396,7 @@ namespace michael { /// Removes \p pDesc descriptor from the free-list bool unlink( T * pDesc ) { - assert( pDesc != null_ptr() ); + assert(pDesc != nullptr); auto_lock al( m_access ); // !inited(pDesc) is equal to "pDesc is being linked to partial list" if ( !base_class::node_algorithms::inited( static_cast(pDesc) ) ) { @@ -794,8 +794,8 @@ namespace michael { //@cond superblock_desc() - : pSB( null_ptr() ) - , pProcHeap( 
null_ptr() ) + : pSB(nullptr) + , pProcHeap( nullptr ) {} //@endcond }; @@ -982,7 +982,7 @@ namespace michael { public: CDS_CONSTEXPR active_tag() CDS_NOEXCEPT - : pDesc(null_ptr()) + : pDesc( nullptr ) , nCredits(0) {} @@ -1020,7 +1020,7 @@ namespace michael { void clear() { - pDesc = null_ptr(); + pDesc = nullptr; nCredits = 0; } @@ -1043,7 +1043,7 @@ namespace michael { public: active_tag() CDS_NOEXCEPT - : pDesc( null_ptr() ) + : pDesc( nullptr ) {} # ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT // Clang 3.1: error: first argument to atomic operation must be a pointer to a trivially-copyable type @@ -1119,9 +1119,9 @@ namespace michael { //@cond processor_heap_base() CDS_NOEXCEPT - : pProcDesc( null_ptr() ) - , pSizeClass( null_ptr() ) - , pPartial( null_ptr() ) + : pProcDesc( nullptr ) + , pSizeClass( nullptr ) + , pPartial( nullptr ) { assert( (reinterpret_cast(this) & (c_nAlignment - 1)) == 0 ); } @@ -1136,7 +1136,7 @@ namespace michael { pDesc = partialList.pop(); break; } - } while ( !pPartial.compare_exchange_weak( pDesc, null_ptr(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed)); + } while ( !pPartial.compare_exchange_weak( pDesc, nullptr, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ); //assert( pDesc == NULL || free_desc_list::node_algorithms::inited( static_cast(pDesc) )); //assert( pDesc == NULL || partial_desc_list::node_algorithms::inited( static_cast(pDesc) ) ); @@ -1149,7 +1149,7 @@ namespace michael { assert( pPartial != pDesc ); //assert( partial_desc_list::node_algorithms::inited( static_cast(pDesc) ) ); - superblock_desc * pCur = null_ptr(); + superblock_desc * pCur = nullptr; if ( !pPartial.compare_exchange_strong(pCur, pDesc, CDS_ATOMIC::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed) ) partialList.push( pDesc ); } @@ -1174,8 +1174,8 @@ namespace michael { //@cond processor_desc() - : arrProcHeap( null_ptr() ) - , pageHeaps( null_ptr() ) + : arrProcHeap( nullptr ) + , pageHeaps( nullptr ) {} //@endcond }; @@ -1215,7 +1215,7 @@ namespace michael { ++nCollision; oldActive = pProcHeap->active.load(CDS_ATOMIC::memory_order_acquire); if ( !oldActive.ptr() ) - return null_ptr(); + return nullptr; unsigned int nCredits = oldActive.credits(); active_tag newActive ; // default = 0 if ( nCredits != 0 ) { @@ -1285,7 +1285,7 @@ namespace michael { retry: superblock_desc * pDesc = pProcHeap->get_partial(); if ( !pDesc ) - return null_ptr(); + return nullptr; // reserve blocks anchor_tag oldAnchor; @@ -1353,7 +1353,7 @@ namespace michael { block_header * alloc_from_new_superblock( processor_heap * pProcHeap ) { superblock_desc * pDesc = new_superblock_desc( pProcHeap ); - assert( pDesc != null_ptr() ); + assert( pDesc != nullptr ); pDesc->pSB = new_superblock_buffer( pProcHeap ); anchor_tag anchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_relaxed); @@ -1384,7 +1384,7 @@ namespace michael { } free_superblock( pDesc ); - return null_ptr(); + return nullptr; } /// Find appropriate processor heap based on size-class selected @@ -1560,7 +1560,7 @@ namespace michael { (pDesc->pageHeaps + i)->page_heap::~page_heap(); //m_IntHeap.free( pDesc->pageHeaps ); - pDesc->pageHeaps = null_ptr(); + pDesc->pageHeaps = nullptr; pDesc->processor_desc::~processor_desc(); m_AlignedHeap.free( pDesc ); @@ -1571,7 +1571,7 @@ namespace michael { { anchor_tag anchor; superblock_desc * pDesc = pProcHeap->pProcDesc->listSBDescFree.pop(); - if ( pDesc == null_ptr() ) { + if ( pDesc == nullptr ) { pDesc = new( 
m_AlignedHeap.alloc(sizeof(superblock_desc), c_nAlignment ) ) superblock_desc; assert( (uptr_atomic_t(pDesc) & (c_nAlignment - 1)) == 0 ); @@ -1639,17 +1639,17 @@ namespace michael { if ( !pProcHeap ) return alloc_from_OS( nSize ); - if ( (pBlock = alloc_from_active( pProcHeap )) != null_ptr() ) + if ( (pBlock = alloc_from_active( pProcHeap )) != nullptr ) break; - if ( (pBlock = alloc_from_partial( pProcHeap )) != null_ptr() ) + if ( (pBlock = alloc_from_partial( pProcHeap )) != nullptr ) break; - if ( (pBlock = alloc_from_new_superblock( pProcHeap )) != null_ptr() ) + if ( (pBlock = alloc_from_new_superblock( pProcHeap )) != nullptr ) break; } pProcHeap->stat.incAllocatedBytes( pProcHeap->pSizeClass->nBlockSize ); - assert( pBlock != null_ptr() ); + assert( pBlock != nullptr ); return pBlock; } @@ -1767,7 +1767,7 @@ namespace michael { free_superblock( pDesc ); } else if (oldAnchor.state == SBSTATE_FULL ) { - assert( pProcHeap != null_ptr() ); + assert( pProcHeap != nullptr ); pProcHeap->stat.decDescFull(); pProcHeap->add_partial( pDesc ); } @@ -1791,7 +1791,7 @@ namespace michael { { if ( nNewSize == 0 ) { free( pMemory ); - return null_ptr(); + return nullptr; } const size_t nOrigSize = nNewSize; @@ -1802,7 +1802,7 @@ namespace michael { // Reallocation of aligned block is not possible if ( pBlock->isAligned() ) { assert( false ); - return null_ptr(); + return nullptr; } if ( pBlock->isOSAllocated() ) { @@ -1840,7 +1840,7 @@ namespace michael { return pNew; } - return null_ptr(); + return nullptr; } /// Allocate aligned memory block diff --git a/cds/opt/permutation.h b/cds/opt/permutation.h index 233ced16..eb83aeca 100644 --- a/cds/opt/permutation.h +++ b/cds/opt/permutation.h @@ -201,7 +201,7 @@ namespace cds { namespace opt { public: /// Initializes the generator of arbitrary length \p nLength random_shuffle_permutation( size_t nLength ) - : m_pCur( null_ptr() ) + : m_pCur( nullptr ) { m_pFirst = new integer_type[nLength]; m_pLast = m_pFirst + nLength; diff --git a/cds/threading/details/_common.h b/cds/threading/details/_common.h index 7c11fc33..be0acb49 100644 --- a/cds/threading/details/_common.h +++ b/cds/threading/details/_common.h @@ -160,17 +160,17 @@ namespace cds { if (cds::gc::HP::isUsed() ) m_hpManager = new (m_hpManagerPlaceholder) cds::gc::HP::thread_gc_impl; else - m_hpManager = null_ptr(); + m_hpManager = nullptr; if ( cds::gc::HRC::isUsed() ) m_hrcManager = new (m_hrcManagerPlaceholder) cds::gc::HRC::thread_gc_impl; else - m_hrcManager = null_ptr(); + m_hrcManager = nullptr; if ( cds::gc::PTB::isUsed() ) m_ptbManager = new (m_ptbManagerPlaceholder) cds::gc::PTB::thread_gc_impl; else - m_ptbManager = null_ptr(); + m_ptbManager = nullptr; } ~ThreadData() @@ -178,19 +178,19 @@ namespace cds { if ( m_hpManager ) { typedef cds::gc::HP::thread_gc_impl hp_thread_gc_impl; m_hpManager->~hp_thread_gc_impl(); - m_hpManager = null_ptr(); + m_hpManager = nullptr; } if ( m_hrcManager ) { typedef cds::gc::HRC::thread_gc_impl hrc_thread_gc_impl; m_hrcManager->~hrc_thread_gc_impl(); - m_hrcManager = null_ptr(); + m_hrcManager = nullptr; } if ( m_ptbManager ) { typedef cds::gc::PTB::thread_gc_impl ptb_thread_gc_impl; m_ptbManager->~ptb_thread_gc_impl(); - m_ptbManager = null_ptr(); + m_ptbManager = nullptr; } assert( m_pGPIRCU == NULL ); diff --git a/cds/urcu/details/base.h b/cds/urcu/details/base.h index 08b638ce..8f3a1f9e 100644 --- a/cds/urcu/details/base.h +++ b/cds/urcu/details/base.h @@ -319,7 +319,7 @@ namespace cds { CDS_ATOMIC::atomic m_idOwner ; ///< Owner thread id; 0 - the 
record is free (not owned) thread_list_record() - : m_pNext( null_ptr() ) + : m_pNext( nullptr ) , m_idOwner( cds::OS::nullThreadId() ) {} @@ -340,7 +340,7 @@ namespace cds { public: thread_list() - : m_pHead( null_ptr()) + : m_pHead( nullptr ) {} ~thread_list() @@ -379,13 +379,13 @@ namespace cds { void retire( thread_record * pRec ) { - assert( pRec != null_ptr() ); + assert( pRec != nullptr ); pRec->m_list.m_idOwner.store( cds::OS::nullThreadId(), CDS_ATOMIC::memory_order_release ); } void detach_all() { - thread_record * pNext = null_ptr(); + thread_record * pNext = nullptr; cds::OS::ThreadId const nullThreadId = cds::OS::nullThreadId(); for ( thread_record * pRec = m_pHead.load(CDS_ATOMIC::memory_order_acquire); pRec; pRec = pNext ) { @@ -408,7 +408,7 @@ namespace cds { CDS_DEBUG_DO( cds::OS::ThreadId const nullThreadId = cds::OS::nullThreadId() ;) CDS_DEBUG_DO( cds::OS::ThreadId const mainThreadId = cds::OS::getCurrentThreadId() ;) - thread_record * p = m_pHead.exchange( null_ptr(), CDS_ATOMIC::memory_order_seq_cst ); + thread_record * p = m_pHead.exchange( nullptr, CDS_ATOMIC::memory_order_seq_cst ); while ( p ) { thread_record * pNext = p->m_list.m_pNext; diff --git a/cds/urcu/details/gp.h b/cds/urcu/details/gp.h index 7770980b..381e28fa 100644 --- a/cds/urcu/details/gp.h +++ b/cds/urcu/details/gp.h @@ -35,7 +35,7 @@ namespace cds { namespace urcu { namespace details { inline void gp_thread_gc::access_lock() { thread_record * pRec = get_thread_record(); - assert( pRec != null_ptr()); + assert( pRec != nullptr ); uint32_t tmp = pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed ); if ( (tmp & rcu_class::c_nNestMask) == 0 ) { @@ -53,7 +53,7 @@ namespace cds { namespace urcu { namespace details { inline void gp_thread_gc::access_unlock() { thread_record * pRec = get_thread_record(); - assert( pRec != null_ptr()); + assert( pRec != nullptr ); //CDS_COMPILER_RW_BARRIER; pRec->m_nAccessControl.fetch_sub( 1, CDS_ATOMIC::memory_order_release ); @@ -63,7 +63,7 @@ namespace cds { namespace urcu { namespace details { inline bool gp_thread_gc::is_locked() { thread_record * pRec = get_thread_record(); - assert( pRec != null_ptr()); + assert( pRec != nullptr ); return (pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0; } diff --git a/cds/urcu/details/gpb.h b/cds/urcu/details/gpb.h index 89bae00a..5df1fcbc 100644 --- a/cds/urcu/details/gpb.h +++ b/cds/urcu/details/gpb.h @@ -85,7 +85,7 @@ namespace cds { namespace urcu { /// Checks if the singleton is created and ready to use static bool isUsed() { - return singleton_ptr::s_pRCU != null_ptr(); + return singleton_ptr::s_pRCU != nullptr; } protected: @@ -153,7 +153,7 @@ namespace cds { namespace urcu { if ( bDetachAll ) instance()->m_ThreadList.detach_all(); delete instance(); - singleton_ptr::s_pRCU = null_ptr(); + singleton_ptr::s_pRCU = nullptr; } } diff --git a/cds/urcu/details/gpi.h b/cds/urcu/details/gpi.h index 4d512b75..4d9a852d 100644 --- a/cds/urcu/details/gpi.h +++ b/cds/urcu/details/gpi.h @@ -68,7 +68,7 @@ namespace cds { namespace urcu { /// Checks if the singleton is created and ready to use static bool isUsed() { - return singleton_ptr::s_pRCU != null_ptr(); + return singleton_ptr::s_pRCU != nullptr; } protected: @@ -100,7 +100,7 @@ namespace cds { namespace urcu { if ( bDetachAll ) instance()->m_ThreadList.detach_all(); delete instance(); - singleton_ptr::s_pRCU = null_ptr(); + singleton_ptr::s_pRCU = nullptr; } } diff --git a/cds/urcu/details/gpt.h b/cds/urcu/details/gpt.h 
index 427aa820..c741ea2e 100644 --- a/cds/urcu/details/gpt.h +++ b/cds/urcu/details/gpt.h @@ -92,7 +92,7 @@ namespace cds { namespace urcu { /// Checks if the singleton is created and ready to use static bool isUsed() { - return singleton_ptr::s_pRCU != null_ptr(); + return singleton_ptr::s_pRCU != nullptr; } protected: @@ -155,7 +155,7 @@ namespace cds { namespace urcu { pThis->m_DisposerThread.stop( pThis->m_Buffer, pThis->m_nCurEpoch.load( CDS_ATOMIC::memory_order_acquire )); delete pThis; - singleton_ptr::s_pRCU = null_ptr(); + singleton_ptr::s_pRCU = nullptr; } } diff --git a/cds/urcu/details/sh.h b/cds/urcu/details/sh.h index 4e752f8e..87f72c99 100644 --- a/cds/urcu/details/sh.h +++ b/cds/urcu/details/sh.h @@ -37,7 +37,7 @@ namespace cds { namespace urcu { namespace details { inline void sh_thread_gc::access_lock() { thread_record * pRec = get_thread_record(); - assert( pRec != null_ptr()); + assert( pRec != nullptr ); uint32_t tmp = pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed ); if ( (tmp & rcu_class::c_nNestMask) == 0 ) { @@ -56,7 +56,7 @@ namespace cds { namespace urcu { namespace details { inline void sh_thread_gc::access_unlock() { thread_record * pRec = get_thread_record(); - assert( pRec != null_ptr()); + assert( pRec != nullptr); CDS_COMPILER_RW_BARRIER; pRec->m_nAccessControl.fetch_sub( 1, CDS_ATOMIC::memory_order_release ); @@ -66,7 +66,7 @@ namespace cds { namespace urcu { namespace details { inline bool sh_thread_gc::is_locked() { thread_record * pRec = get_thread_record(); - assert( pRec != null_ptr()); + assert( pRec != nullptr); return (pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0; } @@ -83,7 +83,7 @@ namespace cds { namespace urcu { namespace details { sigact.sa_flags = SA_SIGINFO; sigemptyset( &sigact.sa_mask ); //sigaddset( &sigact.sa_mask, m_nSigNo ); - sigaction( m_nSigNo, &sigact, null_ptr() ); + sigaction( m_nSigNo, &sigact, nullptr ); sigaddset( &sigact.sa_mask, m_nSigNo ); pthread_sigmask( SIG_UNBLOCK, &sigact.sa_mask, NULL ); diff --git a/cds/urcu/details/sig_buffered.h b/cds/urcu/details/sig_buffered.h index 4aa89f89..81f88013 100644 --- a/cds/urcu/details/sig_buffered.h +++ b/cds/urcu/details/sig_buffered.h @@ -87,7 +87,7 @@ namespace cds { namespace urcu { /// Checks if the singleton is created and ready to use static bool isUsed() { - return singleton_ptr::s_pRCU != null_ptr(); + return singleton_ptr::s_pRCU != nullptr; } protected: @@ -151,7 +151,7 @@ namespace cds { namespace urcu { if ( bDetachAll ) instance()->m_ThreadList.detach_all(); delete instance(); - singleton_ptr::s_pRCU = null_ptr(); + singleton_ptr::s_pRCU = nullptr; } } diff --git a/cds/urcu/details/sig_threaded.h b/cds/urcu/details/sig_threaded.h index 57da1681..e4820943 100644 --- a/cds/urcu/details/sig_threaded.h +++ b/cds/urcu/details/sig_threaded.h @@ -94,7 +94,7 @@ namespace cds { namespace urcu { /// Checks if the singleton is created and ready to use static bool isUsed() { - return singleton_ptr::s_pRCU != null_ptr(); + return singleton_ptr::s_pRCU != nullptr; } protected: @@ -154,7 +154,7 @@ namespace cds { namespace urcu { pThis->m_DisposerThread.stop( pThis->m_Buffer, pThis->m_nCurEpoch.load( CDS_ATOMIC::memory_order_acquire )); delete pThis; - singleton_ptr::s_pRCU = null_ptr(); + singleton_ptr::s_pRCU = nullptr; } } diff --git a/cds/urcu/dispose_thread.h b/cds/urcu/dispose_thread.h index 25c73af5..3c1619c1 100644 --- a/cds/urcu/dispose_thread.h +++ b/cds/urcu/dispose_thread.h @@ -91,7 +91,7 @@ namespace cds { 
namespace urcu { bQuit = m_bQuit; nCurEpoch = m_nCurEpoch; pBuffer = m_pBuffer; - m_pBuffer = null_ptr(); + m_pBuffer = nullptr; } if ( pBuffer ) @@ -116,7 +116,7 @@ namespace cds { namespace urcu { public: //@cond dispose_thread() - : m_pBuffer( null_ptr() ) + : m_pBuffer( nullptr ) , m_nCurEpoch(0) , m_bQuit( false ) , m_bReady( false ) diff --git a/cds/urcu/exempt_ptr.h b/cds/urcu/exempt_ptr.h index 3b4eed36..963ec152 100644 --- a/cds/urcu/exempt_ptr.h +++ b/cds/urcu/exempt_ptr.h @@ -82,7 +82,7 @@ namespace cds { namespace urcu { public: /// Constructs empty pointer exempt_ptr() CDS_NOEXCEPT - : m_pNode( null_ptr()) + : m_pNode( nullptr ) {} /// Releases the pointer @@ -94,13 +94,13 @@ namespace cds { namespace urcu { /// Checks if the pointer is \p NULL bool empty() const CDS_NOEXCEPT { - return m_pNode == null_ptr(); + return m_pNode == nullptr; } /// Dereference operator value_type * operator->() const CDS_NOEXCEPT { - return !empty() ? node_to_value_cast()( m_pNode ) : null_ptr(); + return !empty() ? node_to_value_cast()(m_pNode) : nullptr; } /// Returns a reference to the value @@ -128,7 +128,7 @@ namespace cds { namespace urcu { assert( !rcu::is_locked() ); if ( !empty() ) { rcu::template retire_ptr( m_pNode ); - m_pNode = null_ptr(); + m_pNode = nullptr; } } }; @@ -161,7 +161,7 @@ namespace cds { namespace urcu { public: /// Constructs empty pointer exempt_ptr() CDS_NOEXCEPT - : m_pNode( null_ptr()) + : m_pNode( nullptr ) {} /// Releases the pointer @@ -173,13 +173,13 @@ namespace cds { namespace urcu { /// Checks if the pointer is \p NULL bool empty() const CDS_NOEXCEPT { - return m_pNode == null_ptr(); + return m_pNode == nullptr; } /// Dereference operator. value_type * operator->() const CDS_NOEXCEPT { - return !empty() ? m_pNode : null_ptr(); + return !empty() ? 
m_pNode : nullptr; } /// Returns a reference to the value @@ -205,7 +205,7 @@ namespace cds { namespace urcu { assert( !rcu::is_locked() ); if ( !empty() ) { rcu::template retire_ptr( m_pNode ); - m_pNode = null_ptr(); + m_pNode = nullptr; } } }; diff --git a/src/hrc_gc.cpp b/src/hrc_gc.cpp index db9f5564..9a8f8360 100644 --- a/src/hrc_gc.cpp +++ b/src/hrc_gc.cpp @@ -20,14 +20,14 @@ namespace cds { namespace gc { namespace hrc { - GarbageCollector * GarbageCollector::m_pGC = null_ptr(); + GarbageCollector * GarbageCollector::m_pGC = nullptr; GarbageCollector::GarbageCollector( size_t nHazardPtrCount, size_t nMaxThreadCount, size_t nRetiredNodeArraySize ) - : m_pListHead( null_ptr()), + : m_pListHead( nullptr ), m_bStatEnabled( true ), m_nHazardPointerCount( nHazardPtrCount ), m_nMaxThreadCount( nMaxThreadCount ), @@ -81,7 +81,7 @@ namespace cds { namespace gc { } delete m_pGC; - m_pGC = null_ptr(); + m_pGC = nullptr; } } @@ -103,10 +103,10 @@ namespace cds { namespace gc { assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() ); ContainerNode * pItem; for ( size_t n = 0; n < pNode->m_arrRetired.capacity(); ++n ) { - if ( (pItem = pNode->m_arrRetired[n].m_pNode.load(CDS_ATOMIC::memory_order_relaxed)) != null_ptr() ) { + if ( (pItem = pNode->m_arrRetired[n].m_pNode.load( CDS_ATOMIC::memory_order_relaxed )) != nullptr ) { pNode->m_arrRetired[n].m_funcFree( pItem ); //pItem->destroy(); - pNode->m_arrRetired[n].m_pNode.store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + pNode->m_arrRetired[n].m_pNode.store( nullptr, CDS_ATOMIC::memory_order_relaxed ); } } assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() ); @@ -123,7 +123,7 @@ namespace cds { namespace gc { return hprec; } } - return null_ptr(); + return nullptr; } details::thread_descriptor * GarbageCollector::allocateHRCThreadDesc( ThreadGC * pThreadGC ) @@ -177,7 +177,7 @@ namespace cds { namespace gc { after thread termination */ assert( pNode->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) != cds::OS::nullThreadId() ); - pNode->m_pOwner = null_ptr(); + pNode->m_pOwner = nullptr; pNode->m_idOwner.store( cds::OS::nullThreadId(), CDS_ATOMIC::memory_order_release ); assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() ); } @@ -290,7 +290,7 @@ namespace cds { namespace gc { } // We own threadDesc. 
- assert( pRec->m_pOwner == null_ptr() ); + assert( pRec->m_pOwner == nullptr ); if ( !pRec->m_bFree ) { // All undeleted pointers is moved to pThis (it is private for the current thread) @@ -303,7 +303,7 @@ namespace cds { namespace gc { details::retired_vector::iterator it = src.begin(); for ( size_t nRetired = 0; it != itEnd; ++nRetired, ++it ) { - if ( it->m_pNode.load(CDS_ATOMIC::memory_order_relaxed) == null_ptr() ) + if ( it->m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr ) continue; dest.push( it->m_pNode.load(CDS_ATOMIC::memory_order_relaxed), it->m_funcFree ); diff --git a/src/hzp_gc.cpp b/src/hzp_gc.cpp index 7fa6d835..2bdbdcb9 100644 --- a/src/hzp_gc.cpp +++ b/src/hzp_gc.cpp @@ -62,7 +62,7 @@ namespace cds { namespace gc { CDS_DEBUG_DO( const cds::OS::ThreadId mainThreadId = cds::OS::getCurrentThreadId() ;) hplist_node * pHead = m_pListHead.load( CDS_ATOMIC::memory_order_relaxed ); - m_pListHead.store( null_ptr(), CDS_ATOMIC::memory_order_relaxed ); + m_pListHead.store( nullptr, CDS_ATOMIC::memory_order_relaxed ); hplist_node * pNext = NULL; for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) { diff --git a/src/ptb_gc.cpp b/src/ptb_gc.cpp index f3ab0ad5..12ceb7f9 100644 --- a/src/ptb_gc.cpp +++ b/src/ptb_gc.cpp @@ -36,7 +36,7 @@ namespace cds { namespace gc { namespace ptb { assert( (nBucketCount & (nBucketCount - 1)) == 0 ); m_Buckets = allocator_type().NewArray( nBucketCount ); - std::fill( m_Buckets, m_Buckets + nBucketCount, null_ptr()); + std::fill( m_Buckets, m_Buckets + nBucketCount, nullptr ); } ~liberate_set() @@ -46,14 +46,14 @@ namespace cds { namespace gc { namespace ptb { void insert( retired_ptr_node& node ) { - node.m_pNext = null_ptr(); + node.m_pNext = nullptr; item_type& refBucket = bucket( node ); if ( refBucket ) { item_type p = refBucket; do { if ( p->m_ptr.m_p == node.m_ptr.m_p ) { - assert( node.m_pNextFree == null_ptr() ); + assert( node.m_pNextFree == nullptr ); node.m_pNextFree = p->m_pNextFree; p->m_pNextFree = &node; @@ -71,7 +71,7 @@ namespace cds { namespace gc { namespace ptb { { item_type& refBucket = bucket( ptr ); item_type p = refBucket; - item_type pPrev = null_ptr(); + item_type pPrev = nullptr; while ( p ) { if ( p->m_ptr.m_p == ptr ) { @@ -79,21 +79,21 @@ namespace cds { namespace gc { namespace ptb { pPrev->m_pNext = p->m_pNext; else refBucket = p->m_pNext; - p->m_pNext = null_ptr(); + p->m_pNext = nullptr; return p; } pPrev = p; p = p->m_pNext; } - return null_ptr(); + return nullptr; } typedef std::pair list_range; list_range free_all() { - item_type pTail = null_ptr(); + item_type pTail = nullptr; list_range ret = std::make_pair( pTail, pTail ); item_type const * pEndBucket = m_Buckets + m_nBucketCount; @@ -109,12 +109,12 @@ namespace cds { namespace gc { namespace ptb { for (;;) { item_type pNext = pTail->m_pNext; pTail->m_ptr.free(); - pTail->m_pNext = null_ptr(); + pTail->m_pNext = nullptr; while ( pTail->m_pNextFree ) { pTail = pTail->m_pNextFree; pTail->m_ptr.free(); - pTail->m_pNext = null_ptr(); + pTail->m_pNext = nullptr; } if ( pNext ) @@ -126,7 +126,7 @@ namespace cds { namespace gc { namespace ptb { } if ( pTail ) - pTail->m_pNextFree = null_ptr(); + pTail->m_pNextFree = nullptr; ret.second = pTail; return ret; } @@ -165,12 +165,12 @@ namespace cds { namespace gc { namespace ptb { liberate(); #if 0 - details::retired_ptr_node * pHead = null_ptr(); - details::retired_ptr_node * pTail = null_ptr(); + details::retired_ptr_node * pHead = nullptr; + details::retired_ptr_node * pTail = nullptr; for ( 
details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(CDS_ATOMIC::memory_order_relaxed)) { details::guard_data::handoff_ptr h = pGuard->pHandOff; - pGuard->pHandOff = null_ptr(); + pGuard->pHandOff = nullptr; while ( h ) { details::guard_data::handoff_ptr pNext = h->m_pNextFree; if ( h->m_ptr.m_p ) @@ -199,7 +199,7 @@ namespace cds { namespace gc { namespace ptb { details::retired_ptr_node * pHead = retiredList.first; while ( pHead ) { details::retired_ptr_node * pNext = pHead->m_pNext; - pHead->m_pNextFree = null_ptr(); + pHead->m_pNextFree = nullptr; set.insert( *pHead ); pHead = pNext; } @@ -232,7 +232,7 @@ namespace cds { namespace gc { namespace ptb { m_RetiredAllocator.inc_epoch(); if ( range.first ) { - assert( range.second != null_ptr() ); + assert( range.second != nullptr ); m_RetiredAllocator.free_range( range.first, range.second ); } else { @@ -245,7 +245,7 @@ namespace cds { namespace gc { namespace ptb { #if 0 void GarbageCollector::liberate( details::liberate_set& set ) { - details::guard_data::handoff_ptr const nullHandOff = null_ptr(); + details::guard_data::handoff_ptr const nullHandOff = nullptr; for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(CDS_ATOMIC::memory_order_acquire) ) { @@ -265,7 +265,7 @@ namespace cds { namespace gc { namespace ptb { cds::lock::Auto al( pGuard->spinHandOff ); if ( valGuarded == pGuard->pPost.load(CDS_ATOMIC::memory_order_acquire) ) { if ( pGuard->pHandOff && pGuard->pHandOff->m_ptr.m_p == pRetired->m_ptr.m_p ) { - h = nullHandOff ; //null_ptr(); + h = nullHandOff ; //nullptr; details::retired_ptr_node * pTail = pGuard->pHandOff; while ( pTail->m_pNextFree ) pTail = pTail->m_pNextFree; diff --git a/src/urcu_gp.cpp b/src/urcu_gp.cpp index 2acd85c8..309e1775 100644 --- a/src/urcu_gp.cpp +++ b/src/urcu_gp.cpp @@ -4,8 +4,8 @@ namespace cds { namespace urcu { namespace details { - template<> CDS_EXPORT_API singleton_vtbl * gp_singleton_instance< general_instant_tag >::s_pRCU = null_ptr(); - template<> CDS_EXPORT_API singleton_vtbl * gp_singleton_instance< general_buffered_tag >::s_pRCU = null_ptr(); - template<> CDS_EXPORT_API singleton_vtbl * gp_singleton_instance< general_threaded_tag >::s_pRCU = null_ptr(); + template<> CDS_EXPORT_API singleton_vtbl * gp_singleton_instance< general_instant_tag >::s_pRCU = nullptr; + template<> CDS_EXPORT_API singleton_vtbl * gp_singleton_instance< general_buffered_tag >::s_pRCU = nullptr; + template<> CDS_EXPORT_API singleton_vtbl * gp_singleton_instance< general_threaded_tag >::s_pRCU = nullptr; }}} // namespace cds::urcu::details diff --git a/src/urcu_sh.cpp b/src/urcu_sh.cpp index cceed18d..d450a6b6 100644 --- a/src/urcu_sh.cpp +++ b/src/urcu_sh.cpp @@ -5,8 +5,8 @@ #ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED namespace cds { namespace urcu { namespace details { - template<> CDS_EXPORT_API singleton_vtbl * sh_singleton_instance< signal_buffered_tag >::s_pRCU = null_ptr(); - template<> CDS_EXPORT_API singleton_vtbl * sh_singleton_instance< signal_threaded_tag >::s_pRCU = null_ptr(); + template<> CDS_EXPORT_API singleton_vtbl * sh_singleton_instance< signal_buffered_tag >::s_pRCU = nullptr; + template<> CDS_EXPORT_API singleton_vtbl * sh_singleton_instance< signal_threaded_tag >::s_pRCU = nullptr; }}} // namespace cds::urcu::details diff --git a/tests/unit/queue/intrusive_queue_type.h b/tests/unit/queue/intrusive_queue_type.h index 39ad1daa..b9aaf981 100644 --- a/tests/unit/queue/intrusive_queue_type.h +++ 
b/tests/unit/queue/intrusive_queue_type.h @@ -120,7 +120,7 @@ namespace queue { { lock_guard l( m_Lock ); if ( m_List.empty() ) - return cds::null_ptr(); + return nullptr; value_type& v = m_List.front(); m_List.pop_front(); return &v;
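
Illustrative sketch (not part of the patch, and not the libcds implementation): the whole commit relies on the C++11 keyword nullptr having type std::nullptr_t, which converts implicitly to any raw pointer type, so the per-type helper null_ptr<T>() is no longer needed. The null_ptr below is a hypothetical stand-in used only to show the equivalence:

#include <cassert>

struct node { node * next; };

// hypothetical stand-in for the removed helper: a typed null value
template <typename T>
T null_ptr() { return 0; }

int main()
{
    node * a = null_ptr<node *>();   // old style: the pointer type must be spelled at the call site
    node * b = nullptr;              // C++11: std::nullptr_t converts implicitly to node *
    assert( a == b );
    assert( b == nullptr );          // comparisons against nullptr need no cast either
    return 0;
}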