{
assert( pPred->m_pNext.load(memory_model::memory_order_relaxed).ptr() == pCur );
- pNode->m_pNext.store( marked_node_ptr(pCur), memory_model::memory_order_release );
+ pNode->m_pNext.store( marked_node_ptr(pCur), memory_model::memory_order_relaxed );
pPred->m_pNext.store( marked_node_ptr(pNode), memory_model::memory_order_release );
}
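// Note on the orderings above: pNode is still private to the inserting
// thread, so its own next field may be stored relaxed; only the store that
// makes pNode reachable (pPred->m_pNext) needs release semantics.
// A minimal sketch of the same publication pattern with plain std::atomic
// (ASSUMPTION: simplified stand-alone node, not this class' node_type;
// requires <atomic>):
struct link_sketch {
    struct snode { std::atomic<snode*> next{ nullptr }; };

    // Insert pNode between pPred and pCur; the pPred->pCur link must
    // already be validated (e.g. under the node locks, as above)
    static void link_after( snode * pPred, snode * pCur, snode * pNode )
    {
        // the node is not yet visible to other threads: relaxed is enough
        pNode->next.store( pCur, std::memory_order_relaxed );
        // publication: release orders all prior initialization of *pNode
        pPred->next.store( pNode, std::memory_order_release );
    }
};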
node_type * pNext = pCur->m_pNext.load(memory_model::memory_order_relaxed).ptr();
pCur->m_pNext.store( marked_node_ptr( pHead, 1 ), memory_model::memory_order_relaxed ); // logical deletion + back-link for search
- pPred->m_pNext.store( marked_node_ptr( pNext ), memory_model::memory_order_relaxed); // physically deleting
+ pPred->m_pNext.store( marked_node_ptr( pNext ), memory_model::memory_order_release); // physically deleting
}
//@endcond
}
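// The unlink above is two-phase: the victim's next pointer is first
// redirected to the list head with the mark bit set (logical deletion plus
// a back-link that lets a concurrent search restart from the head), and only
// then is pPred re-linked past the victim (physical deletion).
// A sketch of the mark-bit encoding behind marked_node_ptr
// (ASSUMPTION: illustrative reimplementation, requires <cstdint>;
// nodes are aligned to at least 2 bytes, so pointer bit 0 is free):
struct marked_ptr_sketch {
    uintptr_t m_bits;   // pointer value with the mark packed into bit 0

    explicit marked_ptr_sketch( void * p = nullptr, int nMark = 0 )
        : m_bits( reinterpret_cast<uintptr_t>( p ) | uintptr_t( nMark ))
    {}

    void * ptr()  const { return reinterpret_cast<void *>( m_bits & ~uintptr_t(1)); }
    int    bits() const { return int( m_bits & 1 ); }  // 1 => logically deleted
};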
template <typename Q, typename Compare, typename Func>
- bool erase_at( node_type * pHead, Q const& val, Compare cmp, Func f, position& pos )
+ bool erase_at( node_type * const pHead, Q const& val, Compare cmp, Func f, position& pos )
{
check_deadlock_policy::check();
}
template <typename Q, typename Compare>
- value_type * extract_at( node_type * pHead, Q const& val, Compare cmp )
+ value_type * extract_at( node_type * const pHead, Q const& val, Compare cmp )
{
position pos;
assert( gc::is_locked() ) ; // RCU must be locked!!!
protected:
//@cond
template <typename Q>
- void search( node_type * pHead, Q const& key, position& pos ) const
+ void search( node_type * const pHead, Q const& key, position& pos ) const
{
search( pHead, key, pos, key_comparator() );
}
template <typename Q, typename Compare>
- void search( node_type * pHead, Q const& key, position& pos, Compare cmp ) const
+ void search( node_type * const pHead, Q const& key, position& pos, Compare cmp ) const
{
// RCU should be locked!!!
assert( gc::is_locked() );
marked_node_ptr pCur(pHead);
marked_node_ptr pPrev(pHead);
- while ( pCur.ptr() != pTail && ( pCur.ptr() == pHead || cmp( *node_traits::to_value_ptr( *pCur.ptr() ), key ) < 0 )) {
+ while ( pCur != pTail && ( pCur == pHead || cmp( *node_traits::to_value_ptr( *pCur.ptr()), key ) < 0 )) {
pPrev = pCur;
pCur = pCur->m_pNext.load(memory_model::memory_order_acquire);
}
pos.pPred = pPrev.ptr();
}
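// On exit the loop above has bracketed the key: pos.pPred is the last node
// ordered strictly before the key (or pHead), and the node following it is
// the first node not less than the key (or pTail).
// A sequential sketch of the same bracketing search
// (ASSUMPTION: illustrative plain-pointer node, not node_type):
struct search_sketch {
    struct snode { int nKey; snode * pNext; };

    static void bracket( snode * pHead, snode * pTail, int key,
                         snode *& pPred, snode *& pCur )
    {
        pPred = pHead;
        pCur  = pHead->pNext;
        while ( pCur != pTail && pCur->nKey < key ) {
            pPred = pCur;       // last node with nKey < key
            pCur  = pCur->pNext;
        }
    }
};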
- static bool validate( node_type * pPred, node_type * pCur )
+ static bool validate( node_type * pPred, node_type * pCur ) CDS_NOEXCEPT
{
// RCU should be locked!!!
assert( gc::is_locked() );
- \p Traits - set traits, default is \p split_list::traits.
Instead of defining \p Traits struct you may use option-based syntax with \p split_list::make_traits metafunction.
- @note About reqired features of hash functor see \ref cds_SplitList_hash_functor "SplitList general description".
+ @note About required features of hash functor see \ref cds_SplitList_hash_functor "SplitList general description".
\par How to use
Before including <tt><cds/intrusive/split_list_rcu.h></tt> you should include an appropriate RCU header file,
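for example:
\code
#include <cds/urcu/general_buffered.h>      // general-purpose buffered RCU
#include <cds/intrusive/michael_list_rcu.h> // RCU-based ordered list for the buckets
#include <cds/intrusive/split_list_rcu.h>

// The RCU "garbage collector" type passed as the set's GC parameter
// (a condensed sketch: item type, hooks and traits are elided)
typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_gpb;
\endcode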
atomics::atomic<size_t> m_nMaxItemCount; ///< number of items the container can hold before we have to resize
item_counter m_ItemCounter; ///< Item counter
hash m_HashFunctor; ///< Hash functor
- stat m_Stat; ///< Internal stattistics accumulator
+ stat m_Stat; ///< Internal statistics accumulator
protected:
//@cond
};
- /// Lazy free-list based on bounded lock-free queue cds::intrusive::VyukovMPMCCycleQueue
+ /// Lazy free-list based on bounded lock-free queue \p cds::intrusive::VyukovMPMCCycleQueue
/** @ingroup cds_memory_pool
Template parameters:
- \p T - the type of object maintained by the free-list
- - \p Traits - traits for cds::intrusive::VyukovMPMCCycleQueue class plus
- cds::opt::allocator option, defaul is \p vyukov_queue_pool_traits
+ - \p Traits - traits for \p cds::intrusive::VyukovMPMCCycleQueue class plus
+ \p cds::opt::allocator option, default is \p vyukov_queue_pool_traits
\b Internals
return cxx_allocator().New();
}
- /// Deallocated the object \p p
+ /// Deallocates the object \p p
/**
The pool supports allocation of a single object only (\p n = 1).
If \p n > 1, the behaviour is undefined.
};
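// Illustrative use of the lazy pool above
// (ASSUMPTION: the constructor takes the underlying queue capacity;
// the item type msg is hypothetical):
inline void lazy_pool_usage_sketch()
{
    struct msg { int data; };
    cds::memory::lazy_vyukov_queue_pool< msg > pool( 1024 );

    msg * p = pool.allocate( 1 );   // the pool supports n == 1 only
    p->data = 42;
    pool.deallocate( p, 1 );        // return the object to the free-list
}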
- /// Bounded free-list based on bounded lock-free queue cds::intrusive::VyukovMPMCCycleQueue
+ /// Bounded free-list based on bounded lock-free queue \p cds::intrusive::VyukovMPMCCycleQueue
/** @ingroup cds_memory_pool
Template parameters:
- \p T - the type of object maintained by the free-list
- - \p Traits - traits for cds::intrusive::VyukovMPMCCycleQueue class plus
- cds::opt::allocator option, defaul is \p vyukov_queue_pool_traits
+ - \p Traits - traits for \p cds::intrusive::VyukovMPMCCycleQueue class plus
+ \p cds::opt::allocator option, default is \p vyukov_queue_pool_traits
\b Internals
return p;
}
- /// Deallocated the object \p p
+ /// Deallocates the object \p p
/**
The pool supports allocation of a single object only (\p n = 1).
If \p n > 1, the behaviour is undefined.
- \p should be from preallocated pool.
+ \p p should be from preallocated pool.
*/
void deallocate( value_type * p, size_t n )
{