#define CDSLIB_GC_DETAILS_RETIRED_PTR_H
#include <cds/details/defs.h>
+#include <cds/details/static_functor.h>
//@cond
namespace cds { namespace gc {
return !(p1 == p2);
}
} // namespace details
+
+ template <typename Func, typename T>
+ cds::gc::details::retired_ptr make_retired_ptr( T * p )
+ {
+ return cds::gc::details::retired_ptr( p, cds::details::static_functor<Func, T>::call );
+ }
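+
+ // A minimal usage sketch (the functor and node names below are illustrative,
+ // not part of this header). make_retired_ptr() binds a disposer functor to a
+ // raw pointer via static_functor, so retired_ptr::free() can invoke it later:
+ //
+ // struct node_disposer {
+ //     void operator()( my_node * p ) { delete p; }
+ // };
+ // my_node * pNode = new my_node;
+ // cds::gc::details::retired_ptr rp = cds::gc::make_retired_ptr<node_disposer>( pNode );
+ // rp.free(); // invokes node_disposer()( pNode )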
+
}} // namespace cds::gc
//@endcond
{
assert( !gc::is_locked() );
- node_type * p = pDelChain;
- if ( p ) {
- while ( p ) {
- node_type * pNext = p->m_pDelChain;
- dispose_node( p );
- p = pNext;
- }
+ node_type * chain = pDelChain;
+ if ( chain ) {
+ auto f = [&chain]() -> cds::urcu::retired_ptr {
+ node_type * p = chain;
+ if ( p ) {
+ chain = p->m_pDelChain;
+ return cds::urcu::make_retired_ptr<clear_and_dispose>( node_traits::to_value_ptr( p ));
+ }
+ // end of chain: a null retired pointer stops gc::batch_retire()
+ return cds::urcu::make_retired_ptr<clear_and_dispose>( static_cast<value_type *>( nullptr ));
+ };
+ // std::ref() avoids copying the stateful functor into batch_retire()
+ gc::batch_retire( std::ref( f ));
}
}
};
template <typename Q>
value_type * get( Q const& key )
{
- return get_at( const_cast<atomic_node_ptr&>( m_pHead ), key, key_comparator());
+ return get_at( m_pHead, key, key_comparator());
}
/// Finds \p key and returns the item found
value_type * get_with( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- return get_at( const_cast<atomic_node_ptr&>( m_pHead ), key, cds::opt::details::make_comparator_from_less<Less>());
+ return get_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
}
/// Clears the list using the default disposer
///@anchor cds_urcu_retired_ptr Retired pointer, i.e. a pointer that is ready for reclamation
typedef cds::gc::details::retired_ptr retired_ptr;
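+ /// Brings \p cds::gc::make_retired_ptr into \p cds::urcu, so it can be called as \p cds::urcu::make_retired_ptr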
+ using cds::gc::make_retired_ptr;
/// Pointer to function to free (destruct and deallocate) retired pointer of specific type
typedef cds::gc::details::free_retired_ptr_func free_retired_ptr_func;
pRec->m_nAccessControl.store( gp_singleton<RCUtag>::instance()->global_control_word(atomics::memory_order_relaxed),
atomics::memory_order_relaxed );
atomics::atomic_thread_fence( atomics::memory_order_acquire );
- //CDS_COMPILER_RW_BARRIER;
+ CDS_COMPILER_RW_BARRIER;
}
else {
pRec->m_nAccessControl.fetch_add( 1, atomics::memory_order_relaxed );
thread_record * pRec = get_thread_record();
assert( pRec != nullptr );
- //CDS_COMPILER_RW_BARRIER;
+ CDS_COMPILER_RW_BARRIER;
pRec->m_nAccessControl.fetch_sub( 1, atomics::memory_order_release );
}
p.free();
}
else {
- push_buffer( p );
+ push_buffer( std::move(p) );
break;
}
}
}
// Return: true - synchronize has been called, false - otherwise
- bool push_buffer( epoch_retired_ptr& ep )
+ bool push_buffer( epoch_retired_ptr&& ep )
{
bool bPushed = m_Buffer.push( ep );
if ( !bPushed || m_Buffer.size() >= capacity() ) {
*/
virtual void retire_ptr( retired_ptr& p )
{
- if ( p.m_p ) {
- epoch_retired_ptr ep( p, m_nCurEpoch.load( atomics::memory_order_relaxed ));
- push_buffer( ep );
- }
+ if ( p.m_p )
+ push_buffer( epoch_retired_ptr( p, m_nCurEpoch.load( atomics::memory_order_relaxed )));
}
/// Retires the pointer chain [\p itFirst, \p itLast)
{
uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
while ( itFirst != itLast ) {
- epoch_retired_ptr ep( *itFirst, nEpoch );
+ push_buffer( epoch_retired_ptr( *itFirst, nEpoch ));
++itFirst;
- push_buffer( ep );
}
}
+ /// Retires the pointer chain until \p Func returns a null retired pointer
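+ /**
+     The functor \p e is called repeatedly; each call must return the next
+     retired pointer of the chain, and a retired pointer with null \p m_p
+     stops the loop. A sketch of a suitable functor (the node and disposer
+     names are illustrative, not part of this interface):
+     \code
+     struct disposer { void operator()( node_type * p ) { delete p; } };
+     node_type * chain = pChainHead; // hypothetical head of the chain to retire
+     auto e = [&chain]() -> cds::urcu::retired_ptr {
+         node_type * p = chain;
+         if ( p ) {
+             chain = p->m_pDelChain;
+             return cds::urcu::make_retired_ptr<disposer>( p );
+         }
+         // a null retired pointer ends the batch
+         return cds::urcu::make_retired_ptr<disposer>( static_cast<node_type *>( nullptr ));
+     };
+     batch_retire( std::ref( e ));
+     \endcode
+ */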
+ template <typename Func>
+ void batch_retire( Func e )
+ {
+ uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
+ for ( retired_ptr p{ e() }; p.m_p; p = e() )
+ push_buffer( epoch_retired_ptr( p, nEpoch ));
+ }
+
/// Waits to finish a grace period and then clears the buffer
void synchronize()
{
virtual void retire_ptr( retired_ptr& p )
{
synchronize();
- if ( p.m_p ) {
- // TSan ignores atomic_thread_fence in synchronize()
- //CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( p.m_p );
+ if ( p.m_p )
p.free();
- }
}
/// Retires the pointer chain [\p itFirst, \p itLast)
while ( itFirst != itLast ) {
retired_ptr p( *itFirst );
++itFirst;
- if ( p.m_p ) {
- // TSan ignores atomic_thread_fence in synchronize()
- //CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( p.m_p );
+ if ( p.m_p )
p.free();
- }
}
}
}
+ /// Retires the pointer chain until \p Func returns a null retired pointer
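+ /**
+     In this flavor \p synchronize() is called only once, before freeing: the
+     single grace period covers every pointer the functor \p e subsequently
+     returns, so each of them can be freed immediately.
+ */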
+ template <typename Func>
+ void batch_retire( Func e )
+ {
+ retired_ptr p{ e() };
+ if ( p.m_p ) {
+ synchronize();
+ for ( ; p.m_p; p = e() )
+ p.free();
+ }
+ }
+
/// Waits to finish a grace period
void synchronize()
{
}
// Return: true - synchronize has been called, false - otherwise
- bool push_buffer( epoch_retired_ptr& p )
+ bool push_buffer( epoch_retired_ptr&& p )
{
bool bPushed = m_Buffer.push( p );
if ( !bPushed || m_Buffer.size() >= capacity() ) {
*/
virtual void retire_ptr( retired_ptr& p )
{
- if ( p.m_p ) {
- epoch_retired_ptr ep( p, m_nCurEpoch.load( atomics::memory_order_acquire ) );
- push_buffer( ep );
- }
+ if ( p.m_p )
+ push_buffer( epoch_retired_ptr( p, m_nCurEpoch.load( atomics::memory_order_acquire )));
}
/// Retires the pointer chain [\p itFirst, \p itLast)
{
uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
while ( itFirst != itLast ) {
- epoch_retired_ptr p( *itFirst, nEpoch );
+ push_buffer( epoch_retired_ptr( *itFirst, nEpoch ));
++itFirst;
- push_buffer( p );
}
}
+ /// Retires the pointer chain until \p Func returns a null retired pointer
+ template <typename Func>
+ void batch_retire( Func e )
+ {
+ uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
+ for ( retired_ptr p{ e() }; p.m_p; p = e() )
+ push_buffer( epoch_retired_ptr( p, nEpoch ));
+ }
+
/// Waits to finish a grace period and calls the disposing thread
void synchronize()
{
}
}
- bool push_buffer( epoch_retired_ptr& ep )
+ bool push_buffer( epoch_retired_ptr&& ep )
{
bool bPushed = m_Buffer.push( ep );
if ( !bPushed || m_Buffer.size() >= capacity() ) {
*/
virtual void retire_ptr( retired_ptr& p )
{
- if ( p.m_p ) {
- epoch_retired_ptr ep( p, m_nCurEpoch.load( atomics::memory_order_relaxed ));
- push_buffer( ep );
- }
+ if ( p.m_p )
+ push_buffer( epoch_retired_ptr( p, m_nCurEpoch.load( atomics::memory_order_relaxed )));
}
/// Retires the pointer chain [\p itFirst, \p itLast)
{
uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
while ( itFirst != itLast ) {
- epoch_retired_ptr ep( *itFirst, nEpoch );
+ push_buffer( epoch_retired_ptr( *itFirst, nEpoch ));
++itFirst;
- push_buffer( ep );
}
}
+ /// Retires the pointer chain until \p Func returns a null retired pointer
+ template <typename Func>
+ void batch_retire( Func e )
+ {
+ uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
+ for ( retired_ptr p{ e() }; p.m_p; p = e() )
+ push_buffer( epoch_retired_ptr( p, nEpoch ));
+ }
+
/// Waits to finish a grace period and then clears the buffer
void synchronize()
{
{}
// Return: true - synchronize has been called, false - otherwise
- bool push_buffer( epoch_retired_ptr& p )
+ bool push_buffer( epoch_retired_ptr&& p )
{
bool bPushed = m_Buffer.push( p );
if ( !bPushed || m_Buffer.size() >= capacity() ) {
{
- if ( p.m_p ) {
- epoch_retired_ptr ep( p, m_nCurEpoch.load( atomics::memory_order_acquire ) );
- push_buffer( ep );
- }
+ if ( p.m_p )
+ push_buffer( epoch_retired_ptr( p, m_nCurEpoch.load( atomics::memory_order_acquire )));
}
{
uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
while ( itFirst != itLast ) {
- epoch_retired_ptr p( *itFirst, nEpoch );
+ push_buffer( epoch_retired_ptr( *itFirst, nEpoch ));
++itFirst;
- push_buffer( p );
}
}
+ /// Retires the pointer chain until \p Func returns a null retired pointer
+ template <typename Func>
+ void batch_retire( Func e )
+ {
+ uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
+ for ( retired_ptr p{ e() }; p.m_p; p = e() )
+ push_buffer( epoch_retired_ptr( p, nEpoch ));
+ }
+
/// Waits to finish a grace period and calls the disposing thread
void synchronize()
{
rcu_implementation::instance()->batch_retire( itFirst, itLast );
}
+ /// Retires the pointer chain until \p Func returns a null retired pointer
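+ /**
+     A usage sketch from the caller's side (the RCU flavor and the functor
+     \p e are assumptions for illustration): \p e returns the next
+     \p cds::urcu::retired_ptr on each call, and a retired pointer with null
+     \p m_p ends the loop.
+     \code
+     typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu;
+     rcu::batch_retire( std::ref( e ));
+     \endcode
+ */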
+ template <typename Func>
+ static void batch_retire( Func e )
+ {
+ rcu_implementation::instance()->batch_retire( e );
+ }
+
/// Acquires the access lock (the so-called RCU reader-side lock)
/**
For safety reasons, it is better to use the \ref scoped_lock class for locking/unlocking
rcu_implementation::instance()->batch_retire( itFirst, itLast );
}
+ /// Retires the pointer chain until \p Func returns a null retired pointer
+ template <typename Func>
+ static void batch_retire( Func e )
+ {
+ rcu_implementation::instance()->batch_retire( e );
+ }
+
/// Acquires the access lock (the so-called RCU reader-side lock)
/**
For safety reasons, it is better to use the \ref scoped_lock class for locking/unlocking
rcu_implementation::instance()->batch_retire( itFirst, itLast );
}
+ /// Retires the pointer chain until \p Func returns a null retired pointer
+ template <typename Func>
+ static void batch_retire( Func e )
+ {
+ rcu_implementation::instance()->batch_retire( e );
+ }
+
/// Acquires the access lock (the so-called RCU reader-side lock)
/**
For safety reasons, it is better to use the \ref scoped_lock class for locking/unlocking
rcu_implementation::instance()->batch_retire( itFirst, itLast );
}
+ /// Retires the pointer chain until \p Func returns a null retired pointer
+ template <typename Func>
+ static void batch_retire( Func e )
+ {
+ rcu_implementation::instance()->batch_retire( e );
+ }
+
/// Acquires the access lock (the so-called RCU reader-side lock)
/**
For safety reasons, it is better to use the \ref scoped_lock class for locking/unlocking
rcu_implementation::instance()->batch_retire( itFirst, itLast );
}
+ /// Retires the pointer chain until \p Func returns a null retired pointer
+ template <typename Func>
+ static void batch_retire( Func e )
+ {
+ rcu_implementation::instance()->batch_retire( e );
+ }
+
/// Acquires the access lock (the so-called RCU reader-side lock)
/**
For safety reasons, it is better to use the \ref scoped_lock class for locking/unlocking