3 #ifndef _CDS_URCU_DETAILS_GPT_H
4 #define _CDS_URCU_DETAILS_GPT_H
6 #include <cds/urcu/details/gp.h>
7 #include <cds/urcu/dispose_thread.h>
8 #include <cds/algo/backoff_strategy.h>
9 #include <cds/container/vyukov_mpmc_cycle_queue.h>
11 namespace cds { namespace urcu {
13 /// User-space general-purpose RCU with deferred threaded reclamation
15 @headerfile cds/urcu/general_threaded.h
17 This implementation is similar to \ref general_buffered but separate thread is created
18 for deleting the retired objects. Like \p %general_buffered, the class contains an internal buffer
19 where retired objects are accumulated. When the buffer becomes full,
20 the RCU \p synchronize function is called that waits until all reader/updater threads have exited their read-side critical sections,
21 i.e. until the RCU quiescent state has come. After that the "work ready" message is sent to the reclamation thread.
22 The reclamation thread frees the buffer.
23 This synchronization cycle may be called in any thread that calls \ref retire_ptr function.
25 There is a wrapper \ref cds_urcu_general_threaded_gc "gc<general_threaded>" for \p %general_threaded class
26 that provides unified RCU interface. You should use this wrapper class instead of \p %general_threaded
29 - \p Buffer - buffer type with FIFO semantics. Default is cds::container::VyukovMPMCCycleQueue. See \ref general_buffered
30 for description of buffer's interface. The buffer contains the objects of \ref epoch_retired_ptr
31 type that contains additional \p m_nEpoch field. This field specifies an epoch when the object
32 has been placed into the buffer. The \p %general_threaded object has a global epoch counter
33 that is incremented on each \p synchronize call. The epoch is used internally to prevent early deletion.
34 - \p Lock - mutex type, default is \p std::mutex
35 - \p DisposerThread - the reclamation thread class. Default is \ref cds::urcu::dispose_thread,
36 see the description of this class for required interface.
37 - \p Backoff - back-off schema, default is cds::backoff::Default
40 class Buffer = cds::container::VyukovMPMCCycleQueue<
42 ,cds::opt::buffer< cds::opt::v::dynamic_buffer< epoch_retired_ptr > >
44 ,class Lock = std::mutex
45 ,class DisposerThread = dispose_thread<Buffer>
46 ,class Backoff = cds::backoff::Default
48 class general_threaded: public details::gp_singleton< general_threaded_tag >
51 typedef details::gp_singleton< general_threaded_tag > base_class;
54 typedef Buffer buffer_type ; ///< Buffer type
55 typedef Lock lock_type ; ///< Lock type
56 typedef Backoff back_off ; ///< Back-off scheme
57 typedef DisposerThread disposer_thread ; ///< Disposer thread type
59 typedef general_threaded_tag rcu_tag ; ///< Thread-side RCU part
60 typedef base_class::thread_gc thread_gc ; ///< Access lock class
61 typedef typename thread_gc::scoped_lock scoped_lock ; ///< Access lock class
63 static bool const c_bBuffered = true ; ///< This RCU buffers disposed elements
67 typedef details::gp_singleton_instance< rcu_tag > singleton_ptr;
69 struct scoped_disposer {
70 void operator ()( general_threaded * p )
80 CDS_ATOMIC::atomic<uint64_t> m_nCurEpoch;
82 size_t const m_nCapacity;
83 disposer_thread m_DisposerThread;
87 /// Returns singleton instance
88 static general_threaded * instance()
90 return static_cast<general_threaded *>( base_class::instance() );
92 /// Checks if the singleton is created and ready to use
95 return singleton_ptr::s_pRCU != nullptr;
100 general_threaded( size_t nBufferCapacity )
101 : m_Buffer( nBufferCapacity )
103 , m_nCapacity( nBufferCapacity )
109 base_class::flip_and_wait( bkoff );
112 // Return: true - synchronize has been called, false - otherwise
113 bool push_buffer( epoch_retired_ptr& p )
115 bool bPushed = m_Buffer.push( p );
116 if ( !bPushed || m_Buffer.size() >= capacity() ) {
133 /// Creates singleton object and starts reclamation thread
135 The \p nBufferCapacity parameter defines RCU threshold.
137 static void Construct( size_t nBufferCapacity = 256 )
139 if ( !singleton_ptr::s_pRCU ) {
140 std::unique_ptr< general_threaded, scoped_disposer > pRCU( new general_threaded( nBufferCapacity ) );
141 pRCU->m_DisposerThread.start();
143 singleton_ptr::s_pRCU = pRCU.release();
147 /// Destroys singleton object and terminates internal reclamation thread
148 static void Destruct( bool bDetachAll = false )
151 general_threaded * pThis = instance();
153 pThis->m_ThreadList.detach_all();
155 pThis->m_DisposerThread.stop( pThis->m_Buffer, pThis->m_nCurEpoch.load( CDS_ATOMIC::memory_order_acquire ));
158 singleton_ptr::s_pRCU = nullptr;
163 /// Retires \p p pointer
165 The method pushes \p p pointer to internal buffer.
166 When the buffer becomes full \ref synchronize function is called
167 to wait for the end of grace period and then
168 a message is sent to the reclamation thread.
170 virtual void retire_ptr( retired_ptr& p )
173 epoch_retired_ptr ep( p, m_nCurEpoch.load( CDS_ATOMIC::memory_order_acquire ) );
178 /// Retires the pointer chain [\p itFirst, \p itLast)
179 template <typename ForwardIterator>
180 void batch_retire( ForwardIterator itFirst, ForwardIterator itLast )
182 uint64_t nEpoch = m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed );
183 while ( itFirst != itLast ) {
184 epoch_retired_ptr p( *itFirst, nEpoch );
190 /// Waits to finish a grace period and calls disposing thread
193 synchronize( false );
197 void synchronize( bool bSync )
199 uint64_t nPrevEpoch = m_nCurEpoch.fetch_add( 1, CDS_ATOMIC::memory_order_release );
201 CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire );
203 cds::lock::scoped_lock<lock_type> sl( m_Lock );
207 m_DisposerThread.dispose( m_Buffer, nPrevEpoch, bSync );
209 CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release );
217 /// Returns the threshold of internal buffer
218 size_t capacity() const
223 }} // namespace cds::urcu
225 #endif // #ifndef _CDS_URCU_DETAILS_GPT_H