3 #ifndef CDSLIB_URCU_DETAILS_SIG_BUFFERED_H
4 #define CDSLIB_URCU_DETAILS_SIG_BUFFERED_H
6 #include <cds/urcu/details/sh.h>
7 #ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
10 #include <cds/algo/backoff_strategy.h>
11 #include <cds/container/vyukov_mpmc_cycle_queue.h>
13 namespace cds { namespace urcu {
15 /// User-space signal-handled RCU with deferred (buffered) reclamation
17 @headerfile cds/urcu/signal_buffered.h
19 This URCU implementation contains an internal buffer where retired objects are
20 accumulated. When the buffer becomes full, the RCU \p synchronize function is called
21 that waits until all reader/updater threads exit their read-side critical sections,
22 i.e. until the RCU quiescent state is reached. After that the buffer and all retired objects are freed.
23 This synchronization cycle may be called in any thread that calls \p retire_ptr function.
25 The \p Buffer contains items of \ref cds_urcu_retired_ptr "retired_ptr" type and it should support a queue interface with
27 - <tt> bool push( retired_ptr& p ) </tt> - places the retired pointer \p p into queue. If the function
28 returns \p false it means that the buffer is full and RCU synchronization cycle must be processed.
29 - <tt>bool pop( retired_ptr& p ) </tt> - pops queue's head item into \p p parameter; if the queue is empty
30 this function must return \p false
31 - <tt>size_t size()</tt> - returns queue's item count.
33 The buffer is considered full if \p push returns \p false or the buffer size reaches the RCU threshold.
35 There is a wrapper \ref cds_urcu_signal_buffered_gc "gc<signal_buffered>" for \p %signal_buffered class
36 that provides a unified RCU interface. You should use this wrapper class instead of \p %signal_buffered
39 - \p Buffer - buffer type. Default is cds::container::VyukovMPMCCycleQueue
40 - \p Lock - mutex type, default is \p std::mutex
41 - \p Backoff - back-off schema, default is cds::backoff::Default
44 class Buffer = cds::container::VyukovMPMCCycleQueue< epoch_retired_ptr >
45 ,class Lock = std::mutex
46 ,class Backoff = cds::backoff::Default
48 class signal_buffered: public details::sh_singleton< signal_buffered_tag >
51 typedef details::sh_singleton< signal_buffered_tag > base_class;
54 typedef signal_buffered_tag rcu_tag ; ///< RCU tag
55 typedef Buffer buffer_type ; ///< Buffer type
56 typedef Lock lock_type ; ///< Lock type
57 typedef Backoff back_off ; ///< Back-off type
59 typedef base_class::thread_gc thread_gc ; ///< Thread-side RCU part
60 typedef typename thread_gc::scoped_lock scoped_lock ; ///< Access lock class
62 static bool const c_bBuffered = true ; ///< This RCU buffers disposed elements
66 typedef details::sh_singleton_instance< rcu_tag > singleton_ptr;
72 atomics::atomic<uint64_t> m_nCurEpoch;
74 size_t const m_nCapacity;
78 /// Returns singleton instance
79 static signal_buffered * instance()
81 return static_cast<signal_buffered *>( base_class::instance() );
83 /// Checks if the singleton is created and ready to use
86 return singleton_ptr::s_pRCU != nullptr;
91 signal_buffered( size_t nBufferCapacity, int nSignal = SIGUSR1 )
92 : base_class( nSignal )
93 , m_Buffer( nBufferCapacity )
95 , m_nCapacity( nBufferCapacity )
100 clear_buffer( (uint64_t) -1 );
103 void clear_buffer( uint64_t nEpoch )
106 while ( m_Buffer.pop( p )) {
107 if ( p.m_nEpoch <= nEpoch )
116 bool push_buffer( epoch_retired_ptr& ep )
118 bool bPushed = m_Buffer.push( ep );
119 if ( !bPushed || m_Buffer.size() >= capacity() ) {
130 /// Creates singleton object
132 The \p nBufferCapacity parameter defines RCU threshold.
134 The \p nSignal parameter defines a signal number stated for RCU, default is \p SIGUSR1
136 static void Construct( size_t nBufferCapacity = 256, int nSignal = SIGUSR1 )
138 if ( !singleton_ptr::s_pRCU )
139 singleton_ptr::s_pRCU = new signal_buffered( nBufferCapacity, nSignal );
142 /// Destroys singleton object
143 static void Destruct( bool bDetachAll = false )
146 instance()->clear_buffer( (uint64_t) -1 );
148 instance()->m_ThreadList.detach_all();
150 singleton_ptr::s_pRCU = nullptr;
155 /// Retire \p p pointer
157 The method pushes \p p pointer to internal buffer.
158 When the buffer becomes full \ref synchronize function is called
159 to wait for the end of grace period and then to free all pointers from the buffer.
161 virtual void retire_ptr( retired_ptr& p )
164 epoch_retired_ptr ep( p, m_nCurEpoch.load( atomics::memory_order_relaxed ));
169 /// Retires the pointer chain [\p itFirst, \p itLast)
170 template <typename ForwardIterator>
171 void batch_retire( ForwardIterator itFirst, ForwardIterator itLast )
173 uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
174 while ( itFirst != itLast ) {
175 epoch_retired_ptr ep( *itFirst, nEpoch );
181 /// Wait to finish a grace period and then clear the buffer
184 epoch_retired_ptr ep( retired_ptr(), m_nCurEpoch.load( atomics::memory_order_relaxed ));
189 bool synchronize( epoch_retired_ptr& ep )
192 atomics::atomic_thread_fence( atomics::memory_order_acquire );
194 std::unique_lock<lock_type> sl( m_Lock );
195 if ( ep.m_p && m_Buffer.push( ep ) && m_Buffer.size() < capacity())
197 nEpoch = m_nCurEpoch.fetch_add( 1, atomics::memory_order_relaxed );
200 base_class::force_membar_all_threads( bkOff );
201 base_class::switch_next_epoch();
203 base_class::wait_for_quiescent_state( bkOff );
204 base_class::switch_next_epoch();
206 base_class::wait_for_quiescent_state( bkOff );
207 base_class::force_membar_all_threads( bkOff );
210 clear_buffer( nEpoch );
215 /// Returns the threshold of internal buffer
216 size_t capacity() const
221 /// Returns the signal number stated for RCU
222 int signal_no() const
224 return base_class::signal_no();
228 }} // namespace cds::urcu
230 #endif // #ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
231 #endif // #ifndef CDSLIB_URCU_DETAILS_SIG_BUFFERED_H