/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_URCU_DETAILS_GPB_H
#define CDSLIB_URCU_DETAILS_GPB_H
#include <mutex>
#include <limits>

#include <cds/urcu/details/gp.h>
#include <cds/algo/backoff_strategy.h>
#include <cds/container/vyukov_mpmc_cycle_queue.h>
namespace cds { namespace urcu {
/// User-space general-purpose RCU with deferred (buffered) reclamation
/**
    @headerfile cds/urcu/general_buffered.h

    This URCU implementation contains an internal buffer where retired objects are
    accumulated. When the buffer becomes full, the RCU \p synchronize function is called
    that waits until all reader/updater threads end up their read-side critical sections,
    i.e. until the RCU quiescent state will come. After that the buffer and all retired objects are freed.
    This synchronization cycle may be called in any thread that calls \p retire_ptr function.

    The \p Buffer contains items of \ref cds_urcu_retired_ptr "epoch_retired_ptr" type and it should support a queue interface with
    two function:
    - <tt> bool push( retired_ptr& p ) </tt> - places the retired pointer \p p into queue. If the function
        returns \p false it means that the buffer is full and RCU synchronization cycle must be processed.
    - <tt>bool pop( retired_ptr& p ) </tt> - pops queue's head item into \p p parameter; if the queue is empty
        this function must return \p false
    - <tt>size_t size()</tt> - returns queue's item count.

    The buffer is considered as full if \p push() returns \p false or the buffer size reaches the RCU threshold.

    There is a wrapper \ref cds_urcu_general_buffered_gc "gc<general_buffered>" for \p %general_buffered class
    that provides unified RCU interface. You should use this wrapper class instead \p %general_buffered

    Template arguments:
    - \p Buffer - buffer type. Default is \p cds::container::VyukovMPMCCycleQueue
    - \p Lock - mutex type, default is \p std::mutex
    - \p Backoff - back-off schema, default is cds::backoff::Default
*/
71 class Buffer = cds::container::VyukovMPMCCycleQueue< epoch_retired_ptr >
72 ,class Lock = std::mutex
73 ,class Backoff = cds::backoff::Default
75 class general_buffered: public details::gp_singleton< general_buffered_tag >
78 typedef details::gp_singleton< general_buffered_tag > base_class;
81 typedef general_buffered_tag rcu_tag ; ///< RCU tag
82 typedef Buffer buffer_type ; ///< Buffer type
83 typedef Lock lock_type ; ///< Lock type
84 typedef Backoff back_off ; ///< Back-off type
86 typedef base_class::thread_gc thread_gc ; ///< Thread-side RCU part
87 typedef typename thread_gc::scoped_lock scoped_lock ; ///< Access lock class
90 static bool const c_bBuffered = true ; ///< Bufferized RCU
95 typedef details::gp_singleton_instance< rcu_tag > singleton_ptr;
100 buffer_type m_Buffer;
101 atomics::atomic<uint64_t> m_nCurEpoch;
103 size_t const m_nCapacity;
107 /// Returns singleton instance
108 static general_buffered * instance()
110 return static_cast<general_buffered *>( base_class::instance());
112 /// Checks if the singleton is created and ready to use
115 return singleton_ptr::s_pRCU != nullptr;
120 general_buffered( size_t nBufferCapacity )
121 : m_Buffer( nBufferCapacity )
123 , m_nCapacity( nBufferCapacity )
128 clear_buffer( std::numeric_limits< uint64_t >::max());
134 base_class::flip_and_wait( bkoff );
137 void clear_buffer( uint64_t nEpoch )
140 while ( m_Buffer.pop( p )) {
141 if ( p.m_nEpoch <= nEpoch ) {
145 push_buffer( std::move(p));
151 // Return: \p true - synchronize has been called, \p false - otherwise
152 bool push_buffer( epoch_retired_ptr&& ep )
154 bool bPushed = m_Buffer.push( ep );
155 if ( !bPushed || m_Buffer.size() >= capacity()) {
167 /// Creates singleton object
169 The \p nBufferCapacity parameter defines RCU threshold.
171 static void Construct( size_t nBufferCapacity = 256 )
173 if ( !singleton_ptr::s_pRCU )
174 singleton_ptr::s_pRCU = new general_buffered( nBufferCapacity );
177 /// Destroys singleton object
178 static void Destruct( bool bDetachAll = false )
181 instance()->clear_buffer( std::numeric_limits< uint64_t >::max());
183 instance()->m_ThreadList.detach_all();
185 singleton_ptr::s_pRCU = nullptr;
190 /// Retire \p p pointer
192 The method pushes \p p pointer to internal buffer.
193 When the buffer becomes full \ref synchronize function is called
194 to wait for the end of grace period and then to free all pointers from the buffer.
196 virtual void retire_ptr( retired_ptr& p ) override
199 push_buffer( epoch_retired_ptr( p, m_nCurEpoch.load( atomics::memory_order_relaxed )));
202 /// Retires the pointer chain [\p itFirst, \p itLast)
203 template <typename ForwardIterator>
204 void batch_retire( ForwardIterator itFirst, ForwardIterator itLast )
206 uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
207 while ( itFirst != itLast ) {
208 epoch_retired_ptr ep( *itFirst, nEpoch );
210 push_buffer( std::move(ep));
214 /// Retires the pointer chain until \p Func returns \p nullptr retired pointer
215 template <typename Func>
216 void batch_retire( Func e )
218 uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
219 for ( retired_ptr p{ e() }; p.m_p; ) {
220 epoch_retired_ptr ep( p, nEpoch );
222 push_buffer( std::move(ep));
226 /// Wait to finish a grace period and then clear the buffer
229 epoch_retired_ptr ep( retired_ptr(), m_nCurEpoch.load( atomics::memory_order_relaxed ));
234 bool synchronize( epoch_retired_ptr& ep )
238 std::unique_lock<lock_type> sl( m_Lock );
239 if ( ep.m_p && m_Buffer.push( ep ))
241 nEpoch = m_nCurEpoch.fetch_add( 1, atomics::memory_order_relaxed );
245 clear_buffer( nEpoch );
250 /// Returns internal buffer capacity
251 size_t capacity() const
/// User-space general-purpose RCU with deferred (buffered) reclamation (stripped version)
/**
    @headerfile cds/urcu/general_buffered.h

    This short version of \p general_buffered is intended for stripping debug info.
    If you use \p %general_buffered with default template arguments you may use
    this stripped version. All functionality of both classes are identical.
*/
265 class general_buffered_stripped: public general_buffered<>
}} // namespace cds::urcu
#endif // #ifndef CDSLIB_URCU_DETAILS_GPB_H