/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_FREE_LIST_H
#define CDSLIB_INTRUSIVE_FREE_LIST_H

#include <cds/algo/atomic.h>

namespace cds { namespace intrusive {
    /// Lock-free free list
    /** @ingroup cds_intrusive_freelist

        Free list is a helper class intended for reusing objects instead of freeing them completely;
        this avoids the overhead of \p malloc(), and also avoids its worst-case behavior of taking an
        operating system lock. So, the free list can be considered as a specialized allocator for
        objects of some type.

        The algorithm is taken from <a href="http://moodycamel.com/blog/2014/solving-the-aba-problem-for-lock-free-free-lists">this article</a>.
        The algorithm does not require any SMR schema like Hazard Pointer to prevent the ABA problem.

        There is a \ref TaggedFreeList "tagged pointers" variant of the free list for processors
        with double-width CAS support.
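        In brief, the anti-ABA technique is a per-node 32-bit word that packs a reference counter
        (the low 31 bits) with a "should be on free list" flag (the high bit): \p get() may unlink
        the head node only after successfully incrementing its counter, and a node is linked back
        onto the list only by the thread that observes its counter reach zero with the flag set.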
        \b How to use
        \code
        #include <cds/intrusive/free_list.h>

        // Your struct should be derived from FreeList::node
        struct Foo: public cds::intrusive::FreeList::node
        {
            // Foo fields
        };

        // Simplified Foo allocator
        class FooAllocator
        {
        public:
            // free-list clear() must be explicitly called before destroying the free-list object
            ~FooAllocator()
            {
                m_FreeList.clear( []( freelist_node * p ) { delete static_cast<Foo *>( p ); });
            }

            Foo * alloc()
            {
                freelist_node * p = m_FreeList.get();
                if ( p )
                    return static_cast<Foo *>( p );
                return new Foo;
            }

            void dealloc( Foo * p )
            {
                m_FreeList.put( static_cast<freelist_node *>( p ));
            }

        private:
            typedef cds::intrusive::FreeList::node freelist_node;
            cds::intrusive::FreeList m_FreeList;
        };
        \endcode
    */
    class FreeList
    {
    public:
        /// Free list node
        struct node {
            atomics::atomic<uint32_t>   m_freeListRefs;
            atomics::atomic<node *>     m_freeListNext;

            node()
                : m_freeListRefs( 0 )
            {
                m_freeListNext.store( nullptr, atomics::memory_order_release );
            }
        };
    public:
        /// Creates an empty free list
        FreeList()
            : m_Head( nullptr )
        {}
        /// Destroys the free list. The free list must be empty.
        /**
            @warning The destructor does not free elements of the list.
            To free elements you should manually call \p clear() with an appropriate disposer.
        */
        ~FreeList()
        {
            assert( empty());
        }
        /// Puts \p pNode to the free list
        void put( node * pNode )
        {
            // We know that the should-be-on-freelist bit is 0 at this point, so it's safe to
            // set it using a fetch_add
            if ( pNode->m_freeListRefs.fetch_add( c_ShouldBeOnFreeList, atomics::memory_order_release ) == 0 ) {
                // Oh look! We were the last ones referencing this node, and we know
                // we want to add it to the free list, so let's do it!
                add_knowing_refcount_is_zero( pNode );
            }
        }
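        // Note on the contended put() case: if concurrent get() callers still hold references
        // to the node when put() sets the flag, the fetch_add returns a non-zero previous value
        // and put() adds nothing; the last such caller, whose fetch_sub in get() brings the
        // counter back to zero with the flag set, performs the deferred add instead.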
        /// Gets a node from the free list. If the list is empty, returns \p nullptr
        node * get()
        {
            auto head = m_Head.load( atomics::memory_order_acquire );
            while ( head != nullptr ) {
                auto prevHead = head;
                auto refs = head->m_freeListRefs.load( atomics::memory_order_relaxed );

                if ( cds_unlikely( (refs & c_RefsMask) == 0 || !head->m_freeListRefs.compare_exchange_strong( refs, refs + 1,
                        atomics::memory_order_acquire, atomics::memory_order_relaxed )))
                {
                    head = m_Head.load( atomics::memory_order_acquire );
                    continue;
                }

                // Good, reference count has been incremented (it wasn't at zero), which means
                // we can read the next pointer and not worry about it changing between now and
                // the time we do the CAS
                node * next = head->m_freeListNext.load( atomics::memory_order_relaxed );
                if ( cds_likely( m_Head.compare_exchange_strong( head, next, atomics::memory_order_acquire, atomics::memory_order_relaxed ))) {
                    // Yay, got the node. This means it was on the list, which means
                    // shouldBeOnFreeList must be false no matter the refcount (because
                    // nobody else knows it's been taken off yet, it can't have been put back on).
                    assert( (head->m_freeListRefs.load( atomics::memory_order_relaxed ) & c_ShouldBeOnFreeList) == 0 );

                    // Decrease refcount twice, once for our ref, and once for the list's ref
                    head->m_freeListRefs.fetch_sub( 2, atomics::memory_order_relaxed );

                    return head;
                }

                // OK, the head must have changed on us, but we still need to decrease the refcount
                // we just incremented
                refs = prevHead->m_freeListRefs.fetch_sub( 1, atomics::memory_order_acq_rel );
                if ( refs == c_ShouldBeOnFreeList + 1 )
                    add_knowing_refcount_is_zero( prevHead );
            }

            return nullptr;
        }
        /// Checks whether the free list is empty
        bool empty() const
        {
            return m_Head.load( atomics::memory_order_relaxed ) == nullptr;
        }
        /// Clears the free list (not atomic)
        /**
            For each element the \p disp disposer is called to free memory.
            The \p Disposer interface:
            \code
            struct disposer
            {
                void operator()( FreeList::node * node );
            };
            \endcode

            This method must be explicitly called before the free-list destructor.
        */
        template <typename Disposer>
        void clear( Disposer disp )
        {
            node * head = m_Head.load( atomics::memory_order_relaxed );
            m_Head.store( nullptr, atomics::memory_order_relaxed );
            while ( head ) {
                node * next = head->m_freeListNext.load( atomics::memory_order_relaxed );
                disp( head );
                head = next;
            }
        }
    private:
        void add_knowing_refcount_is_zero( node * pNode )
        {
            // Since the refcount is zero, and nobody can increase it once it's zero (except us, and we
            // run only one copy of this method per node at a time, i.e. the single thread case), then we
            // know we can safely change the next pointer of the node; however, once the refcount is back
            // above zero, then other threads could increase it (happens under heavy contention, when the
            // refcount goes to zero in between a load and a refcount increment of a node in get(), then
            // back up to something non-zero, then the refcount increment is done by the other thread) --
            // so, if the CAS to add the node to the actual list fails, decrease the refcount and leave
            // the add operation to the next thread who puts the refcount back at zero (which could be us,
            // hence the loop).
            node * head = m_Head.load( atomics::memory_order_relaxed );
            while ( true ) {
                pNode->m_freeListNext.store( head, atomics::memory_order_relaxed );
                pNode->m_freeListRefs.store( 1, atomics::memory_order_release );
                if ( cds_unlikely( !m_Head.compare_exchange_strong( head, pNode, atomics::memory_order_release, atomics::memory_order_relaxed ))) {
                    // Hmm, the add failed, but we can only try again when the refcount goes back to zero
                    if ( pNode->m_freeListRefs.fetch_add( c_ShouldBeOnFreeList - 1, atomics::memory_order_release ) == 1 )
                        continue;
                }
                return;
            }
        }
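        // Layout of m_freeListRefs, as encoded by the constants below: the low 31 bits hold
        // the reference counter, the high bit is the "should be on free list" flag that put()
        // raises.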
        static CDS_CONSTEXPR uint32_t const c_RefsMask = 0x7FFFFFFF;
        static CDS_CONSTEXPR uint32_t const c_ShouldBeOnFreeList = 0x80000000;
        // Implemented like a stack, but where node order doesn't matter (nodes are
        // inserted out of order under contention)
        atomics::atomic<node *> m_Head;
    };
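    /* A minimal concurrent smoke-test sketch (illustrative only, not part of the library;
       the Item struct and smoke_test() function are hypothetical names): several threads
       repeatedly take a node from the list and put it back, exercising put()/get() under
       contention, then clear() releases the nodes before the free list is destroyed.

       #include <cds/intrusive/free_list.h>
       #include <thread>
       #include <vector>

       struct Item: public cds::intrusive::FreeList::node {};

       void smoke_test()
       {
           cds::intrusive::FreeList list;
           for ( int i = 0; i < 16; ++i )
               list.put( new Item );

           std::vector<std::thread> workers;
           for ( int t = 0; t < 4; ++t ) {
               workers.emplace_back( [&list]() {
                   for ( int i = 0; i < 100000; ++i ) {
                       cds::intrusive::FreeList::node * n = list.get();
                       if ( n )           // get() returns nullptr if the list is momentarily empty
                           list.put( n ); // return the node for reuse
                   }
               });
           }
           for ( auto& th : workers )
               th.join();

           // clear() must be called explicitly before the free list is destroyed
           list.clear( []( cds::intrusive::FreeList::node * n ) { delete static_cast<Item *>( n ); });
       }
    */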
}} // namespace cds::intrusive

#endif // CDSLIB_INTRUSIVE_FREE_LIST_H