2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_INTRUSIVE_FREE_LIST_H
32 #define CDSLIB_INTRUSIVE_FREE_LIST_H
34 #include <cds/algo/atomic.h>
36 namespace cds { namespace intrusive {
38 /// Lock-free free list
39 /** @ingroup cds_intrusive_helper
41 Free list is a helper class intended for reusing objects instead of freeing them completely;
42 this avoids the overhead of \p malloc(), and also avoids its worst-case behavior of taking an operating system lock.
43 So, the free list can be considered as a specialized allocator for objects of some type.
45 The algorithm is taken from <a href="http://moodycamel.com/blog/2014/solving-the-aba-problem-for-lock-free-free-lists">this article</a>.
46 The algo does not require any SMR like Hazard Pointer to prevent ABA problem.
There is a \ref TaggedFreeList "tagged pointers" variant of the free list for processors that support double-width CAS.
52 #include <cds/intrusive/free_list.h>
54 // Your struct should be derived from FreeList::node
55 struct Foo: public cds::intrusive::FreeList::node
60 // Simplified Foo allocator
64 // free-list clear() must be explicitly called before destroying the free-list object
67 m_FreeList.clear( []( freelist_node * p ) { delete static_cast<Foo *>( p ); });
72 freelist_node * p = m_FreeList.get();
74 return static_cast<Foo *>( p );
78 void dealloc( Foo * p )
80 m_FreeList.put( static_cast<freelist_node *>( p ));
84 typedef cds::intrusive::FreeList::node freelist_node;
85 cds::intrusive::FreeList m_FreeList;
95 atomics::atomic<uint32_t> m_freeListRefs;
96 atomics::atomic<node *> m_freeListNext;
100 , m_freeListNext( nullptr )
106 /// Creates empty free list
111 /// Destroys the free list. Free-list must be empty.
113 @warning dtor does not free elements of the list.
114 To free elements you should manually call \p clear() with an appropriate disposer.
121 /// Puts \p pNode to the free list
122 void put( node * pNode )
124 // We know that the should-be-on-freelist bit is 0 at this point, so it's safe to
125 // set it using a fetch_add
126 if ( pNode->m_freeListRefs.fetch_add( c_ShouldBeOnFreeList, atomics::memory_order_release ) == 0 ) {
127 // Oh look! We were the last ones referencing this node, and we know
128 // we want to add it to the free list, so let's do it!
129 add_knowing_refcount_is_zero( pNode );
133 /// Gets a node from the free list. If the list is empty, returns \p nullptr
136 auto head = m_Head.load( atomics::memory_order_acquire );
137 while ( head != nullptr ) {
138 auto prevHead = head;
139 auto refs = head->m_freeListRefs.load( atomics::memory_order_relaxed );
141 if ( cds_unlikely( (refs & c_RefsMask) == 0 || !head->m_freeListRefs.compare_exchange_strong( refs, refs + 1,
142 atomics::memory_order_acquire, atomics::memory_order_relaxed )))
144 head = m_Head.load( atomics::memory_order_acquire );
148 // Good, reference count has been incremented (it wasn't at zero), which means
149 // we can read the next and not worry about it changing between now and the time
151 node * next = head->m_freeListNext.load( atomics::memory_order_relaxed );
152 if ( cds_likely( m_Head.compare_exchange_strong( head, next, atomics::memory_order_acquire, atomics::memory_order_relaxed ))) {
153 // Yay, got the node. This means it was on the list, which means
154 // shouldBeOnFreeList must be false no matter the refcount (because
155 // nobody else knows it's been taken off yet, it can't have been put back on).
156 assert( (head->m_freeListRefs.load( atomics::memory_order_relaxed ) & c_ShouldBeOnFreeList) == 0 );
158 // Decrease refcount twice, once for our ref, and once for the list's ref
159 head->m_freeListRefs.fetch_sub( 2, atomics::memory_order_relaxed );
164 // OK, the head must have changed on us, but we still need to decrease the refcount we
166 refs = prevHead->m_freeListRefs.fetch_sub( 1, atomics::memory_order_acq_rel );
167 if ( refs == c_ShouldBeOnFreeList + 1 )
168 add_knowing_refcount_is_zero( prevHead );
174 /// Checks whether the free list is empty
177 return m_Head.load( atomics::memory_order_relaxed ) == nullptr;
180 /// Clears the free list (not atomic)
182 For each element \p disp disposer is called to free memory.
183 The \p Disposer interface:
187 void operator()( FreeList::node * node );
191 This method must be explicitly called before the free list destructor.
193 template <typename Disposer>
194 void clear( Disposer disp )
196 node * head = m_Head.load( atomics::memory_order_relaxed );
197 m_Head.store( nullptr, atomics::memory_order_relaxed );
199 node * next = head->m_freeListNext.load( atomics::memory_order_relaxed );
207 void add_knowing_refcount_is_zero( node * pNode )
209 // Since the refcount is zero, and nobody can increase it once it's zero (except us, and we
210 // run only one copy of this method per node at a time, i.e. the single thread case), then we
211 // know we can safely change the next pointer of the node; however, once the refcount is back
212 // above zero, then other threads could increase it (happens under heavy contention, when the
213 // refcount goes to zero in between a load and a refcount increment of a node in try_get, then
214 // back up to something non-zero, then the refcount increment is done by the other thread) --
215 // so, if the CAS to add the node to the actual list fails, decrease the refcount and leave
216 // the add operation to the next thread who puts the refcount back at zero (which could be us,
218 node * head = m_Head.load( atomics::memory_order_relaxed );
220 pNode->m_freeListNext.store( head, atomics::memory_order_relaxed );
221 pNode->m_freeListRefs.store( 1, atomics::memory_order_release );
222 if ( cds_unlikely( !m_Head.compare_exchange_strong( head, pNode, atomics::memory_order_release, atomics::memory_order_relaxed ))) {
223 // Hmm, the add failed, but we can only try again when the refcount goes back to zero
224 if ( pNode->m_freeListRefs.fetch_add( c_ShouldBeOnFreeList - 1, atomics::memory_order_release ) == 1 )
234 static CDS_CONSTEXPR uint32_t const c_RefsMask = 0x7FFFFFFF;
235 static CDS_CONSTEXPR uint32_t const c_ShouldBeOnFreeList = 0x80000000;
237 // Implemented like a stack, but where node order doesn't matter (nodes are
238 // inserted out of order under contention)
239 atomics::atomic<node *> m_Head;
243 }} // namespace cds::intrusive
245 #endif // CDSLIB_INTRUSIVE_FREE_LIST_H