// Dynamic Hazard Pointer memory manager implementation
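
/*
    Usage sketch (illustrative only, not part of this translation unit):
    the GarbageCollector singleton defined here sits behind the public
    cds::gc::DHP wrapper; a typical client drives it indirectly:

        #include <cds/init.h>
        #include <cds/gc/dhp.h>

        int main() {
            cds::Initialize();
            {
                cds::gc::DHP dhpGC;     // wrapper constructs the singleton
                cds::threading::Manager::attachThread();
                // ... use DHP-based containers here ...
            }   // wrapper destructor tears the singleton down
            cds::Terminate();
        }
*/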

#include <algorithm>    // std::fill
#include <functional>   // std::hash

#include <cds/gc/details/dhp.h>
#include <cds/algo/int_algo.h>

namespace cds { namespace gc { namespace dhp {

    namespace details {

        // Hash set of retired pointers used by scan() to match retired
        // pointers against currently guarded ones
        class liberate_set {
            typedef retired_ptr_node *  item_type;
            typedef cds::details::Allocator<item_type, CDS_DEFAULT_ALLOCATOR> allocator_type;

            size_t const m_nBucketCount;
            item_type *  m_Buckets;

            item_type& bucket( retired_ptr_node& node )
            {
                return bucket( node.m_ptr.m_p );
            }

            item_type& bucket( guard_data::guarded_ptr p )
            {
                // m_nBucketCount is a power of 2, so masking is a cheap modulo
                return m_Buckets[ std::hash<guard_data::guarded_ptr>()( p ) & (m_nBucketCount - 1) ];
            }
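
            // Worked example (assuming m_nBucketCount == 8): the mask is 7,
            // so hash values 3, 11 and 19 all map to bucket 3, since
            // (h & (n - 1)) == h % n whenever n is a power of 2.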

        public:
            liberate_set( size_t nBucketCount )
                : m_nBucketCount( nBucketCount )
            {
                assert( nBucketCount > 0 );
                assert( (nBucketCount & (nBucketCount - 1)) == 0 );  // power of 2

                m_Buckets = allocator_type().NewArray( nBucketCount );
                std::fill( m_Buckets, m_Buckets + nBucketCount, nullptr );
            }

            ~liberate_set()
            {
                allocator_type().Delete( m_Buckets, m_nBucketCount );
            }

            // Insert a retired node; nodes retiring the same pointer are
            // chained on m_pNextFree under a single bucket entry
            void insert( retired_ptr_node& node )
            {
                node.m_pNext.store( nullptr, atomics::memory_order_relaxed );

                item_type& refBucket = bucket( node );
                if ( refBucket ) {
                    item_type p = refBucket;
                    do {
                        if ( p->m_ptr.m_p == node.m_ptr.m_p ) {
                            assert( node.m_pNextFree.load( atomics::memory_order_relaxed ) == nullptr );

                            node.m_pNextFree.store( p->m_pNextFree.load( atomics::memory_order_relaxed ), atomics::memory_order_relaxed );
                            p->m_pNextFree.store( &node, atomics::memory_order_relaxed );
                            return;
                        }
                        p = p->m_pNext.load( atomics::memory_order_relaxed );
                    } while ( p );

                    node.m_pNext.store( refBucket, atomics::memory_order_relaxed );
                }
                refBucket = &node;
            }
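
            // Illustrative bucket layout after a few insertions (sketch, not
            // code): distinct pointer values chain on m_pNext; duplicates of
            // the same pointer hang off their entry on m_pNextFree:
            //
            //   m_Buckets[i] --m_pNext--> A --m_pNext--> B
            //                             |
            //                         m_pNextFree
            //                             |
            //                             A'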

            // Find and unlink the bucket entry for ptr; returns the head of
            // its m_pNextFree-linked list of retired nodes, or nullptr
            item_type erase( guard_data::guarded_ptr ptr )
            {
                item_type& refBucket = bucket( ptr );
                item_type p = refBucket;
                item_type pPrev = nullptr;

                while ( p ) {
                    if ( p->m_ptr.m_p == ptr ) {
                        if ( pPrev )
                            pPrev->m_pNext.store( p->m_pNext.load( atomics::memory_order_relaxed ), atomics::memory_order_relaxed );
                        else
                            refBucket = p->m_pNext.load( atomics::memory_order_relaxed );
                        p->m_pNext.store( nullptr, atomics::memory_order_relaxed );
                        return p;
                    }
                    pPrev = p;
                    p = p->m_pNext.load( atomics::memory_order_relaxed );
                }

                return nullptr;
            }

            typedef std::pair<item_type, item_type> list_range;

            // Free the payload of every node left in the set and link the
            // nodes into one list on m_pNextFree; returns its [first, last]
            list_range free_all()
            {
                item_type pTail = nullptr;
                list_range ret = std::make_pair( pTail, pTail );

                item_type const * pEndBucket = m_Buckets + m_nBucketCount;
                for ( item_type * ppBucket = m_Buckets; ppBucket < pEndBucket; ++ppBucket ) {
                    item_type pBucket = *ppBucket;
                    if ( pBucket ) {
                        if ( !ret.first )
                            ret.first = pBucket;
                        else
                            pTail->m_pNextFree.store( pBucket, atomics::memory_order_relaxed );

                        pTail = pBucket;
                        for (;;) {
                            item_type pNext = pTail->m_pNext.load( atomics::memory_order_relaxed );
                            pTail->m_ptr.free();
                            pTail->m_pNext.store( nullptr, atomics::memory_order_relaxed );

                            while ( pTail->m_pNextFree.load( atomics::memory_order_relaxed )) {
                                pTail = pTail->m_pNextFree.load( atomics::memory_order_relaxed );
                                pTail->m_ptr.free();
                                pTail->m_pNext.store( nullptr, atomics::memory_order_relaxed );
                            }

                            if ( pNext ) {
                                pTail->m_pNextFree.store( pNext, atomics::memory_order_relaxed );
                                pTail = pNext;
                            }
                            else
                                break;
                        }
                    }
                }

                if ( pTail )
                    pTail->m_pNextFree.store( nullptr, atomics::memory_order_relaxed );
                ret.second = pTail;
                return ret;
            }
        };

    } // namespace details

    GarbageCollector * GarbageCollector::m_pManager = nullptr;

    void CDS_STDCALL GarbageCollector::Construct(
        size_t nLiberateThreshold
        , size_t nInitialThreadGuardCount
        )
    {
        if ( !m_pManager )
            m_pManager = new GarbageCollector( nLiberateThreshold, nInitialThreadGuardCount );
    }

    void CDS_STDCALL GarbageCollector::Destruct()
    {
        delete m_pManager;
        m_pManager = nullptr;
    }
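
    // Note: zero arguments select the defaults below; for example,
    // GarbageCollector( 0, 0 ) behaves like GarbageCollector( 1024, 8 ).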

    GarbageCollector::GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount )
        : m_nLiberateThreshold( nLiberateThreshold ? nLiberateThreshold : 1024 )
        , m_nInitialThreadGuardCount( nInitialThreadGuardCount ? nInitialThreadGuardCount : 8 )
        , m_bStatEnabled( false )
    {}

    GarbageCollector::~GarbageCollector()
    {}

    void GarbageCollector::scan()
    {
        details::retired_ptr_buffer::privatize_result retiredList = m_RetiredBuffer.privatize();
        if ( retiredList.first ) {
            size_t nLiberateThreshold = m_nLiberateThreshold.load( atomics::memory_order_relaxed );
            details::liberate_set set( beans::ceil2( retiredList.second > nLiberateThreshold ? retiredList.second : nLiberateThreshold ));

            // Get the list of retired pointers and hash it into the liberate set
            size_t nRetiredCount = 0;
            details::retired_ptr_node * pHead = retiredList.first;
            while ( pHead ) {
                details::retired_ptr_node * pNext = pHead->m_pNext.load( atomics::memory_order_relaxed );
                pHead->m_pNextFree.store( nullptr, atomics::memory_order_relaxed );
                set.insert( *pHead );
                pHead = pNext;
                ++nRetiredCount;
            }

            // Liberate cycle: walk all guards and pull still-guarded pointers
            // out of the set; they stay retired until the next scan()
            details::retired_ptr_node * pBusyFirst = nullptr;
            details::retired_ptr_node * pBusyLast  = nullptr;
            size_t nBusyCount = 0;

            for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load( atomics::memory_order_acquire )) {
                // get guarded pointer
                details::guard_data::guarded_ptr valGuarded = pGuard->pPost.load( atomics::memory_order_acquire );

                if ( valGuarded ) {
                    details::retired_ptr_node * pRetired = set.erase( valGuarded );
                    if ( pRetired ) {
                        // The retired pointer is being guarded.
                        // pRetired is the head of the list of retired nodes whose m_ptr.m_p fields are equal;
                        // the list is linked on the m_pNextFree field
                        if ( pBusyLast )
                            pBusyLast->m_pNext.store( pRetired, atomics::memory_order_relaxed );
                        else
                            pBusyFirst = pRetired;
                        pBusyLast = pRetired;
                        ++nBusyCount;

                        details::retired_ptr_node * p = pBusyLast->m_pNextFree.load( atomics::memory_order_relaxed );
                        while ( p != nullptr ) {
                            pBusyLast->m_pNext.store( p, atomics::memory_order_relaxed );
                            pBusyLast = p;
                            ++nBusyCount;
                            p = p->m_pNextFree.load( atomics::memory_order_relaxed );
                        }
                    }
                }
            }

            // Place [pBusyFirst, pBusyLast] back to m_RetiredBuffer
            if ( pBusyFirst )
                m_RetiredBuffer.push_list( pBusyFirst, pBusyLast, nBusyCount );

            // Free all retired pointers that are not guarded
            details::liberate_set::list_range range = set.free_all();

            m_RetiredAllocator.inc_epoch();

            if ( range.first ) {
                assert( range.second != nullptr );
                m_RetiredAllocator.free_range( range.first, range.second );
            }
            else if ( nRetiredCount >= nLiberateThreshold ) {
                // The scan() cycle did not free any retired pointer - double the scan() threshold
                m_nLiberateThreshold.compare_exchange_strong( nLiberateThreshold, nLiberateThreshold * 2,
                    atomics::memory_order_release, atomics::memory_order_relaxed );
            }
        }
    }

}}} // namespace cds::gc::dhp