3 // Dynamic Hazard Pointer memory manager implementation
5 #include <algorithm> // std::fill
6 #include <functional> // std::hash
8 #include <cds/gc/details/dhp.h>
9 #include <cds/algo/int_algo.h>
11 namespace cds { namespace gc { namespace dhp {
// Hash-set of retired pointers used during scan(): nodes are bucketed by the
// raw retired pointer value so guarded pointers can be matched quickly.
// NOTE(review): the enclosing class declaration is outside this view.
16 typedef retired_ptr_node * item_type;
17 typedef cds::details::Allocator<item_type, CDS_DEFAULT_ALLOCATOR> allocator_type;
// Number of buckets; invariant (checked in ctor): > 0 and a power of two,
// so (hash & (m_nBucketCount - 1)) is a valid bucket index.
19 size_t const m_nBucketCount;
// Bucket array of singly-linked lists (linked via retired_ptr_node::m_pNext).
20 item_type * m_Buckets;
// Returns the bucket head slot for a retired node, keyed on the raw
// retired pointer (node.m_ptr.m_p); delegates to the pointer overload.
22 item_type& bucket( retired_ptr_node& node )
24 return bucket( node.m_ptr.m_p );
// Returns the bucket head slot for a raw pointer value.
// Uses std::hash on the pointer, masked by (m_nBucketCount - 1) — valid only
// because the bucket count is a power of two (asserted in the constructor).
26 item_type& bucket( guard_data::guarded_ptr p )
28 return m_Buckets[ std::hash<guard_data::guarded_ptr>()( p ) & (m_nBucketCount - 1) ];
// Constructs the set with nBucketCount buckets.
// Preconditions: nBucketCount is non-zero and a power of two (required by the
// mask-based bucket indexing in bucket()); all buckets start empty.
32 liberate_set( size_t nBucketCount )
33 : m_nBucketCount( nBucketCount )
35 assert( nBucketCount > 0 );
// Power-of-two check: x & (x-1) == 0.
36 assert( (nBucketCount & (nBucketCount - 1)) == 0 );
38 m_Buckets = allocator_type().NewArray( nBucketCount );
39 std::fill( m_Buckets, m_Buckets + nBucketCount, nullptr );
// Destructor body: releases the bucket array allocated in the constructor.
// The nodes themselves are not owned by this set and are not freed here.
44 allocator_type().Delete( m_Buckets, m_nBucketCount );
// Inserts a retired node into the hash set.
// If a node with the same raw pointer (m_ptr.m_p) is already in the bucket,
// the new node is spliced into that node's m_pNextFree chain (duplicates are
// grouped, not re-linked into the bucket list); otherwise the node is pushed
// onto the front of the bucket list via m_pNext.
// All atomics use relaxed ordering: the set is private to the scanning thread.
47 void insert( retired_ptr_node& node )
49 node.m_pNext.store( nullptr, atomics::memory_order_relaxed );
51 item_type& refBucket = bucket( node );
// Walk the bucket chain looking for a node retiring the same pointer.
// NOTE(review): the loop header is not visible in this chunk; p advances
// through m_pNext below.
53 item_type p = refBucket;
55 if ( p->m_ptr.m_p == node.m_ptr.m_p ) {
// Duplicate found: node must not already be on a free-chain.
56 assert( node.m_pNextFree.load( atomics::memory_order_relaxed ) == nullptr );
// Push node onto p's m_pNextFree chain (LIFO splice).
58 node.m_pNextFree.store( p->m_pNextFree.load( atomics::memory_order_relaxed ), atomics::memory_order_relaxed );
59 p->m_pNextFree.store( &node, atomics::memory_order_relaxed );
62 p = p->m_pNext.load(atomics::memory_order_relaxed);
// No duplicate: push node onto the front of the bucket list.
65 node.m_pNext.store( refBucket, atomics::memory_order_relaxed );
// Removes and returns the node chain whose retired pointer equals ptr,
// or (presumably) nullptr when no node matches — the return statements are
// not visible in this chunk; TODO confirm against the full source.
// The matched node is unlinked from its bucket list; duplicates for the same
// pointer remain reachable through the returned node's m_pNextFree chain.
70 item_type erase( guard_data::guarded_ptr ptr )
72 item_type& refBucket = bucket( ptr );
73 item_type p = refBucket;
74 item_type pPrev = nullptr;
// Walk the bucket chain; NOTE(review): loop header not visible here.
77 if ( p->m_ptr.m_p == ptr ) {
// Unlink p: bypass it from the predecessor, or from the bucket head
// when p is the first node (the branch selecting between these two
// assignments is not visible in this chunk).
79 pPrev->m_pNext.store( p->m_pNext.load(atomics::memory_order_relaxed ), atomics::memory_order_relaxed );
81 refBucket = p->m_pNext.load(atomics::memory_order_relaxed);
// Detach p from the bucket list before handing it back.
82 p->m_pNext.store( nullptr, atomics::memory_order_relaxed );
86 p = p->m_pNext.load( atomics::memory_order_relaxed );
// (head, tail) pair describing a singly-linked list of retired nodes,
// linked through m_pNextFree.
92 typedef std::pair<item_type, item_type> list_range;
// free_all(): drains every bucket into one flat list linked via m_pNextFree
// and returns it as a list_range. NOTE(review): the function signature and
// several interior lines (bucket-empty checks, ret.first assignment, the
// final return) are not visible in this chunk; comments below describe only
// the visible statements.
96 item_type pTail = nullptr;
97 list_range ret = std::make_pair( pTail, pTail );
// Iterate over all buckets.
99 item_type const * pEndBucket = m_Buckets + m_nBucketCount;
100 for ( item_type * ppBucket = m_Buckets; ppBucket < pEndBucket; ++ppBucket ) {
101 item_type pBucket = *ppBucket;
// Append this bucket's chain to the accumulated list.
106 pTail->m_pNextFree.store( pBucket, atomics::memory_order_relaxed );
// Remember the next node in the bucket list, then clear the
// bucket-list link so only m_pNextFree links survive.
110 item_type pNext = pTail->m_pNext.load( atomics::memory_order_relaxed );
112 pTail->m_pNext.store( nullptr, atomics::memory_order_relaxed );
// Advance pTail through the duplicate chain (m_pNextFree),
// clearing each node's m_pNext along the way.
114 while ( pTail->m_pNextFree.load( atomics::memory_order_relaxed )) {
115 pTail = pTail->m_pNextFree.load( atomics::memory_order_relaxed );
117 pTail->m_pNext.store( nullptr, atomics::memory_order_relaxed );
// Splice the rest of the bucket list after the duplicate chain.
121 pTail->m_pNextFree.store( pNext, atomics::memory_order_relaxed );
// Terminate the flattened list.
131 pTail->m_pNextFree.store( nullptr, atomics::memory_order_relaxed );
// Definition of the GarbageCollector singleton pointer; nullptr until
// Construct() is called, reset to nullptr by Destruct().
138 GarbageCollector * GarbageCollector::m_pManager = nullptr;
// Creates the GarbageCollector singleton.
// @param nLiberateThreshold        scan() trigger threshold (0 selects the
//                                  constructor default, see ctor).
// @param nInitialThreadGuardCount  initial per-thread guard count (0 selects
//                                  the constructor default).
// NOTE(review): lines between the parameter list and the assignment are not
// visible here — presumably an "already constructed" guard; verify against
// the full source.
140 void CDS_STDCALL GarbageCollector::Construct(
141 size_t nLiberateThreshold
142 , size_t nInitialThreadGuardCount
146 m_pManager = new GarbageCollector( nLiberateThreshold, nInitialThreadGuardCount );
// Destroys the singleton and clears the global pointer.
// NOTE(review): the line deleting m_pManager is not visible in this chunk —
// presumably it precedes the assignment; verify against the full source.
150 void CDS_STDCALL GarbageCollector::Destruct()
153 m_pManager = nullptr;
// Constructor: zero arguments select the defaults —
// scan threshold 1024, initial per-thread guard count 8.
156 GarbageCollector::GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount )
157 : m_nLiberateThreshold( nLiberateThreshold ? nLiberateThreshold : 1024 )
158 , m_nInitialThreadGuardCount( nInitialThreadGuardCount ? nInitialThreadGuardCount : 8 )
// Destructor; its body is not visible in this chunk — presumably it performs
// a final scan()/cleanup pass; verify against the full source.
163 GarbageCollector::~GarbageCollector()
// scan(): the DHP "liberate" pass.
// 1. Privatize the shared retired-pointer buffer.
// 2. Build a liberate_set hash set of all retired nodes.
// 3. Walk every guard; retired pointers that are currently guarded are
//    collected into a "busy" list and pushed back to the retired buffer.
// 4. Everything left in the set is freed via the retired allocator.
// NOTE(review): several interior lines (loop headers, null checks, epoch/
// count bookkeeping) are not visible in this chunk; comments below cover the
// visible statements only.
168 void GarbageCollector::scan()
// Take exclusive ownership of the current retired list: .first is the list
// head, .second (per the sizing expression below) its length.
170 details::retired_ptr_buffer::privatize_result retiredList = m_RetiredBuffer.privatize();
171 if ( retiredList.first ) {
173 size_t nLiberateThreshold = m_nLiberateThreshold.load(atomics::memory_order_relaxed);
// Bucket count must be a power of two (liberate_set precondition), hence
// beans::ceil2 on max(retired count, threshold).
174 details::liberate_set set( beans::ceil2( retiredList.second > nLiberateThreshold ? retiredList.second : nLiberateThreshold ) );
176 // Get list of retired pointers
177 details::retired_ptr_node * pHead = retiredList.first;
// Detach each node from the privatized list and insert it into the set.
179 details::retired_ptr_node * pNext = pHead->m_pNext.load( atomics::memory_order_relaxed );
180 pHead->m_pNextFree.store( nullptr, atomics::memory_order_relaxed );
181 set.insert( *pHead );
// Accumulator for retired nodes that are still guarded ("busy").
187 details::retired_ptr_node * pBusyFirst = nullptr;
188 details::retired_ptr_node * pBusyLast = nullptr;
189 size_t nBusyCount = 0;
// Walk the global guard list (acquire: published by other threads).
191 for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_acquire) )
193 // get guarded pointer
194 details::guard_data::guarded_ptr valGuarded = pGuard->pPost.load(atomics::memory_order_acquire);
// Pull the matching retired node (and its duplicate chain) out of the set.
197 details::retired_ptr_node * pRetired = set.erase( valGuarded );
199 // Retired pointer is being guarded
200 // pRetired is the head of retired pointers list for which the m_ptr.m_p field is equal
201 // List is linked on m_pNextFree field
// Append pRetired to the busy list (or start it when empty — the
// selecting branch is not visible here).
204 pBusyLast->m_pNext.store( pRetired, atomics::memory_order_relaxed );
206 pBusyFirst = pRetired;
207 pBusyLast = pRetired;
// Re-link the duplicate chain (m_pNextFree) onto the busy list's
// m_pNext links, advancing pBusyLast.
209 details::retired_ptr_node * p = pBusyLast->m_pNextFree.load(atomics::memory_order_relaxed);
210 while ( p != nullptr ) {
211 pBusyLast->m_pNext.store( p, atomics::memory_order_relaxed );
219 // Place [pBusyList, pBusyLast] back to m_RetiredBuffer
221 m_RetiredBuffer.push_list( pBusyFirst, pBusyLast, nBusyCount );
223 // Free all retired pointers
224 details::liberate_set::list_range range = set.free_all();
226 m_RetiredAllocator.inc_epoch();
// Non-empty range: release the whole [first, second] chain.
229 assert( range.second != nullptr );
230 m_RetiredAllocator.free_range( range.first, range.second );
233 // scan() cycle did not free any retired pointer - double scan() threshold
// CAS so a concurrent scan() that already raised the threshold wins.
234 m_nLiberateThreshold.compare_exchange_strong( nLiberateThreshold, nLiberateThreshold * 2, atomics::memory_order_release, atomics::memory_order_relaxed );
238 }}} // namespace cds::gc::dhp