Added epoch count as constructor parameter for DHP GC
[libcds.git] cds/gc/details/dhp.h
//$$CDS-header$$

#ifndef CDSLIB_GC_DETAILS_DHP_H
#define CDSLIB_GC_DETAILS_DHP_H

#include <mutex>        // unique_lock
#include <cds/algo/atomic.h>
#include <cds/algo/int_algo.h>
#include <cds/gc/details/retired_ptr.h>
#include <cds/details/aligned_allocator.h>
#include <cds/details/allocator.h>
#include <cds/sync/spinlock.h>

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(push)
#   pragma warning(disable:4251)    // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
#endif

//@cond
namespace cds { namespace gc {

    /// Dynamic Hazard Pointer reclamation schema
    /**
        The cds::gc::dhp namespace and its members are an internal representation of the GC and should not be used directly.
        Use the cds::gc::DHP class in your code.

        The Dynamic Hazard Pointer (DHP) garbage collector is a singleton. The main user-level part of the DHP schema is
        the GC class and its nested classes. Before using any DHP-related class you must initialize the DHP garbage collector
        by constructing a cds::gc::DHP object at the beginning of your main().
        See the cds::gc::DHP class for explanation.
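
        A minimal usage sketch (hedged: it assumes the public cds::Initialize()/cds::Terminate()
        API from <cds/init.h>; see the cds::gc::DHP documentation for the authoritative example):
        \code
        #include <cds/init.h>
        #include <cds/gc/dhp.h>

        int main()
        {
            cds::Initialize();      // initialize cds infrastructure
            {
                cds::gc::DHP dhpGC; // constructs the DHP GarbageCollector singleton
                cds::threading::Manager::attachThread();    // attach the main thread to the GC

                // ... use cds::gc::DHP-based containers here ...
            }                       // dhpGC destructor destroys the DHP singleton
            cds::Terminate();       // terminate cds infrastructure
        }
        \endcode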

        \par Implementation issues
            The global list of free guards (\p cds::gc::dhp::details::guard_allocator) is protected by a spin-lock, i.e. access to it is serialized.
            This should not introduce a significant performance bottleneck: each thread has its own set
            of guards allocated from the global free list, and the global list is accessed only when
            all of the thread's guards are busy; in that case the thread allocates the next block of guards from the global list.
            Guards allocated for a thread are pushed back to the global list only when the thread terminates.
    */
    namespace dhp {

        // Forward declarations
        class Guard;
        template <size_t Count> class GuardArray;
        class ThreadGC;
        class GarbageCollector;

        /// Retired pointer type
        typedef cds::gc::details::retired_ptr retired_ptr;

        using cds::gc::details::free_retired_ptr_func;

        /// Details of Dynamic Hazard Pointer algorithm
        namespace details {

            // Forward declaration
            class liberate_set;

            /// Retired pointer buffer node
            struct retired_ptr_node {
                retired_ptr         m_ptr   ;   ///< retired pointer
                atomics::atomic<retired_ptr_node *>  m_pNext     ;   ///< next retired pointer in buffer
                atomics::atomic<retired_ptr_node *>  m_pNextFree ;   ///< next item in free list of \p retired_ptr_node
            };

            /// Internal guard representation
            struct guard_data {
                typedef void * guarded_ptr;  ///< type of value guarded

                atomics::atomic<guarded_ptr>  pPost;       ///< pointer guarded
                atomics::atomic<guard_data *> pGlobalNext; ///< next item of the global list of allocated guards
                atomics::atomic<guard_data *> pNextFree;   ///< pointer to the next item in the global or thread-local free list

                guard_data * pThreadNext; ///< next item of the thread's local list of guards

                guard_data() CDS_NOEXCEPT
                    : pPost( nullptr )
                    , pGlobalNext( nullptr )
                    , pNextFree( nullptr )
                    , pThreadNext( nullptr )
                {}

                void init() CDS_NOEXCEPT
                {
                    pPost.store( nullptr, atomics::memory_order_relaxed );
                }

                /// Checks if the guard is free, that is, it does not guard any pointer
                bool isFree() const CDS_NOEXCEPT
                {
                    return pPost.load( atomics::memory_order_acquire ) == nullptr;
                }
            };

            /// Guard allocator
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class guard_allocator
            {
                cds::details::Allocator<details::guard_data>  m_GuardAllocator    ;   ///< guard allocator

                atomics::atomic<guard_data *>  m_GuardList;     ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
                atomics::atomic<guard_data *>  m_FreeGuardList; ///< Head of free guard list (linked by guard_data::pNextFree field)
                cds::sync::spin                m_freeListLock;  ///< Protects access to m_FreeGuardList

                /*
                    Unfortunately, access to the free guard list is lock-based.
                    Lock-free manipulation of the guard free list is ABA-prone.
                    TODO: manage m_FreeGuardList in a lock-free manner.
                */

            private:
                /// Allocates a new guard from the heap. The function uses the aligned allocator
                guard_data * allocNew()
                {
                    //TODO: the allocator should make block allocation

                    details::guard_data * pGuard = m_GuardAllocator.New();

                    // Link the guard into the list.
                    // m_GuardList is an accumulating list: it does not support concurrent deletion,
                    // so the ABA problem is impossible for it
                    details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
                    do {
                        pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
                        // pHead is changed by compare_exchange_weak
                    } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    pGuard->init();
                    return pGuard;
                }

            public:
                // Default ctor
                guard_allocator() CDS_NOEXCEPT
                    : m_GuardList( nullptr )
                    , m_FreeGuardList( nullptr )
                {}

                // Destructor
                ~guard_allocator()
                {
                    guard_data * pNext;
                    for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
                        pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
                        m_GuardAllocator.Delete( pData );
                    }
                }

                /// Allocates a guard from the free list, or from the heap if the free list is empty
                guard_data * alloc()
                {
                    // Try to pop a guard from the free list
                    details::guard_data * pGuard;

                    {
                        std::unique_lock<cds::sync::spin> al( m_freeListLock );
                        pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
                        if ( pGuard )
                            m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    }
                    if ( !pGuard )
                        return allocNew();

                    pGuard->init();
                    return pGuard;
                }

                /// Frees guard \p pGuard
                /**
                    The function places the guard \p pGuard into the free list
                */
                void free( guard_data * pGuard ) CDS_NOEXCEPT
                {
                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );

                    std::unique_lock<cds::sync::spin> al( m_freeListLock );
                    pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
                }

                /// Allocates a list of guards
                /**
                    The returned list is linked by the guards' \p pThreadNext and \p pNextFree fields.

                    cds::gc::dhp::ThreadGC supporting method
                */
                guard_data * allocList( size_t nCount )
                {
                    assert( nCount != 0 );

                    guard_data * pHead;
                    guard_data * pLast;

                    pHead =
                        pLast = alloc();

                    // The allocated guard list is private to the thread,
                    // so we can use relaxed memory order
                    while ( --nCount ) {
                        guard_data * p = alloc();
                        pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );

                    return pHead;
                }

                /// Frees a list of guards
                /**
                    The list \p pList is linked by the guards' \p pThreadNext field.

                    cds::gc::dhp::ThreadGC supporting method
                */
                void freeList( guard_data * pList ) CDS_NOEXCEPT
                {
                    assert( pList != nullptr );

                    guard_data * pLast = pList;
                    while ( pLast->pThreadNext ) {
                        pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
                        guard_data * p;
                        pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    std::unique_lock<cds::sync::spin> al( m_freeListLock );
                    pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
                }

                /// Returns the head of the allocated guard list
                guard_data * begin() CDS_NOEXCEPT
                {
                    return m_GuardList.load(atomics::memory_order_acquire);
                }
            };

            /// Retired pointer buffer
            /**
                The buffer of retired nodes ready for liberating.
                When the buffer size exceeds a threshold, the GC calls the \p scan() procedure to free
                retired nodes.
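
                A hedged sketch of how \p scan() is expected to drain the buffer (illustrative only:
                the real \p scan() is implemented in dhp.cpp; \p buf and \p is_guarded() are
                placeholders, the latter standing for the check against all active guards):
                \code
                retired_ptr_buffer::privatize_result range = buf.privatize();
                for ( retired_ptr_node * p = range.first; p; ) {
                    retired_ptr_node * pNext = p->m_pNext.load( atomics::memory_order_relaxed );
                    if ( !is_guarded( p->m_ptr.m_p ))
                        p->m_ptr.free();    // calls the deleter stored in the retired pointer
                    // nodes still guarded would be pushed back for a later scan
                    p = pNext;
                }
                \endcode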
            */
            class retired_ptr_buffer
            {
                atomics::atomic<retired_ptr_node *>  m_pHead     ;   ///< head of buffer
                atomics::atomic<size_t>              m_nItemCount;   ///< buffer's item count

            public:
                retired_ptr_buffer() CDS_NOEXCEPT
                    : m_pHead( nullptr )
                    , m_nItemCount(0)
                {}

                ~retired_ptr_buffer() CDS_NOEXCEPT
                {
                    assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
                }

                /// Pushes a new node into the buffer. Returns the current buffer size
                size_t push( retired_ptr_node& node ) CDS_NOEXCEPT
                {
                    retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
                    do {
                        node.m_pNext.store( pHead, atomics::memory_order_relaxed );
                        // pHead is changed by compare_exchange_weak
                    } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
                }

                /// Pushes the list [pFirst, pLast] linked by the \p m_pNext field. Returns the current buffer size
                size_t push_list( retired_ptr_node* pFirst, retired_ptr_node* pLast, size_t nSize )
                {
                    assert( pFirst );
                    assert( pLast );

                    retired_ptr_node * pHead = m_pHead.load( atomics::memory_order_acquire );
                    do {
                        pLast->m_pNext.store( pHead, atomics::memory_order_relaxed );
                        // pHead is changed by compare_exchange_weak
                    } while ( !m_pHead.compare_exchange_weak( pHead, pFirst, atomics::memory_order_release, atomics::memory_order_relaxed ) );

                    return m_nItemCount.fetch_add( nSize, atomics::memory_order_relaxed ) + nSize;
                }


                /// Result of the \ref dhp_gc_privatize "privatize" function.
                /**
                    The \p privatize function returns the retired node list as \p first and the size of that list as \p second.
                */
                typedef std::pair<retired_ptr_node *, size_t> privatize_result;

                /// Gets the current list of retired pointers and clears the list
                /**@anchor dhp_gc_privatize
                */
                privatize_result privatize() CDS_NOEXCEPT
                {
                    privatize_result res;
                    res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );

                    // The item counter is needed only as a threshold for the \p scan() function,
                    // so we may clear it without synchronization with m_pHead
                    res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
                    return res;
                }

                /// Returns the current size of the buffer (approximate)
                size_t size() const CDS_NOEXCEPT
                {
                    return m_nItemCount.load(atomics::memory_order_relaxed);
                }
            };

            /// Pool of retired pointers
            /**
                The class acts as an allocator of retired nodes.
                Retired pointers are linked in a lock-free list.
            */
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class retired_ptr_pool {
                /// Pool item
                typedef retired_ptr_node    item;

                /// Count of items in a block
                static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;

                /// Pool block
                struct block {
                    atomics::atomic<block *> pNext;     ///< next block
                    item        items[m_nItemPerBlock]; ///< item array
                };

                atomics::atomic<block *> m_pBlockListHead;   ///< head of allocated block list

                // To solve the ABA problem we use an epoch-based approach
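                // (as implemented below): alloc() pops items from the current epoch's free list,
                // free_range() pushes freed items onto the slot selected by next_epoch() (the slot
                // just behind the current one), and inc_epoch() advances the epoch counter.
                // A freed item therefore becomes allocatable again only after the epoch counter
                // wraps around to its slot, which makes it unlikely that a thread still holding
                // a stale reference to the item observes it recycled (the ABA hazard).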
                unsigned int const m_nEpochBitmask;             ///< Epoch bitmask: epoch count rounded up to a power of 2, minus 1
                atomics::atomic<unsigned int> m_nCurEpoch;      ///< Current epoch
                atomics::atomic<item *>* m_pEpochFree;          ///< Free item list per epoch
                atomics::atomic<item *>  m_pGlobalFreeHead;     ///< Head of unallocated item list

                typedef cds::details::Allocator< block, Alloc > block_allocator;
                typedef cds::details::Allocator< atomics::atomic<item *>, Alloc > epoch_array_alloc;

            private:
                void allocNewBlock()
                {
                    // allocate new block
                    block * pNew = block_allocator().New();

                    // link items within the block
                    item * pLastItem = pNew->items + m_nItemPerBlock - 1;
                    for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
                        pItem->m_pNextFree.store( pItem + 1, atomics::memory_order_release );
                        CDS_STRICT_DO( pItem->m_pNext.store( nullptr, atomics::memory_order_relaxed ));
                    }

                    // link the new block into the block list
                    {
                        block * pHead = m_pBlockListHead.load(atomics::memory_order_relaxed);
                        do {
                            pNew->pNext.store( pHead, atomics::memory_order_relaxed );
                            // pHead is changed by compare_exchange_weak
                        } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_relaxed, atomics::memory_order_relaxed ));
                    }

                    // link the block's items into the free list
                    {
                        item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_relaxed);
                        do {
                            pLastItem->m_pNextFree.store( pHead, atomics::memory_order_release );
                            // pHead is changed by compare_exchange_weak
                        } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
                    }
                }

                unsigned int current_epoch() const CDS_NOEXCEPT
                {
                    return m_nCurEpoch.load(atomics::memory_order_acquire) & m_nEpochBitmask;
                }

                unsigned int next_epoch() const CDS_NOEXCEPT
                {
                    return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & m_nEpochBitmask;
                }

            public:
                retired_ptr_pool( unsigned int nEpochCount = 8 )
                    : m_pBlockListHead( nullptr )
                    , m_nEpochBitmask( static_cast<unsigned int>(beans::ceil2(nEpochCount)) - 1 )
                    , m_nCurEpoch(0)
                    , m_pEpochFree( epoch_array_alloc().NewArray( m_nEpochBitmask + 1))
                    , m_pGlobalFreeHead( nullptr )
                {
                    for (unsigned int i = 0; i <= m_nEpochBitmask; ++i )
                        m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );

                    allocNewBlock();
                }

                ~retired_ptr_pool()
                {
                    block_allocator a;
                    block * p;
                    for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
                        p = pBlock->pNext.load( atomics::memory_order_relaxed );
                        a.Delete( pBlock );
                    }

                    epoch_array_alloc().Delete( m_pEpochFree, m_nEpochBitmask + 1 );
                }

                /// Increments the current epoch
                void inc_epoch() CDS_NOEXCEPT
                {
                    m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
                }

                /// Allocates a new retired pointer
                retired_ptr_node&  alloc()
                {
                    unsigned int nEpoch;
                    item * pItem;
                    for (;;) {
                        pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
                        if ( !pItem )
                            goto retry;
                        if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem,
                                                                         pItem->m_pNextFree.load(atomics::memory_order_acquire),
                                                                         atomics::memory_order_acquire, atomics::memory_order_relaxed ))
                        {
                            goto success;
                        }
                    }

                    // The epoch free list is empty:
                    // allocate from the global free list
                retry:
                    pItem = m_pGlobalFreeHead.load( atomics::memory_order_relaxed );
                    do {
                        if ( !pItem ) {
                            allocNewBlock();
                            goto retry;
                        }
                        // pItem is changed by compare_exchange_weak
                    } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem,
                                                                        pItem->m_pNextFree.load(atomics::memory_order_acquire),
                                                                        atomics::memory_order_acquire, atomics::memory_order_relaxed ));

                success:
                    CDS_STRICT_DO( pItem->m_pNextFree.store( nullptr, atomics::memory_order_relaxed ));
                    return *pItem;
                }

                /// Allocates and initializes a new retired pointer
                retired_ptr_node& alloc( const retired_ptr& p )
                {
                    retired_ptr_node& node = alloc();
                    node.m_ptr = p;
                    return node;
                }

                /// Places the list [pHead, pTail] of retired pointers into the pool (frees the retired pointers)
                /**
                    The list is linked via the m_pNextFree field
                */
                void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail ) CDS_NOEXCEPT
                {
                    assert( pHead != nullptr );
                    assert( pTail != nullptr );

                    unsigned int nEpoch;
                    item * pCurHead;
                    do {
                        pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
                        pTail->m_pNextFree.store( pCurHead, atomics::memory_order_release );
                    } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
                }
            };

            /// Uninitialized guard
            class guard
            {
                friend class dhp::ThreadGC;
            protected:
                details::guard_data * m_pGuard ;    ///< Pointer to guard data

            public:
                /// Initialize empty guard.
                CDS_CONSTEXPR guard() CDS_NOEXCEPT
                    : m_pGuard( nullptr )
                {}

                /// Copy-ctor is disabled
                guard( guard const& ) = delete;

                /// Move-ctor is disabled
                guard( guard&& ) = delete;

                /// Object destructor, does nothing
                ~guard() CDS_NOEXCEPT
                {}

                /// Get current guarded pointer
                void * get( atomics::memory_order order = atomics::memory_order_acquire ) const CDS_NOEXCEPT
                {
                    assert( m_pGuard != nullptr );
                    return m_pGuard->pPost.load( order );
                }

                /// Guards pointer \p p
                void set( void * p, atomics::memory_order order = atomics::memory_order_release ) CDS_NOEXCEPT
                {
                    assert( m_pGuard != nullptr );
                    m_pGuard->pPost.store( p, order );
                }

                /// Clears the guard
                void clear( atomics::memory_order order = atomics::memory_order_relaxed ) CDS_NOEXCEPT
                {
                    assert( m_pGuard != nullptr );
                    m_pGuard->pPost.store( nullptr, order );
                }

                /// Guards pointer \p p
                template <typename T>
                T * operator =(T * p) CDS_NOEXCEPT
                {
                    set( reinterpret_cast<void *>( const_cast<T *>(p) ));
                    return p;
                }

                std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
                {
                    clear();
                    return nullptr;
                }

            public: // for ThreadGC.
                /*
                    GCC cannot compile code for the template versions of ThreadGC::allocGuard/freeGuard:
                    the compiler produces the error "'cds::gc::dhp::details::guard_data* cds::gc::dhp::details::guard::m_pGuard' is protected"
                    despite the fact that ThreadGC is declared as a friend of the guard class.
                    Therefore, we have to add the public set_guard/get_guard functions
                */
                /// Set guard data
                void set_guard( details::guard_data * pGuard ) CDS_NOEXCEPT
                {
                    assert( m_pGuard == nullptr );
                    m_pGuard = pGuard;
                }

                /// Get current guard data
                details::guard_data * get_guard() CDS_NOEXCEPT
                {
                    return m_pGuard;
                }
                /// Get current guard data
                details::guard_data * get_guard() const CDS_NOEXCEPT
                {
                    return m_pGuard;
                }

                details::guard_data * release_guard() CDS_NOEXCEPT
                {
                    details::guard_data * p = m_pGuard;
                    m_pGuard = nullptr;
                    return p;
                }

                bool is_initialized() const
                {
                    return m_pGuard != nullptr;
                }
            };

        } // namespace details

        /// Guard
        /**
            This class represents an auto guard: the ctor allocates a guard from the guard pool,
            the dtor returns the guard back to the pool of free guards.
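
            A minimal sketch of the intended protocol (hedged; \p Foo and \p pShared are
            placeholders, and the re-check loop is the caller's responsibility):
            \code
            atomics::atomic<Foo *> pShared;
            dhp::Guard g;       // the ctor allocates a guard from the thread's pool
            Foo * p;
            do {
                p = pShared.load( atomics::memory_order_acquire );
                g = p;          // guard the loaded pointer
                // re-read: pShared may have changed between the load and the guard
            } while ( p != pShared.load( atomics::memory_order_acquire ));
            // *p is now protected from reclamation while the guard holds it
            g = nullptr;        // clear the guard; the dtor returns it to the pool
            \endcode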
        */
        class Guard: public details::guard
        {
            typedef details::guard    base_class;
            friend class ThreadGC;
        public:
            /// Allocates a guard from the guard pool of the current thread's ThreadGC
            Guard(); // inline in dhp_impl.h

            /// Returns the allocated guard back to the pool of free guards
            ~Guard();    // inline in dhp_impl.h

            /// Guards pointer \p p
            template <typename T>
            T * operator =(T * p) CDS_NOEXCEPT
            {
                return base_class::operator =<T>( p );
            }

            std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
            {
                return base_class::operator =(nullptr);
            }
        };

        /// Array of guards
        /**
            This class represents an array of auto guards: the ctor allocates \p Count guards from the guard pool,
            the dtor returns the allocated guards back to the pool.
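
            A brief sketch (hedged; \p p0 and \p p1 are placeholder pointers):
            \code
            dhp::GuardArray<2> ga;   // the ctor allocates two guards from the thread's pool
            ga.set( 0, p0 );         // guard p0 with guard 0
            ga.set( 1, p1 );         // guard p1 with guard 1
            // ... work with the guarded pointers ...
            ga.clearAll();           // release the guarded pointers
            // the dtor returns both guards to the pool
            \endcode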
        */
        template <size_t Count>
        class GuardArray
        {
            details::guard      m_arr[Count]    ;    ///< array of guards
            const static size_t c_nCapacity = Count ;   ///< Array capacity (equal to \p Count template parameter)

        public:
            /// Rebind array for other size \p OtherCount
            template <size_t OtherCount>
            struct rebind {
                typedef GuardArray<OtherCount>  other   ;   ///< rebinding result
            };

        public:
            /// Allocates an array of guards from the guard pool of the current thread's ThreadGC
            GuardArray();    // inline in dhp_impl.h

            /// The object is not copy-constructible
            GuardArray( GuardArray const& ) = delete;

            /// The object is not move-constructible
            GuardArray( GuardArray&& ) = delete;

            /// Returns the allocated guards back to the pool
            ~GuardArray();    // inline in dhp_impl.h

            /// Returns the capacity of the array
            CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
            {
                return c_nCapacity;
            }

            /// Returns reference to the guard at index \p nIndex (0 <= \p nIndex < \p Count)
            details::guard& operator []( size_t nIndex ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Returns reference to the guard at index \p nIndex (0 <= \p nIndex < \p Count) [const version]
            const details::guard& operator []( size_t nIndex ) const CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Sets the guard \p nIndex (0 <= \p nIndex < \p Count)
            template <typename T>
            void set( size_t nIndex, T * p ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].set( p );
            }

            /// Clears (sets to \p nullptr) the guard \p nIndex
            void clear( size_t nIndex ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].clear();
            }

            /// Clears all guards in the array
            void clearAll() CDS_NOEXCEPT
            {
                for ( size_t i = 0; i < capacity(); ++i )
                    clear(i);
            }
        };

        /// Memory manager (Garbage collector)
        class CDS_EXPORT_API GarbageCollector
        {
        private:
            friend class ThreadGC;

            /// Internal GC statistics
            struct internal_stat
            {
                atomics::atomic<size_t>  m_nGuardCount       ;   ///< Total guard count
                atomics::atomic<size_t>  m_nFreeGuardCount   ;   ///< Count of free guards

                internal_stat()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}
            };

        public:
            /// Exception "No GarbageCollector object is created"
            class not_initialized : public std::runtime_error
            {
            public:
                //@cond
                not_initialized()
                    : std::runtime_error( "Global DHP GarbageCollector is not initialized" )
                {}
                //@endcond
            };

            /// Internal GC statistics
            struct InternalState
            {
                size_t m_nGuardCount       ;   ///< Total guard count
                size_t m_nFreeGuardCount   ;   ///< Count of free guards

                //@cond
                InternalState()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}

                InternalState& operator =( internal_stat const& s )
                {
                    m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
                    m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);

                    return *this;
                }
                //@endcond
            };

        private:
            static GarbageCollector * m_pManager    ;   ///< GC global instance

            atomics::atomic<size_t>  m_nLiberateThreshold;   ///< Max size of retired pointer buffer to call \p scan()
            const size_t             m_nInitialThreadGuardCount; ///< Initial count of guards allocated for a ThreadGC

            details::guard_allocator<>      m_GuardPool         ;   ///< Guard pool
            details::retired_ptr_pool<>     m_RetiredAllocator  ;   ///< Pool of free retired pointers
            details::retired_ptr_buffer     m_RetiredBuffer     ;   ///< Retired pointer buffer for liberating

            internal_stat   m_stat  ;   ///< Internal statistics
            bool            m_bStatEnabled  ;   ///< Internal statistics enabled

        public:
            /// Initializes the DHP memory manager singleton
            /**
                This member function creates and initializes the DHP global object.
                The function should be called before using CDS data structures based on the cds::gc::DHP GC. Usually,
                this member function is called in the \p main() function. See cds::gc::dhp for an example.
                After calling this function you may use CDS data structures based on cds::gc::DHP.

                \par Parameters
                - \p nLiberateThreshold - \p scan() threshold. When the count of retired pointers reaches this value,
                    the \ref dhp_gc_liberate "scan()" member function is called to free retired pointers.
                    If \p nLiberateThreshold <= 1, \p scan() is called after each \ref dhp_gc_retirePtr "retirePtr" call.
                - \p nInitialThreadGuardCount - initial count of guards allocated for a ThreadGC. When a thread
                    is initialized, the GC allocates a local guard pool for the thread from the common guard pool.
                    If needed, the thread's local guard pool grows automatically from the common pool.
                    When the thread terminates, its guard pool is returned to the common GC pool.
                - \p nEpochCount: internally, the DHP memory manager uses an epoch-based schema to solve the
                    ABA problem for its internal data. \p nEpochCount specifies the epoch count,
                    i.e. the count of simultaneously working threads that remove elements
                    of DHP-based concurrent data structures. The default value is 8.

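                A hedged example (normally the cds::gc::DHP constructor calls this function for you):
                \code
                // scan() threshold 2048, 16 initial guards per thread, 16 epochs
                cds::gc::dhp::GarbageCollector::Construct( 2048, 16, 16 );

                // ... use DHP-based containers ...

                cds::gc::dhp::GarbageCollector::Destruct();
                \endcode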
            */
            static void CDS_STDCALL Construct(
                size_t nLiberateThreshold = 1024
                , size_t nInitialThreadGuardCount = 8
                , size_t nEpochCount = 8
            );

            /// Destroys the DHP memory manager
            /**
                The member function destroys the DHP global object. After calling this function you may \b NOT
                use CDS data structures based on cds::gc::DHP. Usually, the \p Destruct function is called
                at the end of your \p main(). See cds::gc::dhp for an example.
            */
            static void CDS_STDCALL Destruct();

            /// Returns pointer to the GarbageCollector instance
            /**
                If the DHP GC is not initialized, a \p not_initialized exception is thrown
            */
            static GarbageCollector&   instance()
            {
                if ( m_pManager == nullptr )
                    throw not_initialized();
                return *m_pManager;
            }

            /// Checks if the global GC object is constructed and may be used
            static bool isUsed() CDS_NOEXCEPT
            {
                return m_pManager != nullptr;
            }

        public:
            //@{
            /// Internal interface

            /// Allocates a guard
            details::guard_data * allocGuard()
            {
                return m_GuardPool.alloc();
            }

            /// Frees guard \p pGuard for reuse in the future
            void freeGuard(details::guard_data * pGuard )
            {
                m_GuardPool.free( pGuard );
            }

            /// Allocates a guard list for a thread.
            details::guard_data * allocGuardList( size_t nCount )
            {
                return m_GuardPool.allocList( nCount );
            }

            /// Frees the thread's guard list pointed to by \p pList
            void freeGuardList( details::guard_data * pList )
            {
                m_GuardPool.freeList( pList );
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the thread's array of retired pointers for deferred reclamation
            /**@anchor dhp_gc_retirePtr
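
                A short sketch (hedged; \p Foo, \p fooDisposer and \p pFoo are placeholders):
                \code
                struct Foo { int nData; };
                static void fooDisposer( Foo * p ) { delete p; }

                // after the last access to the object through shared pointers:
                cds::gc::dhp::GarbageCollector::instance().retirePtr( pFoo, fooDisposer );
                \endcode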
            */
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
            }

            /// Places retired pointer \p p into the thread's array of retired pointers for deferred reclamation
            void retirePtr( retired_ptr const& p )
            {
                if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
                    scan();
            }

        protected:
            /// Liberate function
            /** @anchor dhp_gc_liberate
                The main function of the Dynamic Hazard Pointer algorithm. It tries to free retired pointers that are not
                trapped by any guard.
            */
            void scan();
            //@}

        public:
            /// Gets internal statistics
            InternalState& getInternalState(InternalState& stat) const
            {
                return stat = m_stat;
            }

            /// Checks if internal statistics are enabled
            bool              isStatisticsEnabled() const
            {
                return m_bStatEnabled;
            }

            /// Enables/disables internal statistics
            bool  enableStatistics( bool bEnable )
            {
                bool bEnabled = m_bStatEnabled;
                m_bStatEnabled = bEnable;
                return bEnabled;
            }

        private:
            GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount, size_t nEpochCount );
            ~GarbageCollector();
        };

        /// Thread GC
        /**
            To use the Dynamic Hazard Pointer reclamation schema, each thread must be linked with an object of the ThreadGC class
            that interacts with the GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
            at the start of each thread that uses the DHP GC. Before terminating a thread linked to the DHP GC it is necessary to call
            \ref cds_threading "cds::threading::Manager::detachThread()".

            The ThreadGC object maintains two lists:
            \li Thread guard list: the list of thread-local guards (linked by the \p pThreadNext field)
            \li Free guard list: the list of thread-local free guards (linked by the \p pNextFree field)
            The free guard list is a subset of the thread guard list.
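
            A hedged sketch of the expected per-thread discipline (it uses the public
            cds::threading::Manager API mentioned above):
            \code
            void thread_func()
            {
                // links the thread with a ThreadGC object
                cds::threading::Manager::attachThread();

                // ... work with DHP-based containers ...

                // must be called before the thread exits
                cds::threading::Manager::detachThread();
            }
            \endcode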
        */
        class ThreadGC
        {
            GarbageCollector&   m_gc    ;   ///< reference to GC singleton
            details::guard_data *    m_pList ;   ///< Local list of guards owned by the thread
            details::guard_data *    m_pFree ;   ///< The list of free guards from m_pList

        public:
            /// Default constructor
            ThreadGC()
                : m_gc( GarbageCollector::instance() )
                , m_pList( nullptr )
                , m_pFree( nullptr )
            {}

            /// The object is not copy-constructible
            ThreadGC( ThreadGC const& ) = delete;

            /// Dtor calls fini()
            ~ThreadGC()
            {
                fini();
            }

            /// Initialization. Repeated calls are allowed
            void init()
            {
                if ( !m_pList ) {
                    m_pList =
                        m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
                }
            }

            /// Finalization. Repeated calls are allowed
            void fini()
            {
                if ( m_pList ) {
                    m_gc.freeGuardList( m_pList );
                    m_pList =
                        m_pFree = nullptr;
                }
            }

        public:
            /// Initializes guard \p g
            void allocGuard( dhp::details::guard& g )
            {
                assert( m_pList != nullptr );
                if ( !g.m_pGuard ) {
                    if ( m_pFree ) {
                        g.m_pGuard = m_pFree;
                        m_pFree = m_pFree->pNextFree.load( atomics::memory_order_relaxed );
                    }
                    else {
                        g.m_pGuard = m_gc.allocGuard();
                        g.m_pGuard->pThreadNext = m_pList;
                        m_pList = g.m_pGuard;
                    }
                }
            }

            /// Frees guard \p g
            void freeGuard( dhp::details::guard& g )
            {
                assert( m_pList != nullptr );
                if ( g.m_pGuard ) {
                    g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                    g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                    m_pFree = g.m_pGuard;
                    g.m_pGuard = nullptr;
                }
            }

            /// Initializes guard array \p arr
            template <size_t Count>
            void allocGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );
                size_t nCount = 0;

                while ( m_pFree && nCount < Count ) {
                    arr[nCount].set_guard( m_pFree );
                    m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
                    ++nCount;
                }

                while ( nCount < Count ) {
                    details::guard& g = arr[nCount++];
                    g.set_guard( m_gc.allocGuard() );
                    g.get_guard()->pThreadNext = m_pList;
                    m_pList = g.get_guard();
                }
            }

            /// Frees guard array \p arr
            template <size_t Count>
            void freeGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );

                details::guard_data * pGuard;
                for ( size_t i = 0; i < Count - 1; ++i ) {
                    pGuard = arr[i].get_guard();
                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                    pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
                }
                pGuard = arr[Count-1].get_guard();
                pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                m_pFree = arr[0].get_guard();
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the list of retired pointers for deferred reclamation
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                m_gc.retirePtr( p, pFunc );
            }

            /// Runs the retiring cycle
            void scan()
            {
                m_gc.scan();
            }
        };
    }   // namespace dhp
}}  // namespace cds::gc
//@endcond

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(pop)
#endif

#endif // #ifndef CDSLIB_GC_DETAILS_DHP_H