Fixed rare double-free in DHP SMR
libcds: cds/gc/details/dhp.h
//$$CDS-header$$

#ifndef CDSLIB_GC_DETAILS_DHP_H
#define CDSLIB_GC_DETAILS_DHP_H

#include <mutex>        // unique_lock
#include <cds/algo/atomic.h>
#include <cds/algo/int_algo.h>
#include <cds/gc/details/retired_ptr.h>
#include <cds/details/aligned_allocator.h>
#include <cds/details/allocator.h>
#include <cds/sync/spinlock.h>

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(push)
#   pragma warning(disable:4251)    // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
#endif

//@cond
namespace cds { namespace gc {

    /// Dynamic Hazard Pointer reclamation schema
    /**
        The cds::gc::dhp namespace and its members are the internal representation of the GC and should not be used directly.
        Use the cds::gc::DHP class in your code.

        The Dynamic Hazard Pointer (DHP) garbage collector is a singleton. The main user-level part of the DHP schema is
        the GC class and its nested classes. Before using any DHP-related class you must initialize the DHP garbage collector
        by constructing a cds::gc::DHP object at the beginning of your main().
        See the cds::gc::DHP class for explanation.

        \par Implementation issues
            The global list of free guards (\p cds::gc::dhp::details::guard_allocator) is protected by a spin-lock (i.e. serialized).
            This serialization is unlikely to be a significant performance bottleneck, because each thread has its own set
            of guards allocated from the global list of free guards, and the global list is accessed only when
            all of the thread's guards are busy. In that case the thread allocates the next block of guards from the global list.
            Guards allocated for a thread are pushed back to the global list only when the thread terminates.
    */
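
    // A minimal initialization sketch using the public user-level API (cds::Initialize(),
    // cds::gc::DHP, cds::threading::Manager::attachThread()); shown for orientation only,
    // since everything in this header is internal:
    //   int main() {
    //       cds::Initialize();                          // initialize cds infrastructure
    //       {
    //           cds::gc::DHP dhpGC;                     // construct the DHP singleton
    //           cds::threading::Manager::attachThread();// attach this thread to the GC
    //           // ... use DHP-based containers ...
    //       }                                           // ~DHP() destroys the singleton
    //       cds::Terminate();
    //   }
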
    namespace dhp {

        // Forward declarations
        class Guard;
        template <size_t Count> class GuardArray;
        class ThreadGC;
        class GarbageCollector;

        /// Retired pointer type
        typedef cds::gc::details::retired_ptr retired_ptr;

        using cds::gc::details::free_retired_ptr_func;

        /// Details of Dynamic Hazard Pointer algorithm
        namespace details {

            // Forward declaration
            class liberate_set;

            /// Retired pointer buffer node
            struct retired_ptr_node {
                retired_ptr         m_ptr   ;   ///< retired pointer
                atomics::atomic<retired_ptr_node *>  m_pNext     ;   ///< next retired pointer in buffer
                atomics::atomic<retired_ptr_node *>  m_pNextFree ;   ///< next item in free list of \p retired_ptr_node
            };

            /// Internal guard representation
            struct guard_data {
                typedef void * guarded_ptr;  ///< type of value guarded

                atomics::atomic<guarded_ptr>  pPost;       ///< pointer guarded
                atomics::atomic<guard_data *> pGlobalNext; ///< next item of global list of allocated guards
                atomics::atomic<guard_data *> pNextFree;   ///< pointer to the next item in global or thread-local free-list

                guard_data * pThreadNext; ///< next item of thread's local list of guards

                guard_data() CDS_NOEXCEPT
                    : pPost( nullptr )
                    , pGlobalNext( nullptr )
                    , pNextFree( nullptr )
                    , pThreadNext( nullptr )
                {}

                void init() CDS_NOEXCEPT
                {
                    pPost.store( nullptr, atomics::memory_order_relaxed );
                }

                /// Checks if the guard is free, that is, it does not guard any pointer
                bool isFree() const CDS_NOEXCEPT
                {
                    return pPost.load( atomics::memory_order_acquire ) == nullptr;
                }
            };
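
            // Shape of the protect protocol built on pPost (a sketch; the real loop lives
            // in the user-level guard code, not in this struct; 'src' is an assumed
            // atomic pointer source):
            //   void * p;
            //   do {
            //       p = src.load( atomics::memory_order_acquire );       // read the pointer
            //       g.pPost.store( p, atomics::memory_order_release );   // publish it as guarded
            //   } while ( p != src.load( atomics::memory_order_acquire )); // re-check it is still current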

            /// Guard allocator
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class guard_allocator
            {
                cds::details::Allocator<details::guard_data>  m_GuardAllocator    ;   ///< guard allocator

                atomics::atomic<guard_data *>  m_GuardList;     ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
                atomics::atomic<guard_data *>  m_FreeGuardList; ///< Head of free guard list (linked by guard_data::pNextFree field)
                cds::sync::spin                m_freeListLock;  ///< Protects m_FreeGuardList

                /*
                    Unfortunately, access to the free-guard list is lock-based:
                    lock-free manipulation of the guard free-list is ABA-prone.
                    TODO: manage m_FreeGuardList in a lock-free manner.
                */

            private:
                /// Allocates a new guard from the heap
                guard_data * allocNew()
                {
                    //TODO: the allocator should use block allocation

                    details::guard_data * pGuard = m_GuardAllocator.New();

                    // Link the guard into the global list.
                    // m_GuardList is an accumulating list: it cannot support concurrent deletion,
                    // so the ABA problem is impossible for it.
                    details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
                    do {
                        pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
                        // pHead is updated by compare_exchange_weak on failure
                    } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    pGuard->init();
                    return pGuard;
                }

            public:
                // Default ctor
                guard_allocator() CDS_NOEXCEPT
                    : m_GuardList( nullptr )
                    , m_FreeGuardList( nullptr )
                {}

                // Destructor
                ~guard_allocator()
                {
                    guard_data * pNext;
                    for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
                        pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
                        m_GuardAllocator.Delete( pData );
                    }
                }

                /// Allocates a guard from the free list, or from the heap if the free list is empty
                guard_data * alloc()
                {
                    // Try to pop a guard from the free-list
                    details::guard_data * pGuard;

                    {
                        std::unique_lock<cds::sync::spin> al( m_freeListLock );
                        pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
                        if ( pGuard )
                            m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    }
                    if ( !pGuard )
                        return allocNew();

                    pGuard->init();
                    return pGuard;
                }

                /// Frees guard \p pGuard
                /**
                    The function places the guard \p pGuard into the free-list
                */
                void free( guard_data * pGuard ) CDS_NOEXCEPT
                {
                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );

                    std::unique_lock<cds::sync::spin> al( m_freeListLock );
                    pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
                }

                /// Allocates a list of guards
                /**
                    The list returned is linked by the guards' \p pThreadNext and \p pNextFree fields.

                    cds::gc::dhp::ThreadGC supporting method
                */
                guard_data * allocList( size_t nCount )
                {
                    assert( nCount != 0 );

                    guard_data * pHead;
                    guard_data * pLast;

                    pHead =
                        pLast = alloc();

                    // The guard list allocated is private to the thread,
                    // so we can use relaxed memory order
                    while ( --nCount ) {
                        guard_data * p = alloc();
                        pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );

                    return pHead;
                }

                /// Frees a list of guards
                /**
                    The list \p pList is linked by the guards' \p pThreadNext field.

                    cds::gc::dhp::ThreadGC supporting method
                */
                void freeList( guard_data * pList ) CDS_NOEXCEPT
                {
                    assert( pList != nullptr );

                    guard_data * pLast = pList;
                    while ( pLast->pThreadNext ) {
                        pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
                        guard_data * p;
                        pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    std::unique_lock<cds::sync::spin> al( m_freeListLock );
                    pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
                }

                /// Returns the head of the allocated guard list
                guard_data * begin() CDS_NOEXCEPT
                {
                    return m_GuardList.load(atomics::memory_order_acquire);
                }
            };

            /// Retired pointer buffer
            /**
                The buffer of retired nodes ready for liberating.
                When the buffer size exceeds a threshold, the GC calls the \p scan() procedure to free
                retired nodes.
            */
            class retired_ptr_buffer
            {
                atomics::atomic<retired_ptr_node *>  m_pHead     ;   ///< head of buffer
                atomics::atomic<size_t>              m_nItemCount;   ///< buffer's item count

            public:
                retired_ptr_buffer() CDS_NOEXCEPT
                    : m_pHead( nullptr )
                    , m_nItemCount(0)
                {}

                ~retired_ptr_buffer() CDS_NOEXCEPT
                {
                    assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
                }

                /// Pushes a new node into the buffer. Returns the current buffer size
                size_t push( retired_ptr_node& node ) CDS_NOEXCEPT
                {
                    retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
                    do {
                        node.m_pNext.store( pHead, atomics::memory_order_relaxed );
                        // pHead is updated by compare_exchange_weak on failure
                    } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
                }
                /// Pushes the [pFirst, pLast] list linked by the \p m_pNext field. Returns the current buffer size
                size_t push_list( retired_ptr_node* pFirst, retired_ptr_node* pLast, size_t nSize )
                {
                    assert( pFirst );
                    assert( pLast );

                    retired_ptr_node * pHead = m_pHead.load( atomics::memory_order_acquire );
                    do {
                        pLast->m_pNext.store( pHead, atomics::memory_order_relaxed );
                        // pHead is updated by compare_exchange_weak on failure
                    } while ( !m_pHead.compare_exchange_weak( pHead, pFirst, atomics::memory_order_release, atomics::memory_order_relaxed ) );

                    // fetch_add returns the previous count, so add nSize (not 1) to report the new size
                    return m_nItemCount.fetch_add( nSize, atomics::memory_order_relaxed ) + nSize;
                }

                /// Result of the \ref dhp_gc_privatize "privatize" function.
                /**
                    The \p privatize function returns the retired node list as \p first and the size of that list as \p second.
                */
                typedef std::pair<retired_ptr_node *, size_t> privatize_result;

                /// Gets the current list of retired pointers and clears the list
                /**@anchor dhp_gc_privatize
                */
                privatize_result privatize() CDS_NOEXCEPT
                {
                    privatize_result res;

                    // The item counter is needed only as a threshold for the \p scan() function,
                    // so we may clear it without synchronization with m_pHead
                    res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );

                    res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );

                    return res;
                }

                /// Returns the current size of the buffer (approximate)
                size_t size() const CDS_NOEXCEPT
                {
                    return m_nItemCount.load(atomics::memory_order_relaxed);
                }
            };
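
            // How a consumer such as GarbageCollector::scan() is expected to drain the
            // buffer (a sketch under stated assumptions; the real scan() is implemented
            // elsewhere and also checks each pointer against the live guards):
            //   retired_ptr_buffer::privatize_result range = buf.privatize();
            //   size_t nCount = range.second;
            //   for ( retired_ptr_node * p = range.first; p; ) {
            //       retired_ptr_node * pNext = p->m_pNext.load( atomics::memory_order_relaxed );
            //       // if p->m_ptr is not protected by any guard: p->m_ptr.free();
            //       p = pNext;
            //   }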

            /// Pool of retired pointers
            /**
                The class acts as an allocator of retired nodes.
                Retired pointers are linked into a lock-free list.
            */
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class retired_ptr_pool {
                /// Pool item
                typedef retired_ptr_node    item;

                /// Count of items in a block (sized so a block occupies about 1 KiB)
                static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;

                /// Pool block
                struct block {
                    atomics::atomic<block *> pNext;     ///< next block
                    item        items[m_nItemPerBlock]; ///< item array
                };

                atomics::atomic<block *> m_pBlockListHead;   ///< head of the allocated block list

                // To solve the ABA problem we use an epoch-based approach
                unsigned int const m_nEpochBitmask;             ///< Epoch bitmask (epoch count rounded up to a power of 2, minus 1)
                atomics::atomic<unsigned int> m_nCurEpoch;      ///< Current epoch
                atomics::atomic<item *>* m_pEpochFree;          ///< List of free items per epoch
                atomics::atomic<item *>  m_pGlobalFreeHead;     ///< Head of the unallocated item list

                typedef cds::details::Allocator< block, Alloc > block_allocator;
                typedef cds::details::Allocator< atomics::atomic<item *>, Alloc > epoch_array_alloc;

            private:
                void allocNewBlock()
                {
                    // allocate a new block
                    block * pNew = block_allocator().New();

                    // link items within the block
                    item * pLastItem = pNew->items + m_nItemPerBlock - 1;
                    for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
                        pItem->m_pNextFree.store( pItem + 1, atomics::memory_order_release );
                        CDS_STRICT_DO( pItem->m_pNext.store( nullptr, atomics::memory_order_relaxed ));
                    }

                    // link the new block into the block list
                    {
                        block * pHead = m_pBlockListHead.load(atomics::memory_order_relaxed);
                        do {
                            pNew->pNext.store( pHead, atomics::memory_order_relaxed );
                            // pHead is updated by compare_exchange_weak on failure
                        } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_relaxed, atomics::memory_order_relaxed ));
                    }

                    // link the block's items into the free list
                    {
                        item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_relaxed);
                        do {
                            pLastItem->m_pNextFree.store( pHead, atomics::memory_order_release );
                            // pHead is updated by compare_exchange_weak on failure
                        } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
                    }
                }

                unsigned int current_epoch() const CDS_NOEXCEPT
                {
                    return m_nCurEpoch.load(atomics::memory_order_acquire) & m_nEpochBitmask;
                }

                unsigned int next_epoch() const CDS_NOEXCEPT
                {
                    return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & m_nEpochBitmask;
                }

            public:
                retired_ptr_pool( unsigned int nEpochCount = 8 )
                    : m_pBlockListHead( nullptr )
                    , m_nEpochBitmask( static_cast<unsigned int>(beans::ceil2(nEpochCount)) - 1 )
                    , m_nCurEpoch(0)
                    , m_pEpochFree( epoch_array_alloc().NewArray( m_nEpochBitmask + 1))
                    , m_pGlobalFreeHead( nullptr )
                {
                    for (unsigned int i = 0; i <= m_nEpochBitmask; ++i )
                        m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );

                    allocNewBlock();
                }

                ~retired_ptr_pool()
                {
                    block_allocator a;
                    block * p;
                    for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
                        p = pBlock->pNext.load( atomics::memory_order_relaxed );
                        a.Delete( pBlock );
                    }

                    epoch_array_alloc().Delete( m_pEpochFree, m_nEpochBitmask + 1 );
                }

                /// Increments the current epoch
                void inc_epoch() CDS_NOEXCEPT
                {
                    m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
                }

                /// Allocates a new retired pointer
                retired_ptr_node&  alloc()
                {
                    unsigned int nEpoch;
                    item * pItem;
                    for (;;) {
                        pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
                        if ( !pItem )
                            goto retry;
                        if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem,
                                                                         pItem->m_pNextFree.load(atomics::memory_order_acquire),
                                                                         atomics::memory_order_acquire, atomics::memory_order_relaxed ))
                        {
                            goto success;
                        }
                    }

                    // The epoch free list is empty:
                    // allocate from the global free list
                retry:
                    pItem = m_pGlobalFreeHead.load( atomics::memory_order_relaxed );
                    do {
                        if ( !pItem ) {
                            allocNewBlock();
                            goto retry;
                        }
                        // pItem is updated by compare_exchange_weak on failure
                    } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem,
                                                                        pItem->m_pNextFree.load(atomics::memory_order_acquire),
                                                                        atomics::memory_order_acquire, atomics::memory_order_relaxed ));

                success:
                    CDS_STRICT_DO( pItem->m_pNextFree.store( nullptr, atomics::memory_order_relaxed ));
                    return *pItem;
                }

                /// Allocates and initializes a new retired pointer
                retired_ptr_node& alloc( const retired_ptr& p )
                {
                    retired_ptr_node& node = alloc();
                    node.m_ptr = p;
                    return node;
                }

                /// Places the list [pHead, pTail] of retired pointers into the pool (frees the retired pointers)
                /**
                    The list is linked by the \p m_pNextFree field
                */
                void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail ) CDS_NOEXCEPT
                {
                    assert( pHead != nullptr );
                    assert( pTail != nullptr );

                    unsigned int nEpoch;
                    item * pCurHead;
                    do {
                        pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
                        pTail->m_pNextFree.store( pCurHead, atomics::memory_order_release );
                    } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
                }
            };
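
            // How the epoch scheme is intended to be driven (a sketch; 'pool' and 'rp' are
            // assumed names based on this class's interface, the real driver is
            // GarbageCollector::scan()):
            //   retired_ptr_pool<> pool;
            //   retired_ptr_node& node = pool.alloc( rp );  // pops from the current epoch's
            //                                               // free list, or the global list
            //   // ... node waits in retired_ptr_buffer until reclamation ...
            //   pool.free_range( &node, &node );  // pushed onto the *previous* epoch's list
            //   pool.inc_epoch();                 // a freed item is handed out again only when
            //                                     // the epoch counter wraps back to its slot,
            //                                     // which is the grace period that defeats ABA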

            /// Uninitialized guard
            class guard
            {
                friend class dhp::ThreadGC;
            protected:
                details::guard_data * m_pGuard ;    ///< Pointer to guard data

            public:
                /// Initializes an empty guard.
                CDS_CONSTEXPR guard() CDS_NOEXCEPT
                    : m_pGuard( nullptr )
                {}

                /// Copy-ctor is disabled
                guard( guard const& ) = delete;

                /// Move-ctor is disabled
                guard( guard&& ) = delete;

                /// Object destructor, does nothing
                ~guard() CDS_NOEXCEPT
                {}

                /// Gets the current guarded pointer
                void * get( atomics::memory_order order = atomics::memory_order_acquire ) const CDS_NOEXCEPT
                {
                    assert( m_pGuard != nullptr );
                    return m_pGuard->pPost.load( order );
                }

                /// Guards pointer \p p
                void set( void * p, atomics::memory_order order = atomics::memory_order_release ) CDS_NOEXCEPT
                {
                    assert( m_pGuard != nullptr );
                    m_pGuard->pPost.store( p, order );
                }

                /// Clears the guard
                void clear( atomics::memory_order order = atomics::memory_order_relaxed ) CDS_NOEXCEPT
                {
                    assert( m_pGuard != nullptr );
                    m_pGuard->pPost.store( nullptr, order );
                }

                /// Guards pointer \p p
                template <typename T>
                T * operator =(T * p) CDS_NOEXCEPT
                {
                    set( reinterpret_cast<void *>( const_cast<T *>(p) ));
                    return p;
                }

                std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
                {
                    clear();
                    return nullptr;
                }

            public: // for ThreadGC.
                /*
                    GCC cannot compile code for template versions of ThreadGC::allocGuard/freeGuard:
                    the compiler produces "error: 'cds::gc::dhp::details::guard_data* cds::gc::dhp::details::guard::m_pGuard' is protected"
                    despite the fact that ThreadGC is declared as a friend of the guard class.
                    Therefore, we have to add public set_guard/get_guard functions.
                */
                /// Set guard data
                void set_guard( details::guard_data * pGuard ) CDS_NOEXCEPT
                {
                    assert( m_pGuard == nullptr );
                    m_pGuard = pGuard;
                }

                /// Get current guard data
                details::guard_data * get_guard() CDS_NOEXCEPT
                {
                    return m_pGuard;
                }
                /// Get current guard data
                details::guard_data * get_guard() const CDS_NOEXCEPT
                {
                    return m_pGuard;
                }

                details::guard_data * release_guard() CDS_NOEXCEPT
                {
                    details::guard_data * p = m_pGuard;
                    m_pGuard = nullptr;
                    return p;
                }

                bool is_initialized() const
                {
                    return m_pGuard != nullptr;
                }
            };

        } // namespace details

        /// Guard
        /**
            This class represents an auto guard: the ctor allocates a guard from the guard pool,
            the dtor returns the guard back to the pool of free guards.
        */
        class Guard: public details::guard
        {
            typedef details::guard    base_class;
            friend class ThreadGC;
        public:
            /// Allocates a guard from the ThreadGC object of the current thread
            Guard(); // inline in dhp_impl.h

            /// Returns the guard back to the pool of free guards
            ~Guard();    // inline in dhp_impl.h

            /// Guards pointer \p p
            template <typename T>
            T * operator =(T * p) CDS_NOEXCEPT
            {
                return base_class::operator =<T>( p );
            }

            std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
            {
                return base_class::operator =(nullptr);
            }
        };
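
        // Typical use (a sketch; assumes the current thread is attached to the DHP GC,
        // and that 'src' is an assumed atomic pointer of type atomics::atomic<Foo*>):
        //   Guard g;                      // ctor takes a guard from the thread's pool
        //   Foo * p;
        //   do {
        //       p = src.load( atomics::memory_order_acquire );
        //       g = p;                    // publish p as guarded
        //   } while ( p != src.load( atomics::memory_order_acquire ));
        //   // p is now safe to dereference until g is cleared or destroyed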

        /// Array of guards
        /**
            This class represents an array of auto guards: the ctor allocates \p Count guards from the guard pool,
            the dtor returns the allocated guards back to the pool.
        */
        template <size_t Count>
        class GuardArray
        {
            details::guard      m_arr[Count]    ;    ///< array of guards
            const static size_t c_nCapacity = Count ;   ///< Array capacity (equal to \p Count template parameter)

        public:
            /// Rebind array for other size \p OtherCount
            template <size_t OtherCount>
            struct rebind {
                typedef GuardArray<OtherCount>  other   ;   ///< rebinding result
            };

        public:
            /// Allocates an array of guards from the ThreadGC object of the current thread
            GuardArray();    // inline in dhp_impl.h

            /// The object is not copy-constructible
            GuardArray( GuardArray const& ) = delete;

            /// The object is not move-constructible
            GuardArray( GuardArray&& ) = delete;

            /// Returns the guards back to the pool
            ~GuardArray();    // inline in dhp_impl.h

            /// Returns the capacity of the array
            CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
            {
                return c_nCapacity;
            }

            /// Returns a reference to the guard at index \p nIndex (0 <= \p nIndex < \p Count)
            details::guard& operator []( size_t nIndex ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Returns a reference to the guard at index \p nIndex (0 <= \p nIndex < \p Count) [const version]
            const details::guard& operator []( size_t nIndex ) const CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                return m_arr[nIndex];
            }

            /// Sets the guard at index \p nIndex (0 <= \p nIndex < \p Count) to \p p
            template <typename T>
            void set( size_t nIndex, T * p ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].set( p );
            }

            /// Clears (sets to \p nullptr) the guard at index \p nIndex
            void clear( size_t nIndex ) CDS_NOEXCEPT
            {
                assert( nIndex < capacity() );
                m_arr[nIndex].clear();
            }

            /// Clears all guards in the array
            void clearAll() CDS_NOEXCEPT
            {
                for ( size_t i = 0; i < capacity(); ++i )
                    clear(i);
            }
        };
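
        // Example shape of use (a sketch; 'pPrev' and 'pCur' are assumed pointers from a
        // hypothetical list traversal that must hold two hazard pointers at once):
        //   GuardArray<2> guards;         // ctor takes two guards from the thread's pool
        //   guards.set( 0, pPrev );
        //   guards.set( 1, pCur );
        //   // ... both nodes are protected while traversing ...
        //   guards.clearAll();            // or simply let the dtor return the guards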

        /// Memory manager (Garbage collector)
        class CDS_EXPORT_API GarbageCollector
        {
        private:
            friend class ThreadGC;

            /// Internal GC statistics
            struct internal_stat
            {
                atomics::atomic<size_t>  m_nGuardCount       ;   ///< Total guard count
                atomics::atomic<size_t>  m_nFreeGuardCount   ;   ///< Count of free guards

                internal_stat()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}
            };

        public:
            /// Exception "No GarbageCollector object is created"
            class not_initialized : public std::runtime_error
            {
            public:
                //@cond
                not_initialized()
                    : std::runtime_error( "Global DHP GarbageCollector is not initialized" )
                {}
                //@endcond
            };

            /// Internal GC statistics
            struct InternalState
            {
                size_t m_nGuardCount       ;   ///< Total guard count
                size_t m_nFreeGuardCount   ;   ///< Count of free guards

                //@cond
                InternalState()
                    : m_nGuardCount(0)
                    , m_nFreeGuardCount(0)
                {}

                InternalState& operator =( internal_stat const& s )
                {
                    m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
                    m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);

                    return *this;
                }
                //@endcond
            };

        private:
            static GarbageCollector * m_pManager    ;   ///< GC global instance

            atomics::atomic<size_t>  m_nLiberateThreshold;   ///< Max size of retired pointer buffer to call \p scan()
            const size_t             m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC

            details::guard_allocator<>      m_GuardPool         ;   ///< Guard pool
            details::retired_ptr_pool<>     m_RetiredAllocator  ;   ///< Pool of free retired pointers
            details::retired_ptr_buffer     m_RetiredBuffer     ;   ///< Retired pointer buffer for liberating

            internal_stat   m_stat  ;   ///< Internal statistics
            bool            m_bStatEnabled  ;   ///< Internal statistics enabled

        public:
            /// Initializes the DHP memory manager singleton
            /**
                This member function creates and initializes the DHP global object.
                It should be called before using any CDS data structure based on the cds::gc::DHP GC. Usually,
                this member function is called in the \p main() function. See cds::gc::dhp for an example.
                After calling this function you may use CDS data structures based on cds::gc::DHP.

                \par Parameters
                - \p nLiberateThreshold - the \p scan() threshold. When the count of retired pointers reaches this value,
                    the \ref dhp_gc_liberate "scan()" member function is called to free retired pointers.
                    If \p nLiberateThreshold <= 1, \p scan() is called after each \ref dhp_gc_retirePtr "retirePtr" call.
                - \p nInitialThreadGuardCount - the initial count of guards allocated for a ThreadGC. When a thread
                    is initialized, the GC allocates a local guard pool for the thread from the common guard pool.
                    When needed, the thread's local guard pool grows automatically from the common pool.
                    When the thread terminates, its guard pool is returned to the GC's common pool.
                - \p nEpochCount: internally, the DHP memory manager uses an epoch-based schema to solve the
                    ABA problem for its internal data. \p nEpochCount specifies the epoch count,
                    i.e. the count of simultaneously working threads that remove elements
                    of DHP-based concurrent data structures. The default value is 8.
            */
            static void CDS_STDCALL Construct(
                size_t nLiberateThreshold = 1024
                , size_t nInitialThreadGuardCount = 8
                , size_t nEpochCount = 8
            );
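
            // Sketch of the expected Construct/Destruct pairing (normally performed for you
            // by the user-level cds::gc::DHP ctor/dtor rather than called directly):
            //   cds::gc::dhp::GarbageCollector::Construct( 1024, 8, 8 );
            //   // ... attach threads, use DHP-based containers ...
            //   cds::gc::dhp::GarbageCollector::Destruct();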

            /// Destroys the DHP memory manager
            /**
                The member function destroys the DHP global object. After calling this function you may \b NOT
                use CDS data structures based on cds::gc::DHP. Usually, the \p Destruct function is called
                at the end of your \p main(). See cds::gc::dhp for an example.
            */
            static void CDS_STDCALL Destruct();

            /// Returns a pointer to the GarbageCollector instance
            /**
                If the DHP GC is not initialized, a \p not_initialized exception is thrown
            */
            static GarbageCollector&   instance()
            {
                if ( m_pManager == nullptr )
                    throw not_initialized();
                return *m_pManager;
            }

            /// Checks if the global GC object is constructed and may be used
            static bool isUsed() CDS_NOEXCEPT
            {
                return m_pManager != nullptr;
            }

        public:
            //@{
            /// Internal interface

            /// Allocates a guard
            details::guard_data * allocGuard()
            {
                return m_GuardPool.alloc();
            }

            /// Frees guard \p pGuard for reuse in the future
            void freeGuard(details::guard_data * pGuard )
            {
                m_GuardPool.free( pGuard );
            }

            /// Allocates a guard list for a thread.
            details::guard_data * allocGuardList( size_t nCount )
            {
                return m_GuardPool.allocList( nCount );
            }

            /// Frees the thread's guard list pointed to by \p pList
            void freeGuardList( details::guard_data * pList )
            {
                m_GuardPool.freeList( pList );
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the buffer of retired pointers for deferred reclamation
            /**@anchor dhp_gc_retirePtr
            */
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
            }

            /// Places retired pointer \p p into the buffer of retired pointers for deferred reclamation
            void retirePtr( retired_ptr const& p )
            {
                if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
                    scan();
            }
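
            // Shape of a typical retire call from container code (a sketch; 'Foo' and
            // 'fooDisposer' are assumed names, not part of this header):
            //   static void fooDisposer( Foo * p ) { delete p; }
            //   ...
            //   GarbageCollector::instance().retirePtr( pNode, fooDisposer );
            //   // pNode is physically deleted later, by scan(), once no guard protects it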

        protected:
            /// Liberate function
            /** @anchor dhp_gc_liberate
                The main function of the Dynamic Hazard Pointer algorithm. It tries to free retired pointers that are not
                trapped by any guard.
            */
            void scan();
            //@}

        public:
            /// Get internal statistics
            InternalState& getInternalState(InternalState& stat) const
            {
                return stat = m_stat;
            }

            /// Checks if internal statistics are enabled
            bool              isStatisticsEnabled() const
            {
                return m_bStatEnabled;
            }

            /// Enables/disables internal statistics
            bool  enableStatistics( bool bEnable )
            {
                bool bEnabled = m_bStatEnabled;
                m_bStatEnabled = bEnable;
                return bEnabled;
            }

        private:
            GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount, size_t nEpochCount );
            ~GarbageCollector();
        };

        /// Thread GC
        /**
            To use the Dynamic Hazard Pointer reclamation schema, each thread object must be linked with an object of the ThreadGC class
            that interacts with the GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
            at the start of each thread that uses the DHP GC. Before terminating a thread linked to the DHP GC, it is necessary to call
            \ref cds_threading "cds::threading::Manager::detachThread()".

            The ThreadGC object maintains two lists:
            \li Thread guard list: the list of thread-local guards (linked by the \p pThreadNext field)
            \li Free guard list: the list of thread-local free guards (linked by the \p pNextFree field)
            The free guard list is a subset of the thread guard list.
        */
        class ThreadGC
        {
            GarbageCollector&   m_gc    ;   ///< reference to the GC singleton
            details::guard_data *    m_pList ;   ///< Local list of guards owned by the thread
            details::guard_data *    m_pFree ;   ///< The list of free guards from m_pList

        public:
            /// Default constructor
            ThreadGC()
                : m_gc( GarbageCollector::instance() )
                , m_pList( nullptr )
                , m_pFree( nullptr )
            {}

            /// The object is not copy-constructible
            ThreadGC( ThreadGC const& ) = delete;

            /// Dtor calls fini()
            ~ThreadGC()
            {
                fini();
            }

            /// Initialization. Repeated calls are allowed
            void init()
            {
                if ( !m_pList ) {
                    m_pList =
                        m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
                }
            }

            /// Finalization. Repeated calls are allowed
            void fini()
            {
                if ( m_pList ) {
                    m_gc.freeGuardList( m_pList );
                    m_pList =
                        m_pFree = nullptr;
                }
            }

        public:
            /// Initializes guard \p g
            void allocGuard( dhp::details::guard& g )
            {
                assert( m_pList != nullptr );
                if ( !g.m_pGuard ) {
                    if ( m_pFree ) {
                        g.m_pGuard = m_pFree;
                        m_pFree = m_pFree->pNextFree.load( atomics::memory_order_relaxed );
                    }
                    else {
                        g.m_pGuard = m_gc.allocGuard();
                        g.m_pGuard->pThreadNext = m_pList;
                        m_pList = g.m_pGuard;
                    }
                }
            }

            /// Frees guard \p g
            void freeGuard( dhp::details::guard& g )
            {
                assert( m_pList != nullptr );
                if ( g.m_pGuard ) {
                    g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                    g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                    m_pFree = g.m_pGuard;
                    g.m_pGuard = nullptr;
                }
            }

            /// Initializes guard array \p arr
            template <size_t Count>
            void allocGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );
                size_t nCount = 0;

                while ( m_pFree && nCount < Count ) {
                    arr[nCount].set_guard( m_pFree );
                    m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
                    ++nCount;
                }

                while ( nCount < Count ) {
                    details::guard& g = arr[nCount++];
                    g.set_guard( m_gc.allocGuard() );
                    g.get_guard()->pThreadNext = m_pList;
                    m_pList = g.get_guard();
                }
            }

            /// Frees guard array \p arr
            template <size_t Count>
            void freeGuard( GuardArray<Count>& arr )
            {
                assert( m_pList != nullptr );

                details::guard_data * pGuard;
                for ( size_t i = 0; i < Count - 1; ++i ) {
                    pGuard = arr[i].get_guard();
                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                    pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
                }
                pGuard = arr[Count-1].get_guard();
                pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
                pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                m_pFree = arr[0].get_guard();
            }

            /// Places retired pointer \p p and its deleter \p pFunc into the list of retired pointers for deferred reclamation
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *) )
            {
                m_gc.retirePtr( p, pFunc );
            }

            /// Runs the retiring cycle
            void scan()
            {
                m_gc.scan();
            }
        };
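
        // Per-thread lifecycle (a sketch; in practice cds::threading::Manager::attachThread()
        // and detachThread() own the ThreadGC object and call init()/fini() for you):
        //   ThreadGC tgc;                  // binds to the GarbageCollector singleton
        //   tgc.init();                    // take the initial guard list from the common pool
        //   details::guard g;
        //   tgc.allocGuard( g );           // served from the thread-local free list when possible
        //   // ... g = ptr; work; ...
        //   tgc.freeGuard( g );            // back to the thread-local free list
        //   tgc.fini();                    // return all guards to the common pool (~ThreadGC also does this)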
    }   // namespace dhp
}}  // namespace cds::gc
//@endcond

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(pop)
#endif

#endif // #ifndef CDSLIB_GC_DETAILS_DHP_H