2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_GC_HP_SMR_H
32 #define CDSLIB_GC_HP_SMR_H
35 #include <cds/gc/details/hp_common.h>
36 #include <cds/details/throw_exception.h>
37 #include <cds/details/static_functor.h>
38 #include <cds/details/marked_ptr.h>
39 #include <cds/user_setup/cache_line.h>
42 @page cds_garbage_collectors_comparison Hazard Pointer SMR implementations
43 @ingroup cds_garbage_collector
49 <th>%cds::gc::DHP</th>
52 <td>Max number of guarded (hazard) pointers per thread</td>
53 <td>limited (specified at construction time)</td>
54 <td>unlimited (dynamically allocated when needed)</td>
57 <td>Max number of retired pointers<sup>1</sup></td>
58 <td>bounded, specified at construction time</td>
59 <td>bounded, adaptive, depends on current thread count and number of hazard pointers for each thread</td>
63 <td>bounded, upper bound is specified at construction time</td>
68 <sup>1</sup>Unbounded count of retired pointers means a possibility of memory exhaustion.
72 /// @defgroup cds_garbage_collector Garbage collectors
75 /// Different safe memory reclamation schemas (garbage collectors)
76 /** @ingroup cds_garbage_collector
78 This namespace specifies different safe memory reclamation (SMR) algorithms.
79 See \ref cds_garbage_collector "Garbage collectors"
87 namespace cds { namespace gc {
88 /// Hazard pointer implementation details
90 using namespace cds::gc::hp::common;
92 /// Exception "Not enough Hazard Pointer"
93 class not_enought_hazard_ptr: public std::length_error
97 not_enought_hazard_ptr()
98 : std::length_error( "Not enough Hazard Pointer" )
103 /// Exception "Hazard Pointer SMR is not initialized"
104 class not_initialized: public std::runtime_error
109 : std::runtime_error( "Global Hazard Pointer SMR object is not initialized" )
115 /// Per-thread hazard pointer storage
116 class thread_hp_storage {
118 thread_hp_storage( guard* arr, size_t nSize ) CDS_NOEXCEPT
122 # ifdef CDS_ENABLE_HPSTAT
123 , alloc_guard_count_(0)
124 , free_guard_count_(0)
128 new( arr ) guard[nSize];
130 for ( guard* pEnd = arr + nSize - 1; arr < pEnd; ++arr )
131 arr->next_ = arr + 1;
132 arr->next_ = nullptr;
135 thread_hp_storage() = delete;
136 thread_hp_storage( thread_hp_storage const& ) = delete;
137 thread_hp_storage( thread_hp_storage&& ) = delete;
139 size_t capacity() const CDS_NOEXCEPT
144 bool full() const CDS_NOEXCEPT
146 return free_head_ == nullptr;
151 # ifdef CDS_DISABLE_SMR_EXCEPTION
155 CDS_THROW_EXCEPTION( not_enought_hazard_ptr());
157 guard* g = free_head_;
158 free_head_ = g->next_;
159 CDS_HPSTAT( ++alloc_guard_count_ );
163 void free( guard* g ) CDS_NOEXCEPT
165 assert( g >= array_ && g < array_ + capacity() );
169 g->next_ = free_head_;
171 CDS_HPSTAT( ++free_guard_count_ );
175 template< size_t Capacity>
176 size_t alloc( guard_array<Capacity>& arr )
179 guard* g = free_head_;
180 for ( i = 0; i < Capacity && g; ++i ) {
185 # ifdef CDS_DISABLE_SMR_EXCEPTION
186 assert( i == Capacity );
189 CDS_THROW_EXCEPTION( not_enought_hazard_ptr());
192 CDS_HPSTAT( alloc_guard_count_ += Capacity );
196 template <size_t Capacity>
197 void free( guard_array<Capacity>& arr ) CDS_NOEXCEPT
199 guard* gList = free_head_;
200 for ( size_t i = 0; i < Capacity; ++i ) {
206 CDS_HPSTAT( ++free_guard_count_ );
212 // cppcheck-suppress functionConst
215 for ( guard* cur = array_, *last = array_ + capacity(); cur < last; ++cur )
219 guard& operator[]( size_t idx )
221 assert( idx < capacity() );
226 static size_t calc_array_size( size_t capacity )
228 return sizeof( guard ) * capacity;
232 guard* free_head_; ///< Head of free guard list
233 guard* const array_; ///< HP array
234 size_t const capacity_; ///< HP array capacity
235 # ifdef CDS_ENABLE_HPSTAT
237 size_t alloc_guard_count_;
238 size_t free_guard_count_;
244 /// Per-thread retired array
248 retired_array( retired_ptr* arr, size_t capacity ) CDS_NOEXCEPT
250 , last_( arr + capacity )
252 # ifdef CDS_ENABLE_HPSTAT
253 , retire_call_count_(0)
257 retired_array() = delete;
258 retired_array( retired_array const& ) = delete;
259 retired_array( retired_array&& ) = delete;
261 size_t capacity() const CDS_NOEXCEPT
263 return last_ - retired_;
266 size_t size() const CDS_NOEXCEPT
268 return current_ - retired_;
271 bool push( retired_ptr&& p ) CDS_NOEXCEPT
274 CDS_HPSTAT( ++retire_call_count_ );
275 return ++current_ < last_;
278 retired_ptr* first() const CDS_NOEXCEPT
283 retired_ptr* last() const CDS_NOEXCEPT
288 void reset( size_t nSize ) CDS_NOEXCEPT
290 current_ = first() + nSize;
293 bool full() const CDS_NOEXCEPT
295 return current_ == last_;
298 static size_t calc_array_size( size_t capacity )
300 return sizeof( retired_ptr ) * capacity;
304 retired_ptr* current_;
305 retired_ptr* const last_;
306 retired_ptr* const retired_;
307 # ifdef CDS_ENABLE_HPSTAT
309 size_t retire_call_count_;
314 /// Internal statistics
316 size_t guard_allocated; ///< Count of allocated HP guards
317 size_t guard_freed; ///< Count of freed HP guards
318 size_t retired_count; ///< Count of retired pointers
319 size_t free_count; ///< Count of free pointers
320 size_t scan_count; ///< Count of \p scan() call
321 size_t help_scan_count; ///< Count of \p help_scan() call
323 size_t thread_rec_count; ///< Count of thread records
331 /// Clears all counters
340 thread_rec_count = 0;
347 thread_hp_storage hazards_; ///< Hazard pointers private to the thread
348 retired_array retired_; ///< Retired data private to the thread
350 stat stat_; ///< Internal statistics for the thread
352 char pad1_[cds::c_nCacheLineSize];
353 atomics::atomic<unsigned int> sync_; ///< dummy var to introduce synchronizes-with relationship between threads
354 char pad2_[cds::c_nCacheLineSize];
356 // CppCheck warn: pad1_ and pad2_ are uninitialized in ctor
357 // cppcheck-suppress uninitMemberVar
358 thread_data( guard* guards, size_t guard_count, retired_ptr* retired_arr, size_t retired_capacity )
359 : hazards_( guards, guard_count )
360 , retired_( retired_arr, retired_capacity )
364 thread_data() = delete;
365 thread_data( thread_data const& ) = delete;
366 thread_data( thread_data&& ) = delete;
370 sync_.fetch_add( 1, atomics::memory_order_acq_rel );
375 /// \p smr::scan() strategy
377 classic, ///< classic scan as described in Michael's works (see smr::classic_scan() )
378 inplace ///< inplace scan without allocation (see smr::inplace_scan() )
382 /// Hazard Pointer SMR (Safe Memory Reclamation)
385 struct thread_record;
388 /// Returns the instance of Hazard Pointer \ref smr
389 static smr& instance()
391 # ifdef CDS_DISABLE_SMR_EXCEPTION
392 assert( instance_ != nullptr );
395 CDS_THROW_EXCEPTION( not_initialized());
400 /// Creates Hazard Pointer SMR singleton
402 Hazard Pointer SMR is a singleton. If HP instance is not initialized then the function creates the instance.
403 Otherwise it does nothing.
405 The Michael's HP reclamation schema depends on three parameters:
406 - \p nHazardPtrCount - HP pointer count per thread. Usually it is a small number (2-4) depending on
407 the data structure algorithms. By default, if \p nHazardPtrCount = 0,
408 the function uses maximum of HP count for CDS library
409 - \p nMaxThreadCount - max count of threads using HP GC in your application. Default is 100.
410 - \p nMaxRetiredPtrCount - capacity of array of retired pointers for each thread. Must be greater than
411 <tt> nHazardPtrCount * nMaxThreadCount </tt>
412 Default is <tt>2 * nHazardPtrCount * nMaxThreadCount</tt>
414 static CDS_EXPORT_API void construct(
415 size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread
416 size_t nMaxThreadCount = 0, ///< Max count of simultaneous working thread in your application
417 size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread
418 scan_type nScanType = inplace ///< Scan type (see \ref scan_type enum)
421 // for back-compatibility
422 static void Construct(
423 size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread
424 size_t nMaxThreadCount = 0, ///< Max count of simultaneous working thread in your application
425 size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread
426 scan_type nScanType = inplace ///< Scan type (see \ref scan_type enum)
429 construct( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
432 /// Destroys global instance of \ref smr
434 The parameter \p bDetachAll should be used carefully: if its value is \p true,
435 then the object destroyed automatically detaches all attached threads. This feature
436 can be useful when you have no control over the thread termination, for example,
437 when \p libcds is injected into existing external thread.
439 static CDS_EXPORT_API void destruct(
440 bool bDetachAll = false ///< Detach all threads
443 // for back-compatibility
444 static void Destruct(
445 bool bDetachAll = false ///< Detach all threads
448 destruct( bDetachAll );
451 /// Checks if global SMR object is constructed and may be used
452 static bool isUsed() CDS_NOEXCEPT
454 return instance_ != nullptr;
457 /// Set memory management functions
459 @note This function may be called <b>BEFORE</b> creating an instance
460 of Hazard Pointer SMR
462 SMR object allocates some memory for thread-specific data and for
464 By default, a standard \p new and \p delete operators are used for this.
466 static CDS_EXPORT_API void set_memory_allocator(
467 void* ( *alloc_func )( size_t size ),
468 void (*free_func )( void * p )
471 /// Returns max Hazard Pointer count per thread
472 size_t get_hazard_ptr_count() const CDS_NOEXCEPT
474 return hazard_ptr_count_;
477 /// Returns max thread count
478 size_t get_max_thread_count() const CDS_NOEXCEPT
480 return max_thread_count_;
483 /// Returns max size of retired objects array
484 size_t get_max_retired_ptr_count() const CDS_NOEXCEPT
486 return max_retired_ptr_count_;
489 /// Get current scan strategy
490 scan_type get_scan_type() const
495 /// Checks that required hazard pointer count \p nRequiredCount is less than or equal to the max hazard pointer count
497 If <tt> nRequiredCount > get_hazard_ptr_count()</tt> then the exception \p not_enought_hazard_ptr is thrown
499 static void check_hazard_ptr_count( size_t nRequiredCount )
501 if ( instance().get_hazard_ptr_count() < nRequiredCount ) {
502 # ifdef CDS_DISABLE_SMR_EXCEPTION
503 assert( false ); // not enough hazard ptr
505 CDS_THROW_EXCEPTION( not_enought_hazard_ptr() );
510 /// Returns thread-local data for the current thread
511 static CDS_EXPORT_API thread_data* tls();
513 static CDS_EXPORT_API void attach_thread();
514 static CDS_EXPORT_API void detach_thread();
516 /// Get internal statistics
517 void statistics( stat& st );
519 public: // for internal use only
520 /// The main garbage collecting function
522 This function is called internally when upper bound of thread's list of reclaimed pointers
525 There are the following scan algorithms:
526 - \ref hzp_gc_classic_scan "classic_scan" allocates memory for internal use
527 - \ref hzp_gc_inplace_scan "inplace_scan" does not allocate any memory
529 Use \p set_scan_type() member function to setup appropriate scan algorithm.
531 void scan( thread_data* pRec )
533 ( this->*scan_func_ )( pRec );
536 /// Helper scan routine
538 The function guarantees that every node that is eligible for reuse is eventually freed, barring
539 thread failures. To do so, after executing \p scan(), a thread executes a \p %help_scan(),
540 where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers
541 to thread's list of reclaimed pointers.
543 The function is called internally by \p scan().
545 CDS_EXPORT_API void help_scan( thread_data* pThis );
549 size_t nHazardPtrCount, ///< Hazard pointer count per thread
550 size_t nMaxThreadCount, ///< Max count of simultaneous working thread in your application
551 size_t nMaxRetiredPtrCount, ///< Capacity of the array of retired objects for the thread
552 scan_type nScanType ///< Scan type (see \ref scan_type enum)
555 CDS_EXPORT_API ~smr();
557 CDS_EXPORT_API void detach_all_thread();
559 /// Classic scan algorithm
560 /** @anchor hzp_gc_classic_scan
561 Classical scan algorithm as described in Michael's paper.
563 A scan includes four stages. The first stage involves scanning the array HP for non-null values.
564 Whenever a non-null value is encountered, it is inserted in a local list of currently protected pointer.
565 Only stage 1 accesses shared variables. The following stages operate only on private variables.
567 The second stage of a scan involves sorting local list of protected pointers to allow
568 binary search in the third stage.
570 The third stage of a scan involves checking each reclaimed node
571 against the pointers in local list of protected pointers. If the binary search yields
572 no match, the node is freed. Otherwise, it cannot be deleted now and must be kept in thread's list
573 of reclaimed pointers.
575 The fourth stage prepares new thread's private list of reclaimed pointers
576 that could not be freed during the current scan, where they remain until the next scan.
578 This algorithm allocates memory for internal HP array.
580 This function is called internally by ThreadGC object when upper bound of thread's list of reclaimed pointers
583 CDS_EXPORT_API void classic_scan( thread_data* pRec );
585 /// In-place scan algorithm
586 /** @anchor hzp_gc_inplace_scan
587 Unlike the \p classic_scan() algorithm, \p %inplace_scan() does not allocate any memory.
588 All operations are performed in-place.
590 CDS_EXPORT_API void inplace_scan( thread_data* pRec );
593 CDS_EXPORT_API thread_record* create_thread_data();
594 static CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );
596 /// Allocates Hazard Pointer SMR thread private data
597 CDS_EXPORT_API thread_record* alloc_thread_data();
599 /// Free HP SMR thread-private data
600 CDS_EXPORT_API void free_thread_data( thread_record* pRec );
603 static CDS_EXPORT_API smr* instance_;
605 atomics::atomic< thread_record*> thread_list_; ///< Head of thread list
607 size_t const hazard_ptr_count_; ///< max count of thread's hazard pointer
608 size_t const max_thread_count_; ///< max count of thread
609 size_t const max_retired_ptr_count_; ///< max count of retired ptr per thread
610 scan_type const scan_type_; ///< scan type (see \ref scan_type enum)
611 void ( smr::*scan_func_ )( thread_data* pRec );
616 // for backward compatibility
617 typedef smr GarbageCollector;
622 /// Hazard Pointer SMR (Safe Memory Reclamation)
623 /** @ingroup cds_garbage_collector
625 Implementation of classic Hazard Pointer SMR
628 - [2002] Maged M.Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
629 - [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
630 - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"
632 Hazard Pointer SMR is a singleton. The main user-level part of Hazard Pointer schema is
633 \p %cds::gc::HP class and its nested classes. Before use any HP-related class you must initialize \p %HP
634 by constructing \p %cds::gc::HP object in the beginning of your \p main().
635 See \ref cds_how_to_use "How to use" section for details how to apply SMR schema.
640 /// Native guarded pointer type
641 typedef hp::hazard_ptr guarded_pointer;
644 template <typename T> using atomic_ref = atomics::atomic<T *>;
646 /// Atomic marked pointer
647 template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
650 template <typename T> using atomic_type = atomics::atomic<T>;
652 /// Exception "Not enough Hazard Pointer"
653 typedef hp::not_enought_hazard_ptr not_enought_hazard_ptr_exception;
655 /// Internal statistics
656 typedef hp::stat stat;
658 /// Hazard Pointer guard
660 A guard is a hazard pointer.
661 Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer.
663 \p %Guard object is movable but not copyable.
665 The guard object can be in two states:
666 - unlinked - the guard is not linked with any internal hazard pointer.
667 In this state no operation except \p link() and move assignment is supported.
668 - linked (default) - the guard allocates an internal hazard pointer and completely operable.
670 Due to performance reason the implementation does not check state of the guard in runtime.
672 @warning Move assignment transfers the guard in unlinked state, use with care.
677 /// Default ctor allocates a guard (hazard pointer) from thread-private storage
679 @warning Can throw \p not_enought_hazard_ptr_exception if internal hazard pointer objects are exhausted.
682 : guard_( hp::smr::tls()->hazards_.alloc() )
685 /// Initializes an unlinked guard i.e. the guard contains no hazard pointer. Used for move semantics support
686 explicit Guard( std::nullptr_t ) CDS_NOEXCEPT
690 /// Move ctor - \p src guard becomes unlinked (transfer internal guard ownership)
691 Guard( Guard&& src ) CDS_NOEXCEPT
692 : guard_( src.guard_ )
694 src.guard_ = nullptr;
697 /// Move assignment: the internal guards are swapped between \p src and \p this
699 @warning \p src will become in unlinked state if \p this was unlinked on entry.
701 Guard& operator=( Guard&& src ) CDS_NOEXCEPT
703 std::swap( guard_, src.guard_ );
707 /// Copy ctor is prohibited - the guard is not copyable
708 Guard( Guard const& ) = delete;
710 /// Copy assignment is prohibited
711 Guard& operator=( Guard const& ) = delete;
713 /// Frees the internal hazard pointer if the guard is in linked state
719 /// Checks if the guard object linked with any internal hazard pointer
720 bool is_linked() const
722 return guard_ != nullptr;
725 /// Links the guard with internal hazard pointer if the guard is in unlinked state
727 @warning Can throw \p not_enought_hazard_ptr_exception if internal hazard pointer array is exhausted.
732 guard_ = hp::smr::tls()->hazards_.alloc();
735 /// Unlinks the guard from internal hazard pointer; the guard becomes in unlinked state
739 hp::smr::tls()->hazards_.free( guard_ );
744 /// Protects a pointer of type \p atomic<T*>
746 Return the value of \p toGuard
748 The function tries to load \p toGuard and to store it
749 to the HP slot repeatedly until the guard's value equals \p toGuard
751 @warning The guard object should be in linked state, otherwise the result is undefined
753 template <typename T>
754 T protect( atomics::atomic<T> const& toGuard )
756 assert( guard_ != nullptr );
758 T pCur = toGuard.load(atomics::memory_order_acquire);
761 pRet = assign( pCur );
762 pCur = toGuard.load(atomics::memory_order_acquire);
763 } while ( pRet != pCur );
767 /// Protects a converted pointer of type \p atomic<T*>
769 Return the value of \p toGuard
771 The function tries to load \p toGuard and to store result of \p f functor
772 to the HP slot repeatedly until the guard's value equals \p toGuard.
774 The function is useful for intrusive containers when \p toGuard is a node pointer
775 that should be converted to a pointer to the value before protecting.
776 The parameter \p f of type Func is a functor that makes this conversion:
779 value_type * operator()( T * p );
782 Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
784 @warning The guard object should be in linked state, otherwise the result is undefined
786 template <typename T, class Func>
787 T protect( atomics::atomic<T> const& toGuard, Func f )
789 assert( guard_ != nullptr );
791 T pCur = toGuard.load(atomics::memory_order_acquire);
796 pCur = toGuard.load(atomics::memory_order_acquire);
797 } while ( pRet != pCur );
801 /// Store \p p to the guard
803 The function equals to a simple assignment the value \p p to guard, no loop is performed.
804 Can be used for a pointer that cannot be changed concurrently or if the pointer is already
805 guarded by another guard.
807 @warning The guard object should be in linked state, otherwise the result is undefined
809 template <typename T>
812 assert( guard_ != nullptr );
815 hp::smr::tls()->sync();
820 std::nullptr_t assign( std::nullptr_t )
822 assert( guard_ != nullptr );
829 /// Copy a value guarded from \p src guard to \p this guard (valid only in linked state)
830 void copy( Guard const& src )
832 assign( src.get_native());
835 /// Store marked pointer \p p to the guard
837 The function equals to a simple assignment of <tt>p.ptr()</tt>, no loop is performed.
838 Can be used for a marked pointer that cannot be changed concurrently or if the marked pointer
839 is already guarded by another guard.
841 @warning The guard object should be in linked state, otherwise the result is undefined
843 template <typename T, int BITMASK>
844 T * assign( cds::details::marked_ptr<T, BITMASK> p )
846 return assign( p.ptr());
849 /// Clear value of the guard (valid only in linked state)
855 /// Get the value currently protected (valid only in linked state)
856 template <typename T>
859 assert( guard_ != nullptr );
860 return guard_->get_as<T>();
863 /// Get native hazard pointer stored (valid only in linked state)
864 guarded_pointer get_native() const
866 assert( guard_ != nullptr );
867 return guard_->get();
873 hp::guard* g = guard_;
878 hp::guard*& guard_ref()
890 /// Array of Hazard Pointer guards
892 The class is intended for allocating an array of hazard pointer guards.
893 Template parameter \p Count defines the size of the array.
895 template <size_t Count>
899 /// Rebind array for other size \p Count2
900 template <size_t Count2>
902 typedef GuardArray<Count2> other; ///< rebinding result
906 static CDS_CONSTEXPR const size_t c_nCapacity = Count;
909 /// Default ctor allocates \p Count hazard pointers
912 hp::smr::tls()->hazards_.alloc( guards_ );
915 /// Move ctor is prohibited
916 GuardArray( GuardArray&& ) = delete;
918 /// Move assignment is prohibited
919 GuardArray& operator=( GuardArray&& ) = delete;
921 /// Copy ctor is prohibited
922 GuardArray( GuardArray const& ) = delete;
924 /// Copy assignment is prohibited
925 GuardArray& operator=( GuardArray const& ) = delete;
927 /// Frees allocated hazard pointers
930 hp::smr::tls()->hazards_.free( guards_ );
933 /// Protects a pointer of type \p atomic<T*>
935 Return the value of \p toGuard
937 The function tries to load \p toGuard and to store it
938 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
940 template <typename T>
941 T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
943 assert( nIndex < capacity());
947 pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire));
948 } while ( pRet != toGuard.load(atomics::memory_order_acquire));
953 /// Protects a pointer of type \p atomic<T*>
955 Return the value of \p toGuard
957 The function tries to load \p toGuard and to store it
958 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
960 The function is useful for intrusive containers when \p toGuard is a node pointer
961 that should be converted to a pointer to the value type before guarding.
962 The parameter \p f of type Func is a functor that makes this conversion:
965 value_type * operator()( T * p );
968 Really, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
970 template <typename T, class Func>
971 T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
973 assert( nIndex < capacity());
977 assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire)));
978 } while ( pRet != toGuard.load(atomics::memory_order_acquire));
983 /// Store \p to the slot \p nIndex
985 The function equals to a simple assignment, no loop is performed.
987 template <typename T>
988 T * assign( size_t nIndex, T * p )
990 assert( nIndex < capacity() );
992 guards_.set( nIndex, p );
993 hp::smr::tls()->sync();
997 /// Store marked pointer \p p to the guard
999 The function equals to a simple assignment of <tt>p.ptr()</tt>, no loop is performed.
1000 Can be used for a marked pointer that cannot be changed concurrently.
1002 template <typename T, int BITMASK>
1003 T * assign( size_t nIndex, cds::details::marked_ptr<T, BITMASK> p )
1005 return assign( nIndex, p.ptr());
1008 /// Copy guarded value from \p src guard to slot at index \p nIndex
1009 void copy( size_t nIndex, Guard const& src )
1011 assign( nIndex, src.get_native());
1014 /// Copy guarded value from slot \p nSrcIndex to the slot \p nDestIndex
1015 void copy( size_t nDestIndex, size_t nSrcIndex )
1017 assign( nDestIndex, get_native( nSrcIndex ));
1020 /// Clear value of the slot \p nIndex
1021 void clear( size_t nIndex )
1023 guards_.clear( nIndex );
1026 /// Get current value of slot \p nIndex
1027 template <typename T>
1028 T * get( size_t nIndex ) const
1030 assert( nIndex < capacity() );
1031 return guards_[nIndex]->template get_as<T>();
1034 /// Get native hazard pointer stored
1035 guarded_pointer get_native( size_t nIndex ) const
1037 assert( nIndex < capacity());
1038 return guards_[nIndex]->get();
1042 hp::guard* release( size_t nIndex ) CDS_NOEXCEPT
1044 return guards_.release( nIndex );
1048 /// Capacity of the guard array
1049 static CDS_CONSTEXPR size_t capacity()
1056 hp::guard_array<c_nCapacity> guards_;
1062 A guarded pointer is a pair of a pointer and GC's guard.
1063 Usually, it is used for returning a pointer to an element of a lock-free container.
1064 The guard prevents the pointer to be early disposed (freed) by SMR.
1065 After destructing \p %guarded_ptr object the pointer can be disposed (freed) automatically at any time.
1068 - \p GuardedType - a type which the guard stores
1069 - \p ValueType - a value type
1070 - \p Cast - a functor for converting <tt>GuardedType*</tt> to <tt>ValueType*</tt>. Default is \p void (no casting).
1072 For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed.
1073 In such case the \p %guarded_ptr is:
1075 typedef cds::gc::HP::guarded_ptr< foo > intrusive_guarded_ptr;
1078 For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed.
1086 struct value_accessor {
1087 std::string* operator()( foo* pFoo ) const
1089 return &(pFoo->value);
1094 typedef cds::gc::HP::guarded_ptr< Foo, std::string, value_accessor > nonintrusive_guarded_ptr;
1097 You don't need use this class directly.
1098 All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with appropriate casting functor.
1100 template <typename GuardedType, typename ValueType=GuardedType, typename Cast=void >
1104 struct trivial_cast {
1105 ValueType * operator()( GuardedType * p ) const
1111 template <typename GT, typename VT, typename C> friend class guarded_ptr;
1115 typedef GuardedType guarded_type; ///< Guarded type
1116 typedef ValueType value_type; ///< Value type
1118 /// Functor for casting \p guarded_type to \p value_type
1119 typedef typename std::conditional< std::is_same<Cast, void>::value, trivial_cast, Cast >::type value_cast;
1122 /// Creates empty guarded pointer
1123 guarded_ptr() CDS_NOEXCEPT
1128 explicit guarded_ptr( hp::guard* g ) CDS_NOEXCEPT
1132 /// Initializes guarded pointer with \p p
1133 explicit guarded_ptr( guarded_type* p ) CDS_NOEXCEPT
1138 explicit guarded_ptr( std::nullptr_t ) CDS_NOEXCEPT
1144 guarded_ptr( guarded_ptr&& gp ) CDS_NOEXCEPT
1145 : guard_( gp.guard_ )
1147 gp.guard_ = nullptr;
1151 template <typename GT, typename VT, typename C>
1152 guarded_ptr( guarded_ptr<GT, VT, C>&& gp ) CDS_NOEXCEPT
1153 : guard_( gp.guard_ )
1155 gp.guard_ = nullptr;
1158 /// Ctor from \p Guard
1159 explicit guarded_ptr( Guard&& g ) CDS_NOEXCEPT
1160 : guard_( g.release())
1163 /// The guarded pointer is not copy-constructible
1164 guarded_ptr( guarded_ptr const& gp ) = delete;
1166 /// Clears the guarded pointer
1168 \ref release() is called if guarded pointer is not \ref empty()
1170 ~guarded_ptr() CDS_NOEXCEPT
1175 /// Move-assignment operator
1176 guarded_ptr& operator=( guarded_ptr&& gp ) CDS_NOEXCEPT
1178 std::swap( guard_, gp.guard_ );
1182 /// Move-assignment from \p Guard
1183 guarded_ptr& operator=( Guard&& g ) CDS_NOEXCEPT
1185 std::swap( guard_, g.guard_ref());
1189 /// The guarded pointer is not copy-assignable
1190 guarded_ptr& operator=(guarded_ptr const& gp) = delete;
1192 /// Returns a pointer to guarded value
1193 value_type * operator ->() const CDS_NOEXCEPT
1196 return value_cast()( guard_->get_as<guarded_type>());
1199 /// Returns a reference to guarded value
1200 value_type& operator *() CDS_NOEXCEPT
1203 return *value_cast()( guard_->get_as<guarded_type>());
1206 /// Returns const reference to guarded value
1207 value_type const& operator *() const CDS_NOEXCEPT
1210 return *value_cast()( guard_->get_as<guarded_type>());
1213 /// Checks if the guarded pointer is \p nullptr
1214 bool empty() const CDS_NOEXCEPT
1216 return !guard_ || guard_->get( atomics::memory_order_relaxed ) == nullptr;
1219 /// \p bool operator returns <tt>!empty()</tt>
1220 explicit operator bool() const CDS_NOEXCEPT
1225 /// Clears guarded pointer
1227 If the guarded pointer has been released, the pointer can be disposed (freed) at any time.
1228 Dereferencing the guarded pointer after \p release() is dangerous.
1230 void release() CDS_NOEXCEPT
1236 // For internal use only!!!
1237 void reset(guarded_type * p) CDS_NOEXCEPT
1250 guard_ = hp::smr::tls()->hazards_.alloc();
1256 hp::smr::tls()->hazards_.free( guard_ );
1270 enum class scan_type {
1271 classic = hp::classic, ///< classic scan as described in Michael's papers
1272 inplace = hp::inplace ///< inplace scan without allocation
1275 /// Initializes %HP singleton
1277 The constructor initializes Hazard Pointer SMR singleton with passed parameters.
1278 If the instance does not yet exist then the function creates the instance.
1279 Otherwise it does nothing.
1281 The Michael's %HP reclamation schema depends on three parameters:
1282 - \p nHazardPtrCount - hazard pointer count per thread. Usually it is a small number (up to 10) depending on
1283 the data structure algorithms. If \p nHazardPtrCount = 0, the default value 8 is used
1284 - \p nMaxThreadCount - max count of threads using Hazard Pointer GC in your application. Default is 100.
1285 - \p nMaxRetiredPtrCount - capacity of array of retired pointers for each thread. Must be greater than
1286 <tt> nHazardPtrCount * nMaxThreadCount </tt>. Default is <tt>2 * nHazardPtrCount * nMaxThreadCount </tt>.
1289 size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread
1290 size_t nMaxThreadCount = 0, ///< Max count of simultaneous working thread in your application
1291 size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread
1292 scan_type nScanType = scan_type::inplace ///< Scan type (see \p scan_type enum)
1298 nMaxRetiredPtrCount,
1299 static_cast<hp::scan_type>(nScanType)
1303 /// Terminates GC singleton
1305 The destructor destroys %HP global object. After calling of this function you may \b NOT
1306 use CDS data structures based on \p %cds::gc::HP.
1307 Usually, %HP object is destroyed at the end of your \p main().
1311 hp::smr::destruct( true );
1314 /// Checks that required hazard pointer count \p nCountNeeded is less or equal then max hazard pointer count
1316 If <tt> nRequiredCount > get_hazard_ptr_count()</tt> then the exception \p not_enought_hazard_ptr is thrown
1318 static void check_available_guards( size_t nCountNeeded )
1320 hp::smr::check_hazard_ptr_count( nCountNeeded );
1323 /// Set memory management functions
1325 @note This function may be called <b>BEFORE</b> creating an instance
1326 of Hazard Pointer SMR
1328 SMR object allocates some memory for thread-specific data and for
1329 creating SMR object.
1330 By default, a standard \p new and \p delete operators are used for this.
1332 static void set_memory_allocator(
1333 void* ( *alloc_func )( size_t size ), ///< \p malloc() function
1334 void( *free_func )( void * p ) ///< \p free() function
1337 hp::smr::set_memory_allocator( alloc_func, free_func );
1340 /// Returns max Hazard Pointer count
1341 static size_t max_hazard_count()
1343 return hp::smr::instance().get_hazard_ptr_count();
1346 /// Returns max count of thread
1347 static size_t max_thread_count()
1349 return hp::smr::instance().get_max_thread_count();
1352 /// Returns capacity of retired pointer array
1353 static size_t retired_array_capacity()
1355 return hp::smr::instance().get_max_retired_ptr_count();
1358 /// Retire pointer \p p with function \p func
1360 The function places pointer \p p to array of pointers ready for removing.
1361 (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
1362 \p func is a disposer: when \p p can be safely removed, \p func is called.
1364 template <typename T>
1365 static void retire( T * p, void( *func )( T * ))
1367 hp::thread_data* rec = hp::smr::tls();
1368 if ( !rec->retired_.push( hp::retired_ptr( p, func )))
1369 hp::smr::instance().scan( rec );
1372 /// Retire pointer \p p with functor of type \p Disposer
1374 The function places pointer \p p to array of pointers ready for removing.
1375 (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
1377 Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is:
1379 template <typename T>
1381 void operator()( T * p ) ; // disposing operator
1384 Since the functor call can happen at any time after \p retire() call, additional restrictions are imposed to \p Disposer type:
1385 - it should be stateless functor
1386 - it should be default-constructible
1387 - the result of functor call with argument \p p should not depend on where the functor will be called.
1390 Operator \p delete functor:
1392 template <typename T>
1394 void operator ()( T * p ) {
1399 // How to call HP::retire method
1402 // ... use p in lock-free manner
1404 cds::gc::HP::retire<disposer>( p ) ; // place p to retired pointer array of HP GC
1407 Functor based on \p std::allocator :
1409 template <typename Alloc = std::allocator<int> >
1411 template <typename T>
1412 void operator()( T * p ) {
1413 typedef typename Alloc::templare rebind<T>::other alloc_t;
1416 a.deallocate( p, 1 );
1421 template <class Disposer, typename T>
1422 static void retire( T * p )
1424 if ( !hp::smr::tls()->retired_.push( hp::retired_ptr( p, cds::details::static_functor<Disposer, T>::call )))
1428 /// Get current scan strategy
1429 static scan_type getScanType()
1431 return static_cast<scan_type>( hp::smr::instance().get_scan_type());
1434 /// Checks if Hazard Pointer GC is constructed and may be used
1435 static bool isUsed()
1437 return hp::smr::isUsed();
1440 /// Forces SMR call for current thread
1442 Usually, this function should not be called directly.
1446 hp::smr::instance().scan( hp::smr::tls());
1449 /// Synonym for \p scan()
1450 static void force_dispose()
1455 /// Returns internal statistics
1457 The function clears \p st before gathering statistics.
1459 @note Internal statistics is available only if you compile
1460 \p libcds and your program with \p -DCDS_ENABLE_HPSTAT key.
1462 static void statistics( stat& st )
1464 hp::smr::instance().statistics( st );
1467 /// Returns post-mortem statistics
1469 Post-mortem statistics is gathered in the \p %HP object destructor
1470 and can be accessible after destructing the global \p %HP object.
1472 @note Internal statistics is available only if you compile
1473 \p libcds and your program with \p -DCDS_ENABLE_HPSTAT key.
1481 // Initialize HP SMR
1484 // deal with HP-based data structured
1488 // HP object destroyed
1489 // Get total post-mortem statistics
1490 cds::gc::HP::stat const& st = cds::gc::HP::postmortem_statistics();
1492 printf( "HP statistics:\n"
1493 "\tthread count = %llu\n"
1494 "\tguard allocated = %llu\n"
1495 "\tguard freed = %llu\n"
1496 "\tretired data count = %llu\n"
1497 "\tfree data count = %llu\n"
1498 "\tscan() call count = %llu\n"
1499 "\thelp_scan() call count = %llu\n",
1500 st.thread_rec_count,
1501 st.guard_allocated, st.guard_freed,
1502 st.retired_count, st.free_count,
1503 st.scan_count, st.help_scan_count
1510 static stat const& postmortem_statistics();
1513 }} // namespace cds::gc
1515 #endif // #ifndef CDSLIB_GC_HP_SMR_H