2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_GC_DHP_SMR_H
32 #define CDSLIB_GC_DHP_SMR_H
35 #include <cds/gc/details/hp_common.h>
36 #include <cds/details/lib.h>
37 #include <cds/threading/model.h>
38 #include <cds/intrusive/free_list_selector.h>
39 #include <cds/details/throw_exception.h>
40 #include <cds/details/static_functor.h>
41 #include <cds/details/marked_ptr.h>
42 #include <cds/user_setup/cache_line.h>
44 namespace cds { namespace gc {
46 /// Dynamic (adaptive) Hazard Pointer implementation details
48 using namespace cds::gc::hp::common;
50 /// Exception "Dynamic Hazard Pointer SMR is not initialized"
// Thrown by smr::instance() (unless CDS_DISABLE_SMR_EXCEPTION is defined)
// when DHP operations are used before smr::construct() created the singleton.
51 class not_initialized: public std::runtime_error
// Delegates a fixed message to the std::runtime_error base.
56 : std::runtime_error( "Global DHP SMR object is not initialized" )
// Extension block of hazard-pointer guards. Blocks are linked into a
// per-thread list and recycled through the global free list
// (hence the FreeListImpl::node base).
62 struct guard_block: public cds::intrusive::FreeListImpl::node
64 atomics::atomic<guard_block*> next_block_; // next block in the thread list
// ctor: a freshly allocated block is detached from any thread list
67 : next_block_( nullptr )
// first(): the guard array is laid out immediately after the block header
72 return reinterpret_cast<guard*>( this + 1 );
78 /// \p guard_block allocator (global object)
// Process-wide singleton (see instance()) that hands out guard_block's
// backed by a lock-free free list; freed blocks are recycled, not deleted.
83 static hp_allocator& instance();
// alloc() is exported (out-of-line); may allocate a new block when the
// free list is empty.
85 CDS_EXPORT_API guard_block* alloc();
// free(): returns the block to the shared free list for reuse
86 void free( guard_block* block )
88 free_list_.put( block );
93 #ifdef CDS_ENABLE_HPSTAT
97 CDS_EXPORT_API ~hp_allocator();
100 cds::intrusive::FreeListImpl free_list_; ///< list of free \p guard_block
101 #ifdef CDS_ENABLE_HPSTAT
103 atomics::atomic<size_t> block_allocated_; ///< count of allocated blocks
109 /// Per-thread hazard pointer storage
// Manages an initial fixed guard array plus extension guard_block's obtained
// from the global hp_allocator. Not thread-safe: owned by exactly one thread.
110 class thread_hp_storage
// ctor: adopts a pre-allocated guard array `arr` of `nSize` slots.
114 thread_hp_storage( guard* arr, size_t nSize ) CDS_NOEXCEPT
117 , initial_capacity_( nSize )
118 # ifdef CDS_ENABLE_HPSTAT
119 , alloc_guard_count_( 0 )
120 , free_guard_count_( 0 )
121 , extend_call_count_( 0 )
// placement-new the guard objects into the caller-provided storage
125 new( arr ) guard[nSize];
126 extended_list_.store( nullptr, atomics::memory_order_release );
129 thread_hp_storage() = delete;
130 thread_hp_storage( thread_hp_storage const& ) = delete;
131 thread_hp_storage( thread_hp_storage&& ) = delete;
// alloc(): pop one guard from the thread-local free list,
// extending the storage first when the list is exhausted.
140 if ( cds_unlikely( free_head_ == nullptr )) {
142 assert( free_head_ != nullptr );
145 guard* g = free_head_;
146 free_head_ = g->next_;
147 CDS_HPSTAT( ++alloc_guard_count_ );
// free(): push the guard back onto the thread-local free list
151 void free( guard* g ) CDS_NOEXCEPT
155 g->next_ = free_head_;
157 CDS_HPSTAT( ++free_guard_count_ );
// Bulk-allocate Capacity guards into a guard_array.
161 template< size_t Capacity>
162 size_t alloc( guard_array<Capacity>& arr )
164 for ( size_t i = 0; i < Capacity; ++i ) {
165 if ( cds_unlikely( free_head_ == nullptr ))
167 arr.reset( i, free_head_ );
168 free_head_ = free_head_->next_;
170 CDS_HPSTAT( alloc_guard_count_ += Capacity );
// Bulk-free: return every guard of the array to the free list.
174 template <size_t Capacity>
175 void free( guard_array<Capacity>& arr ) CDS_NOEXCEPT
177 guard* gList = free_head_;
178 for ( size_t i = 0; i < Capacity; ++i ) {
184 CDS_HPSTAT( ++free_guard_count_ );
// clear the initial guard array ...
193 for ( guard* cur = array_, *last = array_ + initial_capacity_; cur < last; ++cur )
196 // free all extended blocks
197 hp_allocator& a = hp_allocator::instance();
198 for ( guard_block* p = extended_list_.load( atomics::memory_order_relaxed ); p; ) {
199 guard_block* next = p->next_block_.load( atomics::memory_order_relaxed );
204 extended_list_.store( nullptr, atomics::memory_order_release );
209 assert( extended_list_.load(atomics::memory_order_relaxed) == nullptr );
// rebuild the intrusive free list over the initial array
212 for ( guard* pEnd = p + initial_capacity_ - 1; p != pEnd; ++p )
// extend(): grab a new guard_block from the global allocator and link it
// at the head of this thread's extended-block list; its guards become
// the new free list.
221 assert( free_head_ == nullptr );
223 guard_block* block = hp_allocator::instance().alloc();
224 block->next_block_.store( extended_list_.load( atomics::memory_order_relaxed ), atomics::memory_order_release );
225 extended_list_.store( block, atomics::memory_order_release );
226 free_head_ = block->first();
227 CDS_HPSTAT( ++extend_call_count_ );
231 guard* free_head_; ///< Head of free guard list
232 atomics::atomic<guard_block*> extended_list_; ///< Head of extended guard blocks allocated for the thread
233 guard* const array_; ///< initial HP array
234 size_t const initial_capacity_; ///< Capacity of \p array_
235 # ifdef CDS_ENABLE_HPSTAT
237 size_t alloc_guard_count_;
238 size_t free_guard_count_;
239 size_t extend_call_count_;
// Fixed-capacity block of retired pointers; blocks form a thread-private
// singly-linked list and are recycled via the global free list.
245 struct retired_block: public cds::intrusive::FreeListImpl::node
247 retired_block* next_; ///< Next block in thread-private retired array
249 static size_t const c_capacity = 256;
// first(): the retired_ptr array is laid out immediately after the header
255 retired_ptr* first() const
257 return reinterpret_cast<retired_ptr*>( const_cast<retired_block*>( this ) + 1 );
// last(): one-past-the-end of the block's retired_ptr array
260 retired_ptr* last() const
262 return first() + c_capacity;
// Global allocator of retired_block's, mirror of hp_allocator:
// singleton backed by a lock-free free list.
268 class retired_allocator
272 static retired_allocator& instance();
274 CDS_EXPORT_API retired_block* alloc();
// free(): detach the block and return it to the shared free list
275 void free( retired_block* block )
277 block->next_ = nullptr;
278 free_list_.put( block );
283 #ifdef CDS_ENABLE_HPSTAT
284 : block_allocated_(0)
287 CDS_EXPORT_API ~retired_allocator();
290 cds::intrusive::FreeListImpl free_list_; ///< list of free \p guard_block
291 #ifdef CDS_ENABLE_HPSTAT
293 atomics::atomic<size_t> block_allocated_; ///< Count of allocated blocks
299 /// Per-thread retired array
// Thread-private list of retired_block's with a cursor (current_block_,
// current_cell_) marking the next free cell. Not thread-safe.
304 retired_array() CDS_NOEXCEPT
305 : current_block_( nullptr )
306 , current_cell_( nullptr )
307 , list_head_( nullptr )
308 , list_tail_( nullptr )
310 # ifdef CDS_ENABLE_HPSTAT
311 , retire_call_count_( 0 )
312 , extend_call_count_( 0 )
316 retired_array( retired_array const& ) = delete;
317 retired_array( retired_array&& ) = delete;
// push(): store retired pointer p at the cursor; returns false when the
// array is full and smr::scan() must run (see comment at 344 below).
325 bool push( retired_ptr const& p ) CDS_NOEXCEPT
327 assert( current_block_ != nullptr );
328 assert( current_block_->first() <= current_cell_ );
329 assert( current_cell_ < current_block_->last());
330 //assert( &p != current_cell_ );
333 CDS_HPSTAT( ++retire_call_count_ );
// advance cursor; on block overflow move to the next linked block
335 if ( ++current_cell_ == current_block_->last()) {
336 // goto next block if exists
337 if ( current_block_->next_ ) {
338 current_block_ = current_block_->next_;
339 current_cell_ = current_block_->first();
344 // smr::scan() extend retired_array if needed
// repush(): re-insert a still-guarded pointer during scan; the stat
// decrement cancels push()'s increment so repushes are not counted.
351 bool repush( retired_ptr* p ) CDS_NOEXCEPT
353 bool ret = push( *p );
354 CDS_HPSTAT( --retire_call_count_ );
359 private: // called by smr
// lazy init: allocate the first block on first use
362 if ( list_head_ == nullptr ) {
363 retired_block* block = retired_allocator::instance().alloc();
364 assert( block->next_ == nullptr );
369 current_cell_ = block->first();
// teardown: return every block of the list to the global allocator
377 retired_allocator& alloc = retired_allocator::instance();
378 for ( retired_block* p = list_head_; p; ) {
379 retired_block* next = p->next_;
386 list_tail_ = nullptr;
387 current_cell_ = nullptr;
// extend(): append a fresh block at the tail and move the cursor there;
// only legal when the cursor sits exactly at the end of the tail block.
394 assert( list_head_ != nullptr );
395 assert( current_block_ == list_tail_ );
396 assert( current_cell_ == current_block_->last());
398 retired_block* block = retired_allocator::instance().alloc();
399 assert( block->next_ == nullptr );
401 current_block_ = list_tail_ = list_tail_->next_ = block;
402 current_cell_ = block->first();
404 CDS_HPSTAT( ++extend_call_count_ );
// empty(): true when never initialized or cursor is at the very start
409 return current_block_ == nullptr
410 || ( current_block_ == list_head_ && current_cell_ == current_block_->first());
414 retired_block* current_block_;
415 retired_ptr* current_cell_; // in current_block_
417 retired_block* list_head_;
418 retired_block* list_tail_;
420 # ifdef CDS_ENABLE_HPSTAT
422 size_t retire_call_count_;
423 size_t extend_call_count_;
428 /// Internal statistics
// Snapshot of DHP counters; meaningful only when libcds and the program
// are built with -DCDS_ENABLE_HPSTAT (see DHP::statistics()).
430 size_t guard_allocated; ///< Count of allocated HP guards
431 size_t guard_freed; ///< Count of freed HP guards
432 size_t retired_count; ///< Count of retired pointers
433 size_t free_count; ///< Count of free pointers
434 size_t scan_count; ///< Count of \p scan() call
435 size_t help_scan_count; ///< Count of \p help_scan() call
437 size_t thread_rec_count; ///< Count of thread records
439 size_t hp_block_count; ///< Count of extended HP blocks allocated
440 size_t retired_block_count; ///< Count of retired blocks allocated
441 size_t hp_extend_count; ///< Count of hp array \p extend() call
442 size_t retired_extend_count; ///< Count of retired array \p extend() call
450 /// Clears all counters
// chained zero-assignment over all counters
461 retired_block_count =
463 retired_extend_count = 0;
// Per-thread DHP record: the thread's hazard pointers and retired list.
470 thread_hp_storage hazards_; ///< Hazard pointers private to the thread
471 retired_array retired_; ///< Retired data private to the thread
// sync_ is padded on both sides to keep it on its own cache line
473 char pad1_[cds::c_nCacheLineSize];
474 atomics::atomic<unsigned int> sync_; ///< dummy var to introduce synchronizes-with relationship between threads
475 char pad2_[cds::c_nCacheLineSize];
477 # ifdef CDS_ENABLE_HPSTAT
478 size_t free_call_count_;
479 size_t scan_call_count_;
480 size_t help_scan_call_count_;
483 // CppCheck warn: pad1_ and pad2_ is uninitialized in ctor
484 // cppcheck-suppress uninitMemberVar
// ctor: adopts `guard_count` guards located at `guards`
485 thread_data( guard* guards, size_t guard_count )
486 : hazards_( guards, guard_count )
488 # ifdef CDS_ENABLE_HPSTAT
489 , free_call_count_(0)
490 , scan_call_count_(0)
491 , help_scan_call_count_(0)
495 thread_data() = delete;
496 thread_data( thread_data const& ) = delete;
497 thread_data( thread_data&& ) = delete;
// sync(): acq_rel RMW on the dummy variable establishes a
// synchronizes-with edge between threads touching it
501 sync_.fetch_add( 1, atomics::memory_order_acq_rel );
507 // Dynamic (adaptive) Hazard Pointer SMR (Safe Memory Reclamation)
// Process-wide singleton owning the thread list and the global
// guard-block/retired-block allocators.
510 struct thread_record;
513 /// Returns the instance of Hazard Pointer \ref smr
514 static smr& instance()
// either assert (no-exception build) or throw not_initialized
516 # ifdef CDS_DISABLE_SMR_EXCEPTION
517 assert( instance_ != nullptr );
520 CDS_THROW_EXCEPTION( not_initialized());
525 /// Creates Dynamic Hazard Pointer SMR singleton
527 Dynamic Hazard Pointer SMR is a singleton. If DHP instance is not initialized then the function creates the instance.
528 Otherwise it does nothing.
530 The Michael's HP reclamation schema depends on three parameters:
531 - \p nHazardPtrCount - HP pointer count per thread. Usually it is small number (2-4) depending on
532 the data structure algorithms. By default, if \p nHazardPtrCount = 0,
533 the function uses maximum of HP count for CDS library
534 - \p nMaxThreadCount - max count of thread with using HP GC in your application. Default is 100.
535 - \p nMaxRetiredPtrCount - capacity of array of retired pointers for each thread. Must be greater than
536 <tt> nHazardPtrCount * nMaxThreadCount </tt>
537 Default is <tt>2 * nHazardPtrCount * nMaxThreadCount</tt>
539 static CDS_EXPORT_API void construct(
540 size_t nInitialHazardPtrCount = 16 ///< Initial number of hazard pointer per thread
543 // for back-compatibility
544 static void Construct(
545 size_t nInitialHazardPtrCount = 16 ///< Initial number of hazard pointer per thread
548 construct( nInitialHazardPtrCount );
551 /// Destroys global instance of \ref smr
553 The parameter \p bDetachAll should be used carefully: if its value is \p true,
554 then the object destroyed automatically detaches all attached threads. This feature
555 can be useful when you have no control over the thread termination, for example,
556 when \p libcds is injected into existing external thread.
558 static CDS_EXPORT_API void destruct(
559 bool bDetachAll = false ///< Detach all threads
562 // for back-compatibility
563 static void Destruct(
564 bool bDetachAll = false ///< Detach all threads
567 destruct( bDetachAll );
570 /// Checks if global SMR object is constructed and may be used
571 static bool isUsed() CDS_NOEXCEPT
573 return instance_ != nullptr;
576 /// Set memory management functions
578 @note This function may be called <b>BEFORE</b> creating an instance
579 of Dynamic Hazard Pointer SMR
581 SMR object allocates some memory for thread-specific data and for
583 By default, a standard \p new and \p delete operators are used for this.
585 static CDS_EXPORT_API void set_memory_allocator(
586 void* ( *alloc_func )( size_t size ),
587 void( *free_func )( void * p )
590 /// Returns thread-local data for the current thread
591 static CDS_EXPORT_API thread_data* tls();
593 static CDS_EXPORT_API void attach_thread();
594 static CDS_EXPORT_API void detach_thread();
596 /// Get internal statistics
597 CDS_EXPORT_API void statistics( stat& st );
599 public: // for internal use only
600 /// The main garbage collecting function
601 CDS_EXPORT_API void scan( thread_data* pRec );
603 /// Helper scan routine
605 The function guarantees that every node that is eligible for reuse is eventually freed, barring
606 thread failures. To do so, after executing \p scan(), a thread executes a \p %help_scan(),
607 where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers
608 to thread's list of reclaimed pointers.
610 The function is called internally by \p scan().
612 CDS_EXPORT_API void help_scan( thread_data* pThis );
// accessors used by the global allocator singletons (defined below)
614 hp_allocator& get_hp_allocator()
616 return hp_allocator_;
619 retired_allocator& get_retired_allocator()
621 return retired_allocator_;
625 CDS_EXPORT_API explicit smr(
626 size_t nInitialHazardPtrCount
629 CDS_EXPORT_API ~smr();
631 CDS_EXPORT_API void detach_all_thread();
634 CDS_EXPORT_API thread_record* create_thread_data();
635 static CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );
637 /// Allocates Hazard Pointer SMR thread private data
638 CDS_EXPORT_API thread_record* alloc_thread_data();
640 /// Free HP SMR thread-private data
641 CDS_EXPORT_API void free_thread_data( thread_record* pRec );
// the one-and-only instance, created by construct()
644 static CDS_EXPORT_API smr* instance_;
646 atomics::atomic< thread_record*> thread_list_; ///< Head of thread list
647 size_t const initial_hazard_count_; ///< initial number of hazard pointers per thread
648 hp_allocator hp_allocator_;
649 retired_allocator retired_allocator_;
652 std::atomic<size_t> last_plist_size_; ///< HP array size in last scan() call
657 // for backward compatibility
658 typedef smr GarbageCollector;
// Out-of-class definitions: the allocator singletons live inside the smr
// object, so these must follow the complete smr declaration above.
662 inline hp_allocator& hp_allocator::instance()
664 return smr::instance().get_hp_allocator();
667 inline retired_allocator& retired_allocator::instance()
669 return smr::instance().get_retired_allocator();
676 /// Dynamic (adaptive) Hazard Pointer SMR
677 /** @ingroup cds_garbage_collector
679 Implementation of Dynamic (adaptive) Hazard Pointer SMR
682 - [2002] Maged M.Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
683 - [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
684 - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"
686 %DHP is an adaptive variant of classic \p cds::gc::HP, see @ref cds_garbage_collectors_comparison "Compare HP implementation"
688 @note Internally, %DHP depends on free-list implementation. There are
689 DCAS-based free-list \p cds::intrusive::TaggedFreeList and more complicated CAS-based free-list
690 \p cds::intrusive::FreeList. For x86 architecture and GCC/clang, libcds selects appropriate free-list
691 based on \p -mcx16 compiler flag. You may manually disable DCAS support specifying
692 \p -DCDS_DISABLE_128BIT_ATOMIC for 64bit build or \p -DCDS_DISABLE_64BIT_ATOMIC for 32bit build
693 in compiler command line. All your projects and libcds MUST be compiled with the same flags -
694 either with DCAS support or without it.
695 For MS VC++ compiler DCAS is not supported.
697 See \ref cds_how_to_use "How to use" section for details how to apply SMR.
702 /// Native guarded pointer type
703 typedef void* guarded_pointer;
// Atomic reference to T* — alias used by DHP-based containers
706 template <typename T> using atomic_ref = atomics::atomic<T *>;
710 @headerfile cds/gc/dhp.h
712 template <typename T> using atomic_type = atomics::atomic<T>;
714 /// Atomic marked pointer
715 template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
717 /// Internal statistics
718 typedef dhp::stat stat;
720 /// Dynamic Hazard Pointer guard
722 A guard is a hazard pointer.
723 Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer
725 \p %Guard object is movable but not copyable.
727 The guard object can be in two states:
728 - unlinked - the guard is not linked with any internal hazard pointer.
729 In this state no operation except \p link() and move assignment is supported.
730 - linked (default) - the guard allocates an internal hazard pointer and is fully operable.
732 Due to performance reason the implementation does not check state of the guard in runtime.
734 @warning Move assignment can transfer the guard in unlinked state, use with care.
739 /// Default ctor allocates a guard (hazard pointer) from thread-private storage
741 : guard_( dhp::smr::tls()->hazards_.alloc())
744 /// Initializes an unlinked guard i.e. the guard contains no hazard pointer. Used for move semantics support
745 explicit Guard( std::nullptr_t ) CDS_NOEXCEPT
749 /// Move ctor - \p src guard becomes unlinked (transfer internal guard ownership)
750 Guard( Guard&& src ) CDS_NOEXCEPT
751 : guard_( src.guard_ )
753 src.guard_ = nullptr;
756 /// Move assignment: the internal guards are swapped between \p src and \p this
758 @warning \p src will become in unlinked state if \p this was unlinked on entry.
760 Guard& operator=( Guard&& src ) CDS_NOEXCEPT
762 std::swap( guard_, src.guard_ );
766 /// Copy ctor is prohibited - the guard is not copyable
767 Guard( Guard const& ) = delete;
769 /// Copy assignment is prohibited
770 Guard& operator=( Guard const& ) = delete;
772 /// Frees the internal hazard pointer if the guard is in linked state
778 /// Checks if the guard object linked with any internal hazard pointer
779 bool is_linked() const
781 return guard_ != nullptr;
784 /// Links the guard with internal hazard pointer if the guard is in unlinked state
788 guard_ = dhp::smr::tls()->hazards_.alloc();
791 /// Unlinks the guard from internal hazard pointer; the guard becomes in unlinked state
795 dhp::smr::tls()->hazards_.free( guard_ );
800 /// Protects a pointer of type <tt> atomic<T*> </tt>
802 Return the value of \p toGuard
804 The function tries to load \p toGuard and to store it
805 to the HP slot repeatedly until the guard's value equals \p toGuard
807 template <typename T>
808 T protect( atomics::atomic<T> const& toGuard )
810 assert( guard_ != nullptr );
// classic HP protect loop: publish the pointer, then re-read the source
// until the published value is confirmed still current
812 T pCur = toGuard.load(atomics::memory_order_acquire);
815 pRet = assign( pCur );
816 pCur = toGuard.load(atomics::memory_order_acquire);
817 } while ( pRet != pCur );
821 /// Protects a converted pointer of type <tt> atomic<T*> </tt>
823 Return the value of \p toGuard
825 The function tries to load \p toGuard and to store result of \p f functor
826 to the HP slot repeatedly until the guard's value equals \p toGuard.
828 The function is useful for intrusive containers when \p toGuard is a node pointer
829 that should be converted to a pointer to the value type before guarding.
830 The parameter \p f of type Func is a functor that makes this conversion:
833 value_type * operator()( T * p );
836 Really, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
838 template <typename T, class Func>
839 T protect( atomics::atomic<T> const& toGuard, Func f )
841 assert( guard_ != nullptr );
843 T pCur = toGuard.load(atomics::memory_order_acquire);
848 pCur = toGuard.load(atomics::memory_order_acquire);
849 } while ( pRet != pCur );
853 /// Store \p p to the guard
855 The function is just an assignment, no loop is performed.
856 Can be used for a pointer that cannot be changed concurrently
857 or for already guarded pointer.
859 template <typename T>
862 assert( guard_ != nullptr );
// sync() publishes the stored HP to other threads (see thread_data::sync)
865 dhp::smr::tls()->sync();
870 std::nullptr_t assign( std::nullptr_t )
872 assert( guard_ != nullptr );
879 /// Store marked pointer \p p to the guard
881 The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
882 Can be used for a marked pointer that cannot be changed concurrently
883 or for already guarded pointer.
885 template <typename T, int BITMASK>
886 T* assign( cds::details::marked_ptr<T, BITMASK> p )
// the mark bits are stripped; only the raw pointer is guarded
888 return assign( p.ptr());
891 /// Copy from \p src guard to \p this guard
892 void copy( Guard const& src )
894 assign( src.get_native());
897 /// Clears value of the guard
900 assert( guard_ != nullptr );
905 /// Gets the value currently protected (relaxed read)
906 template <typename T>
909 assert( guard_ != nullptr );
910 return guard_->get_as<T>();
913 /// Gets native guarded pointer stored
914 void* get_native() const
916 assert( guard_ != nullptr );
917 return guard_->get();
// release(): give up ownership of the internal hazard pointer (internal use)
921 dhp::guard* release()
923 dhp::guard* g = guard_;
928 dhp::guard*& guard_ref()
940 /// Array of Dynamic Hazard Pointer guards
942 The class is intended for allocating an array of hazard pointer guards.
943 Template parameter \p Count defines the size of the array.
945 A \p %GuardArray object is not copy- and move-constructible
946 and not copy- and move-assignable.
948 template <size_t Count>
952 /// Rebind array for other size \p OtherCount
953 template <size_t OtherCount>
955 typedef GuardArray<OtherCount> other ; ///< rebinding result
959 static CDS_CONSTEXPR const size_t c_nCapacity = Count;
962 /// Default ctor allocates \p Count hazard pointers
// bulk allocation from the thread-private hazard-pointer storage
965 dhp::smr::tls()->hazards_.alloc( guards_ );
968 /// Move ctor is prohibited
969 GuardArray( GuardArray&& ) = delete;
971 /// Move assignment is prohibited
972 GuardArray& operator=( GuardArray&& ) = delete;
974 /// Copy ctor is prohibited
975 GuardArray( GuardArray const& ) = delete;
977 /// Copy assignment is prohibited
978 GuardArray& operator=( GuardArray const& ) = delete;
980 /// Frees allocated hazard pointers
983 dhp::smr::tls()->hazards_.free( guards_ );
986 /// Protects a pointer of type \p atomic<T*>
988 Return the value of \p toGuard
990 The function tries to load \p toGuard and to store it
991 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
993 template <typename T>
994 T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
996 assert( nIndex < capacity());
// publish-then-verify loop, as in Guard::protect()
1000 pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire));
1001 } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
1006 /// Protects a pointer of type \p atomic<T*>
1008 Return the value of \p toGuard
1010 The function tries to load \p toGuard and to store it
1011 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
1013 The function is useful for intrusive containers when \p toGuard is a node pointer
1014 that should be converted to a pointer to the value type before guarding.
1015 The parameter \p f of type Func is a functor to make that conversion:
1018 value_type * operator()( T * p );
1021 Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
1023 template <typename T, class Func>
1024 T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
1026 assert( nIndex < capacity());
1030 assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire)));
1031 } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
1036 /// Store \p p to the slot \p nIndex
1038 The function is just an assignment, no loop is performed.
1040 template <typename T>
1041 T * assign( size_t nIndex, T * p )
1043 assert( nIndex < capacity());
1045 guards_.set( nIndex, p );
// publish the stored HP to other threads
1046 dhp::smr::tls()->sync();
1050 /// Store marked pointer \p p to the guard
1052 The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
1053 Can be used for a marked pointer that cannot be changed concurrently
1054 or for already guarded pointer.
1056 template <typename T, int Bitmask>
1057 T * assign( size_t nIndex, cds::details::marked_ptr<T, Bitmask> p )
1059 return assign( nIndex, p.ptr());
1062 /// Copy guarded value from \p src guard to slot at index \p nIndex
1063 void copy( size_t nIndex, Guard const& src )
1065 assign( nIndex, src.get_native());
1068 /// Copy guarded value from slot \p nSrcIndex to slot at index \p nDestIndex
1069 void copy( size_t nDestIndex, size_t nSrcIndex )
1071 assign( nDestIndex, get_native( nSrcIndex ));
1074 /// Clear value of the slot \p nIndex
1075 void clear( size_t nIndex )
1077 guards_.clear( nIndex );
1080 /// Get current value of slot \p nIndex
1081 template <typename T>
1082 T * get( size_t nIndex ) const
1084 assert( nIndex < capacity());
1085 return guards_[nIndex]->template get_as<T>();
1088 /// Get native guarded pointer stored
1089 guarded_pointer get_native( size_t nIndex ) const
1091 assert( nIndex < capacity());
1092 return guards_[nIndex]->get();
// release(): give up ownership of the guard in slot nIndex (internal use)
1096 dhp::guard* release( size_t nIndex ) CDS_NOEXCEPT
1098 return guards_.release( nIndex );
1102 /// Capacity of the guard array
1103 static CDS_CONSTEXPR size_t capacity()
1110 dhp::guard_array<c_nCapacity> guards_;
1116 A guarded pointer is a pair of a pointer and GC's guard.
1117 Usually, it is used for returning a pointer to the item from a lock-free container.
1118 The guard prevents the pointer to be early disposed (freed) by GC.
1119 After destructing \p %guarded_ptr object the pointer can be disposed (freed) automatically at any time.
1122 - \p GuardedType - a type which the guard stores
1123 - \p ValueType - a value type
1124 - \p Cast - a functor for converting <tt>GuardedType*</tt> to <tt>ValueType*</tt>. Default is \p void (no casting).
1126 For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed.
1127 In such case the \p %guarded_ptr is:
1129 typedef cds::gc::DHP::guarded_ptr< foo > intrusive_guarded_ptr;
1132 For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed.
1140 struct value_accessor {
1141 std::string* operator()( foo* pFoo ) const
1143 return &(pFoo->value);
1148 typedef cds::gc::DHP::guarded_ptr< Foo, std::string, value_accessor > nonintrusive_guarded_ptr;
1151 You don't need to use this class directly.
1152 All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with appropriate casting functor.
1154 template <typename GuardedType, typename ValueType=GuardedType, typename Cast=void >
// identity conversion used when Cast is void
1158 struct trivial_cast {
1159 ValueType * operator()( GuardedType * p ) const
// all guarded_ptr instantiations are friends so the converting move ctor
// below can steal gp.guard_
1165 template <typename GT, typename VT, typename C> friend class guarded_ptr;
1169 typedef GuardedType guarded_type; ///< Guarded type
1170 typedef ValueType value_type; ///< Value type
1172 /// Functor for casting \p guarded_type to \p value_type
1173 typedef typename std::conditional< std::is_same<Cast, void>::value, trivial_cast, Cast >::type value_cast;
1176 /// Creates empty guarded pointer
1177 guarded_ptr() CDS_NOEXCEPT
1182 explicit guarded_ptr( dhp::guard* g ) CDS_NOEXCEPT
1186 /// Initializes guarded pointer with \p p
1187 explicit guarded_ptr( guarded_type * p ) CDS_NOEXCEPT
1192 explicit guarded_ptr( std::nullptr_t ) CDS_NOEXCEPT
// move ctor: takes over the source's internal guard
1198 guarded_ptr( guarded_ptr&& gp ) CDS_NOEXCEPT
1199 : guard_( gp.guard_ )
1201 gp.guard_ = nullptr;
// converting move ctor from a guarded_ptr with different template args
1205 template <typename GT, typename VT, typename C>
1206 guarded_ptr( guarded_ptr<GT, VT, C>&& gp ) CDS_NOEXCEPT
1207 : guard_( gp.guard_ )
1209 gp.guard_ = nullptr;
1212 /// Ctor from \p Guard
1213 explicit guarded_ptr( Guard&& g ) CDS_NOEXCEPT
1214 : guard_( g.release())
1217 /// The guarded pointer is not copy-constructible
1218 guarded_ptr( guarded_ptr const& gp ) = delete;
1220 /// Clears the guarded pointer
1222 \ref release is called if guarded pointer is not \ref empty
1224 ~guarded_ptr() CDS_NOEXCEPT
1229 /// Move-assignment operator
1230 guarded_ptr& operator=( guarded_ptr&& gp ) CDS_NOEXCEPT
1232 std::swap( guard_, gp.guard_ );
1236 /// Move-assignment from \p Guard
1237 guarded_ptr& operator=( Guard&& g ) CDS_NOEXCEPT
1239 std::swap( guard_, g.guard_ref());
1243 /// The guarded pointer is not copy-assignable
1244 guarded_ptr& operator=(guarded_ptr const& gp) = delete;
1246 /// Returns a pointer to guarded value
1247 value_type * operator ->() const CDS_NOEXCEPT
1250 return value_cast()( guard_->get_as<guarded_type>());
1253 /// Returns a reference to guarded value
1254 value_type& operator *() CDS_NOEXCEPT
1257 return *value_cast()( guard_->get_as<guarded_type>());
1260 /// Returns const reference to guarded value
1261 value_type const& operator *() const CDS_NOEXCEPT
1264 return *value_cast()(reinterpret_cast<guarded_type *>(guard_->get()));
1267 /// Checks if the guarded pointer is \p nullptr
1268 bool empty() const CDS_NOEXCEPT
1270 return guard_ == nullptr || guard_->get( atomics::memory_order_relaxed ) == nullptr;
1273 /// \p bool operator returns <tt>!empty()</tt>
1274 explicit operator bool() const CDS_NOEXCEPT
1279 /// Clears guarded pointer
1281 If the guarded pointer has been released, the pointer can be disposed (freed) at any time.
1282 Dereferencing the guarded pointer after \p release() is dangerous.
1284 void release() CDS_NOEXCEPT
1290 // For internal use only!!!
1291 void reset(guarded_type * p) CDS_NOEXCEPT
// lazily (re)acquire a hazard pointer from thread-private storage
1305 guard_ = dhp::smr::tls()->hazards_.alloc();
1311 dhp::smr::tls()->hazards_.free( guard_ );
1324 /// Initializes %DHP memory manager singleton
1326 Constructor creates and initializes %DHP global object.
1327 %DHP object should be created before using CDS data structure based on \p %cds::gc::DHP. Usually,
1328 it is created in the beginning of \p main() function.
1329 After creating of global object you may use CDS data structures based on \p %cds::gc::DHP.
1331 \p nInitialThreadGuardCount - initial count of guard allocated for each thread.
1332 When a thread is initialized the GC allocates local guard pool for the thread from a common guard pool.
1333 When needed, the local thread's guard pool grows automatically from the common pool.
1334 When the thread terminates, its guard pool is returned to the common GC pool.
1337 size_t nInitialHazardPtrCount = 16 ///< Initial number of hazard pointer per thread
1340 dhp::smr::construct( nInitialHazardPtrCount );
1343 /// Destroys %DHP memory manager
1345 The destructor destroys %DHP global object. After calling of this function you may \b NOT
1346 use CDS data structures based on \p %cds::gc::DHP.
1347 Usually, %DHP object is destroyed at the end of your \p main().
// bDetachAll = true: detach any threads still attached to the SMR
1351 dhp::GarbageCollector::destruct( true );
1354 /// Checks if count of hazard pointer is no less than \p nCountNeeded
1356 The function always returns \p true since the guard count is unlimited for
1357 \p %gc::DHP garbage collector.
1359 static CDS_CONSTEXPR bool check_available_guards(
1360 #ifdef CDS_DOXYGEN_INVOKED
1361 size_t nCountNeeded,
1370 /// Set memory management functions
1372 @note This function may be called <b>BEFORE</b> creating an instance
1373 of Dynamic Hazard Pointer SMR
1375 SMR object allocates some memory for thread-specific data and for creating SMR object.
1376 By default, a standard \p new and \p delete operators are used for this.
1378 static void set_memory_allocator(
1379 void* ( *alloc_func )( size_t size ), ///< \p malloc() function
1380 void( *free_func )( void * p ) ///< \p free() function
1383 dhp::smr::set_memory_allocator( alloc_func, free_func );
1386 /// Retire pointer \p p with function \p pFunc
1388 The function places pointer \p p to array of pointers ready for removing.
1389 (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
1390 \p func is a disposer: when \p p can be safely removed, \p func is called.
1392 template <typename T>
1393 static void retire( T * p, void (* func)(void *))
1395 dhp::thread_data* rec = dhp::smr::tls();
// a full retired array triggers a garbage-collecting scan()
1396 if ( !rec->retired_.push( dhp::retired_ptr( p, func )))
1397 dhp::smr::instance().scan( rec );
1400 /// Retire pointer \p p with functor of type \p Disposer
1402 The function places pointer \p p to array of pointers ready for removing.
1403 (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
1405 Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is:
1407 template <typename T>
1409 void operator()( T * p ) ; // disposing operator
1412 Since the functor call can happen at any time after \p retire() call, additional restrictions are imposed to \p Disposer type:
1413 - it should be stateless functor
1414 - it should be default-constructible
1415 - the result of functor call with argument \p p should not depend on where the functor will be called.
1418 Operator \p delete functor:
1420 template <typename T>
1422 void operator ()( T * p ) {
1427 // How to call HP::retire method
1430 // ... use p in lock-free manner
1432 cds::gc::DHP::retire<disposer>( p ) ; // place p to retired pointer array of DHP SMR
1435 Functor based on \p std::allocator :
1437 template <typename Alloc = std::allocator<int> >
1439 template <typename T>
1440 void operator()( T * p ) {
1441 typedef typename Alloc::template rebind<T>::other alloc_t;
1444 a.deallocate( p, 1 );
1449 template <class Disposer, typename T>
1450 static void retire( T* p )
// static_functor adapts the stateless Disposer to the void(*)(void*) slot
1452 if ( !dhp::smr::tls()->retired_.push( dhp::retired_ptr( p, cds::details::static_functor<Disposer, T>::call )))
1456 /// Checks if Dynamic Hazard Pointer GC is constructed and may be used
1457 static bool isUsed()
1459 return dhp::smr::isUsed();
1462 /// Forced GC cycle call for current thread
1464 Usually, this function should not be called directly.
1468 dhp::smr::instance().scan( dhp::smr::tls());
1471 /// Synonym for \p scan()
1472 static void force_dispose()
1477 /// Returns internal statistics
1479 The function clears \p st before gathering statistics.
1481 @note Internal statistics is available only if you compile
1482 \p libcds and your program with \p -DCDS_ENABLE_HPSTAT.
1484 static void statistics( stat& st )
1486 dhp::smr::instance().statistics( st );
1489 /// Returns post-mortem statistics
1491 Post-mortem statistics is gathered in the \p %DHP object destructor
1492 and can be accessible after destructing the global \p %DHP object.
1494 @note Internal statistics is available only if you compile
1495 \p libcds and your program with \p -DCDS_ENABLE_HPSTAT.
1503 // Initialize DHP SMR
1506 // deal with DHP-based data structures
1510 // DHP object destroyed
1511 // Get total post-mortem statistics
1512 cds::gc::DHP::stat const& st = cds::gc::DHP::postmortem_statistics();
1514 printf( "DHP statistics:\n"
1515 " thread count = %llu\n"
1516 " guard allocated = %llu\n"
1517 " guard freed = %llu\n"
1518 " retired data count = %llu\n"
1519 " free data count = %llu\n"
1520 " scan() call count = %llu\n"
1521 " help_scan() call count = %llu\n",
1522 st.thread_rec_count,
1523 st.guard_allocated, st.guard_freed,
1524 st.retired_count, st.free_count,
1525 st.scan_count, st.help_scan_count
1532 CDS_EXPORT_API static stat const& postmortem_statistics();
1535 }} // namespace cds::gc
1537 #endif // #ifndef CDSLIB_GC_DHP_SMR_H