2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_GC_DHP_SMR_H
32 #define CDSLIB_GC_DHP_SMR_H
35 #include <cds/gc/details/hp_common.h>
36 #include <cds/intrusive/free_list_selector.h>
37 #include <cds/details/throw_exception.h>
38 #include <cds/details/static_functor.h>
39 #include <cds/details/marked_ptr.h>
40 #include <cds/user_setup/cache_line.h>
42 namespace cds { namespace gc {
44 /// Dynamic (adaptive) Hazard Pointer implementation details
46 using namespace cds::gc::hp::common;
48 /// Exception "Dynamic Hazard Pointer SMR is not initialized"
49 class not_initialized: public std::runtime_error
54 : std::runtime_error( "Global DHP SMR object is not initialized" )
60 struct guard_block: public cds::intrusive::FreeListImpl::node
62 guard_block* next_; // next block in the thread list
70 return reinterpret_cast<guard*>( this + 1 );
76 /// \p guard_block allocator (global object)
81 static hp_allocator& instance();
83 CDS_EXPORT_API guard_block* alloc();
84 void free( guard_block* block )
86 free_list_.put( block );
92 CDS_EXPORT_API ~hp_allocator();
95 cds::intrusive::FreeListImpl free_list_; ///< list of free \p guard_block
100 /// Per-thread hazard pointer storage
101 class thread_hp_storage
105 thread_hp_storage( guard* arr, size_t nSize ) CDS_NOEXCEPT
107 , extended_list_( nullptr )
109 , initial_capacity_( nSize )
112 new( arr ) guard[nSize];
115 thread_hp_storage() = delete;
116 thread_hp_storage( thread_hp_storage const& ) = delete;
117 thread_hp_storage( thread_hp_storage&& ) = delete;
126 if ( cds_unlikely( free_head_ == nullptr )) {
128 assert( free_head_ != nullptr );
131 guard* g = free_head_;
132 free_head_ = g->next_;
136 void free( guard* g ) CDS_NOEXCEPT
140 g->next_ = free_head_;
145 template< size_t Capacity>
146 size_t alloc( guard_array<Capacity>& arr )
148 for ( size_t i = 0; i < Capacity; ++i ) {
149 if ( cds_unlikely( free_head_ == nullptr ))
151 arr.reset( i, free_head_ );
152 free_head_ = free_head_->next_;
157 template <size_t Capacity>
158 void free( guard_array<Capacity>& arr ) CDS_NOEXCEPT
160 guard* gList = free_head_;
161 for ( size_t i = 0; i < Capacity; ++i ) {
175 for ( guard* cur = array_, *last = array_ + initial_capacity_; cur < last; ++cur )
178 // free all extended blocks
179 hp_allocator& alloc = hp_allocator::instance();
180 for ( guard_block* p = extended_list_; p; ) {
181 guard_block* next = p->next_;
186 extended_list_ = nullptr;
191 assert( extended_list_ == nullptr );
194 for ( guard* pEnd = p + initial_capacity_ - 1; p != pEnd; ++p )
203 assert( free_head_ == nullptr );
205 guard_block* block = hp_allocator::instance().alloc();
206 block->next_ = extended_list_;
207 extended_list_ = block;
208 free_head_ = block->first();
212 guard* free_head_; ///< Head of free guard list
213 guard_block* extended_list_; ///< Head of extended guard blocks allocated for the thread
214 guard* const array_; ///< initial HP array
215 size_t const initial_capacity_; ///< Capacity of \p array_
220 struct retired_block: public cds::intrusive::FreeListImpl::node
222 retired_block* next_; ///< Next block in thread-private retired array
224 static size_t const c_capacity = 256;
230 retired_ptr* first() const
232 return reinterpret_cast<retired_ptr*>( const_cast<retired_block*>( this ) + 1 );
235 retired_ptr* last() const
237 return first() + c_capacity;
243 class retired_allocator
247 static retired_allocator& instance();
249 CDS_EXPORT_API retired_block* alloc();
250 void free( retired_block* block )
252 block->next_ = nullptr;
253 free_list_.put( block );
259 CDS_EXPORT_API ~retired_allocator();
262 cds::intrusive::FreeListImpl free_list_; ///< list of free \p retired_block
267 /// Per-thread retired array
272 retired_array() CDS_NOEXCEPT
273 : current_block_( nullptr )
274 , current_cell_( nullptr )
275 , list_head_( nullptr )
276 , list_tail_( nullptr )
280 retired_array( retired_array const& ) = delete;
281 retired_array( retired_array&& ) = delete;
289 bool push( retired_ptr const& p ) CDS_NOEXCEPT
291 assert( current_block_ != nullptr );
292 assert( current_block_->first() <= current_cell_ );
293 assert( current_cell_ < current_block_->last() );
294 //assert( &p != current_cell_ );
297 if ( ++current_cell_ == current_block_->last() ) {
298 // goto next block if exists
299 if ( current_block_->next_ ) {
300 current_block_ = current_block_->next_;
301 current_cell_ = current_block_->first();
306 // smr::scan() extend retired_array if needed
313 bool safe_push( retired_ptr* p ) CDS_NOEXCEPT
315 bool ret = push( *p );
320 private: // called by smr
323 if ( list_head_ == nullptr ) {
324 retired_block* block = retired_allocator::instance().alloc();
325 assert( block->next_ == nullptr );
330 current_cell_ = block->first();
338 retired_allocator& alloc = retired_allocator::instance();
339 for ( retired_block* p = list_head_; p; ) {
340 retired_block* next = p->next_;
347 list_tail_ = nullptr;
348 current_cell_ = nullptr;
355 assert( list_head_ != nullptr );
356 assert( current_block_ == list_tail_ );
357 assert( current_cell_ == current_block_->last() );
359 retired_block* block = retired_allocator::instance().alloc();
360 assert( block->next_ == nullptr );
362 list_tail_ = list_tail_->next_ = block;
363 current_cell_ = block->first();
369 return current_block_ == nullptr
370 || ( current_block_ == list_head_ && current_cell_ == current_block_->first());
374 retired_block* current_block_;
375 retired_ptr* current_cell_; // in current_block_
377 retired_block* list_head_;
378 retired_block* list_tail_;
386 thread_hp_storage hazards_; ///< Hazard pointers private to the thread
387 retired_array retired_; ///< Retired data private to the thread
389 char pad1_[cds::c_nCacheLineSize];
390 atomics::atomic<unsigned int> sync_; ///< dummy var to introduce synchronizes-with relationship between threads
391 char pad2_[cds::c_nCacheLineSize];
393 // CppCheck warn: pad1_ and pad2_ are uninitialized in ctor
394 // cppcheck-suppress uninitMemberVar
395 thread_data( guard* guards, size_t guard_count )
396 : hazards_( guards, guard_count )
400 thread_data() = delete;
401 thread_data( thread_data const& ) = delete;
402 thread_data( thread_data&& ) = delete;
406 sync_.fetch_add( 1, atomics::memory_order_acq_rel );
412 // Dynamic (adaptive) Hazard Pointer SMR (Safe Memory Reclamation)
415 struct thread_record;
418 /// Returns the instance of Hazard Pointer \ref smr
419 static smr& instance()
421 # ifdef CDS_DISABLE_SMR_EXCEPTION
422 assert( instance_ != nullptr );
425 CDS_THROW_EXCEPTION( not_initialized() );
430 /// Creates Dynamic Hazard Pointer SMR singleton
432 Dynamic Hazard Pointer SMR is a singleton. If DHP instance is not initialized then the function creates the instance.
433 Otherwise it does nothing.
435 The Michael's HP reclamation schema depends on three parameters:
436 - \p nHazardPtrCount - HP pointer count per thread. Usually it is a small number (2-4) depending on
437 the data structure algorithms. By default, if \p nHazardPtrCount = 0,
438 the function uses maximum of HP count for CDS library
439 - \p nMaxThreadCount - max count of thread with using HP GC in your application. Default is 100.
440 - \p nMaxRetiredPtrCount - capacity of array of retired pointers for each thread. Must be greater than
441 <tt> nHazardPtrCount * nMaxThreadCount </tt>
442 Default is <tt>2 * nHazardPtrCount * nMaxThreadCount</tt>
444 static CDS_EXPORT_API void construct(
445 size_t nInitialHazardPtrCount = 16 ///< Initial number of hazard pointer per thread
448 // for back-compatibility
449 static void Construct(
450 size_t nInitialHazardPtrCount = 16 ///< Initial number of hazard pointer per thread
453 construct( nInitialHazardPtrCount );
456 /// Destroys global instance of \ref smr
458 The parameter \p bDetachAll should be used carefully: if its value is \p true,
459 then the object destroyed automatically detaches all attached threads. This feature
460 can be useful when you have no control over the thread termination, for example,
461 when \p libcds is injected into existing external thread.
463 static CDS_EXPORT_API void destruct(
464 bool bDetachAll = false ///< Detach all threads
467 // for back-compatibility
468 static void Destruct(
469 bool bDetachAll = false ///< Detach all threads
472 destruct( bDetachAll );
475 /// Checks if global SMR object is constructed and may be used
476 static bool isUsed() CDS_NOEXCEPT
478 return instance_ != nullptr;
481 /// Set memory management functions
483 @note This function may be called <b>BEFORE</b> creating an instance
484 of Dynamic Hazard Pointer SMR
486 SMR object allocates some memory for thread-specific data and for
488 By default, a standard \p new and \p delete operators are used for this.
490 static CDS_EXPORT_API void set_memory_allocator(
491 void* ( *alloc_func )( size_t size ),
492 void( *free_func )( void * p )
495 /// Returns thread-local data for the current thread
496 static CDS_EXPORT_API thread_data* tls();
498 static CDS_EXPORT_API void attach_thread();
499 static CDS_EXPORT_API void detach_thread();
501 public: // for internal use only
502 /// The main garbage collecting function
503 CDS_EXPORT_API void scan( thread_data* pRec );
505 /// Helper scan routine
507 The function guarantees that every node that is eligible for reuse is eventually freed, barring
508 thread failures. To do so, after executing \p scan(), a thread executes a \p %help_scan(),
509 where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers
510 to thread's list of reclaimed pointers.
512 The function is called internally by \p scan().
514 CDS_EXPORT_API void help_scan( thread_data* pThis );
516 hp_allocator& get_hp_allocator()
518 return hp_allocator_;
521 retired_allocator& get_retired_allocator()
523 return retired_allocator_;
527 CDS_EXPORT_API explicit smr(
528 size_t nInitialHazardPtrCount
531 CDS_EXPORT_API ~smr();
533 CDS_EXPORT_API void detach_all_thread();
536 CDS_EXPORT_API thread_record* create_thread_data();
537 static CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );
539 /// Allocates Hazard Pointer SMR thread private data
540 CDS_EXPORT_API thread_record* alloc_thread_data();
542 /// Free HP SMR thread-private data
543 CDS_EXPORT_API void free_thread_data( thread_record* pRec );
546 static CDS_EXPORT_API smr* instance_;
548 atomics::atomic< thread_record*> thread_list_; ///< Head of thread list
549 size_t const initial_hazard_count_; ///< initial number of hazard pointers per thread
550 hp_allocator hp_allocator_;
551 retired_allocator retired_allocator_;
554 std::atomic<size_t> last_plist_size_; ///< HP array size in last scan() call
559 // for backward compatibility
560 typedef smr GarbageCollector;
564 inline hp_allocator& hp_allocator::instance()
566 return smr::instance().get_hp_allocator();
569 inline retired_allocator& retired_allocator::instance()
571 return smr::instance().get_retired_allocator();
578 /// Dynamic (adaptive) Hazard Pointer SMR
579 /** @ingroup cds_garbage_collector
581 Implementation of Dynamic (adaptive) Hazard Pointer SMR
584 - [2002] Maged M.Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
585 - [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
586 - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"
588 %DHP is an adaptive variant of classic \p cds::gc::HP, see @ref cds_garbage_collectors_comparison "Compare HP implementation"
590 See \ref cds_how_to_use "How to use" section for details how to apply SMR.
595 /// Native guarded pointer type
596 typedef void* guarded_pointer;
599 template <typename T> using atomic_ref = atomics::atomic<T *>;
603 @headerfile cds/gc/dhp.h
605 template <typename T> using atomic_type = atomics::atomic<T>;
607 /// Atomic marked pointer
608 template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
611 /// Dynamic Hazard Pointer guard
613 A guard is a hazard pointer.
614 Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer
616 \p %Guard object is movable but not copyable.
618 The guard object can be in two states:
619 - unlinked - the guard is not linked with any internal hazard pointer.
620 In this state no operation except \p link() and move assignment is supported.
621 - linked (default) - the guard allocates an internal hazard pointer and fully operable.
623 Due to performance reason the implementation does not check state of the guard in runtime.
625 @warning Move assignment can transfer the guard in unlinked state, use with care.
630 /// Default ctor allocates a guard (hazard pointer) from thread-private storage
632 : guard_( dhp::smr::tls()->hazards_.alloc() )
635 /// Initializes an unlinked guard i.e. the guard contains no hazard pointer. Used for move semantics support
636 explicit Guard( std::nullptr_t ) CDS_NOEXCEPT
640 /// Move ctor - \p src guard becomes unlinked (transfer internal guard ownership)
641 Guard( Guard&& src ) CDS_NOEXCEPT
642 : guard_( src.guard_ )
644 src.guard_ = nullptr;
647 /// Move assignment: the internal guards are swapped between \p src and \p this
649 @warning \p src will become in unlinked state if \p this was unlinked on entry.
651 Guard& operator=( Guard&& src ) CDS_NOEXCEPT
653 std::swap( guard_, src.guard_ );
657 /// Copy ctor is prohibited - the guard is not copyable
658 Guard( Guard const& ) = delete;
660 /// Copy assignment is prohibited
661 Guard& operator=( Guard const& ) = delete;
663 /// Frees the internal hazard pointer if the guard is in linked state
669 /// Checks if the guard object linked with any internal hazard pointer
670 bool is_linked() const
672 return guard_ != nullptr;
675 /// Links the guard with internal hazard pointer if the guard is in unlinked state
679 guard_ = dhp::smr::tls()->hazards_.alloc();
682 /// Unlinks the guard from internal hazard pointer; the guard becomes in unlinked state
686 dhp::smr::tls()->hazards_.free( guard_ );
691 /// Protects a pointer of type <tt> atomic<T*> </tt>
693 Return the value of \p toGuard
695 The function tries to load \p toGuard and to store it
696 to the HP slot repeatedly until the guard's value equals \p toGuard
698 template <typename T>
699 T protect( atomics::atomic<T> const& toGuard )
701 assert( guard_ != nullptr );
703 T pCur = toGuard.load(atomics::memory_order_acquire);
706 pRet = assign( pCur );
707 pCur = toGuard.load(atomics::memory_order_acquire);
708 } while ( pRet != pCur );
712 /// Protects a converted pointer of type <tt> atomic<T*> </tt>
714 Return the value of \p toGuard
716 The function tries to load \p toGuard and to store result of \p f functor
717 to the HP slot repeatedly until the guard's value equals \p toGuard.
719 The function is useful for intrusive containers when \p toGuard is a node pointer
720 that should be converted to a pointer to the value type before guarding.
721 The parameter \p f of type Func is a functor that makes this conversion:
724 value_type * operator()( T * p );
727 Really, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
729 template <typename T, class Func>
730 T protect( atomics::atomic<T> const& toGuard, Func f )
732 assert( guard_ != nullptr );
734 T pCur = toGuard.load(atomics::memory_order_acquire);
739 pCur = toGuard.load(atomics::memory_order_acquire);
740 } while ( pRet != pCur );
744 /// Store \p p to the guard
746 The function is just an assignment, no loop is performed.
747 Can be used for a pointer that cannot be changed concurrently
748 or for already guarded pointer.
750 template <typename T>
753 assert( guard_ != nullptr );
756 dhp::smr::tls()->sync();
761 std::nullptr_t assign( std::nullptr_t )
763 assert( guard_ != nullptr );
770 /// Store marked pointer \p p to the guard
772 The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
773 Can be used for a marked pointer that cannot be changed concurrently
774 or for already guarded pointer.
776 template <typename T, int BITMASK>
777 T* assign( cds::details::marked_ptr<T, BITMASK> p )
779 return assign( p.ptr());
782 /// Copy from \p src guard to \p this guard
783 void copy( Guard const& src )
785 assign( src.get_native());
788 /// Clears value of the guard
791 assert( guard_ != nullptr );
796 /// Gets the value currently protected (relaxed read)
797 template <typename T>
800 assert( guard_ != nullptr );
801 return guard_->get_as<T>();
804 /// Gets native guarded pointer stored
805 void* get_native() const
807 assert( guard_ != nullptr );
808 return guard_->get();
812 dhp::guard* release()
814 dhp::guard* g = guard_;
819 dhp::guard*& guard_ref()
831 /// Array of Dynamic Hazard Pointer guards
833 The class is intended for allocating an array of hazard pointer guards.
834 Template parameter \p Count defines the size of the array.
836 A \p %GuardArray object is not copy- and move-constructible
837 and not copy- and move-assignable.
839 template <size_t Count>
843 /// Rebind array for other size \p OtherCount
844 template <size_t OtherCount>
846 typedef GuardArray<OtherCount> other ; ///< rebinding result
850 static CDS_CONSTEXPR const size_t c_nCapacity = Count;
853 /// Default ctor allocates \p Count hazard pointers
856 dhp::smr::tls()->hazards_.alloc( guards_ );
859 /// Move ctor is prohibited
860 GuardArray( GuardArray&& ) = delete;
862 /// Move assignment is prohibited
863 GuardArray& operator=( GuardArray&& ) = delete;
865 /// Copy ctor is prohibited
866 GuardArray( GuardArray const& ) = delete;
868 /// Copy assignment is prohibited
869 GuardArray& operator=( GuardArray const& ) = delete;
871 /// Frees allocated hazard pointers
874 dhp::smr::tls()->hazards_.free( guards_ );
877 /// Protects a pointer of type \p atomic<T*>
879 Return the value of \p toGuard
881 The function tries to load \p toGuard and to store it
882 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
884 template <typename T>
885 T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
887 assert( nIndex < capacity() );
891 pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire));
892 } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
897 /// Protects a pointer of type \p atomic<T*>
899 Return the value of \p toGuard
901 The function tries to load \p toGuard and to store it
902 to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
904 The function is useful for intrusive containers when \p toGuard is a node pointer
905 that should be converted to a pointer to the value type before guarding.
906 The parameter \p f of type Func is a functor to make that conversion:
909 value_type * operator()( T * p );
912 Actually, the result of <tt> f( toGuard.load()) </tt> is assigned to the hazard pointer.
914 template <typename T, class Func>
915 T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
917 assert( nIndex < capacity() );
921 assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire)));
922 } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
927 /// Store \p p to the slot \p nIndex
929 The function is just an assignment, no loop is performed.
931 template <typename T>
932 T * assign( size_t nIndex, T * p )
934 assert( nIndex < capacity() );
936 guards_.set( nIndex, p );
937 dhp::smr::tls()->sync();
941 /// Store marked pointer \p p to the guard
943 The function is just an assignment of <tt>p.ptr()</tt>, no loop is performed.
944 Can be used for a marked pointer that cannot be changed concurrently
945 or for already guarded pointer.
947 template <typename T, int Bitmask>
948 T * assign( size_t nIndex, cds::details::marked_ptr<T, Bitmask> p )
950 return assign( nIndex, p.ptr());
953 /// Copy guarded value from \p src guard to slot at index \p nIndex
954 void copy( size_t nIndex, Guard const& src )
956 assign( nIndex, src.get_native());
959 /// Copy guarded value from slot \p nSrcIndex to slot at index \p nDestIndex
960 void copy( size_t nDestIndex, size_t nSrcIndex )
962 assign( nDestIndex, get_native( nSrcIndex ));
965 /// Clear value of the slot \p nIndex
966 void clear( size_t nIndex )
968 guards_.clear( nIndex );
971 /// Get current value of slot \p nIndex
972 template <typename T>
973 T * get( size_t nIndex ) const
975 assert( nIndex < capacity() );
976 return guards_[nIndex]->template get_as<T>();
979 /// Get native guarded pointer stored
980 guarded_pointer get_native( size_t nIndex ) const
982 assert( nIndex < capacity() );
983 return guards_[nIndex]->get();
987 dhp::guard* release( size_t nIndex ) CDS_NOEXCEPT
989 return guards_.release( nIndex );
993 /// Capacity of the guard array
994 static CDS_CONSTEXPR size_t capacity()
1001 dhp::guard_array<c_nCapacity> guards_;
1007 A guarded pointer is a pair of a pointer and GC's guard.
1008 Usually, it is used for returning a pointer to the item from a lock-free container.
1009 The guard prevents the pointer from being disposed (freed) early by GC.
1010 After destructing \p %guarded_ptr object the pointer can be disposed (freed) automatically at any time.
1013 - \p GuardedType - a type which the guard stores
1014 - \p ValueType - a value type
1015 - \p Cast - a functor for converting <tt>GuardedType*</tt> to <tt>ValueType*</tt>. Default is \p void (no casting).
1017 For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed.
1018 In such case the \p %guarded_ptr is:
1020 typedef cds::gc::DHP::guarded_ptr< foo > intrusive_guarded_ptr;
1023 For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed.
1031 struct value_accessor {
1032 std::string* operator()( foo* pFoo ) const
1034 return &(pFoo->value);
1039 typedef cds::gc::DHP::guarded_ptr< Foo, std::string, value_accessor > nonintrusive_guarded_ptr;
1042 You don't need use this class directly.
1043 All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with appropriate casting functor.
1045 template <typename GuardedType, typename ValueType=GuardedType, typename Cast=void >
1049 struct trivial_cast {
1050 ValueType * operator()( GuardedType * p ) const
1056 template <typename GT, typename VT, typename C> friend class guarded_ptr;
1060 typedef GuardedType guarded_type; ///< Guarded type
1061 typedef ValueType value_type; ///< Value type
1063 /// Functor for casting \p guarded_type to \p value_type
1064 typedef typename std::conditional< std::is_same<Cast, void>::value, trivial_cast, Cast >::type value_cast;
1067 /// Creates empty guarded pointer
1068 guarded_ptr() CDS_NOEXCEPT
1073 explicit guarded_ptr( dhp::guard* g ) CDS_NOEXCEPT
1077 /// Initializes guarded pointer with \p p
1078 explicit guarded_ptr( guarded_type * p ) CDS_NOEXCEPT
1083 explicit guarded_ptr( std::nullptr_t ) CDS_NOEXCEPT
1089 guarded_ptr( guarded_ptr&& gp ) CDS_NOEXCEPT
1090 : guard_( gp.guard_ )
1092 gp.guard_ = nullptr;
1096 template <typename GT, typename VT, typename C>
1097 guarded_ptr( guarded_ptr<GT, VT, C>&& gp ) CDS_NOEXCEPT
1098 : guard_( gp.guard_ )
1100 gp.guard_ = nullptr;
1103 /// Ctor from \p Guard
1104 explicit guarded_ptr( Guard&& g ) CDS_NOEXCEPT
1105 : guard_( g.release())
1108 /// The guarded pointer is not copy-constructible
1109 guarded_ptr( guarded_ptr const& gp ) = delete;
1111 /// Clears the guarded pointer
1113 \ref release is called if guarded pointer is not \ref empty
1115 ~guarded_ptr() CDS_NOEXCEPT
1120 /// Move-assignment operator
1121 guarded_ptr& operator=( guarded_ptr&& gp ) CDS_NOEXCEPT
1123 std::swap( guard_, gp.guard_ );
1127 /// Move-assignment from \p Guard
1128 guarded_ptr& operator=( Guard&& g ) CDS_NOEXCEPT
1130 std::swap( guard_, g.guard_ref());
1134 /// The guarded pointer is not copy-assignable
1135 guarded_ptr& operator=(guarded_ptr const& gp) = delete;
1137 /// Returns a pointer to guarded value
1138 value_type * operator ->() const CDS_NOEXCEPT
1141 return value_cast()( guard_->get_as<guarded_type>() );
1144 /// Returns a reference to guarded value
1145 value_type& operator *() CDS_NOEXCEPT
1148 return *value_cast()( guard_->get_as<guarded_type>() );
1151 /// Returns const reference to guarded value
1152 value_type const& operator *() const CDS_NOEXCEPT
1155 return *value_cast()(reinterpret_cast<guarded_type *>(guard_->get()));
1158 /// Checks if the guarded pointer is \p nullptr
1159 bool empty() const CDS_NOEXCEPT
1161 return guard_ == nullptr || guard_->get( atomics::memory_order_relaxed ) == nullptr;
1164 /// \p bool operator returns <tt>!empty()</tt>
1165 explicit operator bool() const CDS_NOEXCEPT
1170 /// Clears guarded pointer
1172 If the guarded pointer has been released, the pointer can be disposed (freed) at any time.
1173 Dereferencing the guarded pointer after \p release() is dangerous.
1175 void release() CDS_NOEXCEPT
1181 // For internal use only!!!
1182 void reset(guarded_type * p) CDS_NOEXCEPT
1196 guard_ = dhp::smr::tls()->hazards_.alloc();
1202 dhp::smr::tls()->hazards_.free( guard_ );
1215 /// Initializes %DHP memory manager singleton
1217 Constructor creates and initializes %DHP global object.
1218 %DHP object should be created before using CDS data structure based on \p %cds::gc::DHP. Usually,
1219 it is created in the beginning of \p main() function.
1220 After creating of global object you may use CDS data structures based on \p %cds::gc::DHP.
1222 \p nInitialThreadGuardCount - initial count of guard allocated for each thread.
1223 When a thread is initialized the GC allocates local guard pool for the thread from a common guard pool.
1224 If needed, the local thread's guard pool is grown automatically from the common pool.
1225 When the thread terminates, its guard pool is returned to the common GC pool.
1228 size_t nInitialHazardPtrCount = 16 ///< Initial number of hazard pointer per thread
1231 dhp::smr::construct( nInitialHazardPtrCount );
1234 /// Destroys %DHP memory manager
1236 The destructor destroys %DHP global object. After calling of this function you may \b NOT
1237 use CDS data structures based on \p %cds::gc::DHP.
1238 Usually, %DHP object is destroyed at the end of your \p main().
1242 dhp::GarbageCollector::destruct( true );
1245 /// Checks if count of hazard pointer is no less than \p nCountNeeded
1247 The function always returns \p true since the guard count is unlimited for
1248 \p %gc::DHP garbage collector.
1250 static CDS_CONSTEXPR bool check_available_guards(
1251 #ifdef CDS_DOXYGEN_INVOKED
1252 size_t nCountNeeded,
1261 /// Set memory management functions
1263 @note This function may be called <b>BEFORE</b> creating an instance
1264 of Dynamic Hazard Pointer SMR
1266 SMR object allocates some memory for thread-specific data and for creating SMR object.
1267 By default, a standard \p new and \p delete operators are used for this.
1269 static void set_memory_allocator(
1270 void* ( *alloc_func )( size_t size ), ///< \p malloc() function
1271 void( *free_func )( void * p ) ///< \p free() function
1274 dhp::smr::set_memory_allocator( alloc_func, free_func );
1277 /// Retire pointer \p p with function \p pFunc
1279 The function places pointer \p p to array of pointers ready for removing.
1280 (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
1281 \p func is a disposer: when \p p can be safely removed, \p func is called.
1283 template <typename T>
1284 static void retire( T * p, void (* func)(T *))
1286 dhp::thread_data* rec = dhp::smr::tls();
1287 if ( !rec->retired_.push( dhp::retired_ptr( p, func ) ) )
1288 dhp::smr::instance().scan( rec );
1291 /// Retire pointer \p p with functor of type \p Disposer
1293 The function places pointer \p p to array of pointers ready for removing.
1294 (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
1296 Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is:
1298 template <typename T>
1300 void operator()( T * p ) ; // disposing operator
1303 Since the functor call can happen at any time after \p retire() call, additional restrictions are imposed to \p Disposer type:
1304 - it should be stateless functor
1305 - it should be default-constructible
1306 - the result of functor call with argument \p p should not depend on where the functor will be called.
1309 Operator \p delete functor:
1311 template <typename T>
1313 void operator ()( T * p ) {
1318 // How to call HP::retire method
1321 // ... use p in lock-free manner
1323 cds::gc::DHP::retire<disposer>( p ) ; // place p to retired pointer array of DHP SMR
1326 Functor based on \p std::allocator :
1328 template <typename Alloc = std::allocator<int> >
1330 template <typename T>
1331 void operator()( T * p ) {
1332 typedef typename Alloc::template rebind<T>::other alloc_t;
1335 a.deallocate( p, 1 );
1340 template <class Disposer, typename T>
1341 static void retire( T * p )
1343 if ( !dhp::smr::tls()->retired_.push( dhp::retired_ptr( p, cds::details::static_functor<Disposer, T>::call )))
1347 /// Checks if Dynamic Hazard Pointer GC is constructed and may be used
1348 static bool isUsed()
1350 return dhp::smr::isUsed();
1353 /// Forced GC cycle call for current thread
1355 Usually, this function should not be called directly.
1359 dhp::smr::instance().scan( dhp::smr::tls() );
1362 /// Synonym for \p scan()
1363 static void force_dispose()
1369 }} // namespace cds::gc
1371 #endif // #ifndef CDSLIB_GC_DHP_SMR_H