Fix typos, no functional changes
diff --git a/cds/gc/details/dhp.h b/cds/gc/details/dhp.h
index 70e70c5d070cf0fadc6f8764f2a9ab8c4a76e94d..4491bd4d6b5477115eea163d1d12ed2646e8df00 100644
--- a/cds/gc/details/dhp.h
+++ b/cds/gc/details/dhp.h
@@ -1,14 +1,14 @@
 //$$CDS-header$$
 
-#ifndef __CDS_GC_DETAILS_DHP_H
-#define __CDS_GC_DETAILS_DHP_H
+#ifndef CDSLIB_GC_DETAILS_DHP_H
+#define CDSLIB_GC_DETAILS_DHP_H
 
 #include <mutex>        // unique_lock
-#include <cds/cxx11_atomic.h>
+#include <cds/algo/atomic.h>
 #include <cds/gc/details/retired_ptr.h>
 #include <cds/details/aligned_allocator.h>
 #include <cds/details/allocator.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
 
 #if CDS_COMPILER == CDS_COMPILER_MSVC
 #   pragma warning(push)
@@ -63,15 +63,13 @@ namespace cds { namespace gc {
 
             /// Internal guard representation
             struct guard_data {
-                typedef retired_ptr_node *      handoff_ptr ;   ///< trapped value type
-                typedef void *  guarded_ptr  ;   ///< type of value guarded
+                typedef void * guarded_ptr;  ///< type of value guarded
 
-                atomics::atomic<guarded_ptr>         pPost   ;   ///< pointer guarded
+                atomics::atomic<guarded_ptr>  pPost;       ///< pointer guarded
+                atomics::atomic<guard_data *> pGlobalNext; ///< next item of global list of allocated guards
+                atomics::atomic<guard_data *> pNextFree;   ///< pointer to the next item in global or thread-local free-list
 
-                atomics::atomic<guard_data *>     pGlobalNext ;   ///< next item of global list of allocated guards
-                atomics::atomic<guard_data *>     pNextFree   ;   ///< pointer to the next item in global or thread-local free-list
-
-                guard_data *             pThreadNext ;   ///< next item of thread's local list of guards
+                guard_data * pThreadNext; ///< next item of thread's local list of guards
 
                 guard_data() CDS_NOEXCEPT
                     : pPost( nullptr )
@@ -98,9 +96,9 @@ namespace cds { namespace gc {
             {
                 cds::details::Allocator<details::guard_data>  m_GuardAllocator    ;   ///< guard allocator
 
-                atomics::atomic<guard_data *>    m_GuardList ;       ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
-                atomics::atomic<guard_data *>    m_FreeGuardList ;   ///< Head of free guard list (linked by guard_data::pNextFree field)
-                SpinLock                m_freeListLock  ;   ///< Access to m_FreeGuardList
+                atomics::atomic<guard_data *>  m_GuardList;     ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
+                atomics::atomic<guard_data *>  m_FreeGuardList; ///< Head of free guard list (linked by guard_data::pNextFree field)
+                cds::sync::spin                m_freeListLock;  ///< Access to m_FreeGuardList
 
                 /*
                     Unfortunately, access to the list of free guards is lock-based.
@@ -117,7 +115,7 @@ namespace cds { namespace gc {
                     details::guard_data * pGuard = m_GuardAllocator.New();
 
                     // Link guard to the list
-                    // m_GuardList is accumulated list and it cannot support concurrent deletion,
+                    // m_GuardList is an accumulating list and it cannot support concurrent deletion,
                     // so the ABA problem is impossible for it
                     details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
                     do {
@@ -153,7 +151,7 @@ namespace cds { namespace gc {
                     details::guard_data * pGuard;
 
                     {
-                        std::unique_lock<SpinLock> al( m_freeListLock );
+                        std::unique_lock<cds::sync::spin> al( m_freeListLock );
                         pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
                         if ( pGuard )
                             m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
@@ -173,7 +171,7 @@ namespace cds { namespace gc {
                 {
                     pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
 
-                    std::unique_lock<SpinLock> al( m_freeListLock );
+                    std::unique_lock<cds::sync::spin> al( m_freeListLock );
                     pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                     m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
                 }
@@ -225,7 +223,7 @@ namespace cds { namespace gc {
                         pLast = p;
                     }
 
-                    std::unique_lock<SpinLock> al( m_freeListLock );
+                    std::unique_lock<cds::sync::spin> al( m_freeListLock );
                     pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
                     m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
                 }
@@ -249,7 +247,7 @@ namespace cds { namespace gc {
                 atomics::atomic<size_t>              m_nItemCount;   ///< buffer's item count
 
             public:
-                CDS_CONSTEXPR retired_ptr_buffer() CDS_NOEXCEPT
+                retired_ptr_buffer() CDS_NOEXCEPT
                     : m_pHead( nullptr )
                     , m_nItemCount(0)
                 {}
@@ -271,6 +269,21 @@ namespace cds { namespace gc {
                     return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
                 }
 
+                /// Pushes the list [pFirst, pLast] linked by the m_pNext field.
+                size_t push_list( retired_ptr_node* pFirst, retired_ptr_node* pLast, size_t nSize )
+                {
+                    assert( pFirst );
+                    assert( pLast );
+
+                    retired_ptr_node * pHead = m_pHead.load( atomics::memory_order_acquire );
+                    do {
+                        pLast->m_pNext = pHead;
+                        // pHead is changed by compare_exchange_weak
+                    } while ( !m_pHead.compare_exchange_weak( pHead, pFirst, atomics::memory_order_release, atomics::memory_order_relaxed ) );
+
+                    return m_nItemCount.fetch_add( nSize, atomics::memory_order_relaxed ) + 1;
+                }
+
                 /// Result of \ref dhp_gc_privatve "privatize" function.
                 /**
                     The \p privatize function returns retired node list as \p first and the size of that list as \p second.
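
The push_list added above is a Treiber-stack style segment splice: the caller pre-links the nodes
[pFirst, pLast] through m_pNext, and the whole chain is hooked onto the buffer head with a single
CAS loop, so retiring a batch costs one successful CAS instead of one per node. A minimal
self-contained sketch of that splice on simplified types (all names below are illustrative, not
the library's):

    #include <atomic>
    #include <cstddef>

    struct node { node* next = nullptr; };

    std::atomic<node*>  stack_head{ nullptr };
    std::atomic<size_t> item_count{ 0 };

    // Splices the pre-linked segment [first, last] onto the stack head
    // and returns the approximate new item count.
    size_t push_segment( node* first, node* last, size_t n )
    {
        node* head = stack_head.load( std::memory_order_acquire );
        do {
            last->next = head;   // head is refreshed by compare_exchange_weak on failure
        } while ( !stack_head.compare_exchange_weak( head, first,
                      std::memory_order_release, std::memory_order_relaxed ));
        return item_count.fetch_add( n, std::memory_order_relaxed ) + n;
    }

Re-linking last->next to the freshly observed head before every CAS attempt is what keeps the
segment's internal order intact no matter how many retries happen.
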
@@ -313,17 +326,17 @@ namespace cds { namespace gc {
 
                 /// Pool block
                 struct block {
-                    block *     pNext   ;   ///< next block
-                    item        items[m_nItemPerBlock]  ;   ///< item array
+                    block *     pNext;  ///< next block
+                    item        items[m_nItemPerBlock];   ///< item array
                 };
 
-                atomics::atomic<block *> m_pBlockListHead    ;   ///< head of of allocated block list
+                atomics::atomic<block *> m_pBlockListHead;   ///< head of allocated block list
 
                 // To solve the ABA problem we use an epoch-based approach
-                static const unsigned int c_nEpochCount = 4     ;   ///< Max epoch count
-                atomics::atomic<unsigned int>    m_nCurEpoch ;   ///< Current epoch
-                atomics::atomic<item *>  m_pEpochFree[c_nEpochCount]  ;   ///< List of free item per epoch
-                atomics::atomic<item *>  m_pGlobalFreeHead   ;   ///< Head of unallocated item list
+                static const unsigned int c_nEpochCount = 4;  ///< Max epoch count
+                atomics::atomic<unsigned int> m_nCurEpoch;      ///< Current epoch
+                atomics::atomic<item *>  m_pEpochFree[c_nEpochCount];   ///< List of free item per epoch
+                atomics::atomic<item *>  m_pGlobalFreeHead;     ///< Head of unallocated item list
 
                 cds::details::Allocator< block, Alloc > m_BlockAllocator    ;   ///< block allocator
 
@@ -340,7 +353,7 @@ namespace cds { namespace gc {
                         CDS_STRICT_DO( pItem->m_pNext = nullptr );
                     }
 
-                    // link new block to block list
+                    // links the new block to the block list
                     {
                         block * pHead = m_pBlockListHead.load(atomics::memory_order_acquire);
                         do {
@@ -349,7 +362,7 @@ namespace cds { namespace gc {
                         } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_relaxed ));
                     }
 
-                    // link block's items to free list
+                    // links the block's items to the free list
                     {
                         item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_acquire);
                         do {
@@ -396,7 +409,7 @@ namespace cds { namespace gc {
                     m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
                 }
 
-                /// Allocates new retired pointer
+                /// Allocates a new retired pointer
                 retired_ptr_node&  alloc()
                 {
                     unsigned int nEpoch;
@@ -434,7 +447,7 @@ namespace cds { namespace gc {
                     return node;
                 }
 
-                /// Places the list (pHead, pTail) of retired pointers to pool (frees retired pointers)
+                /// Places the list [pHead, pTail] of retired pointers into the pool (frees retired pointers)
                 /**
                     The list is linked on the m_pNextFree field
                 */
@@ -455,36 +468,45 @@ namespace cds { namespace gc {
             /// Uninitialized guard
             class guard
             {
-                friend class ThreadGC;
+                friend class dhp::ThreadGC;
             protected:
                 details::guard_data * m_pGuard ;    ///< Pointer to guard data
+
             public:
                 /// Initialize empty guard.
                 CDS_CONSTEXPR guard() CDS_NOEXCEPT
                     : m_pGuard( nullptr )
                 {}
 
-                /// The object is not copy-constructible
+                /// Copy-ctor is disabled
                 guard( guard const& ) = delete;
 
+                /// Move-ctor is disabled
+                guard( guard&& ) = delete;
+
                 /// Object destructor, does nothing
                 ~guard() CDS_NOEXCEPT
                 {}
 
+                /// Get current guarded pointer
+                void * get( atomics::memory_order order = atomics::memory_order_acquire ) const CDS_NOEXCEPT
+                {
+                    assert( m_pGuard != nullptr );
+                    return m_pGuard->pPost.load( order );
+                }
+
                 /// Guards pointer \p p
-                void set( void * p ) CDS_NOEXCEPT
+                void set( void * p, atomics::memory_order order = atomics::memory_order_release ) CDS_NOEXCEPT
                 {
                     assert( m_pGuard != nullptr );
-                    m_pGuard->pPost.store( p, atomics::memory_order_release );
-                    //CDS_COMPILER_RW_BARRIER;
+                    m_pGuard->pPost.store( p, order );
                 }
 
                 /// Clears the guard
-                void clear() CDS_NOEXCEPT
+                void clear( atomics::memory_order order = atomics::memory_order_relaxed ) CDS_NOEXCEPT
                 {
                     assert( m_pGuard != nullptr );
-                    m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
-                    CDS_STRICT_DO( CDS_COMPILER_RW_BARRIER );
+                    m_pGuard->pPost.store( nullptr, order );
                 }
 
                 /// Guards pointer \p p
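
The get()/set()/clear() trio above is only the publication half of a guard; the caller still has
to run the load / publish / re-check loop that makes the protection race-free against concurrent
removal. A generic sketch of that pattern on plain std::atomic (this shows the idea behind the
API, not the library's own protect code; the seq_cst fence is the conservative choice textbook
hazard-pointer schemes use between the publishing store and the re-check):

    #include <atomic>

    struct node { int value; };

    std::atomic<node*> shared_head{ nullptr };   // some lock-free container's pointer
    std::atomic<void*> guard_slot{ nullptr };    // stands in for guard_data::pPost

    // Returns a pointer that stays dereferenceable until unprotect() is called.
    node* protect()
    {
        node* p = shared_head.load( std::memory_order_acquire );
        while ( true ) {
            guard_slot.store( p, std::memory_order_release );        // like guard::set()
            std::atomic_thread_fence( std::memory_order_seq_cst );   // publish before re-checking
            node* q = shared_head.load( std::memory_order_acquire );
            if ( p == q )
                return p;   // the guard was visible before any later removal scan
            p = q;          // the pointer changed underneath us - retry
        }
    }

    void unprotect()
    {
        guard_slot.store( nullptr, std::memory_order_relaxed );      // like guard::clear()
    }
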
@@ -503,10 +525,9 @@ namespace cds { namespace gc {
 
             public: // for ThreadGC.
                 /*
-                    GCC cannot compile code for template versions of ThreasGC::allocGuard/freeGuard,
+                    GCC cannot compile code for template versions of ThreadGC::allocGuard/freeGuard,
                     the compiler produces error: 'cds::gc::dhp::details::guard_data* cds::gc::dhp::details::guard::m_pGuard' is protected
                     despite the fact that ThreadGC is declared as friend for guard class.
-                    We should not like to declare m_pGuard member as public one.
                     Therefore, we have to add set_guard/get_guard public functions
                 */
                 /// Set guard data
@@ -526,6 +547,18 @@ namespace cds { namespace gc {
                 {
                     return m_pGuard;
                 }
+
+                details::guard_data * release_guard() CDS_NOEXCEPT
+                {
+                    details::guard_data * p = m_pGuard;
+                    m_pGuard = nullptr;
+                    return p;
+                }
+
+                bool is_initialized() const
+                {
+                    return m_pGuard != nullptr;
+                }
             };
 
         } // namespace details
@@ -539,20 +572,12 @@ namespace cds { namespace gc {
         {
             typedef details::guard    base_class;
             friend class ThreadGC;
-
-            ThreadGC&    m_gc    ;    ///< ThreadGC object of current thread
         public:
             /// Allocates a guard from \p gc GC. \p gc must be ThreadGC object of current thread
-            Guard( ThreadGC& gc ) CDS_NOEXCEPT;
+            Guard(); // inline in dhp_impl.h
 
             /// Returns guard allocated back to pool of free guards
-            ~Guard() CDS_NOEXCEPT;    // inline after GarbageCollector
-
-            /// Returns DHP GC object
-            ThreadGC& getGC() CDS_NOEXCEPT
-            {
-                return m_gc;
-            }
+            ~Guard();    // inline in dhp_impl.h
 
             /// Guards pointer \p p
             template <typename T>
@@ -576,7 +601,6 @@ namespace cds { namespace gc {
         class GuardArray
         {
             details::guard      m_arr[Count]    ;    ///< array of guard
-            ThreadGC&           m_gc    ;            ///< ThreadGC object of current thread
             const static size_t c_nCapacity = Count ;   ///< Array capacity (equal to \p Count template parameter)
 
         public:
@@ -588,16 +612,16 @@ namespace cds { namespace gc {
 
         public:
             /// Allocates array of guards from \p gc which must be the ThreadGC object of current thread
-            GuardArray( ThreadGC& gc ) CDS_NOEXCEPT;    // inline below
-
-            /// The object is not default-constructible
-            GuardArray() = delete;
+            GuardArray();    // inline in dhp_impl.h
 
             /// The object is not copy-constructible
             GuardArray( GuardArray const& ) = delete;
 
+            /// The object is not move-constructible
+            GuardArray( GuardArray&& ) = delete;
+
             /// Returns guards allocated back to pool
-            ~GuardArray() CDS_NOEXCEPT;    // inline below
+            ~GuardArray();    // inline in dhp_impl.h
 
             /// Returns the capacity of array
             CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
@@ -605,12 +629,6 @@ namespace cds { namespace gc {
                 return c_nCapacity;
             }
 
-            /// Returns DHP ThreadGC object
-            ThreadGC& getGC() CDS_NOEXCEPT
-            {
-                return m_gc;
-            }
-
             /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
             details::guard& operator []( size_t nIndex ) CDS_NOEXCEPT
             {
@@ -668,7 +686,15 @@ namespace cds { namespace gc {
 
         public:
             /// Exception "No GarbageCollector object is created"
-            CDS_DECLARE_EXCEPTION( DHPManagerEmpty, "Global DHP GarbageCollector is NULL" );
+            class not_initialized : public std::runtime_error
+            {
+            public:
+                //@cond
+                not_initialized()
+                    : std::runtime_error( "Global DHP GarbageCollector is not initialized" )
+                {}
+                //@endcond
+            };
 
             /// Internal GC statistics
             struct InternalState
@@ -676,6 +702,7 @@ namespace cds { namespace gc {
                 size_t m_nGuardCount       ;   ///< Total guard count
                 size_t m_nFreeGuardCount   ;   ///< Count of free guard
 
+                //@cond
                 InternalState()
                     : m_nGuardCount(0)
                     , m_nFreeGuardCount(0)
@@ -688,6 +715,7 @@ namespace cds { namespace gc {
 
                     return *this;
                 }
+                //@endcond
             };
 
         private:
@@ -736,12 +764,12 @@ namespace cds { namespace gc {
 
             /// Returns pointer to GarbageCollector instance
             /**
-                If DHP GC is not initialized, \p DHPManagerEmpty exception is thrown
+                If DHP GC is not initialized, \p not_initialized exception is thrown
             */
             static GarbageCollector&   instance()
             {
                 if ( m_pManager == nullptr )
-                    throw DHPManagerEmpty();
+                    throw not_initialized();
                 return *m_pManager;
             }
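
Since not_initialized now derives from std::runtime_error instead of being macro-generated,
probing for an initialized GC is ordinary exception handling. A hypothetical caller sketch (the
public include path and the hint about constructing the GC through cds::gc::DHP are assumptions
about typical usage, not something this header prescribes):

    #include <cds/gc/dhp.h>
    #include <iostream>

    bool dhp_is_ready()
    {
        try {
            cds::gc::dhp::GarbageCollector::instance();   // throws if the DHP GC was never constructed
            return true;
        }
        catch ( cds::gc::dhp::GarbageCollector::not_initialized const& ex ) {
            std::cerr << ex.what() << "\n";   // usually fixed by creating a cds::gc::DHP object at startup
            return false;
        }
    }
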
 
@@ -886,27 +914,32 @@ namespace cds { namespace gc {
 
         public:
             /// Initializes guard \p g
-            void allocGuard( Guard& g )
+            void allocGuard( dhp::details::guard& g )
             {
                 assert( m_pList != nullptr );
-                if ( m_pFree ) {
-                    g.m_pGuard = m_pFree;
-                    m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
-                }
-                else {
-                    g.m_pGuard = m_gc.allocGuard();
-                    g.m_pGuard->pThreadNext = m_pList;
-                    m_pList = g.m_pGuard;
+                if ( !g.m_pGuard ) {
+                    if ( m_pFree ) {
+                        g.m_pGuard = m_pFree;
+                        m_pFree = m_pFree->pNextFree.load( atomics::memory_order_relaxed );
+                    }
+                    else {
+                        g.m_pGuard = m_gc.allocGuard();
+                        g.m_pGuard->pThreadNext = m_pList;
+                        m_pList = g.m_pGuard;
+                    }
                 }
             }
 
             /// Frees guard \p g
-            void freeGuard( Guard& g )
+            void freeGuard( dhp::details::guard& g )
             {
                 assert( m_pList != nullptr );
-                g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
-                g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
-                m_pFree = g.m_pGuard;
+                if ( g.m_pGuard ) {
+                    g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
+                    g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
+                    m_pFree = g.m_pGuard;
+                    g.m_pGuard = nullptr;
+                }
             }
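
allocGuard/freeGuard above follow a common two-level pattern: reuse a guard from the thread-local
free list first, fall back to the global allocator only when that list is empty, and thread every
guard this thread ever obtained onto a per-thread list so the whole set can be returned at thread
exit. A stripped-down sketch of that pattern with hypothetical names (the real code additionally
publishes each new guard on the global guard list, as shown earlier in this header):

    struct slot {
        slot* thread_next = nullptr;   // every slot this thread has ever owned
        slot* free_next   = nullptr;   // thread-local free-list link
    };

    // Stand-in for the global guard pool (cf. m_gc.allocGuard() above).
    slot* global_alloc() { return new slot; }

    struct thread_cache {
        slot* owned = nullptr;   // plays the role of ThreadGC::m_pList
        slot* free_ = nullptr;   // plays the role of ThreadGC::m_pFree

        slot* acquire()
        {
            if ( free_ ) {                 // fast path: reuse a slot we already own
                slot* s = free_;
                free_ = s->free_next;
                return s;
            }
            slot* s = global_alloc();      // slow path: take a new slot and remember we own it
            s->thread_next = owned;
            owned = s;
            return s;
        }

        void release( slot* s )
        {
            s->free_next = free_;          // push back onto the thread-local free list
            free_ = s;
        }
    };
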
 
             /// Initializes guard array \p arr
@@ -955,37 +988,12 @@ namespace cds { namespace gc {
                 m_gc.retirePtr( p, pFunc );
             }
 
+            /// Runs the retiring cycle
             void scan()
             {
                 m_gc.scan();
             }
         };
-
-        //////////////////////////////////////////////////////////
-        // Inlines
-
-        inline Guard::Guard(ThreadGC& gc)
-            : m_gc( gc )
-        {
-            getGC().allocGuard( *this );
-        }
-        inline Guard::~Guard()
-        {
-            getGC().freeGuard( *this );
-        }
-
-        template <size_t Count>
-        inline GuardArray<Count>::GuardArray( ThreadGC& gc )
-            : m_gc( gc )
-        {
-            getGC().allocGuard( *this );
-        }
-        template <size_t Count>
-        inline GuardArray<Count>::~GuardArray()
-        {
-            getGC().freeGuard( *this );
-        }
-
     }   // namespace dhp
 }}  // namespace cds::gc
 //@endcond
@@ -994,4 +1002,4 @@ namespace cds { namespace gc {
 #   pragma warning(pop)
 #endif
 
-#endif // #ifndef __CDS_GC_DETAILS_DHP_H
+#endif // #ifndef CDSLIB_GC_DETAILS_DHP_H