Replace cds::lock::scoped_lock with std::unique_lock, remove cds/lock/scoped_lock.h
diff --git a/cds/urcu/details/sig_threaded.h b/cds/urcu/details/sig_threaded.h
index 57da1681035dc47cb8e0bc52e84132a50e95fe20..2f5db2e215edcdc8453ea8f4f781c0d2e4ff58a7 100644
--- a/cds/urcu/details/sig_threaded.h
+++ b/cds/urcu/details/sig_threaded.h
@@ -3,11 +3,12 @@
 #ifndef _CDS_URCU_DETAILS_SIG_THREADED_H
 #define _CDS_URCU_DETAILS_SIG_THREADED_H
 
+#include <mutex>    // std::unique_lock
 #include <cds/urcu/details/sh.h>
 #ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
 
 #include <cds/urcu/dispose_thread.h>
-#include <cds/backoff_strategy.h>
+#include <cds/algo/backoff_strategy.h>
 #include <cds/container/vyukov_mpmc_cycle_queue.h>
 
 namespace cds { namespace urcu {
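
The include changes carry the substance of the commit: the cds/lock/scoped_lock.h compatibility header disappears and <mutex> takes its place. A minimal sketch of the RAII idiom the code migrates to (g_mutex and critical_section are illustrative names, not from this file):

    #include <mutex>

    static std::mutex g_mutex;   // hypothetical mutex, for illustration only

    void critical_section()
    {
        // std::unique_lock locks on construction and unlocks on destruction,
        // the same scoped behavior cds::lock::scoped_lock provided, plus
        // deferred, try- and timed locking when needed.
        std::unique_lock<std::mutex> lock( g_mutex );
        // ... work under the lock ...
    }
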
@@ -43,7 +44,7 @@ namespace cds { namespace urcu {
             epoch_retired_ptr
             ,cds::opt::buffer< cds::opt::v::dynamic_buffer< epoch_retired_ptr > >
         >
-        ,class Lock = cds_std::mutex
+        ,class Lock = std::mutex
         ,class DisposerThread = dispose_thread<Buffer>
         ,class Backoff = cds::backoff::Default
     >
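
With the default Lock parameter now std::mutex, any BasicLockable type can still be supplied. A sketch of what that parameterization means for users, assuming nothing beyond the template shown above (epoch_manager_sketch is hypothetical):

    #include <mutex>

    // epoch_manager_sketch is hypothetical; it only mirrors the Lock
    // parameterization of the template above.
    template <class Lock = std::mutex>
    class epoch_manager_sketch {
        Lock m_lock;
    public:
        void sync()
        {
            std::unique_lock<Lock> sl( m_lock );   // any BasicLockable works
            // ...
        }
    };

    epoch_manager_sketch<>                     g_default;    // std::mutex
    epoch_manager_sketch<std::recursive_mutex> g_recursive;  // custom lock
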
@@ -79,7 +80,7 @@ namespace cds { namespace urcu {
     protected:
         //@cond
         buffer_type                     m_Buffer;
-        CDS_ATOMIC::atomic<uint64_t>    m_nCurEpoch;
+        atomics::atomic<uint64_t>       m_nCurEpoch;
         lock_type                       m_Lock;
         size_t const                    m_nCapacity;
         disposer_thread                 m_DisposerThread;
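
CDS_ATOMIC was a macro-era indirection over the configured atomics backend; the atomics:: namespace plays the same role on top of C++11 facilities. A sketch of the equivalence, with the alias line an assumption about the library's configuration rather than a quote from it:

    #include <atomic>
    #include <cstdint>

    namespace atomics = std;   // assumption: atomics:: resolves to std::

    atomics::atomic<uint64_t> g_nCurEpoch{ 0 };

    uint64_t read_epoch()
    {
        return g_nCurEpoch.load( atomics::memory_order_acquire );
    }
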
@@ -94,7 +95,7 @@ namespace cds { namespace urcu {
         /// Checks if the singleton is created and ready to use
         static bool isUsed()
         {
-            return singleton_ptr::s_pRCU != null_ptr<singleton_vtbl *>();
+            return singleton_ptr::s_pRCU != nullptr;
         }
 
     protected:
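
null_ptr<T>() existed as a typed null literal for pre-C++11 compilers; plain nullptr now does the same job. A trivial sketch, with singleton_vtbl standing in for the real forward declaration:

    struct singleton_vtbl;                    // stand-in forward declaration
    static singleton_vtbl* s_pRCU = nullptr;

    static bool is_used_sketch()
    {
        return s_pRCU != nullptr;             // was: != null_ptr<singleton_vtbl*>()
    }
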
@@ -151,10 +152,10 @@ namespace cds { namespace urcu {
                 if ( bDetachAll )
                     pThis->m_ThreadList.detach_all();
 
-                pThis->m_DisposerThread.stop( pThis->m_Buffer, pThis->m_nCurEpoch.load( CDS_ATOMIC::memory_order_acquire ));
+                pThis->m_DisposerThread.stop( pThis->m_Buffer, pThis->m_nCurEpoch.load( atomics::memory_order_acquire ));
 
                 delete pThis;
-                singleton_ptr::s_pRCU = null_ptr<singleton_vtbl *>();
+                singleton_ptr::s_pRCU = nullptr;
             }
         }
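
The teardown order in Destroy() matters: the disposer thread is stopped with the final epoch (read with acquire) before the singleton is deleted and the global pointer is cleared. A generic sketch of that stop-then-null sequence; disposer_sketch is a hypothetical stand-in for dispose_thread, not its real API:

    #include <atomic>
    #include <cstdint>

    std::atomic<uint64_t> g_epoch{ 0 };

    struct disposer_sketch {                  // hypothetical stand-in
        void stop( uint64_t final_epoch )
        {
            // the real thread drains buffers up to final_epoch, then joins
            (void) final_epoch;
        }
    };

    disposer_sketch* s_disposer = new disposer_sketch;

    void destroy_sketch()
    {
        s_disposer->stop( g_epoch.load( std::memory_order_acquire ));  // 1) drain
        delete s_disposer;                                             // 2) tear down
        s_disposer = nullptr;                                          // 3) publish "unused"
    }
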
 
@@ -169,7 +170,7 @@ namespace cds { namespace urcu {
         virtual void retire_ptr( retired_ptr& p )
         {
             if ( p.m_p ) {
-                epoch_retired_ptr ep( p, m_nCurEpoch.load( CDS_ATOMIC::memory_order_acquire ) );
+                epoch_retired_ptr ep( p, m_nCurEpoch.load( atomics::memory_order_acquire ) );
                 push_buffer( ep );
             }
         }
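
retire_ptr tags each retired pointer with the epoch current at retirement time; the disposer frees it only once all readers have left that epoch. A reduced sketch of the tagging step (retired_sketch and the pairing are illustrative, not the library's epoch_retired_ptr layout):

    #include <atomic>
    #include <cstdint>
    #include <utility>

    std::atomic<uint64_t> g_epoch{ 0 };

    struct retired_sketch {                   // illustrative stand-in
        void* p;
        void (*free_fn)( void* );
    };

    // pair the pointer with the epoch seen at retirement; it may be freed
    // only after every reader has left that epoch
    std::pair<retired_sketch, uint64_t> tag_retired( retired_sketch r )
    {
        return { r, g_epoch.load( std::memory_order_acquire ) };
    }
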
@@ -178,7 +179,7 @@ namespace cds { namespace urcu {
         template <typename ForwardIterator>
         void batch_retire( ForwardIterator itFirst, ForwardIterator itLast )
         {
-            uint64_t nEpoch = m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed );
+            uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
             while ( itFirst != itLast ) {
                 epoch_retired_ptr p( *itFirst, nEpoch );
                 ++itFirst;
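
batch_retire reads the epoch once, with relaxed ordering, and stamps the whole batch with it, saving one acquire load per element; presumably the fence in synchronize() supplies the ordering. A sketch of the single-read batching under those assumptions:

    #include <atomic>
    #include <cstdint>
    #include <utility>
    #include <vector>

    std::atomic<uint64_t> g_epoch{ 0 };

    struct retired_sketch { void* p; void (*free_fn)( void* ); };

    template <typename ForwardIterator>
    std::vector<std::pair<retired_sketch, uint64_t>>
    batch_tag( ForwardIterator itFirst, ForwardIterator itLast )
    {
        // one relaxed read serves the whole batch
        uint64_t const nEpoch = g_epoch.load( std::memory_order_relaxed );
        std::vector<std::pair<retired_sketch, uint64_t>> out;
        for ( ; itFirst != itLast; ++itFirst )
            out.push_back( { *itFirst, nEpoch } );
        return out;
    }
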
@@ -195,11 +196,11 @@ namespace cds { namespace urcu {
         //@cond
         void synchronize( bool bSync )
         {
-            uint64_t nPrevEpoch = m_nCurEpoch.fetch_add( 1, CDS_ATOMIC::memory_order_release );
+            uint64_t nPrevEpoch = m_nCurEpoch.fetch_add( 1, atomics::memory_order_release );
 
-            CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire );
+            atomics::atomic_thread_fence( atomics::memory_order_acquire );
             {
-                cds::lock::scoped_lock<lock_type> sl( m_Lock );
+                std::unique_lock<lock_type> sl( m_Lock );
 
                 back_off bkOff;
                 base_class::force_membar_all_threads( bkOff );
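
The hunk above is the headline change: the scope guard inside synchronize() becomes std::unique_lock. A self-contained sketch of the epoch-bump pattern it guards, with the buffer flush and wait-for-readers logic elided:

    #include <atomic>
    #include <cstdint>
    #include <mutex>

    std::atomic<uint64_t> g_epoch{ 0 };
    std::mutex g_lock;

    void synchronize_sketch()
    {
        // publish a new epoch with release semantics...
        uint64_t const nPrev = g_epoch.fetch_add( 1, std::memory_order_release );
        // ...and keep later reads from moving above the bump
        std::atomic_thread_fence( std::memory_order_acquire );
        {
            std::unique_lock<std::mutex> sl( g_lock );  // was cds::lock::scoped_lock
            // force_membar_all_threads / wait-for-readers would run here
            (void) nPrev;
        }
    }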