#ifndef _CDS_URCU_DETAILS_SIG_THREADED_H
#define _CDS_URCU_DETAILS_SIG_THREADED_H
+#include <mutex> // for std::mutex (default Lock) and std::unique_lock
#include <cds/urcu/details/sh.h>
#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
#include <cds/urcu/dispose_thread.h>
-#include <cds/backoff_strategy.h>
+#include <cds/algo/backoff_strategy.h>
#include <cds/container/vyukov_mpmc_cycle_queue.h>
namespace cds { namespace urcu {
epoch_retired_ptr
,cds::opt::buffer< cds::opt::v::dynamic_buffer< epoch_retired_ptr > >
>
- ,class Lock = cds_std::mutex
+ ,class Lock = std::mutex
,class DisposerThread = dispose_thread<Buffer>
,class Backoff = cds::backoff::Default
>
protected:
//@cond
buffer_type m_Buffer;
- CDS_ATOMIC::atomic<uint64_t> m_nCurEpoch;
+ atomics::atomic<uint64_t> m_nCurEpoch;
lock_type m_Lock;
size_t const m_nCapacity;
disposer_thread m_DisposerThread;
/// Checks if the singleton is created and ready to use
static bool isUsed()
{
- return singleton_ptr::s_pRCU != null_ptr<singleton_vtbl *>();
+ return singleton_ptr::s_pRCU != nullptr;
}
protected:
if ( bDetachAll )
pThis->m_ThreadList.detach_all();
- pThis->m_DisposerThread.stop( pThis->m_Buffer, pThis->m_nCurEpoch.load( CDS_ATOMIC::memory_order_acquire ));
+ pThis->m_DisposerThread.stop( pThis->m_Buffer, pThis->m_nCurEpoch.load( atomics::memory_order_acquire ));
delete pThis;
- singleton_ptr::s_pRCU = null_ptr<singleton_vtbl *>();
+ singleton_ptr::s_pRCU = nullptr;
}
}
virtual void retire_ptr( retired_ptr& p )
{
if ( p.m_p ) {
- epoch_retired_ptr ep( p, m_nCurEpoch.load( CDS_ATOMIC::memory_order_acquire ) );
+ epoch_retired_ptr ep( p, m_nCurEpoch.load( atomics::memory_order_acquire ) );
push_buffer( ep );
}
}
template <typename ForwardIterator>
void batch_retire( ForwardIterator itFirst, ForwardIterator itLast )
{
- uint64_t nEpoch = m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed );
+ uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
while ( itFirst != itLast ) {
epoch_retired_ptr p( *itFirst, nEpoch );
++itFirst;
//@cond
void synchronize( bool bSync )
{
- uint64_t nPrevEpoch = m_nCurEpoch.fetch_add( 1, CDS_ATOMIC::memory_order_release );
+ uint64_t nPrevEpoch = m_nCurEpoch.fetch_add( 1, atomics::memory_order_release );
- CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire );
+ atomics::atomic_thread_fence( atomics::memory_order_acquire );
{
- cds::lock::scoped_lock<lock_type> sl( m_Lock );
+ std::unique_lock<lock_type> sl( m_Lock );
back_off bkOff;
base_class::force_membar_all_threads( bkOff );