#define CDSLIB_URCU_DETAILS_GPB_H
#include <mutex>
+#include <limits>
#include <cds/urcu/details/gp.h>
#include <cds/algo/backoff_strategy.h>
#include <cds/container/vyukov_mpmc_cycle_queue.h>
~general_buffered()
{
+ // std::numeric_limits<uint64_t>::max() == (uint64_t)-1: same sentinel epoch
+ // without the C-style cast. The max epoch matches every queued retired
+ // pointer, so the buffer is fully drained before destruction.
- clear_buffer( (uint64_t) -1 );
+ clear_buffer( std::numeric_limits< uint64_t >::max());
}
void flip_and_wait()
static void Destruct( bool bDetachAll = false )
{
if ( isUsed() ) {
+ // Max-epoch sentinel: reclaim every pending retired pointer before teardown.
- instance()->clear_buffer( (uint64_t) -1 );
+ instance()->clear_buffer( std::numeric_limits< uint64_t >::max());
if ( bDetachAll )
instance()->m_ThreadList.detach_all();
delete instance();
#define CDSLIB_URCU_DETAILS_GPT_H
#include <mutex> //unique_lock
+#include <limits>
#include <cds/urcu/details/gp.h>
#include <cds/urcu/dispose_thread.h>
#include <cds/algo/backoff_strategy.h>
protected:
//@cond
- buffer_type m_Buffer;
- atomics::atomic<uint64_t> m_nCurEpoch;
- lock_type m_Lock;
- size_t const m_nCapacity;
- disposer_thread m_DisposerThread;
+ // Epoch-buffered URCU state: retired pointers are tagged with m_nCurEpoch,
+ // queued in m_Buffer, and handed to m_DisposerThread for reclamation.
+ // NOTE(review): member roles inferred from their uses in this patch only -
+ // confirm against the full class definition.
+ buffer_type m_Buffer;
+ atomics::atomic<uint64_t> m_nCurEpoch;
+ lock_type m_Lock;
+ size_t const m_nCapacity;
+ disposer_thread m_DisposerThread;
//@endcond
public:
if ( bDetachAll )
pThis->m_ThreadList.detach_all();
+ // Stop with the max-epoch sentinel instead of the current epoch so the
+ // disposer thread frees every retired pointer still queued, including ones
+ // tagged with an epoch newer than the last completed one.
- pThis->m_DisposerThread.stop( pThis->m_Buffer, pThis->m_nCurEpoch.load( atomics::memory_order_acquire ));
+ pThis->m_DisposerThread.stop( pThis->m_Buffer, std::numeric_limits< uint64_t >::max());
delete pThis;
singleton_ptr::s_pRCU = nullptr;
template <typename ForwardIterator>
void batch_retire( ForwardIterator itFirst, ForwardIterator itLast )
{
+ // acquire (was relaxed): pairs with the memory_order_release fetch_add that
+ // advances m_nCurEpoch, so the retiring thread observes the epoch bump made
+ // by a concurrent synchronize().
- uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
+ uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_acquire );
while ( itFirst != itLast ) {
epoch_retired_ptr ep( *itFirst, nEpoch );
++itFirst;
template <typename Func>
void batch_retire( Func e )
{
+ // acquire (was relaxed): pairs with the release epoch increment, same as in
+ // the iterator overload of batch_retire.
- uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
+ uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_acquire );
for ( retired_ptr p{ e() }; p.m_p; ) {
epoch_retired_ptr ep( p, nEpoch );
p = e();
{
uint64_t nPrevEpoch = m_nCurEpoch.fetch_add( 1, atomics::memory_order_release );
+ // NOTE(review): the standalone fences are dropped on the assumption that
+ // locking/unlocking m_Lock already provides the needed acquire/release
+ // ordering around flip_and_wait() - confirm against the full header.
- atomics::atomic_thread_fence( atomics::memory_order_acquire );
{
std::unique_lock<lock_type> sl( m_Lock );
flip_and_wait();
flip_and_wait();
-
- m_DisposerThread.dispose( m_Buffer, nPrevEpoch, bSync );
}
- atomics::atomic_thread_fence( atomics::memory_order_release );
+
+ // Hand the buffer to the disposer thread after releasing m_Lock, so a
+ // (possibly blocking, bSync) dispose() no longer serializes concurrent
+ // synchronize() callers behind the lock.
+ m_DisposerThread.dispose( m_Buffer, nPrevEpoch, bSync );
}
void force_dispose()
{
#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
#include <mutex>
+#include <limits>
#include <cds/algo/backoff_strategy.h>
#include <cds/container/vyukov_mpmc_cycle_queue.h>
~signal_buffered()
{
+ // Max-epoch sentinel (== (uint64_t)-1, cast-free spelling): clear_buffer()
+ // reclaims every queued retired pointer regardless of epoch.
- clear_buffer( (uint64_t) -1 );
+ clear_buffer( std::numeric_limits< uint64_t >::max() );
}
void clear_buffer( uint64_t nEpoch )
static void Destruct( bool bDetachAll = false )
{
if ( isUsed() ) {
+ // Drain all pending retired pointers (max epoch matches everything).
- instance()->clear_buffer( (uint64_t) -1 );
+ instance()->clear_buffer( std::numeric_limits< uint64_t >::max());
if ( bDetachAll )
instance()->m_ThreadList.detach_all();
delete instance();
#ifndef CDSLIB_URCU_DETAILS_SIG_THREADED_H
#define CDSLIB_URCU_DETAILS_SIG_THREADED_H
-#include <mutex> //unique_lock
#include <cds/urcu/details/sh.h>
#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
+#include <mutex> //unique_lock
+#include <limits>
#include <cds/urcu/dispose_thread.h>
#include <cds/algo/backoff_strategy.h>
#include <cds/container/vyukov_mpmc_cycle_queue.h>
if ( bDetachAll )
pThis->m_ThreadList.detach_all();
+ // As in general_threaded: stop the disposer with the max-epoch sentinel so
+ // nothing queued with a newer epoch is left unreclaimed at shutdown.
- pThis->m_DisposerThread.stop( pThis->m_Buffer, pThis->m_nCurEpoch.load( atomics::memory_order_acquire ));
+ pThis->m_DisposerThread.stop( pThis->m_Buffer, std::numeric_limits< uint64_t >::max());
delete pThis;
singleton_ptr::s_pRCU = nullptr;
uint64_t nCurEpoch;
bool bQuit = false;
+ // Carries the retired pointer that dispose_buffer() could not re-queue on
+ // the previous pass (its epoch was still live and the push-back failed).
+ epoch_retired_ptr rest;
+
while ( !bQuit ) {
{
unique_lock lock( m_Mutex );
m_pBuffer = nullptr;
}
+ // A pointer deferred on the previous pass belongs to an epoch older than
+ // the epoch now being disposed, so it can be freed before the next buffer.
+ if ( rest.m_p ) {
+ assert( rest.m_nEpoch < nCurEpoch );
+ rest.free();
+ }
+
if ( pBuffer )
- dispose_buffer( pBuffer, nCurEpoch );
+ rest = dispose_buffer( pBuffer, nCurEpoch );
}
}
+ // Frees every retired pointer in pBuf whose epoch is <= nCurEpoch. A pointer
+ // from a still-live epoch is pushed back for a later pass; if that push fails
+ // (the old code silently dropped the pointer here), it is instead returned to
+ // the caller, who owns freeing it once its epoch has passed. Returns an
+ // empty epoch_retired_ptr when nothing was left over.
- void dispose_buffer( buffer_type * pBuf, uint64_t nCurEpoch )
+ epoch_retired_ptr dispose_buffer( buffer_type * pBuf, uint64_t nCurEpoch )
{
epoch_retired_ptr p;
- while ( pBuf->pop( p ) ) {
+ while ( pBuf->pop( p )) {
if ( p.m_nEpoch <= nCurEpoch ) {
p.free();
}
else {
- pBuf->push( p );
+ if ( !pBuf->push( p ))
+ return p;
break;
}
}
+ return epoch_retired_ptr();
}
//@endcond