{
thread_list_node * m_pNext ; ///< next list record
ThreadGC * m_pOwner ; ///< Owner of record
- CDS_ATOMIC::atomic<cds::OS::ThreadId> m_idOwner ; ///< Id of thread owned; 0 - record is free
+ CDS_ATOMIC::atomic<std::thread::id> m_idOwner ; ///< Owner thread id; a default-constructed id means the record is free
bool m_bFree ; ///< Node is help-scanned
//@cond
: thread_descriptor( HzpMgr ),
m_pNext( nullptr ),
m_pOwner( nullptr ),
- m_idOwner( cds::OS::nullThreadId() ),
+ m_idOwner( std::thread::id() ),
m_bFree( false )
{}
~thread_list_node()
{
assert( m_pOwner == nullptr );
- assert( m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) == cds::OS::nullThreadId() );
+ assert( m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == std::thread::id() );
}
//@endcond
};
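The pattern above replaces the old integral sentinel (`0` / `cds::OS::nullThreadId()`) with a default-constructed `std::thread::id` stored in an atomic; `std::thread::id` is trivially copyable, so `std::atomic<std::thread::id>` is well-formed. A minimal stand-alone sketch of the claim/release protocol the record relies on (the names `record` and `try_claim` are illustrative only, and plain C++11 `<atomic>`/`<thread>` is assumed instead of the `CDS_ATOMIC` macros):

#include <atomic>
#include <thread>

// Illustrative record mirroring thread_list_node's ownership protocol.
struct record {
    std::atomic<std::thread::id> owner{ std::thread::id() };   // default id == "record is free"

    // Claim a free record for the calling thread: CAS from "no owner" to our id.
    bool try_claim()
    {
        std::thread::id expected;                               // default-constructed == free
        return owner.compare_exchange_strong( expected, std::this_thread::get_id(),
            std::memory_order_acquire, std::memory_order_relaxed );
    }

    // Release the record back to the free (unowned) state.
    void release()
    {
        owner.store( std::thread::id(), std::memory_order_release );
    }
};

The alloc()/retire() and HelpScan hunks further down in the patch follow this same shape.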
struct hplist_node: public details::HPRec
{
hplist_node * m_pNextNode ; ///< next hazard ptr record in list
- CDS_ATOMIC::atomic<OS::ThreadId> m_idOwner ; ///< Owner thread id; 0 - the record is free (not owned)
+ CDS_ATOMIC::atomic<std::thread::id> m_idOwner; ///< Owner thread id; a default-constructed id means the record is free (not owned)
CDS_ATOMIC::atomic<bool> m_bFree ; ///< true if record is free (not owned)
//@cond
hplist_node( const GarbageCollector& HzpMgr )
: HPRec( HzpMgr ),
m_pNextNode(NULL),
- m_idOwner( OS::nullThreadId() ),
+ m_idOwner( std::thread::id() ),
m_bFree( true )
{}
~hplist_node()
{
- assert( m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) == OS::nullThreadId() );
+ assert( m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == std::thread::id() );
assert( m_bFree.load(CDS_ATOMIC::memory_order_relaxed) );
}
//@endcond
typedef cds::details::Allocator< lock_array_type, allocator_type > lock_array_allocator;
typedef unsigned long long owner_t;
- typedef cds::OS::ThreadId threadId_t;
+ typedef std::thread::id threadId_t;
typedef cds::lock::Spin spinlock_type;
typedef cds::lock::scoped_lock< spinlock_type > scoped_spinlock;
void acquire( size_t const * arrHash, lock_array_ptr * pLockArr, lock_type ** parrLock )
{
- owner_t me = (owner_t) cds::OS::getCurrentThreadId();
+ owner_t me = (owner_t) std::hash<std::thread::id>()( std::this_thread::get_id() );
owner_t who;
back_off bkoff;
void acquire_all()
{
- owner_t me = (owner_t) cds::OS::getCurrentThreadId();
+ owner_t me = (owner_t) std::hash<std::thread::id>()( std::this_thread::get_id() );
back_off bkoff;
while ( true ) {
void acquire_resize( lock_array_ptr * pOldLocks )
{
- owner_t me = (owner_t) cds::OS::getCurrentThreadId();
+ owner_t me = (owner_t) std::hash<std::thread::id>()( std::this_thread::get_id() );
while ( true ) {
{
protected:
//@cond
- typedef cds::OS::ThreadId tag_type;
+ typedef std::thread::id tag_type;
enum tag_value {
Available = -1,
*/
bool push( value_type& val )
{
- tag_type const curId = cds::OS::getCurrentThreadId();
+ tag_type const curId = std::this_thread::get_id();
// Insert new item at bottom of the heap
m_Lock.lock();
typedef cds::details::Allocator< lock_array_type, allocator_type > lock_array_allocator;
typedef unsigned long long owner_t;
- typedef cds::OS::ThreadId threadId_t;
+ typedef std::thread::id threadId_t;
typedef cds::lock::Spin spinlock_type;
typedef cds::lock::scoped_lock< spinlock_type > scoped_spinlock;
lock_type& acquire( size_t nHash )
{
- owner_t me = (owner_t) cds::OS::getCurrentThreadId();
+ owner_t me = (owner_t) std::hash<std::thread::id>()( std::this_thread::get_id() );
owner_t who;
back_off bkoff;
lock_array_ptr acquire_all()
{
- owner_t me = (owner_t) cds::OS::getCurrentThreadId();
+ owner_t me = (owner_t) std::hash<std::thread::id>()( std::this_thread::get_id() );
owner_t who;
back_off bkoff;
bool acquire_resize()
{
- owner_t me = (owner_t) cds::OS::getCurrentThreadId();
+ owner_t me = (owner_t) std::hash<std::thread::id>()( std::this_thread::get_id() );
back_off bkoff;
for (unsigned int nAttempts = 0; nAttempts < 32; ++nAttempts ) {
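Note that `owner_t` is `unsigned long long`, and unlike `cds::OS::ThreadId`, `std::thread::id` has no conversion to an integral type, so a plain `(owner_t)` cast of `std::this_thread::get_id()` does not compile. The hunks above therefore derive the owner token through `std::hash<std::thread::id>`, a specialization the standard library provides; this is only one possible workaround and carries the assumption that a hash value is an acceptable owner tag even though distinct threads are not guaranteed distinct hashes. A sketch (the helper name `current_owner_token` is invented for the example):

#include <functional>
#include <thread>

typedef unsigned long long owner_t;

// Illustrative helper (not part of the patch): integral owner token for the calling thread.
inline owner_t current_owner_token()
{
    // std::hash<std::thread::id> is provided by the standard library; the result is
    // not guaranteed unique per thread, which the lock-ownership scheme must tolerate.
    return (owner_t) std::hash<std::thread::id>()( std::this_thread::get_id() );
}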
class Spinlock
{
public:
- typedef Backoff backoff_strategy ; ///< back-off strategy type
+ typedef Backoff backoff_strategy; ///< back-off strategy type
private:
- CDS_ATOMIC::atomic<bool> m_spin ; ///< Spin
+ CDS_ATOMIC::atomic<bool> m_spin; ///< Spin
# ifdef CDS_DEBUG
- typename OS::ThreadId m_dbgOwnerId ; ///< Owner thread id (only for debug mode)
+ std::thread::id m_dbgOwnerId; ///< Owner thread id (only for debug mode)
# endif
public:
/// Construct free (unlocked) spin-lock
Spinlock() CDS_NOEXCEPT
# ifdef CDS_DEBUG
- :m_dbgOwnerId( OS::nullThreadId() )
+ :m_dbgOwnerId( std::thread::id() )
# endif
{
m_spin.store( false, CDS_ATOMIC::memory_order_relaxed );
*/
Spinlock( bool bLocked ) CDS_NOEXCEPT
# ifdef CDS_DEBUG
- :m_dbgOwnerId( bLocked ? OS::getCurrentThreadId() : OS::nullThreadId() )
+ :m_dbgOwnerId( bLocked ? std::this_thread::get_id() : std::thread::id() )
# endif
{
m_spin.store( bLocked, CDS_ATOMIC::memory_order_relaxed );
Spinlock(const Spinlock<Backoff>& ) CDS_NOEXCEPT
: m_spin( false )
# ifdef CDS_DEBUG
- , m_dbgOwnerId( OS::nullThreadId() )
+ , m_dbgOwnerId( std::thread::id() )
# endif
{}
CDS_DEBUG_DO(
if ( !bCurrent ) {
- m_dbgOwnerId = OS::getCurrentThreadId();
+ m_dbgOwnerId = std::this_thread::get_id();
}
)
return !bCurrent;
Backoff backoff;
// Deadlock detected
- assert( m_dbgOwnerId != OS::getCurrentThreadId() );
+ assert( m_dbgOwnerId != std::this_thread::get_id() );
// TATAS algorithm
while ( !tryLock() ) {
backoff();
}
}
- assert( m_dbgOwnerId == OS::getCurrentThreadId() );
+ assert( m_dbgOwnerId == std::this_thread::get_id() );
}
/// Unlock the spin-lock. Debug version: deadlock may be detected
{
assert( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) );
- assert( m_dbgOwnerId == OS::getCurrentThreadId() );
- CDS_DEBUG_DO( m_dbgOwnerId = OS::nullThreadId() ;)
+ assert( m_dbgOwnerId == std::this_thread::get_id() );
+ CDS_DEBUG_DO( m_dbgOwnerId = std::thread::id(); )
m_spin.store( false, CDS_ATOMIC::memory_order_release );
}
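In debug builds the spin-lock records its owner so that a second lock() from the same thread (self-deadlock) and an unlock() from a foreign thread can be caught by assertions. A condensed sketch of that pattern with `std::thread::id`, assuming plain C++11 atomics and using `std::this_thread::yield()` as a stand-in for the `Backoff` policy (the class name `debug_spinlock` is illustrative):

#include <atomic>
#include <cassert>
#include <thread>

class debug_spinlock {
    std::atomic<bool> m_spin{ false };
    std::thread::id   m_dbgOwnerId;                             // default id == unlocked

public:
    void lock()
    {
        assert( m_dbgOwnerId != std::this_thread::get_id() );   // self-deadlock detected
        // TATAS: try the exchange, then spin on a plain load until the lock looks free.
        while ( m_spin.exchange( true, std::memory_order_acquire )) {
            while ( m_spin.load( std::memory_order_relaxed ))
                std::this_thread::yield();
        }
        m_dbgOwnerId = std::this_thread::get_id();
    }

    void unlock()
    {
        assert( m_dbgOwnerId == std::this_thread::get_id() );   // only the owner may unlock
        m_dbgOwnerId = std::thread::id();
        m_spin.store( false, std::memory_order_release );
    }
};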
template <typename Integral, class Backoff>
class ReentrantSpinT
{
- typedef OS::ThreadId thread_id ; ///< The type of thread id
+ typedef std::thread::id thread_id; ///< The type of thread id
public:
typedef Integral integral_type ; ///< The integral type
private:
CDS_ATOMIC::atomic<integral_type> m_spin ; ///< spin-lock atomic
- thread_id m_OwnerId ; ///< Owner thread id. If spin-lock is not locked it usually equals to OS::nullThreadId()
+ thread_id m_OwnerId ; ///< Owner thread id. If the spin-lock is not locked it usually equals std::thread::id()
private:
//@cond
void free() CDS_NOEXCEPT
{
- m_OwnerId = OS::nullThreadId();
+ m_OwnerId = std::thread::id();
}
bool isOwned( thread_id tid ) const CDS_NOEXCEPT
/// Default constructor initializes spin to free (unlocked) state
ReentrantSpinT() CDS_NOEXCEPT
: m_spin(0)
- , m_OwnerId( OS::nullThreadId() )
+ , m_OwnerId( std::thread::id() )
{}
/// Dummy copy constructor
*/
ReentrantSpinT(const ReentrantSpinT<Integral, Backoff>& ) CDS_NOEXCEPT
: m_spin(0)
- , m_OwnerId( OS::nullThreadId() )
+ , m_OwnerId( std::thread::id() )
{}
/// Construct object for specified state
ReentrantSpinT(bool bLocked) CDS_NOEXCEPT
: m_spin(0),
- m_OwnerId( OS::nullThreadId() )
+ m_OwnerId( std::thread::id() )
{
if ( bLocked )
lock();
*/
bool is_locked() const CDS_NOEXCEPT
{
- return !( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) == 0 || isOwned( cds::OS::getCurrentThreadId() ));
+ return !(m_spin.load( CDS_ATOMIC::memory_order_relaxed ) == 0 || isOwned( std::this_thread::get_id() ));
}
/// Try to lock the spin-lock (synonym for \ref try_lock)
bool tryLock() CDS_NOEXCEPT
{
- thread_id tid = OS::getCurrentThreadId();
+ thread_id tid = std::this_thread::get_id();
if ( tryLockOwned( tid ) )
return true;
if ( tryAcquireLock()) {
CDS_NOEXCEPT_( noexcept( tryAcquireLock(nTryCount) ))
# endif
{
- thread_id tid = OS::getCurrentThreadId();
+ thread_id tid = std::this_thread::get_id();
if ( tryLockOwned( tid ) )
return true;
if ( tryAcquireLock( nTryCount )) {
/// Lock the object waits if it is busy
void lock() CDS_NOEXCEPT
{
- thread_id tid = OS::getCurrentThreadId();
+ thread_id tid = std::this_thread::get_id();
if ( !tryLockOwned( tid ) ) {
acquireLock();
beOwner( tid );
/// Unlock the spin-lock. Return @p true if the current thread is the owner of the spin-lock, @p false otherwise
bool unlock() CDS_NOEXCEPT
{
- if ( isOwned( OS::getCurrentThreadId() ) ) {
+ if ( isOwned( std::this_thread::get_id() ) ) {
integral_type n = m_spin.load( CDS_ATOMIC::memory_order_relaxed );
if ( n > 1 )
m_spin.store( n - 1, CDS_ATOMIC::memory_order_relaxed );
}
/// Change the owner of the locked spin-lock. May be called by the thread that owns the spin-lock
- bool changeOwner( OS::ThreadId newOwnerId ) CDS_NOEXCEPT
+ bool changeOwner( std::thread::id newOwnerId ) CDS_NOEXCEPT
{
- if ( isOwned( OS::getCurrentThreadId() ) ) {
- assert( newOwnerId != OS::nullThreadId() );
+ if ( isOwned( std::this_thread::get_id() ) ) {
+ assert( newOwnerId != std::thread::id() );
m_OwnerId = newOwnerId;
return true;
}
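ReentrantSpinT lets the owning thread re-acquire the lock: `m_spin` counts the recursion depth and `m_OwnerId` equals the default-constructed `std::thread::id` while the lock is free. A compact sketch of that bookkeeping; the owner field is made atomic here to keep the sketch race-free, whereas the class above stores it as a plain member, and all names are illustrative:

#include <atomic>
#include <thread>

class reentrant_spin {
    std::atomic<unsigned>        m_spin{ 0 };                     // recursion depth, 0 == free
    std::atomic<std::thread::id> m_OwnerId{ std::thread::id() };  // default id == no owner

public:
    void lock()
    {
        std::thread::id const tid = std::this_thread::get_id();
        if ( m_OwnerId.load( std::memory_order_relaxed ) == tid ) {
            m_spin.fetch_add( 1, std::memory_order_relaxed );     // re-entrant acquire: bump depth
            return;
        }
        unsigned expected = 0;
        while ( !m_spin.compare_exchange_weak( expected, 1,
                    std::memory_order_acquire, std::memory_order_relaxed )) {
            expected = 0;
            std::this_thread::yield();
        }
        m_OwnerId.store( tid, std::memory_order_relaxed );
    }

    bool unlock()
    {
        if ( m_OwnerId.load( std::memory_order_relaxed ) != std::this_thread::get_id() )
            return false;                                         // caller is not the owner
        if ( m_spin.load( std::memory_order_relaxed ) > 1 )
            m_spin.fetch_sub( 1, std::memory_order_relaxed );
        else {
            m_OwnerId.store( std::thread::id(), std::memory_order_relaxed );
            m_spin.store( 0, std::memory_order_release );
        }
        return true;
    }
};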
namespace cds { namespace OS {
/// POSIX-related wrappers
namespace posix {
- /// Posix thread id type
- typedef pthread_t ThreadId;
-
- /// Null thread id constant
- CDS_CONSTEXPR static inline ThreadId nullThreadId() { return 0 ; }
-
- /// Get current thread id
- static inline ThreadId getCurrentThreadId() { return pthread_self() ; }
-
/// Checks if thread \p id is alive
- static inline bool isThreadAlive( ThreadId id )
+ static inline bool isThreadAlive( std::thread::id id )
{
// if sig is zero, error checking is performed but no signal is actually sent.
// ESRCH - No thread could be found corresponding to that specified by the given thread ID
} // namespace posix
- using posix::ThreadId;
-
- using posix::nullThreadId;
- using posix::getCurrentThreadId;
using posix::isThreadAlive;
using posix::yield;
using posix::backoff;
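isThreadAlive() is the one wrapper that does not translate cleanly: pthread_kill() needs a pthread_t, the Win32 counterpart below passes the id to ::OpenThread(), which needs a DWORD, and the standard provides no way to recover a native handle from a `std::thread::id`. The native handle is only reachable from the `std::thread` object itself, so an id-only isThreadAlive( std::thread::id ) either needs a side table of native handles or has to keep the OS-specific id type; the same concern applies to raise_signal() further down, which forwards the id to pthread_kill(). A sketch of the alive-check when the `std::thread` object is still at hand, assuming a pthreads-based implementation where `native_handle_type` is `pthread_t`:

#include <cerrno>
#include <signal.h>
#include <thread>

// Illustrative only: "null signal" liveness probe via the std::thread object.
bool is_thread_alive( std::thread& t )
{
    // sig == 0: no signal is sent, only error checking is performed;
    // ESRCH means no thread corresponds to the given handle.
    return pthread_kill( t.native_handle(), 0 ) != ESRCH;
}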
#ifndef __CDS_OS_THREAD_H
#define __CDS_OS_THREAD_H
+#include <thread>
#include <cds/details/defs.h>
#if CDS_OS_TYPE == CDS_OS_WIN32 || CDS_OS_TYPE == CDS_OS_WIN64 || CDS_OS_TYPE == CDS_OS_MINGW
/// Windows-specific functions
namespace Win32 {
- /// OS-specific type of thread identifier
- typedef DWORD ThreadId;
-
- /// OS-specific type of thread handle
- typedef HANDLE ThreadHandle;
-
- /// Get null thread id
- CDS_CONSTEXPR static inline ThreadId nullThreadId()
- {
- return 0;
- }
-
- /// Get current thread id
- static inline ThreadId getCurrentThreadId()
- {
- return ::GetCurrentThreadId();
- }
-
/// Tests whether the thread is alive
- static inline bool isThreadAlive( ThreadId id )
+ static inline bool isThreadAlive( std::thread::id id )
{
HANDLE h = ::OpenThread( SYNCHRONIZE, FALSE, id );
if ( h == NULL )
}
} // namespace Win32
- using Win32::ThreadId;
- using Win32::ThreadHandle;
-
- using Win32::nullThreadId;
- using Win32::getCurrentThreadId;
using Win32::isThreadAlive;
using Win32::yield;
using Win32::backoff;
template <typename ThreadData>
struct thread_list_record {
ThreadData * m_pNext ; ///< Next item in thread list
- CDS_ATOMIC::atomic<OS::ThreadId> m_idOwner ; ///< Owner thread id; 0 - the record is free (not owned)
+ CDS_ATOMIC::atomic<std::thread::id> m_idOwner; ///< Owner thread id; 0 - the record is free (not owned)
thread_list_record()
: m_pNext( nullptr )
- , m_idOwner( cds::OS::nullThreadId() )
+ , m_idOwner( std::thread::id() )
{}
~thread_list_record()
thread_record * alloc()
{
thread_record * pRec;
- cds::OS::ThreadId const nullThreadId = cds::OS::nullThreadId();
- cds::OS::ThreadId const curThreadId = cds::OS::getCurrentThreadId();
+ std::thread::id const nullThreadId = std::thread::id();
+ std::thread::id const curThreadId = std::this_thread::get_id();
// First try to reuse a retired (non-active) HP record
for ( pRec = m_pHead.load( CDS_ATOMIC::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
- cds::OS::ThreadId thId = nullThreadId;
+ std::thread::id thId = nullThreadId;
if ( !pRec->m_list.m_idOwner.compare_exchange_strong( thId, curThreadId, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ) )
continue;
return pRec;
void retire( thread_record * pRec )
{
assert( pRec != nullptr );
- pRec->m_list.m_idOwner.store( cds::OS::nullThreadId(), CDS_ATOMIC::memory_order_release );
+ pRec->m_list.m_idOwner.store( std::thread::id(), CDS_ATOMIC::memory_order_release );
}
void detach_all()
{
thread_record * pNext = nullptr;
- cds::OS::ThreadId const nullThreadId = cds::OS::nullThreadId();
+ std::thread::id const nullThreadId = std::thread::id();
for ( thread_record * pRec = m_pHead.load(CDS_ATOMIC::memory_order_acquire); pRec; pRec = pNext ) {
pNext = pRec->m_list.m_pNext;
void destroy()
{
allocator_type al;
- CDS_DEBUG_DO( cds::OS::ThreadId const nullThreadId = cds::OS::nullThreadId() ;)
- CDS_DEBUG_DO( cds::OS::ThreadId const mainThreadId = cds::OS::getCurrentThreadId() ;)
+ CDS_DEBUG_DO( std::thread::id const nullThreadId = std::thread::id(); )
+ CDS_DEBUG_DO( std::thread::id const mainThreadId = std::this_thread::get_id(); )
thread_record * p = m_pHead.exchange( nullptr, CDS_ATOMIC::memory_order_seq_cst );
while ( p ) {
template <class Backoff>
inline void gp_singleton<RCUtag>::flip_and_wait( Backoff& bkoff )
{
- OS::ThreadId const nullThreadId = OS::nullThreadId();
+ std::thread::id const nullThreadId = std::thread::id();
m_nGlobalControl.fetch_xor( general_purpose_rcu::c_nControlBit, CDS_ATOMIC::memory_order_seq_cst );
for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
}
template <typename RCUtag>
- inline void sh_singleton<RCUtag>::raise_signal( cds::OS::ThreadId tid )
+ inline void sh_singleton<RCUtag>::raise_signal( std::thread::id tid )
{
pthread_kill( tid, m_nSigNo );
}
template <class Backoff>
inline void sh_singleton<RCUtag>::force_membar_all_threads( Backoff& bkOff )
{
- OS::ThreadId const nullThreadId = OS::nullThreadId();
+ std::thread::id const nullThreadId = std::thread::id();
// Send "need membar" signal to all RCU threads
for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
- OS::ThreadId tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire);
+ std::thread::id tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire);
if ( tid != nullThreadId ) {
pRec->m_bNeedMemBar.store( true, CDS_ATOMIC::memory_order_release );
raise_signal( tid );
// Wait while all RCU threads process the signal
for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
- OS::ThreadId tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire);
+ std::thread::id tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire);
if ( tid != nullThreadId ) {
bkOff.reset();
while ( (tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire )) != nullThreadId
template <class Backoff>
void sh_singleton<RCUtag>::wait_for_quiescent_state( Backoff& bkOff )
{
- OS::ThreadId const nullThreadId = OS::nullThreadId();
+ std::thread::id const nullThreadId = std::thread::id();
for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
while ( pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire) != nullThreadId && check_grace_period( pRec ))
void set_signal_handler();
void clear_signal_handler();
static void signal_handler( int signo, siginfo_t * sigInfo, void * context );
- void raise_signal( cds::OS::ThreadId tid );
+ void raise_signal( std::thread::id tid );
template <class Backoff>
void force_membar_all_threads( Backoff& bkOff );
# endif
#endif
-static cds::OS::ThreadId s_MainThreadId = 0;
-static HINSTANCE s_DllInstance = NULL;
+static std::thread::id s_MainThreadId;
+static HINSTANCE s_DllInstance = NULL;
#if _WIN32_WINNT < 0x0601
// For Windows below Windows 7
switch ( fdwReason ) {
case DLL_PROCESS_ATTACH:
s_DllInstance = hinstDLL;
- s_MainThreadId = cds::OS::getCurrentThreadId();
+ s_MainThreadId = std::this_thread::get_id();
#if _WIN32_WINNT < 0x0601
discover_topology();
#endif
{
thread_list_node * pNode = m_pListHead.load( CDS_ATOMIC::memory_order_relaxed );
while ( pNode ) {
- assert( pNode->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) == cds::OS::nullThreadId() );
+ assert( pNode->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == std::thread::id() );
clearHRCThreadDesc( pNode );
thread_list_node * pNext = pNode->m_pNext;
deleteHRCThreadDesc( pNode );
GarbageCollector::thread_list_node * GarbageCollector::getHRCThreadDescForCurrentThread() const
{
thread_list_node * hprec;
- const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
+ const std::thread::id curThreadId = std::this_thread::get_id();
for ( hprec = m_pListHead.load( CDS_ATOMIC::memory_order_acquire ); hprec; hprec = hprec->m_pNext ) {
if ( hprec->m_idOwner.load( CDS_ATOMIC::memory_order_acquire ) == curThreadId ) {
CDS_HRC_STATISTIC( ++m_Stat.m_AllocHRCThreadDesc );
thread_list_node * hprec;
- const cds::OS::ThreadId nullThreadId = cds::OS::nullThreadId();
- const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
+ const std::thread::id nullThreadId = std::thread::id();
+ const std::thread::id curThreadId = std::this_thread::get_id();
// First try to reuse a retired (non-active) HP record
for ( hprec = m_pListHead.load( CDS_ATOMIC::memory_order_acquire ); hprec; hprec = hprec->m_pNext ) {
- cds::OS::ThreadId expectedThreadId = nullThreadId;
+ std::thread::id expectedThreadId = nullThreadId;
if ( !hprec->m_idOwner.compare_exchange_strong( expectedThreadId, curThreadId, CDS_ATOMIC::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed ) )
continue;
hprec->m_pOwner = pThreadGC;
assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() );
/*
It is possible that
- pNode->m_idOwner.value() != cds::OS::getCurrentThreadId()
+ pNode->m_idOwner.value() != std::this_thread::get_id()
if the thread object is destroyed by a destructor
after the thread has terminated
*/
- assert( pNode->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) != cds::OS::nullThreadId() );
+ assert( pNode->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) != std::thread::id() );
pNode->m_pOwner = nullptr;
- pNode->m_idOwner.store( cds::OS::nullThreadId(), CDS_ATOMIC::memory_order_release );
+ pNode->m_idOwner.store( std::thread::id(), CDS_ATOMIC::memory_order_release );
assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() );
}
typedef std::vector< ContainerNode * > hazard_ptr_list;
details::thread_descriptor * pRec = pThreadGC->m_pDesc;
- assert( static_cast< thread_list_node *>( pRec )->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) == cds::OS::getCurrentThreadId() );
+ assert( static_cast< thread_list_node *>(pRec)->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == std::this_thread::get_id() );
// Step 1: mark all pRec->m_arrRetired items as "traced"
{
CDS_HRC_STATISTIC( ++m_Stat.m_HelpScanCalls );
- const cds::OS::ThreadId nullThreadId = cds::OS::nullThreadId();
- const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
+ const std::thread::id nullThreadId = std::thread::id();
+ const std::thread::id curThreadId = std::this_thread::get_id();
for ( thread_list_node * pRec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_pNext )
{
// If threadDesc is free then own it
- cds::OS::ThreadId expectedThreadId = nullThreadId;
+ std::thread::id expectedThreadId = nullThreadId;
if ( !pRec->m_idOwner.compare_exchange_strong(expectedThreadId, curThreadId, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed) )
{
continue;
{
CDS_HRC_STATISTIC( ++m_Stat.m_CleanUpAllCalls );
- //const cds::OS::ThreadId nullThreadId = cds::OS::nullThreadId();
+ //const std::thread::id nullThreadId = std::thread::id();
thread_list_node * pThread = m_pListHead.load(CDS_ATOMIC::memory_order_acquire);
while ( pThread ) {
for ( size_t i = 0; i < pThread->m_arrRetired.capacity(); ++i ) {
GarbageCollector::~GarbageCollector()
{
- CDS_DEBUG_DO( const cds::OS::ThreadId nullThreadId = cds::OS::nullThreadId() ;)
- CDS_DEBUG_DO( const cds::OS::ThreadId mainThreadId = cds::OS::getCurrentThreadId() ;)
+ CDS_DEBUG_DO( const std::thread::id nullThreadId = std::thread::id(); )
+ CDS_DEBUG_DO( const std::thread::id mainThreadId = std::this_thread::get_id(); )
hplist_node * pHead = m_pListHead.load( CDS_ATOMIC::memory_order_relaxed );
m_pListHead.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec );
hplist_node * hprec;
- const cds::OS::ThreadId nullThreadId = cds::OS::nullThreadId();
- const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
+ const std::thread::id nullThreadId = std::thread::id();
+ const std::thread::id curThreadId = std::this_thread::get_id();
// First try to reuse a retired (non-active) HP record
for ( hprec = m_pListHead.load( CDS_ATOMIC::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
- cds::OS::ThreadId thId = nullThreadId;
+ std::thread::id thId = nullThreadId;
if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ) )
continue;
hprec->m_bFree.store( false, CDS_ATOMIC::memory_order_release );
pRec->clear();
Scan( pRec );
hplist_node * pNode = static_cast<hplist_node *>( pRec );
- pNode->m_idOwner.store( cds::OS::nullThreadId(), CDS_ATOMIC::memory_order_release );
+ pNode->m_idOwner.store( std::thread::id(), CDS_ATOMIC::memory_order_release );
}
void GarbageCollector::detachAllThread()
{
hplist_node * pNext = NULL;
- const cds::OS::ThreadId nullThreadId = cds::OS::nullThreadId();
+ const std::thread::id nullThreadId = std::thread::id();
for ( hplist_node * hprec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); hprec; hprec = pNext ) {
pNext = hprec->m_pNextNode;
if ( hprec->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) != nullThreadId ) {
{
CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount );
- assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) == cds::OS::getCurrentThreadId() );
+ assert( static_cast<hplist_node *>(pThis)->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == std::this_thread::get_id() );
- const cds::OS::ThreadId nullThreadId = cds::OS::nullThreadId();
- const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
+ const std::thread::id nullThreadId = std::thread::id();
+ const std::thread::id curThreadId = std::this_thread::get_id();
for ( hplist_node * hprec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
// If m_bFree == true then hprec->m_arrRetired is empty - we don't need to scan it
// Own hprec if it is empty.
// Several threads may work concurrently so we use atomic technique only.
{
- cds::OS::ThreadId curOwner = hprec->m_idOwner.load(CDS_ATOMIC::memory_order_acquire);
+ std::thread::id curOwner = hprec->m_idOwner.load( CDS_ATOMIC::memory_order_acquire );
if ( curOwner == nullThreadId || !cds::OS::isThreadAlive( curOwner )) {
if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
continue;
size_t nData;
CDS_ATOMIC::atomic<size_t> nEnsureCall;
CDS_ATOMIC::atomic<bool> bInitialized;
- cds::OS::ThreadId threadId ; // insert thread id
+ std::thread::id threadId; // insert thread id
typedef cds::lock::Spinlock< cds::backoff::pause > lock_type;
mutable lock_type m_access;
, nData(0)
, nEnsureCall(0)
, bInitialized( false )
- , threadId( cds::OS::getCurrentThreadId() )
+ , threadId( std::this_thread::get_id() )
{}
value_type( value_type const& s )
, nData(s.nData)
, nEnsureCall(s.nEnsureCall.load(CDS_ATOMIC::memory_order_relaxed))
, bInitialized( s.bInitialized.load(CDS_ATOMIC::memory_order_relaxed) )
- , threadId( cds::OS::getCurrentThreadId() )
+ , threadId( std::this_thread::get_id() )
{}
// boost::container::flat_map requires operator =
struct value_type {
size_t nKey;
size_t nData;
- CDS_ATOMIC::atomic<size_t> nEnsureCall;
- bool volatile bInitialized;
- cds::OS::ThreadId threadId ; // insert thread id
+ CDS_ATOMIC::atomic<size_t> nEnsureCall;
+ bool volatile bInitialized;
+ std::thread::id threadId; // insert thread id
typedef cds::lock::Spinlock< cds::backoff::pause > lock_type;
mutable lock_type m_access;
, nData(0)
, nEnsureCall(0)
, bInitialized( false )
- , threadId( cds::OS::getCurrentThreadId() )
+ , threadId( std::this_thread::get_id() )
{}
value_type( value_type const& s )
, nData(s.nData)
, nEnsureCall(s.nEnsureCall.load(CDS_ATOMIC::memory_order_relaxed))
, bInitialized( s.bInitialized )
- , threadId( cds::OS::getCurrentThreadId() )
+ , threadId( std::this_thread::get_id() )
{}
// boost::container::flat_map requires operator =
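In the test fixtures the threadId member only records which thread performed the insert. `std::thread::id` keeps that usage simple: it is default-constructible, equality-comparable, and streamable. A small stand-alone illustration (the struct name `item` is invented for the example):

#include <iostream>
#include <thread>

struct item {
    std::thread::id threadId = std::this_thread::get_id();     // insert thread id
};

int main()
{
    item i;
    if ( i.threadId == std::this_thread::get_id() )
        std::cout << "inserted by thread " << i.threadId << '\n';  // thread::id has operator<<
}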