//@cond
template <typename ThreadData>
struct thread_list_record {
- ThreadData * m_pNext; ///< Next item in thread list
+ atomics::atomic<ThreadData*> m_pNext; ///< Next item in thread list
atomics::atomic<OS::ThreadId> m_idOwner; ///< Owner thread id; 0 - the record is free (not owned)
thread_list_record()
cds::OS::ThreadId const curThreadId = cds::OS::get_current_thread_id();
// First, try to reuse a retired (non-active) HP record
- for ( pRec = m_pHead.load( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
+ for ( pRec = m_pHead.load( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext.load( atomics::memory_order_relaxed )) {
cds::OS::ThreadId thId = nullThreadId;
if ( !pRec->m_list.m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ))
continue;
do {
// Compiler barriers: assignment MUST BE inside the loop
CDS_COMPILER_RW_BARRIER;
- pRec->m_list.m_pNext = pOldHead;
+ pRec->m_list.m_pNext.store( pOldHead, atomics::memory_order_relaxed );
CDS_COMPILER_RW_BARRIER;
} while ( !m_pHead.compare_exchange_weak( pOldHead, pRec, atomics::memory_order_acq_rel, atomics::memory_order_acquire ));
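// Illustrative sketch, not part of the patch: the lock-free push-to-head pattern used
// above, with the next pointer held in a relaxed atomic instead of relying on compiler
// barriers. The node/list_head names are simplified stand-ins, not the library's types.
#include <atomic>

struct node {
    std::atomic<node*> next{ nullptr };
};

void push_front( std::atomic<node*>& list_head, node* rec )
{
    node* old_head = list_head.load( std::memory_order_relaxed );
    do {
        // The store must stay inside the loop: when the CAS fails, old_head is
        // refreshed and rec->next has to be re-linked to the new head value.
        rec->next.store( old_head, std::memory_order_relaxed );
    } while ( !list_head.compare_exchange_weak( old_head, rec,
                std::memory_order_acq_rel, std::memory_order_acquire ));
}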
thread_record * pNext = nullptr;
cds::OS::ThreadId const nullThreadId = cds::OS::c_NullThreadId;
- for ( thread_record * pRec = m_pHead.load(atomics::memory_order_acquire); pRec; pRec = pNext ) {
- pNext = pRec->m_list.m_pNext;
- if ( pRec->m_list.m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
+ for ( thread_record * pRec = m_pHead.load( atomics::memory_order_acquire ); pRec; pRec = pNext ) {
+ pNext = pRec->m_list.m_pNext.load( atomics::memory_order_relaxed );
+ if ( pRec->m_list.m_idOwner.load( atomics::memory_order_relaxed ) != nullThreadId ) {
retire( pRec );
}
}
thread_record * p = m_pHead.exchange( nullptr, atomics::memory_order_acquire );
while ( p ) {
- thread_record * pNext = p->m_list.m_pNext;
+ thread_record * pNext = p->m_list.m_pNext.load( atomics::memory_order_relaxed );
assert( p->m_list.m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
|| p->m_list.m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
++itRetired;
}
vect.clear();
- pNext = hprec->m_pNextNode;
+ pNext = hprec->m_pNextNode.load( atomics::memory_order_relaxed );
hprec->m_bFree.store( true, atomics::memory_order_relaxed );
DeleteHPRec( hprec );
}
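// Illustrative sketch, not part of the patch: the shutdown teardown used above - detach
// the whole list with one exchange, then walk and delete it privately with relaxed loads,
// since no other thread can reach the detached nodes. list_node is a simplified stand-in.
#include <atomic>

struct list_node {
    std::atomic<list_node*> next{ nullptr };
};

void destroy_all( std::atomic<list_node*>& head )
{
    // acquire pairs with the release done by the CAS that published each node
    list_node* p = head.exchange( nullptr, std::memory_order_acquire );
    while ( p ) {
        list_node* next = p->next.load( std::memory_order_relaxed );
        delete p;
        p = next;
    }
}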
const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
// First try to reuse a retired (non-active) HP record
- for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
+ for ( hprec = m_pListHead.load( atomics::memory_order_relaxed ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed )) {
cds::OS::ThreadId thId = nullThreadId;
- if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ))
+ if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed ))
continue;
- hprec->m_bFree.store( false, atomics::memory_order_release );
+ hprec->m_bFree.store( false, atomics::memory_order_relaxed );
return hprec;
}
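// Illustrative sketch, not part of the patch: claiming a free record by CAS-ing its
// owner id from "no owner" to the current thread, as in the reuse loop above. The
// hp_record type and the use of std::thread::id are simplified assumptions.
#include <atomic>
#include <thread>

struct hp_record {
    std::atomic<std::thread::id> owner{};      // default id means "record is free"
    std::atomic<hp_record*>      next{ nullptr };
    std::atomic<bool>            is_free{ true };
};

hp_record* try_reuse( std::atomic<hp_record*>& head )
{
    std::thread::id const cur = std::this_thread::get_id();
    for ( hp_record* rec = head.load( std::memory_order_relaxed ); rec;
          rec = rec->next.load( std::memory_order_relaxed ))
    {
        std::thread::id expected{};   // the "free" value
        // acquire on success: the claimer must see the record's prior contents;
        // relaxed on failure: we only learn that somebody else owns the record.
        if ( rec->owner.compare_exchange_strong( expected, cur,
                 std::memory_order_acquire, std::memory_order_relaxed ))
        {
            rec->is_free.store( false, std::memory_order_relaxed );
            return rec;
        }
    }
    return nullptr;   // nothing reusable - the caller allocates and pushes a new record
}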
// Allocate and push a new HP record
hprec = NewHPRec( curThreadId );
- hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
+ hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_relaxed );
do {
- // Compiler barriers: assignment MUST BE inside the loop
- CDS_COMPILER_RW_BARRIER;
- hprec->m_pNextNode = pOldHead;
- CDS_COMPILER_RW_BARRIER;
+ hprec->m_pNextNode.store( pOldHead, atomics::memory_order_relaxed );
} while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_acq_rel, atomics::memory_order_acquire ));
return hprec;
{
hplist_node * pNext = nullptr;
const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
- for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = pNext ) {
- pNext = hprec->m_pNextNode;
+ for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_relaxed); hprec; hprec = pNext ) {
+ pNext = hprec->m_pNextNode.load( atomics::memory_order_relaxed );
if ( hprec->m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
free_hp_record( hprec );
}
// Stage 1: Scan HP list and insert non-null values in plist
- hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
+ hplist_node * pNode = m_pListHead.load(atomics::memory_order_relaxed);
while ( pNode ) {
for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
if ( hptr )
plist.push_back( hptr );
}
- pNode = pNode->m_pNextNode;
+ pNode = pNode->m_pNextNode.load( atomics::memory_order_relaxed );
}
// Sort plist to simplify the binary search over it in Stage 2
*/
// Search guarded pointers in retired array
- hplist_node * pNode = m_pListHead.load( atomics::memory_order_acquire );
+ hplist_node * pNode = m_pListHead.load( atomics::memory_order_relaxed );
{
details::retired_ptr dummyRetired;
while ( pNode ) {
- if ( !pNode->m_bFree.load( atomics::memory_order_acquire )) {
+ if ( !pNode->m_bFree.load( atomics::memory_order_relaxed )) {
for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
pRec->sync();
void * hptr = pNode->m_hzp[i].get();
}
}
}
- pNode = pNode->m_pNextNode;
+ pNode = pNode->m_pNextNode.load( atomics::memory_order_relaxed );
}
}
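// Illustrative sketch, not part of the patch: the overall scan idea behind the hunks
// above - gather every published hazard pointer, sort them, and reclaim only those
// retired pointers no thread guards. The vectors and reclaim() callback are simplified
// placeholders for the library's retired-pointer array and disposer machinery.
#include <algorithm>
#include <vector>

void scan_retired( std::vector<void*> hazard_ptrs,       // Stage 1 result: all non-null HPs
                   std::vector<void*>& retired,           // this thread's retired pointers
                   void (*reclaim)( void* ))
{
    std::sort( hazard_ptrs.begin(), hazard_ptrs.end());   // enables binary_search below

    std::vector<void*> still_guarded;
    for ( void* p : retired ) {
        if ( std::binary_search( hazard_ptrs.begin(), hazard_ptrs.end(), p ))
            still_guarded.push_back( p );                  // some thread guards p - keep it
        else
            reclaim( p );                                  // unguarded - safe to reclaim now
    }
    retired.swap( still_guarded );
}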
const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
- for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
+ for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_relaxed); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed )) {
// If m_bFree == true then hprec->m_arrRetired is empty - nothing to scan in this record
- if ( hprec->m_bFree.load(atomics::memory_order_acquire))
+ if ( hprec->m_bFree.load(atomics::memory_order_relaxed))
continue;
// Take ownership of hprec if it is not owned or its owner thread has died.
// Several threads may run HelpScan concurrently, so ownership is claimed only with atomic operations.
{
- cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_acquire);
- if ( curOwner == nullThreadId || !cds::OS::is_thread_alive( curOwner )) {
- if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
- continue;
- }
- else {
- curOwner = nullThreadId;
- if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
+ cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_relaxed);
+ if ( curOwner == nullThreadId || !cds::OS::is_thread_alive( curOwner ) ) {
+ if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
continue;
}
+ else
+ continue;
+ //else {
+ // curOwner = nullThreadId;
+ // if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed ))
+ // continue;
+ //}
}
// We have taken ownership of the record. Now we can check whether it holds retired pointers.
src.clear();
CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
- hprec->m_bFree.store(true, atomics::memory_order_release);
+ hprec->m_bFree.store(true, atomics::memory_order_relaxed);
hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );
Scan( pThis );
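// Illustrative sketch, not part of the patch: the HelpScan step shown above - adopt HP
// records whose owner thread has exited, move their retired pointers into the helper's
// own record, then run an ordinary scan. help_record and is_thread_alive() are
// simplified placeholders for the library's hplist_node and cds::OS::is_thread_alive().
#include <atomic>
#include <thread>
#include <vector>

struct help_record {
    std::atomic<std::thread::id> owner{};
    std::atomic<bool>            is_free{ true };
    std::vector<void*>           retired;
    std::atomic<help_record*>    next{ nullptr };
};

// Placeholder stub: the real liveness check is platform specific.
static bool is_thread_alive( std::thread::id ) { return false; }

void help_scan( help_record* my_rec, std::atomic<help_record*>& head )
{
    std::thread::id const cur = std::this_thread::get_id();
    for ( help_record* rec = head.load( std::memory_order_relaxed ); rec;
          rec = rec->next.load( std::memory_order_relaxed ))
    {
        if ( rec == my_rec || rec->is_free.load( std::memory_order_relaxed ))
            continue;                     // a free record has no retired pointers to adopt

        std::thread::id owner = rec->owner.load( std::memory_order_relaxed );
        if ( owner != std::thread::id() && is_thread_alive( owner ))
            continue;                     // owner still runs - it will scan its own record

        // Try to take ownership; another helping thread may win the race.
        if ( !rec->owner.compare_exchange_strong( owner, cur,
                 std::memory_order_acquire, std::memory_order_relaxed ))
            continue;

        // Move the orphaned retired pointers into our record, then release the record.
        my_rec->retired.insert( my_rec->retired.end(),
                                rec->retired.begin(), rec->retired.end());
        rec->retired.clear();
        rec->is_free.store( true, std::memory_order_relaxed );
        rec->owner.store( std::thread::id(), std::memory_order_release );
    }
    // ...followed by an ordinary scan of my_rec->retired (see the scan sketch earlier).
}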
stat.nTotalRetiredPtrCount =
stat.nRetiredPtrInFreeHPRecs = 0;
- for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
+ for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_relaxed); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed )) {
++stat.nHPRecAllocated;
stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();