typedef retired_vector_impl::iterator iterator;
/// Constructor
- retired_vector( const cds::gc::hp::GarbageCollector& HzpMgr ); // inline
+ explicit retired_vector( const cds::gc::hp::GarbageCollector& HzpMgr ); // inline
~retired_vector()
{}
char padding2[cds::c_nCacheLineSize];
/// Ctor
- hp_record( const cds::gc::hp::GarbageCollector& HzpMgr ); // inline
+ explicit hp_record( const cds::gc::hp::GarbageCollector& HzpMgr ); // inline
~hp_record()
{}
{
hplist_node * m_pNextNode; ///< next hazard ptr record in list
atomics::atomic<OS::ThreadId> m_idOwner; ///< Owner thread id; 0 - the record is free (not owned)
- atomics::atomic<bool> m_bFree; ///< true if record if free (not owned)
+ atomics::atomic<bool> m_bFree; ///< true if record is free (not owned)
//@cond
- hplist_node( const GarbageCollector& HzpMgr )
+ explicit hplist_node( const GarbageCollector& HzpMgr )
: hp_record( HzpMgr ),
m_pNextNode( nullptr ),
m_idOwner( OS::c_NullThreadId ),
m_bFree( true )
{}
+ hplist_node( const GarbageCollector& HzpMgr, OS::ThreadId owner )
+ : hp_record( HzpMgr ),
+ m_pNextNode( nullptr ),
+ m_idOwner( owner ),
+ m_bFree( false )
+ {}
+
~hplist_node()
{
assert( m_idOwner.load( atomics::memory_order_relaxed ) == OS::c_NullThreadId );
~GarbageCollector();
/// Allocate new HP record
- hplist_node * NewHPRec();
+ hplist_node * NewHPRec( OS::ThreadId owner );
/// Permanently deletes HPrecord \p pNode
/**
/// Allocates an auxiliary node; can return \p nullptr if the table is exhausted
aux_node_type* alloc_aux_node()
{
+ aux_node_segment* aux_segment = m_auxNodeList.load( memory_model::memory_order_acquire );
+
for ( ;; ) {
- aux_node_segment* aux_segment = m_auxNodeList.load( memory_model::memory_order_relaxed );
assert( aux_segment != nullptr );
// try to allocate from current aux segment
aux_node_segment* new_aux_segment = allocate_aux_segment();
new_aux_segment->next_segment = aux_segment;
new_aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
- if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_relaxed, atomics::memory_order_relaxed ))
+ // Compiler barrier: the new segment must be fully initialized before the CAS below publishes it
+ CDS_COMPILER_RW_BARRIER;
+
+ if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_release, atomics::memory_order_acquire ))
return new( new_aux_segment->segment()) aux_node_type();
free_aux_segment( new_aux_segment );
, m_idOwner( cds::OS::c_NullThreadId )
{}
+ explicit thread_list_record( OS::ThreadId owner )
+ : m_pNext( nullptr )
+ , m_idOwner( owner )
+ {}
+
~thread_list_record()
{}
};
// No records available for reuse
// Allocate and push a new record
- pRec = allocator_type().New();
- pRec->m_list.m_idOwner.store( curThreadId, atomics::memory_order_relaxed );
+ pRec = allocator_type().New( curThreadId );
+ // Compiler barrier: the new record must be completely constructed before it is linked into the list below
+ CDS_COMPILER_RW_BARRIER;
thread_record * pOldHead = m_pHead.load( atomics::memory_order_acquire );
do {
pRec->m_list.m_pNext = pOldHead;
+ // Compiler barrier: assignment above MUST BE inside the loop
+ CDS_COMPILER_RW_BARRIER;
} while ( !m_pHead.compare_exchange_weak( pOldHead, pRec, atomics::memory_order_acq_rel, atomics::memory_order_acquire ));
return pRec;
atomics::atomic<uint32_t> m_nAccessControl ; \
thread_list_record< thread_data > m_list ; \
thread_data(): m_nAccessControl(0) {} \
+ explicit thread_data( OS::ThreadId owner ): m_nAccessControl(0), m_list(owner) {} \
~thread_data() {} \
}
atomics::atomic<bool> m_bNeedMemBar ; \
thread_list_record< thread_data > m_list ; \
thread_data(): m_nAccessControl(0), m_bNeedMemBar(false) {} \
+ explicit thread_data( OS::ThreadId owner ): m_nAccessControl(0), m_bNeedMemBar(false), m_list(owner) {} \
~thread_data() {} \
}
}
}
- inline GarbageCollector::hplist_node * GarbageCollector::NewHPRec()
+ inline GarbageCollector::hplist_node * GarbageCollector::NewHPRec( OS::ThreadId owner )
{
CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocNewHPRec )
- return new hplist_node( *this );
+ return new hplist_node( *this, owner );
}
inline void GarbageCollector::DeleteHPRec( hplist_node * pNode )
// No HP records available for reuse
// Allocate and push a new HP record
- hprec = NewHPRec();
- hprec->m_idOwner.store( curThreadId, atomics::memory_order_release );
- hprec->m_bFree.store( false, atomics::memory_order_release );
+ hprec = NewHPRec( curThreadId );
+ // Compiler barrier: the new HP record must be completely constructed before it is linked into the list below
+ CDS_COMPILER_RW_BARRIER;
hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
do {
hprec->m_pNextNode = pOldHead;
+ // Compiler barrier: assignment above MUST BE inside the loop
+ CDS_COMPILER_RW_BARRIER;
} while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_acq_rel, atomics::memory_order_acquire ));
return hprec;
# suppressions=<suppression_file_name>
# verbosity=n Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).
# history_size=[0..7], default 2
+# detect_deadlocks=0 - some data structures in the libcds tests use a lot of node-level mutexes.
+# TSan has a hardcoded limit of 16 mutexes per thread.
+# To prevent "possible deadlock" reports, disable deadlock detection.
+# A suppression could help in that case, but the stack unwinding it requires
+# increases test time significantly.
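+#
+# A minimal usage sketch (the suppression file name "tsan.suppr" and the test
+# binary name are illustrative placeholders, not part of this repository):
+# ThreadSanitizer picks up these options from the TSAN_OPTIONS environment
+# variable, e.g.
+#   TSAN_OPTIONS="suppressions=tsan.suppr detect_deadlocks=0 history_size=2" ./cds-unit-test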
# false: LazyList potential deadlock
deadlock:cds/intrusive/impl/lazy_list.h