{
assert( m_pThreadRec.get() == nullptr );
publication_record_type* pRec = cxx11_allocator().New();
- m_pAllocatedHead =
+ m_pAllocatedHead =
m_pHead = pRec;
m_pThreadRec.reset( pRec );
m_Stat.onCreatePubRecord();
publication_record* p = m_pHead;
bool bOpDone = false;
while ( p ) {
- switch ( p->nState.load( memory_model::memory_order_acquire ) ) {
+ switch ( p->nState.load( memory_model::memory_order_acquire )) {
case active:
if ( p->op() >= req_Operation ) {
p->nAge.store( nCurAge, memory_model::memory_order_relaxed );
- owner.fc_apply( static_cast<publication_record_type*>( p ) );
+ owner.fc_apply( static_cast<publication_record_type*>( p ));
operation_done( *p );
bOpDone = true;
}
void batch_combining( Container& owner )
{
// The thread is a combiner
- assert( !m_Mutex.try_lock() );
+ assert( !m_Mutex.try_lock());
unsigned int const nCurAge = m_nCount.fetch_add( 1, memory_model::memory_order_relaxed ) + 1;
for ( unsigned int nPass = 0; nPass < m_nCombinePassCount; ++nPass )
- owner.fc_process( begin(), end() );
+ owner.fc_process( begin(), end());
combining_pass( owner, nCurAge );
m_Stat.onCombining();
m_Stat.onPassiveWaitIteration();
// Wait while the operation is being processed
- if ( m_waitStrategy.wait( *this, *pRec ) )
+ if ( m_waitStrategy.wait( *this, *pRec ))
m_Stat.onWakeupByNotifying();
- if ( m_Mutex.try_lock() ) {
+ if ( m_Mutex.try_lock()) {
if ( pRec->op( memory_model::memory_order_acquire ) == req_Response ) {
// Operation is done
m_Mutex.unlock();
try_again:
publication_record * pPrev = m_pHead;
for ( publication_record * p = pPrev->pNext.load( memory_model::memory_order_acquire ); p; ) {
- switch ( p->nState.load( memory_model::memory_order_relaxed ) ) {
+ switch ( p->nState.load( memory_model::memory_order_relaxed )) {
case active:
if ( p->nAge.load( memory_model::memory_order_relaxed ) + m_nCompactFactor < nCurAge )
{
publication_record * pNext = p->pNext.load( memory_model::memory_order_relaxed );
if ( pPrev->pNext.compare_exchange_strong( p, pNext,
- memory_model::memory_order_acquire, atomics::memory_order_relaxed ) )
+ memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
p->nState.store( inactive, memory_model::memory_order_release );
p = pNext;
*/
bool dequeue( value_type& dest )
{
- return dequeue_with( [&dest]( value_type& src ) {
+ return dequeue_with( [&dest]( value_type& src ) {
// TSan finds a race between this read of \p src and the node_type constructor
// I think it is wrong
CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
*/
bool dequeue( value_type& dest )
{
- return dequeue_with( [&dest]( value_type& src ) {
+ return dequeue_with( [&dest]( value_type& src ) {
// TSan finds a race between this read of \p src and the node_type constructor
// I think it is wrong
CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
to the \p boost library root directory. The test projects search for \p boost libraries in:
- for 32bit: <tt>\$(BOOST_PATH)/stage/lib</tt>, <tt>\$(BOOST_PATH)/stage32/lib</tt>, and <tt>\$(BOOST_PATH)/bin</tt>.
- for 64bit: <tt>\$(BOOST_PATH)/stage64/lib</tt> and <tt>\$(BOOST_PATH)/bin</tt>.
-
+
All tests are based on the googletest framework. The following environment variables specify
where to find gtest include and library directories:
- \p GTEST_ROOT - gtest root directory. <tt>\$(GTEST_ROOT)/include</tt> specifies full path to
static value_type * gc_protect( marked_node_ptr p )
{
- return node_traits::to_value_ptr( p.ptr() );
+ return node_traits::to_value_ptr( p.ptr());
}
static void dispose_node( value_type * pVal )
{
assert( pVal != nullptr );
- typename node_builder::node_disposer()( node_traits::to_node_ptr( pVal ) );
+ typename node_builder::node_disposer()( node_traits::to_node_ptr( pVal ));
disposer()( pVal );
}
void help_remove( int nLevel, node_type* pPred, marked_node_ptr pCur )
{
if ( pCur->is_upper_level( nLevel )) {
- marked_node_ptr p( pCur.ptr() );
+ marked_node_ptr p( pCur.ptr());
typename gc::Guard hp;
marked_node_ptr pSucc = hp.protect( pCur->next( nLevel ), gc_protect );
- if ( pSucc.bits() &&
- pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
- memory_model::memory_order_acquire, atomics::memory_order_relaxed ) )
+ if ( pSucc.bits() &&
+ pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()),
+ memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
- if ( pCur->level_unlinked() ) {
- gc::retire( node_traits::to_value_ptr( pCur.ptr() ), dispose_node );
+ if ( pCur->level_unlinked()) {
+ gc::retire( node_traits::to_value_ptr( pCur.ptr()), dispose_node );
m_Stat.onEraseWhileFind();
}
}
int nCmp = 1;
for ( int nLevel = static_cast<int>( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) {
- pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred ) );
+ pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred ));
while ( true ) {
pCur = pos.guards.protect( nLevel * 2 + 1, pPred->next( nLevel ), gc_protect );
- if ( pCur.bits() ) {
+ if ( pCur.bits()) {
// pCur.bits() means that pPred is logically deleted
goto retry;
}
// pSucc contains deletion mark for pCur
pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
- if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
+ if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
goto retry;
- if ( pSucc.bits() ) {
+ if ( pSucc.bits()) {
// pCur is marked, i.e. logically deleted
// try to help deleting pCur
help_remove( nLevel, pPred, pCur );
goto retry;
}
else {
- nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
+ nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
if ( nCmp < 0 ) {
pPred = pCur.ptr();
pos.guards.copy( nLevel * 2, nLevel * 2 + 1 ); // pPrev guard := cur guard
pPred = m_Head.head();
for ( int nLevel = static_cast<int>( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) {
- pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred ) );
+ pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred ));
pCur = pos.guards.protect( nLevel * 2 + 1, pPred->next( nLevel ), gc_protect );
// pCur.bits() means that pPred is logically deleted
// head cannot be deleted
assert( pCur.bits() == 0 );
- if ( pCur.ptr() ) {
+ if ( pCur.ptr()) {
// pSucc contains deletion mark for pCur
pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
- if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
+ if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
goto retry;
- if ( pSucc.bits() ) {
+ if ( pSucc.bits()) {
// pCur is marked, i.e. logically deleted.
// try to help deleting pCur
help_remove( nLevel, pPred, pCur );
pos.pSucc[nLevel] = pCur.ptr();
}
- return ( pos.pCur = pCur.ptr() ) != nullptr;
+ return ( pos.pCur = pCur.ptr()) != nullptr;
}
bool find_max_position( position& pos )
pPred = m_Head.head();
for ( int nLevel = static_cast<int>( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) {
- pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred ) );
+ pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred ));
while ( true ) {
pCur = pos.guards.protect( nLevel * 2 + 1, pPred->next( nLevel ), gc_protect );
- if ( pCur.bits() ) {
+ if ( pCur.bits()) {
// pCur.bits() means that pPred is logically deleted
goto retry;
}
// pSucc contains deletion mark for pCur
pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
- if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
+ if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
goto retry;
- if ( pSucc.bits() ) {
+ if ( pSucc.bits()) {
// pCur is marked, i.e. logically deleted.
// try to help deleting pCur
help_remove( nLevel, pPred, pCur );
goto retry;
}
else {
- if ( !pSucc.ptr() )
+ if ( !pSucc.ptr())
break;
pPred = pCur.ptr();
- pos.guards.copy( nLevel * 2, nLevel * 2 + 1 );
+ pos.guards.copy( nLevel * 2, nLevel * 2 + 1 );
}
}
pos.pSucc[nLevel] = pCur.ptr();
}
- return ( pos.pCur = pCur.ptr() ) != nullptr;
+ return ( pos.pCur = pCur.ptr()) != nullptr;
}
bool renew_insert_position( value_type& val, node_type * pNode, position& pos )
int nCmp = 1;
for ( int nLevel = static_cast<int>( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) {
- pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred ) );
+ pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred ));
while ( true ) {
pCur = pos.guards.protect( nLevel * 2 + 1, pPred->next( nLevel ), gc_protect );
- if ( pCur.bits() ) {
+ if ( pCur.bits()) {
// pCur.bits() means that pPred is logically deleted
goto retry;
}
// pSucc contains deletion mark for pCur
pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
- if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
+ if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
goto retry;
if ( pSucc.bits()) {
// Set pNode->next
// pNode->next can have the "logically deleted" flag if another thread is removing pNode right now
if ( !pNode->next( nLevel ).compare_exchange_strong( p, pSucc,
- memory_model::memory_order_acq_rel, atomics::memory_order_acquire ) )
+ memory_model::memory_order_acq_rel, atomics::memory_order_acquire ))
{
// pNode has been marked as removed while we are inserting it
// Stop inserting
if ( pSucc.bits() == 0 ) {
bkoff.reset();
while ( !( pDel->next( nLevel ).compare_exchange_weak( pSucc, pSucc | 1,
- memory_model::memory_order_release, atomics::memory_order_acquire )
+ memory_model::memory_order_release, atomics::memory_order_acquire )
|| pSucc.bits() != 0 ))
{
bkoff();
while ( true ) {
if ( pDel->next( 0 ).compare_exchange_strong( p, p | 1, memory_model::memory_order_release, atomics::memory_order_acquire ))
{
- f( *node_traits::to_value_ptr( pDel ) );
+ f( *node_traits::to_value_ptr( pDel ));
// Physical deletion
// try fast erase
for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
pSucc = pDel->next( nLevel ).load( memory_model::memory_order_acquire );
- if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
- memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ) )
+ if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()),
+ memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ))
{
pDel->level_unlinked();
}
else {
// Make slow erase
# ifdef CDS_DEBUG
- if ( find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false ) )
+ if ( find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false ))
assert( pDel != pos.pCur );
# else
find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false );
pCur = guards.protect( 1, pPred->next( nLevel ), gc_protect );
while ( pCur != pNull ) {
- if ( pCur.bits() ) {
+ if ( pCur.bits()) {
// pPred is being removed
if ( ++attempt < 4 ) {
bkoff();
return find_fastpath_abort;
}
- if ( pCur.ptr() ) {
+ if ( pCur.ptr()) {
int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
if ( nCmp < 0 ) {
guards.copy( 0, 1 );
}
else if ( nCmp == 0 ) {
// found
- f( *node_traits::to_value_ptr( pCur.ptr() ), val );
+ f( *node_traits::to_value_ptr( pCur.ptr()), val );
return find_fastpath_found;
}
else {
bool find_slowpath( Q& val, Compare cmp, Func f )
{
position pos;
- if ( find_position( val, pos, cmp, true ) ) {
+ if ( find_position( val, pos, cmp, true )) {
assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 );
f( *node_traits::to_value_ptr( pos.pCur ), val );
template <typename Q, typename Compare, typename Func>
bool find_with_( Q& val, Compare cmp, Func f )
{
- switch ( find_fastpath( val, cmp, f ) ) {
+ switch ( find_fastpath( val, cmp, f )) {
case find_fastpath_found:
m_Stat.onFindFastSuccess();
return true;
break;
}
- if ( find_slowpath( val, cmp, f ) ) {
+ if ( find_slowpath( val, cmp, f )) {
m_Stat.onFindSlowSuccess();
return true;
}
guarded_ptr get_with_( Q const& val, Compare cmp )
{
guarded_ptr gp;
- if ( find_with_( val, cmp, [&gp]( value_type& found, Q const& ) { gp.reset( &found ); } ) )
+ if ( find_with_( val, cmp, [&gp]( value_type& found, Q const& ) { gp.reset( &found ); } ))
return gp;
return guarded_ptr();
}
{
position pos;
- if ( !find_position( val, pos, cmp, false ) ) {
+ if ( !find_position( val, pos, cmp, false )) {
m_Stat.onEraseFailed();
return false;
}
node_type * pDel = pos.pCur;
typename gc::Guard gDel;
- gDel.assign( node_traits::to_value_ptr( pDel ) );
+ gDel.assign( node_traits::to_value_ptr( pDel ));
assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 );
unsigned int nHeight = pDel->height();
- if ( try_remove_at( pDel, pos, f ) ) {
+ if ( try_remove_at( pDel, pos, f )) {
--m_ItemCounter;
m_Stat.onRemoveNode( nHeight );
m_Stat.onEraseSuccess();
guarded_ptr gp;
for (;;) {
- if ( !find_position( val, pos, cmp, false ) ) {
+ if ( !find_position( val, pos, cmp, false )) {
m_Stat.onExtractFailed();
return guarded_ptr();
}
node_type * pDel = pos.pCur;
- gp.reset( node_traits::to_value_ptr( pDel ) );
+ gp.reset( node_traits::to_value_ptr( pDel ));
assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 );
unsigned int nHeight = pDel->height();
- if ( try_remove_at( pDel, pos, []( value_type const& ) {} ) ) {
+ if ( try_remove_at( pDel, pos, []( value_type const& ) {} )) {
--m_ItemCounter;
m_Stat.onRemoveNode( nHeight );
m_Stat.onExtractSuccess();
guarded_ptr gp;
for ( ;;) {
- if ( !find_min_position( pos ) ) {
+ if ( !find_min_position( pos )) {
// The list is empty
m_Stat.onExtractMinFailed();
return guarded_ptr();
node_type * pDel = pos.pCur;
unsigned int nHeight = pDel->height();
- gp.reset( node_traits::to_value_ptr( pDel ) );
+ gp.reset( node_traits::to_value_ptr( pDel ));
- if ( try_remove_at( pDel, pos, []( value_type const& ) {} ) ) {
+ if ( try_remove_at( pDel, pos, []( value_type const& ) {} )) {
--m_ItemCounter;
m_Stat.onRemoveNode( nHeight );
m_Stat.onExtractMinSuccess();
guarded_ptr gp;
for ( ;;) {
- if ( !find_max_position( pos ) ) {
+ if ( !find_max_position( pos )) {
// The list is empty
m_Stat.onExtractMaxFailed();
return guarded_ptr();
node_type * pDel = pos.pCur;
unsigned int nHeight = pDel->height();
- gp.reset( node_traits::to_value_ptr( pDel ) );
+ gp.reset( node_traits::to_value_ptr( pDel ));
- if ( try_remove_at( pDel, pos, []( value_type const& ) {} ) ) {
+ if ( try_remove_at( pDel, pos, []( value_type const& ) {} )) {
--m_ItemCounter;
m_Stat.onRemoveNode( nHeight );
m_Stat.onExtractMaxSuccess();
void help_remove( int nLevel, node_type* pPred, marked_node_ptr pCur, marked_node_ptr pSucc, position& pos )
{
- marked_node_ptr p( pCur.ptr() );
+ marked_node_ptr p( pCur.ptr());
if ( pCur->is_upper_level( nLevel )
&& pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()),
memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
if ( pCur->level_unlinked()) {
- if ( !is_extracted( pSucc ) ) {
+ if ( !is_extracted( pSucc )) {
// We cannot free the node at this moment because RCU is locked
// Link deleted nodes to a chain to free later
- pos.dispose( pCur.ptr() );
+ pos.dispose( pCur.ptr());
m_Stat.onEraseWhileFind();
}
else
template <typename Q, typename Compare >
bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound )
{
- assert( gc::is_locked() );
+ assert( gc::is_locked());
node_type * pPred;
marked_node_ptr pSucc;
while ( true ) {
pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
- if ( pCur.bits() ) {
+ if ( pCur.bits()) {
// pCur.bits() means that pPred is logically deleted
goto retry;
}
// pSucc contains deletion mark for pCur
pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
- if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
+ if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
goto retry;
- if ( pSucc.bits() ) {
+ if ( pSucc.bits()) {
// pCur is marked, i.e. logically deleted.
help_remove( nLevel, pPred, pCur, pSucc, pos );
goto retry;
}
else {
- nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
+ nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
if ( nCmp < 0 )
pPred = pCur.ptr();
else if ( nCmp == 0 && bStopIfFound )
bool find_min_position( position& pos )
{
- assert( gc::is_locked() );
+ assert( gc::is_locked());
node_type * pPred;
marked_node_ptr pSucc;
// head cannot be deleted
assert( pCur.bits() == 0 );
- if ( pCur.ptr() ) {
+ if ( pCur.ptr()) {
// pSucc contains deletion mark for pCur
pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
- if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
+ if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
goto retry;
- if ( pSucc.bits() ) {
+ if ( pSucc.bits()) {
// pCur is marked, i.e. logically deleted.
help_remove( nLevel, pPred, pCur, pSucc, pos );
goto retry;
pos.pPrev[nLevel] = pPred;
pos.pSucc[nLevel] = pCur.ptr();
}
- return ( pos.pCur = pCur.ptr() ) != nullptr;
+ return ( pos.pCur = pCur.ptr()) != nullptr;
}
bool find_max_position( position& pos )
{
- assert( gc::is_locked() );
+ assert( gc::is_locked());
node_type * pPred;
marked_node_ptr pSucc;
while ( true ) {
pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
- if ( pCur.bits() ) {
+ if ( pCur.bits()) {
// pCur.bits() means that pPred is logically deleted
goto retry;
}
// pSucc contains deletion mark for pCur
pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
- if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
+ if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
goto retry;
- if ( pSucc.bits() ) {
+ if ( pSucc.bits()) {
// pCur is marked, i.e. logically deleted.
help_remove( nLevel, pPred, pCur, pSucc, pos );
goto retry;
}
else {
- if ( !pSucc.ptr() )
+ if ( !pSucc.ptr())
break;
pPred = pCur.ptr();
pos.pSucc[nLevel] = pCur.ptr();
}
- return ( pos.pCur = pCur.ptr() ) != nullptr;
+ return ( pos.pCur = pCur.ptr()) != nullptr;
}
bool renew_insert_position( value_type& val, node_type * pNode, position& pos )
{
- assert( gc::is_locked() );
+ assert( gc::is_locked());
node_type * pPred;
marked_node_ptr pSucc;
while ( true ) {
pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
- if ( pCur.bits() ) {
+ if ( pCur.bits()) {
// pCur.bits() means that pPred is logically deleted
goto retry;
}
// pSucc contains deletion mark for pCur
pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
- if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
+ if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
goto retry;
- if ( pSucc.bits() ) {
+ if ( pSucc.bits()) {
// pCur is marked, i.e. logically deleted.
if ( pCur.ptr() == pNode ) {
// Node is being removed while we are inserting it
goto retry;
}
else {
- nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
+ nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
if ( nCmp < 0 )
pPred = pCur.ptr();
else
template <typename Func>
bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f )
{
- assert( gc::is_locked() );
+ assert( gc::is_locked());
unsigned int const nHeight = pNode->height();
pNode->clear_tower();
// Set pNode->next
// pNode->next must be null but can have the "logically deleted" flag if another thread is removing pNode right now
if ( !pNode->next( nLevel ).compare_exchange_strong( p, pSucc,
- memory_model::memory_order_acq_rel, atomics::memory_order_acquire ) )
+ memory_model::memory_order_acq_rel, atomics::memory_order_acquire ))
{
// pNode has been marked as removed while we are inserting it
// Stop inserting
if ( !renew_insert_position( val, pNode, pos )) {
// The node has been deleted while we are inserting it
// Update current height for concurrent removal
- CDS_VERIFY_FALSE( pNode->level_unlinked( nHeight - nLevel ) );
+ CDS_VERIFY_FALSE( pNode->level_unlinked( nHeight - nLevel ));
m_Stat.onRemoveWhileInsert();
bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract )
{
assert( pDel != nullptr );
- assert( gc::is_locked() );
+ assert( gc::is_locked());
marked_node_ptr pSucc;
back_off bkoff;
}
}
- marked_node_ptr p( pDel->next( 0 ).load( memory_model::memory_order_relaxed ).ptr() );
+ marked_node_ptr p( pDel->next( 0 ).load( memory_model::memory_order_relaxed ).ptr());
while ( true ) {
if ( pDel->next( 0 ).compare_exchange_strong( p, p | nMask, memory_model::memory_order_release, atomics::memory_order_acquire ))
{
- f( *node_traits::to_value_ptr( pDel ) );
+ f( *node_traits::to_value_ptr( pDel ));
// physical deletion
// try fast erase
for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
pSucc = pDel->next( nLevel ).load( memory_model::memory_order_acquire );
- if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
- memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ) )
+ if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()),
+ memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ))
{
pDel->level_unlinked();
}
else {
// Make slow erase
# ifdef CDS_DEBUG
- if ( find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false ) )
+ if ( find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false ))
assert( pDel != pos.pCur );
# else
find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false );
m_Stat.onFastExtract();
return true;
}
- else if ( p.bits() ) {
+ else if ( p.bits()) {
// Another thread is deleting pDel right now
m_Stat.onEraseContention();
return false;
pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
while ( pCur != pNull ) {
- if ( pCur.bits() ) {
+ if ( pCur.bits()) {
// pPred is being removed
if ( ++attempt < 4 ) {
bkoff();
return find_fastpath_abort;
}
- if ( pCur.ptr() ) {
- int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
+ if ( pCur.ptr()) {
+ int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
if ( nCmp < 0 ) {
pPred = pCur.ptr();
pCur = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
}
else if ( nCmp == 0 ) {
// found
- f( *node_traits::to_value_ptr( pCur.ptr() ), val );
+ f( *node_traits::to_value_ptr( pCur.ptr()), val );
return find_fastpath_found;
}
else // pCur > val - go down
template <typename Q, typename Compare, typename Func>
bool find_slowpath( Q& val, Compare cmp, Func f, position& pos )
{
- if ( find_position( val, pos, cmp, true ) ) {
+ if ( find_position( val, pos, cmp, true )) {
assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 );
f( *node_traits::to_value_ptr( pos.pCur ), val );
{
rcu_lock l;
- switch ( find_fastpath( val, cmp, f ) ) {
+ switch ( find_fastpath( val, cmp, f )) {
case find_fastpath_found:
m_Stat.onFindFastSuccess();
return true;
break;
}
- if ( find_slowpath( val, cmp, f, pos ) ) {
+ if ( find_slowpath( val, cmp, f, pos )) {
m_Stat.onFindSlowSuccess();
bRet = true;
}
{
rcu_lock rcuLock;
- if ( !find_position( val, pos, cmp, false ) ) {
+ if ( !find_position( val, pos, cmp, false )) {
m_Stat.onEraseFailed();
bRet = false;
}
assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 );
unsigned int nHeight = pDel->height();
- if ( try_remove_at( pDel, pos, f, false ) ) {
+ if ( try_remove_at( pDel, pos, f, false )) {
--m_ItemCounter;
m_Stat.onRemoveNode( nHeight );
m_Stat.onEraseSuccess();
value_type * do_extract_key( Q const& key, Compare cmp, position& pos )
{
// RCU should be locked!!!
- assert( gc::is_locked() );
+ assert( gc::is_locked());
node_type * pDel;
- if ( !find_position( key, pos, cmp, false ) ) {
+ if ( !find_position( key, pos, cmp, false )) {
m_Stat.onExtractFailed();
pDel = nullptr;
}
unsigned int const nHeight = pDel->height();
- if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
+ if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true )) {
--m_ItemCounter;
m_Stat.onRemoveNode( nHeight );
m_Stat.onExtractSuccess();
value_type * do_extract_min()
{
- assert( !gc::is_locked() );
+ assert( !gc::is_locked());
position pos;
node_type * pDel;
{
rcu_lock l;
- if ( !find_min_position( pos ) ) {
+ if ( !find_min_position( pos )) {
m_Stat.onExtractMinFailed();
pDel = nullptr;
}
pDel = pos.pCur;
unsigned int const nHeight = pDel->height();
- if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
+ if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true )) {
--m_ItemCounter;
m_Stat.onRemoveNode( nHeight );
m_Stat.onExtractMinSuccess();
value_type * do_extract_max()
{
- assert( !gc::is_locked() );
+ assert( !gc::is_locked());
position pos;
node_type * pDel;
{
rcu_lock l;
- if ( !find_max_position( pos ) ) {
+ if ( !find_max_position( pos )) {
m_Stat.onExtractMaxFailed();
pDel = nullptr;
}
pDel = pos.pCur;
unsigned int const nHeight = pDel->height();
- if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
+ if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true )) {
--m_ItemCounter;
m_Stat.onRemoveNode( nHeight );
m_Stat.onExtractMaxSuccess();
node_type* p = m_Head.head()->next( 0 ).load( atomics::memory_order_relaxed ).ptr();
while ( p ) {
node_type* pNext = p->next( 0 ).load( atomics::memory_order_relaxed ).ptr();
- dispose_node( node_traits::to_value_ptr( p ) );
+ dispose_node( node_traits::to_value_ptr( p ));
p = pNext;
}
}
{
backoff_strategy backoff;
while ( nTryCount-- ) {
- if ( try_lock() )
+ if ( try_lock())
return true;
backoff();
}
backoff_strategy bkoff;
while ( nTryCount-- ) {
- if ( try_acquire() )
+ if ( try_acquire())
return true;
bkoff();
}
// Several threads may work concurrently, so we use only atomic operations.
{
cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_relaxed);
- if ( curOwner == nullThreadId || !cds::OS::is_thread_alive( curOwner ) ) {
- if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
+ if ( curOwner == nullThreadId || !cds::OS::is_thread_alive( curOwner )) {
+ if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed ))
continue;
}
else
// Create threads
std::vector< std::thread > threads;
- threads.reserve( m_workers.size() );
+ threads.reserve( m_workers.size());
for ( auto w : m_workers )
threads.emplace_back( &thread::run, w );
size_t Map_DelOdd::s_nFeldmanMap_HeadBits = 10;
size_t Map_DelOdd::s_nFeldmanMap_ArrayBits = 4;
-
+
size_t Map_DelOdd::s_nLoadFactor = 1;
std::vector<size_t> Map_DelOdd::m_arrElements;
m_arrElements.resize( s_nMapSize );
for ( size_t i = 0; i < s_nMapSize; ++i )
m_arrElements[i] = i;
- shuffle( m_arrElements.begin(), m_arrElements.end() );
+ shuffle( m_arrElements.begin(), m_arrElements.end());
}
void Map_DelOdd::TearDownTestCase()
template <typename Pred>
static void prepare_array( std::vector<size_t>& arr, Pred pred )
{
- arr.reserve( m_arrElements.size() );
+ arr.reserve( m_arrElements.size());
for ( auto el : m_arrElements ) {
- if ( pred( el ) )
+ if ( pred( el ))
arr.push_back( el );
}
- arr.resize( arr.size() );
- shuffle( arr.begin(), arr.end() );
+ arr.resize( arr.size());
+ shuffle( arr.begin(), arr.end());
}
protected:
{
prepare_array( m_arr, []( size_t ) -> bool { return true; } );
for ( size_t i = 0; i < m_arr.size(); ++i ) {
- if ( m_Map.insert( key_type( m_arr[i], id() ) ) )
+ if ( m_Map.insert( key_type( m_arr[i], id())))
++m_nInsertInitSuccess;
else
++m_nInsertInitFailed;
// insert pass
for ( auto el : m_arr ) {
if ( el & 1 ) {
- if ( rMap.insert( key_type( el, id() )))
+ if ( rMap.insert( key_type( el, id())))
++m_nInsertSuccess;
else
++m_nInsertFailed;
if ( el & 1 ) {
bool success;
bool inserted;
- std::tie( success, inserted ) = rMap.update( key_type( el, id() ), f );
+ std::tie( success, inserted ) = rMap.update( key_type( el, id()), f );
if ( success && inserted )
++m_nInsertSuccess;
else
else {
for ( size_t k = 0; k < nInsThreadCount; ++k ) {
for ( auto el: m_arr ) {
- if ( rMap.erase( key_type( el, k ) ) )
+ if ( rMap.erase( key_type( el, k )))
++m_nDeleteSuccess;
else
++m_nDeleteFailed;
else {
for ( size_t k = 0; k < nInsThreadCount; ++k ) {
for ( auto el: m_arr ) {
- gp = rMap.extract( key_type( el, k ) );
+ gp = rMap.extract( key_type( el, k ));
if ( gp )
++m_nDeleteSuccess;
else
for ( auto el: m_arr ) {
if ( Map::c_bExtractLockExternal ) {
typename Map::rcu_lock l;
- xp = rMap.extract( key_type( el, k ) );
+ xp = rMap.extract( key_type( el, k ));
if ( xp )
++m_nDeleteSuccess;
else
++m_nDeleteFailed;
}
else {
- xp = rMap.extract( key_type( el, k ) );
+ xp = rMap.extract( key_type( el, k ));
if ( xp )
++m_nDeleteSuccess;
else
for ( size_t k = 0; k < nInsThreadCount; ++k ) {
if ( Map::c_bExtractLockExternal ) {
typename Map::rcu_lock l;
- xp = rMap.extract( key_type( el, k ) );
+ xp = rMap.extract( key_type( el, k ));
if ( xp )
++m_nDeleteSuccess;
else
++m_nDeleteFailed;
}
else {
- xp = rMap.extract( key_type( el, k ) );
+ xp = rMap.extract( key_type( el, k ));
if ( xp )
++m_nDeleteSuccess;
else
for ( size_t i = 0; i < pool.size(); ++i ) {
cds_test::thread& thr = pool.get( i );
- switch ( thr.type() ) {
+ switch ( thr.type()) {
case inserter_thread:
{
insert_thread& inserter = static_cast<insert_thread&>( thr );
template <typename Pred>
static void prepare_array( std::vector<size_t>& arr, Pred pred )
{
- arr.reserve( m_arrData.size() );
+ arr.reserve( m_arrData.size());
for ( auto el : m_arrData ) {
- if ( pred( el ) )
+ if ( pred( el ))
arr.push_back( el );
}
- arr.resize( arr.size() );
- shuffle( arr.begin(), arr.end() );
+ arr.resize( arr.size());
+ shuffle( arr.begin(), arr.end());
}
protected:
{
prepare_array( m_arr, []( size_t ) -> bool { return true; } );
for ( size_t i = 0; i < m_arr.size(); ++i ) {
- if ( m_Set.insert( key_type( m_arr[i], id() ) ) )
+ if ( m_Set.insert( key_type( m_arr[i], id())))
++m_nInsertInitSuccess;
else
++m_nInsertInitFailed;
// insert pass
for ( auto el : m_arr ) {
if ( el & 1 ) {
- if ( rSet.insert( key_type( el, id() ) ) )
+ if ( rSet.insert( key_type( el, id())))
++m_nInsertSuccess;
else
++m_nInsertFailed;
if ( el & 1 ) {
bool success;
bool inserted;
- std::tie( success, inserted ) = rSet.update( key_type( el, id() ), update_functor() );
+ std::tie( success, inserted ) = rSet.update( key_type( el, id()), update_functor());
if ( success && inserted )
++m_nInsertSuccess;
else
if ( id() & 1 ) {
for ( auto el : m_arr ) {
for ( size_t k = 0; k < nInsThreadCount; ++k ) {
- if ( rSet.erase( key_type( el, k ) ) )
+ if ( rSet.erase( key_type( el, k )))
++m_nDeleteSuccess;
else
++m_nDeleteFailed;
else {
for ( size_t k = 0; k < nInsThreadCount; ++k ) {
for ( auto el : m_arr ) {
- if ( rSet.erase( key_type( el, k ) ) )
+ if ( rSet.erase( key_type( el, k )))
++m_nDeleteSuccess;
else
++m_nDeleteFailed;
if ( id() & 1 ) {
for ( auto el : m_arr ) {
for ( size_t k = 0; k < nInsThreadCount; ++k ) {
- gp = rSet.extract( key_type( el, k ) );
+ gp = rSet.extract( key_type( el, k ));
if ( gp )
++m_nExtractSuccess;
else
else {
for ( size_t k = 0; k < nInsThreadCount; ++k ) {
for ( auto el : m_arr ) {
- gp = rSet.extract( key_type( el, k ) );
+ gp = rSet.extract( key_type( el, k ));
if ( gp )
++m_nExtractSuccess;
else
for ( auto el : m_arr ) {
if ( Set::c_bExtractLockExternal ) {
typename Set::rcu_lock l;
- xp = rSet.extract( key_type( el, k ) );
+ xp = rSet.extract( key_type( el, k ));
if ( xp )
++m_nExtractSuccess;
else
++m_nExtractFailed;
}
else {
- xp = rSet.extract( key_type( el, k ) );
+ xp = rSet.extract( key_type( el, k ));
if ( xp )
++m_nExtractSuccess;
else
for ( size_t k = 0; k < nInsThreadCount; ++k ) {
if ( Set::c_bExtractLockExternal ) {
typename Set::rcu_lock l;
- xp = rSet.extract( key_type( el, k ) );
+ xp = rSet.extract( key_type( el, k ));
if ( xp )
++m_nExtractSuccess;
else
++m_nExtractFailed;
}
else {
- xp = rSet.extract( key_type( el, k ) );
+ xp = rSet.extract( key_type( el, k ));
if ( xp )
++m_nExtractSuccess;
else
for ( size_t key : arr ) {
if ( key & 1 ) {
for ( size_t k = 0; k < nInsThreadCount; ++k ) {
- if ( set.contains( key_thread( key, k ) ) )
+ if ( set.contains( key_thread( key, k )))
++m_nFindOddSuccess;
else
++m_nFindOddFailed;
else {
// even keys MUST be in the map
for ( size_t k = 0; k < nInsThreadCount; ++k ) {
- if ( set.contains( key_thread( key, k ) ) )
+ if ( set.contains( key_thread( key, k )))
++m_nFindEvenSuccess;
else
++m_nFindEvenFailed;
}
CDSSTRESS_FeldmanHashSet_fixed( Set_DelOdd, run_feldman, key_thread, size_t )
-
+
} // namespace set
#binmode $fh ;
my $str = '';
while (<$fh>) {
-    if ( /^\/\/\$\$CDS-header\$\$/ ) {
-        $str .=
-"/*
-    This file is a part of libcds - Concurrent Data Structures library
-
-    (C) Copyright Maxim Khizhinsky (libcds.dev\@gmail.com) 2006-$year
-
-    Source code repo: http://github.com/khizmax/libcds/
-    Download: http://sourceforge.net/projects/libcds/files/
-
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions are met:
-
-    * Redistributions of source code must retain the above copyright notice, this
-      list of conditions and the following disclaimer.
-
-    * Redistributions in binary form must reproduce the above copyright notice,
-      this list of conditions and the following disclaimer in the documentation
-      and/or other materials provided with the distribution.
-
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"
-    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/\n" ;
-    }
-    else {
-        $nTabsFound += $_ =~ s/\t/ /g;
-        $_ =~ s/\s+$//;
-        $_ =~ s/\s+;$/;/;
-        $_ =~ s/\)\s+\)/\)\)/g;
-        $str .= $_ ;
-        $str .= "\n" ;
-    }
+    $nTabsFound += $_ =~ s/\t/ /g;
+    $_ =~ s/\s+$//;
+    $_ =~ s/\s+;$/;/;
+    $_ =~ s/\)\s+\)/\)\)/g;
+    $str .= $_ ;
+    $str .= "\n" ;
}
close $fh;

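The C++ hunks above are consistent with what these substitutions produce: tabs are replaced, trailing blanks are stripped, a blank before a trailing semicolon is dropped, and the blank between two closing parentheses is removed so that ") )" becomes "))". Below is a minimal, self-contained Perl sketch of the same rules applied to two hypothetical sample lines; the sample inputs are illustrative assumptions and are not taken from the patch.

    #!/usr/bin/perl
    use strict;
    use warnings;

    # Hypothetical sample lines showing the blemishes the script removes:
    # a leading tab, trailing blanks, a blank before ';', and ") )".
    my @samples = (
        "\tif ( try_lock() )   \n",
        "gp.reset( node_traits::to_value_ptr( pDel ) ) ;\n",
    );

    for my $line (@samples) {
        chomp( my $out = $line );
        $out =~ s/\t/ /g;          # replace each tab, as in the script above
        $out =~ s/\s+$//;          # strip trailing whitespace
        $out =~ s/\s+;$/;/;        # drop the blank before a trailing semicolon
        $out =~ s/\)\s+\)/\)\)/g;  # ") )" -> "))"; runs of three or more
                                   # closing parens may need another pass
        print "$out\n";
    }

Both sample lines come out with ") )" collapsed to "))" and trailing blanks removed, matching the formatting the hunks in this patch apply throughout.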