struct node
{
typedef T value_type; ///< Value type
+ typedef cds::details::marked_ptr<T, 1> marked_data_ptr; ///< marked pointer to the value
- atomics::atomic< node* > next; ///< pointer to next node in the list
- atomics::atomic< value_type* > data; ///< pointer to user data, \p nullptr if the node is free
+ atomics::atomic< node* > next; ///< pointer to next node in the list
+ atomics::atomic< marked_data_ptr > data; ///< marked pointer to user data, \p nullptr if the node is free
//@cond
node()
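The key change in the node layout above: \p data is now atomics::atomic< marked_data_ptr > instead of a plain atomic pointer, so one flag bit travels atomically together with the data address. A minimal sketch of the idea behind cds::details::marked_ptr<T, 1> (illustrative code, not the libcds implementation; tiny_marked_ptr is a made-up name): because value_type is at least 2-byte aligned, bit 0 of a valid pointer is always zero and can be borrowed as a mark.

#include <cstdint>
#include <cassert>

template <typename T>
class tiny_marked_ptr {
    uintptr_t m_bits = 0;   // pointer value with the flag packed into bit 0
public:
    tiny_marked_ptr() = default;
    explicit tiny_marked_ptr( T* p, int mask = 0 )
        : m_bits( reinterpret_cast<uintptr_t>( p ) | uintptr_t( mask & 1 ))
    {
        // requires alignof(T) >= 2 so that bit 0 of a valid pointer is free
        assert(( reinterpret_cast<uintptr_t>( p ) & 1 ) == 0 );
    }
    T*  ptr()  const { return reinterpret_cast<T*>( m_bits & ~uintptr_t( 1 )); } // strip the mark
    int bits() const { return int( m_bits & 1 ); }                               // read the mark
    tiny_marked_ptr operator|( int mask ) const
    {
        tiny_marked_ptr r;
        r.m_bits = m_bits | uintptr_t( mask & 1 );
        return r;
    }
    friend bool operator==( tiny_marked_ptr lhs, tiny_marked_ptr rhs ) { return lhs.m_bits == rhs.m_bits; }
};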
event_counter m_nInsertSuccess; ///< Number of success \p insert() operations
event_counter m_nInsertFailed; ///< Number of failed \p insert() operations
event_counter m_nInsertRetry; ///< Number of attempts to insert new item
+ event_counter m_nInsertReuse; ///< Number of reuses of an empty node during \p insert()
+ event_counter m_nInsertReuseFailed; ///< Number of failed attempts to reuse a free node during \p insert()
event_counter m_nUpdateNew; ///< Number of new item inserted for \p update()
event_counter m_nUpdateExisting; ///< Number of existing item updates
event_counter m_nUpdateFailed; ///< Number of failed \p update() call
void onInsertSuccess() { ++m_nInsertSuccess; }
void onInsertFailed() { ++m_nInsertFailed; }
void onInsertRetry() { ++m_nInsertRetry; }
+ void onInsertReuse() { ++m_nInsertReuse; }
+ void onInsertReuseFailed() { ++m_nInsertReuseFailed; }
void onUpdateNew() { ++m_nUpdateNew; }
void onUpdateExisting() { ++m_nUpdateExisting; }
void onUpdateFailed() { ++m_nUpdateFailed; }
void onInsertSuccess() const {}
void onInsertFailed() const {}
void onInsertRetry() const {}
+ void onInsertReuse() const {}
+ void onInsertReuseFailed() const {}
void onUpdateNew() const {}
void onUpdateExisting() const {}
void onUpdateFailed() const {}
void onInsertSuccess() { m_stat.onInsertSuccess(); }
void onInsertFailed() { m_stat.onInsertFailed(); }
void onInsertRetry() { m_stat.onInsertRetry(); }
+ void onInsertReuse() { m_stat.onInsertReuse(); }
+ void onInsertReuseFailed() { m_stat.onInsertReuseFailed(); }
void onUpdateNew() { m_stat.onUpdateNew(); }
void onUpdateExisting() { m_stat.onUpdateExisting(); }
void onUpdateFailed() { m_stat.onUpdateFailed(); }
//@cond
typedef atomics::atomic< node_type* > atomic_node_ptr; ///< Atomic node pointer
typedef atomic_node_ptr auxiliary_head; ///< Auxiliary head type (for split-list support)
+ typedef typename node_type::marked_data_ptr marked_data_ptr;
atomic_node_ptr m_pHead; ///< Head pointer
item_counter m_ItemCounter; ///< Item counter
friend class IterableList;
protected:
- node_type* m_pNode;
+ node_type* m_pNode;
typename gc::Guard m_Guard; // data guard
void next()
m_Guard.clear();
break;
}
- if ( m_Guard.protect( m_pNode->data ))
+ if ( m_Guard.protect( m_pNode->data, []( marked_data_ptr p ) { return p.ptr(); }).ptr())
break;
}
}
: m_pNode( pNode.load( memory_model::memory_order_acquire ))
{
if ( m_pNode ) {
- if ( !m_Guard.protect( m_pNode->data ))
+ if ( !m_Guard.protect( m_pNode->data, []( marked_data_ptr p ) { return p.ptr(); }).ptr())
next();
}
}
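Both iterator call sites above rely on the same idiom: the two-argument Guard::protect() overload takes a converter functor, so the hazard pointer publishes the raw data address rather than the marked word (otherwise the GC scan could never match the address being retired). A hypothetical helper making the pattern explicit (protect_data is an illustrative name; the Guard interface is the one used by the diff itself):

// Publish the node's data pointer in the guard's hazard slot and return it
// with the mark stripped; nullptr means the node is free
static value_type* protect_data( typename gc::Guard& guard, atomics::atomic<marked_data_ptr>& data )
{
    // protect() re-reads 'data' until the published value is stable;
    // the lambda selects which plain pointer goes into the hazard slot
    return guard.protect( data,
        []( marked_data_ptr p ) { return p.ptr(); } ).ptr();
}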
position pos;
for ( pos.pCur = m_pHead.load( memory_model::memory_order_relaxed ); pos.pCur; pos.pCur = pos.pCur->next.load( memory_model::memory_order_relaxed )) {
while ( true ) {
- pos.pFound = pos.guard.protect( pos.pCur->data );
+ pos.pFound = pos.guard.protect( pos.pCur->data, []( marked_data_ptr p ) { return p.ptr(); }).ptr();
if ( !pos.pFound )
break;
if ( cds_likely( unlink_node( pos ))) {
assert( pos.pFound != nullptr );
assert( key_comparator()(*pos.pFound, val) == 0 );
- if ( cds_likely( pos.pCur->data.compare_exchange_strong( pos.pFound, &val, memory_model::memory_order_release, atomics::memory_order_relaxed ))) {
+ marked_data_ptr pFound( pos.pFound );
+ if ( cds_likely( pos.pCur->data.compare_exchange_strong( pFound, marked_data_ptr( &val ),
+ memory_model::memory_order_release, atomics::memory_order_relaxed )))
+ {
if ( pos.pFound != &val ) {
retire_data( pos.pFound );
func( val, pos.pFound );
return false;
}
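Because \p data is now atomic< marked_data_ptr >, the expected value of the CAS above must be wrapped as well. A useful consequence: a concurrent thread that has set the LSB (see link_node() below) makes this CAS fail even though the raw pointer is unchanged, so a pinned node can never have its data swapped out from under the reuse protocol. A condensed sketch of the pattern (replace_data is an illustrative name; node_type, value_type and memory_model are taken from the surrounding code):

// Atomically replace the node's data; fails if the data has changed or
// if another thread pinned the node by setting the mark bit
static bool replace_data( node_type* pCur, value_type* pExpected, value_type* pNew )
{
    marked_data_ptr expected( pExpected );  // LSB clear: the node must not be pinned
    return pCur->data.compare_exchange_strong( expected, marked_data_ptr( pNew ),
        memory_model::memory_order_release, atomics::memory_order_relaxed );
}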
- value_type * pVal = pos.guard.protect( pCur->data );
+ value_type * pVal = pos.guard.protect( pCur->data,
+ []( marked_data_ptr p ) -> value_type*
+ {
+ return p.ptr();
+ }).ptr();
if ( pVal ) {
int nCmp = cmp( *pVal, val );
{
node_type * pNode = m_pHead.load( memory_model::memory_order_relaxed );
while ( pNode ) {
- value_type * pVal = pNode->data.load( memory_model::memory_order_relaxed );
+ value_type * pVal = pNode->data.load( memory_model::memory_order_relaxed ).ptr();
if ( pVal )
retire_data( pVal );
node_type * pNext = pNode->next.load( memory_model::memory_order_relaxed );
bool link_node( value_type * pVal, position& pos )
{
if ( pos.pPrev ) {
- if ( pos.pPrev->data.load( memory_model::memory_order_relaxed ) == nullptr ) {
+ if ( pos.pPrev->data.load( memory_model::memory_order_relaxed ) == marked_data_ptr() ) {
// reuse pPrev
- value_type * p = nullptr;
- return pos.pPrev->data.compare_exchange_strong( p, pVal, memory_model::memory_order_release, atomics::memory_order_relaxed );
+
+ // pos.pCur's data must remain unchanged; otherwise an ordering violation is
+ // possible if the current thread is preempted and another thread deletes
+ // pos.pCur's data and then replaces it with different data.
+ // To prevent this we mark pos.pCur's data as undeletable by setting its LSB
+ marked_data_ptr val( pos.pFound );
+ if ( !pos.pCur->data.compare_exchange_strong( val, val | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
+ // oops, pos.pCur's data has been changed, or another thread is setting pos.pPrev's data
+ m_Stat.onInsertReuseFailed();
+ return false;
+ }
+
+ // Set pos.pPrev data if it is null
+ marked_data_ptr p;
+ bool result = pos.pPrev->data.compare_exchange_strong( p, marked_data_ptr( pVal ),
+ memory_model::memory_order_release, atomics::memory_order_relaxed );
+
+ // Clear pos.pCur data mark
+ pos.pCur->data.store( val, memory_model::memory_order_relaxed );
+
+ if ( result )
+ m_Stat.onInsertReuse();
+ return result;
}
else {
// insert new node between pos.pPrev and pos.pCur
assert( pos.pCur != nullptr );
assert( pos.pFound != nullptr );
- if ( pos.pCur->data.compare_exchange_strong( pos.pFound, nullptr, memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
+ marked_data_ptr val( pos.pFound );
+ if ( pos.pCur->data.compare_exchange_strong( val, marked_data_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
retire_data( pos.pFound );
return true;
}
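To see why link_node() pins pos.pCur before touching pos.pPrev, consider a hypothetical interleaving of two threads, T1 inserting pVal by reusing the free node pos.pPrev and T2 updating pos.pCur:

// Without the mark:
//   T1: search() positions on pos.pCur, pos.pFound = pK, pos.pPrev is free
//   T2: replaces pos.pCur->data (key K retired, key K' stored)
//   T1: CAS pos.pPrev->data: nullptr -> pVal   // pVal is now linked before K',
//                                              // which may violate key ordering
// With the mark:
//   T1: CAS pos.pCur->data: pK -> pK|1         // fails in the history above,
//                                              // because T2 changed the word
//   T1: returns false; the caller retries the search, so ordering is preserved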
<< CDSSTRESS_STAT_OUT( s, m_nInsertSuccess )
<< CDSSTRESS_STAT_OUT( s, m_nInsertFailed )
<< CDSSTRESS_STAT_OUT( s, m_nInsertRetry )
+ << CDSSTRESS_STAT_OUT( s, m_nInsertReuse )
+ << CDSSTRESS_STAT_OUT( s, m_nInsertReuseFailed )
<< CDSSTRESS_STAT_OUT( s, m_nUpdateNew )
<< CDSSTRESS_STAT_OUT( s, m_nUpdateExisting )
<< CDSSTRESS_STAT_OUT( s, m_nUpdateFailed )