value_type m_Value;
template <typename Q>
- explicit node_type( Q const& v )
- : m_Value(v)
+ explicit node_type( Q&& v )
+ : m_Value( std::forward<Q>( v ))
{}
+
template <typename Q, typename... Args>
explicit node_type( Q&& q, Args&&... args )
: m_Value( std::forward<Q>(q), std::forward<Args>(args)... )
{}

size_t operator()( value_type const& v ) const
{
return base_class::operator()( key_accessor()( v.m_Value ));
}
+
template <typename Q>
size_t operator()( Q const& k ) const
{
return base_class::operator()( k );
}
value_type m_Value;
template <typename Q>
- explicit node_type( const Q& v )
- : m_Value(v)
+ explicit node_type( Q&& v )
+ : m_Value( std::forward<Q>( v ))
{}
template <typename Q, typename... Args>
explicit node_type( Q&& q, Args&&... args )
: m_Value( std::forward<Q>(q), std::forward<Args>(args)... )
{}

value_type m_Value;
template <typename Q>
- explicit node_type( Q const& v )
- : m_Value(v)
+ explicit node_type( Q&& v )
+ : m_Value( std::forward<Q>( v ))
{}
template <typename Q, typename... Args>
explicit node_type( Q&& q, Args&&... args )
: m_Value( std::forward<Q>(q), std::forward<Args>(args)... )
{}

/// Returns internal statistics
stat const& statistics() const
{
return base_class::statistics();
}
+
+ /// Returns internal statistics for \p ordered_list
+ typename ordered_list::stat const& list_statistics() const
+ {
+ return base_class::list_statistics();
+ }
};
}} // namespace cds::container
/// Returns internal statistics
stat const& statistics() const
{
return base_class::statistics();
}
+
+ /// Returns internal statistics for \p ordered_list
+ typename ordered_list::stat const& list_statistics() const
+ {
+ return base_class::list_statistics();
+ }
};
}} // namespace cds::container
Returns \p true if \p val is inserted into the set, \p false otherwise.
*/
template <typename Q>
- bool insert( Q const& val )
+ bool insert( Q&& val )
{
- return insert_node( alloc_node( val ));
+ return insert_node( alloc_node( std::forward<Q>( val )));
}
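
// Hedged usage sketch (illustration only, not part of this change): with
// the forwarding overload an rvalue argument is moved into the new node
// instead of being copied. "set_type" stands for any SplitListSet
// instantiation and is an assumption of this example.
//
//     set_type s;
//     std::string key( "alpha" );
//     s.insert( key );              // lvalue: copied, as before
//     s.insert( std::move( key )); // rvalue: moved into the node, no copy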
/// Inserts new node
synchronization.
*/
template <typename Q, typename Func>
- bool insert( Q const& val, Func f )
+ bool insert( Q&& val, Func f )
{
- scoped_node_ptr pNode( alloc_node( val ));
+ scoped_node_ptr pNode( alloc_node( std::forward<Q>( val )));
if ( base_class::insert( *pNode, [&f](node_type& node) { f( node.m_Value ) ; } )) {
pNode.release();
#endif
upsert( Q&& val, bool bAllowInsert = true )
{
- scoped_node_ptr pNode( alloc_node( val ));
+ scoped_node_ptr pNode( alloc_node( std::forward<Q>( val )));
auto bRet = base_class::upsert( *pNode, bAllowInsert );
std::pair<bool, bool>
>::type
#endif
- update( Q const& val, Func func, bool bAllowInsert = true )
+ update( Q&& val, Func func, bool bAllowInsert = true )
{
- scoped_node_ptr pNode( alloc_node( val ));
+ scoped_node_ptr pNode( alloc_node( std::forward<Q>( val )));
auto bRet = base_class::update( *pNode,
[&func, &val]( bool bNew, node_type& item, node_type const& /*val*/ ) {
std::is_same<Q, Q>::value && is_iterable_list<ordered_list>::value,
std::pair<bool, bool>
>::type
- update( Q const& val, Func func, bool bAllowInsert = true )
+ update( Q&& val, Func func, bool bAllowInsert = true )
{
- scoped_node_ptr pNode( alloc_node( val ));
+ scoped_node_ptr pNode( alloc_node( std::forward<Q>( val )));
auto bRet = base_class::update( *pNode,
[&func]( node_type& item, node_type* old ) {
using base_class::extract_;
using base_class::get_;
- template <typename Q>
- static node_type * alloc_node( Q const& v )
- {
- return cxx_node_allocator().New( v );
- }
-
template <typename... Args>
static node_type * alloc_node( Args&&... args )
{
return cxx_node_allocator().MoveNew( std::forward<Args>( args )... );
}

/// Returns internal statistics
stat const& statistics() const
{
return base_class::statistics();
}
+
+ /// Returns internal statistics for \p ordered_list
+ typename ordered_list::stat const& list_statistics() const
+ {
+ return base_class::list_statistics();
+ }
};
}} // namespace cds::container
/// Returns internal statistics
stat const& statistics() const
{
return base_class::statistics();
}
+
+ /// Returns internal statistics for \p ordered_list
+ typename ordered_list::stat const& list_statistics() const
+ {
+ return base_class::list_statistics();
+ }
};
}} // namespace cds::container
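
// Hedged usage sketch (not part of this change): list_statistics() returns
// meaningful counters only when the ordered-list traits select a non-empty
// stat type (see the test traits further below); "set_type" stands for such
// a SplitListSet instantiation.
//
//     set_type s;
//     // ... concurrent workload ...
//     auto const& top_stat  = s.statistics();       // split-list counters
//     auto const& list_stat = s.list_statistics();  // ordered-list counters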
event_counter m_nReuseNode; ///< Number of reusing empty node when inserting/updating
event_counter m_nNodeMarkFailed; ///< Number of unsuccessful marking attempts when we try to insert new data
event_counter m_nNodeSeqBreak; ///< Number of broken \p prev -> \p next sequences detected when inserting new data
+ event_counter m_nNullPrevABA; ///< Number of ABA cases detected on a \p nullptr prev node when inserting new data
event_counter m_nNewNodeCreated; ///< Number of new node created when we try to insert new data
event_counter m_nUpdateNew; ///< Number of new item inserted for \p update()
event_counter m_nUpdateExisting; ///< Number of existing item updates
void onReuseNode() { ++m_nReuseNode; }
void onNodeMarkFailed() { ++m_nNodeMarkFailed; }
void onNodeSeqBreak() { ++m_nNodeSeqBreak; }
+ void onNullPrevABA() { ++m_nNullPrevABA; }
void onNewNodeCreated() { ++m_nNewNodeCreated; }
void onUpdateNew() { ++m_nUpdateNew; }
void onUpdateExisting() { ++m_nUpdateExisting; }
void onReuseNode() const {}
void onNodeMarkFailed() const {}
void onNodeSeqBreak() const {}
+ void onNullPrevABA() const {}
void onNewNodeCreated() const {}
void onUpdateNew() const {}
void onUpdateExisting() const {}
void onReuseNode() { m_stat.onReuseNode(); }
void onNodeMarkFailed() { m_stat.onNodeMarkFailed();}
void onNodeSeqBreak() { m_stat.onNodeSeqBreak(); }
+ void onNullPrevABA() { m_stat.onNullPrevABA(); }
void onNewNodeCreated() { m_stat.onNewNodeCreated();}
void onUpdateNew() { m_stat.onUpdateNew(); }
void onUpdateExisting() { m_stat.onUpdateExisting();}
if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) {
table_entry* pNewSegment = allocate_segment();
table_entry * pNull = nullptr;
- if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
+ if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed ))
destroy_segment( pNewSegment );
- }
}
+
+ assert( segment.load( atomics::memory_order_relaxed )[nBucket & (m_metrics.nSegmentSize - 1)].load( atomics::memory_order_relaxed ) == nullptr );
segment.load(memory_model::memory_order_acquire)[ nBucket & (m_metrics.nSegmentSize - 1) ].store( pNode, memory_model::memory_order_release );
}
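
// Shape of the lazy-allocation race above as a standalone sketch (plain
// std::atomic, not libcds API): every racing thread may allocate, exactly
// one CAS installs its segment, and the losers destroy their allocation.
//
//     std::atomic<table_entry*> slot{ nullptr };
//     table_entry* fresh    = allocate_segment();
//     table_entry* expected = nullptr;
//     if ( !slot.compare_exchange_strong( expected, fresh,
//             std::memory_order_release, std::memory_order_relaxed ))
//         destroy_segment( fresh );   // another thread won the race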
return base_class()(q.val, v);
}
- template <typename Q1, typename Q2>
- int operator()( Q1 const& v1, Q2 const& v2 ) const
+ int operator()( value_type const& lhs, value_type const& rhs ) const
{
- return base_class()(v1, v2);
+ splitlist_node_type const * n1 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( lhs ));
+ splitlist_node_type const * n2 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( rhs ));
+ if ( n1->m_nHash != n2->m_nHash )
+ return n1->m_nHash < n2->m_nHash ? -1 : 1;
+
+ if ( n1->is_dummy() ) {
+ assert( n2->is_dummy() );
+ return 0;
+ }
+
+ assert( !n1->is_dummy() && !n2->is_dummy() );
+
+ return native_key_comparator()( lhs, rhs );
}
};
{
void operator()( value_type * v )
{
- hash_node* p = static_cast<hash_node*>( v );
- if ( !p->is_dummy())
- native_disposer()(v);
+ if ( !static_cast<hash_node*>( v )->is_dummy())
+ native_disposer()( v );
}
};
aux_node()
{
typedef typename native_ordered_list::node_type list_node_type;
+
list_node_type::data.store( typename list_node_type::marked_data_ptr(
static_cast<value_type*>( static_cast<hash_node *>( this ))),
atomics::memory_order_release
return base_class()(q.val, v);
}
- template <typename Q1, typename Q2>
- int operator()( Q1 const& v1, Q2 const& v2 ) const
+ int operator()( value_type const& lhs, value_type const& rhs ) const
{
- return base_class()(v1, v2);
+ hash_node const& n1 = static_cast<hash_node const&>( lhs );
+ hash_node const& n2 = static_cast<hash_node const&>( rhs );
+ if ( n1.m_nHash != n2.m_nHash )
+ return n1.m_nHash < n2.m_nHash ? -1 : 1;
+
+ if ( n1.is_dummy() ) {
+ assert( n2.is_dummy() );
+ return 0;
+ }
+
+ assert( !n1.is_dummy() && !n2.is_dummy() );
+
+ return base_class()( lhs, rhs );
}
};
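
// Note (hedged, not established by this diff alone): the assert above relies
// on equal hashes implying the same node kind. In the split-ordered-list
// scheme, dummy (bucket) nodes carry a bit-reversed bucket index with the
// low bit clear, while data nodes set the low bit, so a dummy node and a
// data node can never compare equal on hash.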
typedef typename gc::template guarded_ptr< value_type > guarded_ptr; ///< Guarded pointer
- static CDS_CONSTEXPR const size_t c_nHazardPtrCount = 3; ///< Count of hazard pointer required for the algorithm
+ static CDS_CONSTEXPR const size_t c_nHazardPtrCount = 4; ///< Count of hazard pointer required for the algorithm
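// (Review note, hedged: the extra hazard pointer most likely covers the
// additional Guard acquired in the new find_prev() re-check for the
// nullptr-prev ABA case introduced by this change.)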
//@cond
// Rebind traits (split-list support)
return false;
}
- if ( link_aux_node( pNode, pos )) {
+ if ( link_aux_node( pNode, pos, pHead )) {
++m_ItemCounter;
m_Stat.onInsertSuccess();
return true;
return false;
}
- if ( link_data( &val, pos )) {
+ if ( link_data( &val, pos, pHead )) {
++m_ItemCounter;
m_Stat.onInsertSuccess();
return true;
return false;
}
- if ( link_data( &val, pos )) {
+ if ( link_data( &val, pos, pHead )) {
f( val );
++m_ItemCounter;
m_Stat.onInsertSuccess();
return std::make_pair( false, false );
}
- if ( link_data( &val, pos )) {
+ if ( link_data( &val, pos, pHead )) {
func( val, static_cast<value_type*>( nullptr ));
++m_ItemCounter;
m_Stat.onUpdateNew();
}
}
- bool link_data( value_type * pVal, insert_position& pos )
+ bool link_data( value_type* pVal, insert_position& pos, node_type* pHead )
{
assert( pos.pPrev != nullptr );
assert( pos.pCur != nullptr );
marked_data_ptr valPrev( pos.pPrevVal );
if ( !pos.pPrev->data.compare_exchange_strong( valPrev, valPrev | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
pos.pCur->data.store( valCur, memory_model::memory_order_relaxed );
+
m_Stat.onNodeMarkFailed();
return false;
}
// sequence pPrev - pCur is broken
pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed );
pos.pCur->data.store( valCur, memory_model::memory_order_relaxed );
+
m_Stat.onNodeSeqBreak();
return false;
}
- if ( pos.pPrev != pos.pHead && pos.pPrevVal == nullptr )
- {
+ if ( pos.pPrevVal == nullptr ) {
+ // Check for the ABA problem on prev:
+ // the current thread may have been preempted on entry to this
+ // function; other threads could have linked data to prev and then
+ // removed it again, so the item order may have changed in the meantime
+ if ( find_prev( pHead, *pVal ) != pos.pPrev ) {
+ pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed );
+ pos.pCur->data.store( valCur, memory_model::memory_order_relaxed );
+
+ m_Stat.onNullPrevABA();
+ return false;
+ }
+ }
+
+ if ( pos.pPrev != pos.pHead && pos.pPrevVal == nullptr ) {
// reuse pPrev
// Set pos.pPrev data if it is null
}
// split-list support
- bool link_aux_node( node_type * pNode, insert_position& pos )
+ bool link_aux_node( node_type * pNode, insert_position& pos, node_type* pHead )
{
assert( pos.pPrev != nullptr );
assert( pos.pCur != nullptr );
return false;
}
+ if ( pos.pPrevVal == nullptr ) {
+ // Check for the ABA problem on prev:
+ // the current thread may have been preempted on entry to this
+ // function; other threads could have linked an item to prev and then
+ // removed it again, so the item order may have changed in the meantime
+ if ( find_prev( pHead, *pNode->data.load( memory_model::memory_order_relaxed ).ptr()) != pos.pPrev ) {
+ pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed );
+ pos.pCur->data.store( valCur, memory_model::memory_order_relaxed );
+
+ m_Stat.onNullPrevABA();
+ return false;
+ }
+ }
+
// insert new node between pos.pPrev and pos.pCur
pNode->next.store( pos.pCur, memory_model::memory_order_relaxed );
}
return false;
}
+
+ template <typename Q>
+ node_type* find_prev( node_type const* pHead, Q const& val ) const
+ {
+ node_type* pPrev = const_cast<node_type*>(pHead);
+ typename gc::Guard guard;
+ key_comparator cmp;
+
+ while ( true ) {
+ node_type * pCur = pPrev->next.load( memory_model::memory_order_relaxed );
+
+ if ( pCur == pCur->next.load( memory_model::memory_order_acquire ) ) {
+ // end-of-list
+ return pPrev;
+ }
+
+ value_type * pVal = guard.protect( pCur->data,
+ []( marked_data_ptr p ) -> value_type*
+ {
+ return p.ptr();
+ } ).ptr();
+
+ if ( pVal && cmp( *pVal, val ) >= 0 )
+ return pPrev;
+
+ pPrev = pCur;
+ }
+ }
//@endcond
};
}} // namespace cds::intrusive
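
// Why the find_prev() re-check above is needed: a pointer CAS cannot tell
// "never touched" apart from "changed and changed back". A minimal
// standalone illustration of that ABA hazard with plain std::atomic
// (sequentialized for clarity; not libcds code):

#include <atomic>
#include <cassert>

int main()
{
    std::atomic<int*> slot{ nullptr };

    // Thread A reads the slot, expects nullptr, then is preempted.
    int* expected = nullptr;

    // Other threads meanwhile link a value and remove it again.
    int x = 42;
    slot.store( &x );       // nullptr -> &x
    slot.store( nullptr );  // &x -> nullptr: value restored, history lost

    // Thread A resumes: its CAS succeeds even though the slot was reused,
    // so A cannot detect that the insert position may have shifted.
    int y = 7;
    bool ok = slot.compare_exchange_strong( expected, &y );
    assert( ok );
    return 0;
}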
return m_Stat;
}
+ /// Returns internal statistics for \p OrderedList
+ typename OrderedList::stat const& list_statistics() const
+ {
+ return m_List.statistics();
+ }
+
protected:
//@cond
template <bool IsConst>
return m_Stat;
}
+ /// Returns internal statistics for \p OrderedList
+ typename OrderedList::stat const& list_statistics() const
+ {
+ return m_List.statistics();
+ }
+
protected:
//@cond
template <bool IsConst>
<< CDSSTRESS_STAT_OUT( s, m_nReuseNode )
<< CDSSTRESS_STAT_OUT( s, m_nNodeMarkFailed )
<< CDSSTRESS_STAT_OUT( s, m_nNodeSeqBreak )
+ << CDSSTRESS_STAT_OUT( s, m_nNullPrevABA )
<< CDSSTRESS_STAT_OUT( s, m_nNewNodeCreated )
<< CDSSTRESS_STAT_OUT( s, m_nUpdateNew )
<< CDSSTRESS_STAT_OUT( s, m_nUpdateExisting )
#include <cds/container/split_list_map_nogc.h>
#include <cds_test/stat_splitlist_out.h>
+#include <cds_test/stat_michael_list_out.h>
+#include <cds_test/stat_lazy_list_out.h>
+#include <cds_test/stat_iterable_list_out.h>
namespace map {
template <typename GC, typename K, typename T, typename Traits >
static inline void print_stat( cds_test::property_stream& o, SplitListMap< GC, K, T, Traits > const& m )
{
- o << m.statistics();
+ o << m.statistics()
+ << m.list_statistics();
}
} // namespace map
#include <cds/container/split_list_set_rcu.h>
#include <cds_test/stat_splitlist_out.h>
+#include <cds_test/stat_michael_list_out.h>
+#include <cds_test/stat_lazy_list_out.h>
+#include <cds_test/stat_iterable_list_out.h>
namespace set {
,cc::split_list::ordered_list_traits<
typename cc::michael_list::make_traits<
co::compare< compare >
+ ,co::stat< cc::michael_list::stat<>>
>::type
>
>::type
,cc::split_list::ordered_list_traits<
typename cc::michael_list::make_traits<
co::less< less >
+ ,co::stat< cc::michael_list::stat<>>
>::type
>
>::type
,cc::split_list::ordered_list_traits<
typename cc::iterable_list::make_traits<
co::compare< compare >
+ ,co::stat< cc::iterable_list::stat<>>
>::type
>
>::type
,cc::split_list::ordered_list_traits<
typename cc::iterable_list::make_traits<
co::less< less >
+ ,co::stat< cc::iterable_list::stat<>>
>::type
>
>::type
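
// Hedged sketch (not part of this change) of a full traits definition in
// the style above; "hash_type" and "less" are assumed to come from the test
// fixture. The co::stat option is what replaces the default empty_stat with
// real counters and makes list_statistics() useful:
//
//     typedef typename cc::split_list::make_traits<
//         cc::split_list::ordered_list< cc::michael_list_tag >
//         , co::hash< hash_type >
//         , cc::split_list::ordered_list_traits<
//             typename cc::michael_list::make_traits<
//                 co::less< less >
//                 , co::stat< cc::michael_list::stat<>>
//             >::type
//         >
//     >::type traits;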
template <typename GC, typename T, typename Traits>
static inline void print_stat( cds_test::property_stream& o, SplitListSet<GC, T, Traits> const& s )
{
- o << s.statistics();
+ o << s.statistics()
+ << s.list_statistics();
}
} // namespace set