#include <cds/intrusive/details/base.h>
#include <cds/algo/atomic.h>
+#include <cds/algo/bit_reversal.h>
#include <cds/details/allocator.h>
#include <cds/algo/int_algo.h>
#include <cds/algo/bitop.h>
}
/// Initializes dummy node with \p nHash value
- hash_node( size_t nHash )
+ explicit hash_node( size_t nHash )
: m_nHash( nHash )
{
assert( is_dummy());
}
/// Initializes dummy node with \p nHash value
- node( size_t nHash )
+ explicit node( size_t nHash )
: hash_node( nHash )
{
assert( is_dummy());
}
/// Initializes dummy node with \p nHash value
- node( size_t nHash )
+ explicit node( size_t nHash )
: hash_node( nHash )
{
assert( is_dummy());
//@endcond
};
+ /// Option to control bit reversal algorithm
+ /**
+ Bit reversal is a significant part of the split-list algorithm.
+ \p Type can be one of the predefined algorithms in the \p cds::algo::bit_reversal namespace.
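+
+ A usage sketch (only \p bit_reversal and \p cds::algo::bit_reversal::lookup come from the library;
+ \p my_hash is illustrative and the exact \p make_traits namespace is an assumption):
+ \code
+ // Pack the bit_reversal option into split-list traits together with the mandatory hash functor
+ typedef cds::intrusive::split_list::make_traits<
+     cds::opt::hash< my_hash >,
+     cds::intrusive::split_list::bit_reversal< cds::algo::bit_reversal::lookup >
+ >::type my_traits;
+ \endcode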
+ */
+ template <typename Type>
+ struct bit_reversal {
+ //@cond
+ template <typename Base>
+ struct pack: public Base
+ {
+ typedef Type bit_reversal;
+ };
+ //@endcond
+ };
+
/// SplitListSet traits
struct traits
{
*/
typedef opt::none hash;
+ /// Bit reversal algorithm
+ /**
+ Bit reversal is a significant part of the split-list algorithm.
+ There are several predefined algorithms in the \p cds::algo::bit_reversal namespace;
+ \p cds::algo::bit_reversal::lookup is the best general-purpose one.
+
+ More efficient bit reversal algorithms exist for particular processor architectures,
+ for example, ones based on the x86 SIMD/AVX instruction set; see <a href="http://stackoverflow.com/questions/746171/best-algorithm-for-bit-reversal-from-msb-lsb-to-lsb-msb-in-c">here</a>.
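+
+ Any default-constructible functor taking and returning \p size_t can be plugged in.
+ A naive portable sketch (\p my_bit_reversal is illustrative, not part of the library):
+ \code
+ struct my_bit_reversal {
+     size_t operator()( size_t x ) const
+     {
+         // Reverse the bits of x one by one (correct but slow)
+         size_t r = 0;
+         for ( unsigned i = 0; i < sizeof( size_t ) * 8; ++i, x >>= 1 )
+             r = ( r << 1 ) | ( x & 1 );
+         return r;
+     }
+ };
+ \endcode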
+ */
+ typedef cds::algo::bit_reversal::lookup bit_reversal;
+
/// Item counter
/**
The item counting is an important part of \p SplitListSet algorithm:
the <tt>empty()</tt> member function depends on correct item counting.
Therefore, \p cds::atomicity::empty_item_counter is not allowed as a type of the option.
- Default is \p cds::atomicity::item_counter.
+ Default is \p cds::atomicity::item_counter; to avoid false sharing you may use \p atomicity::cache_friendly_item_counter instead.
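+
+ For example (a sketch; \p my_traits is illustrative):
+ \code
+ struct my_traits: public cds::intrusive::split_list::traits {
+     // item counter designed to avoid false sharing between the counter and hot data
+     typedef cds::atomicity::cache_friendly_item_counter item_counter;
+ };
+ \endcode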
*/
typedef cds::atomicity::item_counter item_counter;
/**
Available \p Options:
- \p opt::hash - mandatory option, specifies hash functor.
+ - \p split_list::bit_reversal - bit reversal algorithm; see \p traits::bit_reversal for explanation.
+     The default is \p cds::algo::bit_reversal::lookup.
- \p opt::item_counter - optional, specifies item counting policy. See \p traits::item_counter
for default type.
- \p opt::memory_model - C++ memory model for atomic operations.
/// Auxiliary node type
struct aux_node_type: public node_type, public free_list::node
- {};
+ {
+# ifdef CDS_DEBUG
+ atomics::atomic<bool> m_busy;
+
+ aux_node_type()
+ {
+ m_busy.store( false, atomics::memory_order_release );
+ }
+# endif
+ };
typedef atomics::atomic<aux_node_type *> table_entry; ///< Table entry type
typedef cds::details::Allocator< table_entry, allocator > bucket_table_allocator; ///< Bucket table allocator
if ( m_nAuxNodeAllocated.load( memory_model::memory_order_relaxed ) < capacity()) {
// alloc next free node from m_auxNode
size_t const idx = m_nAuxNodeAllocated.fetch_add( 1, memory_model::memory_order_relaxed );
- if ( idx < capacity())
+ if ( idx < capacity() ) {
+ CDS_TSAN_ANNOTATE_NEW_MEMORY( &m_auxNode[idx], sizeof( aux_node_type ) );
return new( &m_auxNode[idx] ) aux_node_type();
+ }
}
// get from free-list
typedef typename options::free_list free_list;
/// Auxiliary node type
- class aux_node_type: public node_type, public free_list::node
- {};
+ struct aux_node_type: public node_type, public free_list::node
+ {
+# ifdef CDS_DEBUG
+ atomics::atomic<bool> m_busy;
+
+ aux_node_type()
+ {
+ m_busy.store( false, atomics::memory_order_release );
+ }
+# endif
+ };
protected:
//@cond
// aux_node_type nodes[];
aux_node_segment()
- : aux_node_count(0)
- , next_segment( nullptr )
- {}
+ : next_segment( nullptr )
+ {
+ aux_node_count.store( 0, atomics::memory_order_release );
+ }
aux_node_type* segment()
{
assert( aux_segment != nullptr );
// try to allocate from current aux segment
- if ( aux_segment->aux_node_count.load( memory_model::memory_order_relaxed ) < m_metrics.nSegmentSize ) {
+ if ( aux_segment->aux_node_count.load( memory_model::memory_order_acquire ) < m_metrics.nSegmentSize ) {
size_t idx = aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
- if ( idx < m_metrics.nSegmentSize )
+ if ( idx < m_metrics.nSegmentSize ) {
+ CDS_TSAN_ANNOTATE_NEW_MEMORY( aux_segment->segment() + idx, sizeof( aux_node_type ) );
return new( aux_segment->segment() + idx ) aux_node_type();
+ }
}
// try allocate from free-list
aux_node_segment* new_aux_segment = allocate_aux_segment();
new_aux_segment->next_segment = aux_segment;
new_aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
- CDS_COMPILER_RW_BARRIER;
- if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_release, atomics::memory_order_acquire ))
- return new( new_aux_segment->segment()) aux_node_type();
+ if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_release, atomics::memory_order_acquire ) ) {
+ CDS_TSAN_ANNOTATE_NEW_MEMORY( new_aux_segment->segment(), sizeof( aux_node_type ) );
+ return new( new_aux_segment->segment() ) aux_node_type();
+ }
free_aux_segment( new_aux_segment );
}
// Calculate m_nSegmentSize and m_nSegmentCount by nItemCount
m.nLoadFactor = nLoadFactor > 0 ? nLoadFactor : 1;
- size_t nBucketCount = (size_t)(((float)nItemCount) / m.nLoadFactor);
+ size_t nBucketCount = ( nItemCount + m.nLoadFactor - 1 ) / m.nLoadFactor;
if ( nBucketCount <= 2 ) {
m.nSegmentCount = 1;
m.nSegmentSize = 2;
aux_node_segment* allocate_aux_segment()
{
char* p = raw_allocator().allocate( sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
+ CDS_TSAN_ANNOTATE_NEW_MEMORY( p, sizeof( aux_node_segment ) );
return new(p) aux_node_segment();
}
{
return m_itCur != i.m_itCur;
}
+
+ protected:
+ list_iterator const& underlying_iterator() const
+ {
+ return m_itCur;
+ }
};
} // namespace details
//@endcond
//@cond
// Helper functions
-
- /// Reverses bit order in \p nHash
- static inline size_t reverse_bits( size_t nHash )
- {
- return bitop::RBO( nHash );
- }
-
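+ /// Regular node hash: bit-reversed \p nHash with the least significant bit set to 1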
+ template <typename BitReversalAlgo>
static inline size_t regular_hash( size_t nHash )
{
- return reverse_bits( nHash ) | size_t(1);
+ return BitReversalAlgo()( nHash ) | size_t(1);
}
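+ /// Dummy (bucket) node hash: bit-reversed \p nHash with the least significant bit cleared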
+ template <typename BitReversalAlgo>
static inline size_t dummy_hash( size_t nHash )
{
- return reverse_bits( nHash ) & ~size_t(1);
+ return BitReversalAlgo()( nHash ) & ~size_t(1);
}
//@endcond