{}
/// Returns current value of the counter
- counter_type value(atomics::memory_order order = atomics::memory_order_relaxed) const
+ counter_type value(atomics::memory_order order = atomics::memory_order_relaxed) const
{
return m_Counter.load( order );
}
}
/// Returns underlying atomic interface
- atomic_type& getAtomic()
+ atomic_type& getAtomic()
{
return m_Counter;
}
/// Returns underlying atomic interface (const)
- const atomic_type& getAtomic() const
+ const atomic_type& getAtomic() const
{
return m_Counter;
}
typedef size_t counter_type ; ///< Counter type
public:
/// Returns 0
/**
    Dummy (no-op) counter: counting is disabled, so the value is
    always zero. The \p order argument is accepted only to keep the
    interface identical to the real counter and is ignored.
*/
counter_type value(atomics::memory_order /*order*/ = atomics::memory_order_relaxed) const
{
    return 0;
}
{
metrics m;
// Calculate m_nSegmentSize and m_nSegmentCount by nItemCount
m.nLoadFactor = nLoadFactor > 0 ? nLoadFactor : 1;
size_t nBucketCount = (size_t)( ((float) nItemCount) / m.nLoadFactor );
// In this point, we must wait while nBucket is empty.
// The compiler can decide that waiting loop can be "optimized" (stripped)
// To prevent this situation, we use waiting on volatile bucket_head_ptr pointer.
- //
m_Stat.onBucketInitContenton();
back_off bkoff;
while ( true ) {
m_Buckets.bucket( 0, pNode );
}
- void inc_item_count()
+ void inc_item_count()
{
size_t sz = m_nBucketCountLog2.load(memory_model::memory_order_relaxed);
if ( ( ++m_ItemCounter >> sz ) > m_Buckets.load_factor() && ((size_t)(1 << sz )) < m_Buckets.capacity() )
/// Initialize split-ordered list of default capacity
/**
The default capacity is defined in bucket table constructor.
See \p split_list::expandable_bucket_table, \p split_list::static_bucket_table
which selects by \p split_list::dynamic_bucket_table option.
*/
SplitListSet()