-//$$CDS-header$$
+/*
+ This file is a part of libcds - Concurrent Data Structures library
-#ifndef __CDS_INTRUSIVE_SEGMENTED_QUEUE_H
-#define __CDS_INTRUSIVE_SEGMENTED_QUEUE_H
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSLIB_INTRUSIVE_SEGMENTED_QUEUE_H
+#define CDSLIB_INTRUSIVE_SEGMENTED_QUEUE_H
#include <mutex>
#include <cds/intrusive/details/base.h>
#include <cds/details/marked_ptr.h>
#include <cds/algo/int_algo.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
#include <cds/opt/permutation.h>
#include <boost/intrusive/slist.hpp>
/// SegmentedQueue internal statistics. May be used for debugging or profiling
template <typename Counter = cds::atomicity::event_counter >
- struct stat {
+ struct stat
+ {
typedef Counter counter_type; ///< Counter type
counter_type m_nPush; ///< Push count
//@endcond
};
- /// SegmentedQueue default type traits
- struct type_traits {
+ /// SegmentedQueue default traits
+ struct traits {
/// Element disposer that is called when the item to be dequeued. Default is opt::v::empty_disposer (no disposer)
typedef opt::v::empty_disposer disposer;
/// Alignment of critical data, default is cache line alignment. See cds::opt::alignment option specification
enum { alignment = opt::cache_line_alignment };
+ /// Padding of segment data, default is no special padding
+ /**
+ The segment is just an array of atomic data pointers,
+ so, the high load leads to false sharing and performance degradation.
+ A padding of segment data can eliminate false sharing issue.
+ On the other hand, the padding leads to increased segment size.
+ */
+ enum { padding = opt::no_special_padding };
+
/// Segment allocator. Default is \ref CDS_DEFAULT_ALLOCATOR
typedef CDS_DEFAULT_ALLOCATOR allocator;
/// Lock type used to maintain an internal list of allocated segments
- typedef cds::lock::Spin lock_type;
+ typedef cds::sync::spin lock_type;
/// Random \ref cds::opt::permutation_generator "permutation generator" for sequence [0, quasi_factor)
typedef cds::opt::v::random2_permutation<int> permutation_generator;
/// Metafunction converting option list to traits for SegmentedQueue
/**
- The metafunction can be useful if a few fields in \ref type_traits should be changed.
+ The metafunction can be useful if a few fields in \p segmented_queue::traits should be changed.
For example:
\code
typedef cds::intrusive::segmented_queue::make_traits<
>::type my_segmented_queue_traits;
\endcode
This code creates \p %SegmentedQueue type traits with item counting feature,
- all other \p type_traits members left unchanged.
+ all other \p %segmented_queue::traits members left unchanged.
\p Options are:
- - \p opt::disposer - the functor used for dispose removed items.
- - \p opt::stat - internal statistics, possible type: \ref stat, \ref empty_stat (the default)
- - \p opt::item_counter - item counting feature. Note that atomicity::empty_item_counetr is not suitable
+ - \p opt::disposer - the functor used to dispose removed items.
+ - \p opt::stat - internal statistics, possible type: \p segmented_queue::stat, \p segmented_queue::empty_stat (the default)
+ - \p opt::item_counter - item counting feature. Note that \p atomicity::empty_item_counter is not suitable
for segmented queue.
- \p opt::memory_model - memory model, default is \p opt::v::relaxed_ordering.
See option description for the full list of possible models
- \p opt::alignment - the alignment for critical data, see option description for explanation
- - \p opt::allocator - the allocator used t maintain segments.
+ - \p opt::padding - the padding of segment data, default no special padding.
+ See \p traits::padding for explanation.
+ - \p opt::allocator - the allocator to be used for maintaining segments.
- \p opt::lock_type - a mutual exclusion lock type used to maintain internal list of allocated
segments. Default is \p cds::opt::Spin, \p std::mutex is also suitable.
- \p opt::permutation_generator - a random permutation generator for sequence [0, quasi_factor),
- default is cds::opt::v::random2_permutation<int>
+ default is \p cds::opt::v::random2_permutation<int>
*/
template <typename... Options>
struct make_traits {
typedef implementation_defined type ; ///< Metafunction result
# else
typedef typename cds::opt::make_options<
- typename cds::opt::find_type_traits< type_traits, Options... >::type
+ typename cds::opt::find_type_traits< traits, Options... >::type
,Options...
>::type type;
# endif
quasi factor. This means that the consumer dequeues <i>any</i> item from the current first segment.
Template parameters:
- - \p GC - a garbage collector, possible types are cds::gc::HP, cds::gc::PTB
+ - \p GC - a garbage collector, possible types are cds::gc::HP, cds::gc::DHP
- \p T - the type of values stored in the queue
- - \p Traits - queue type traits, default is segmented_queue::type_traits.
- segmented_queue::make_traits metafunction can be used to construct the
+ - \p Traits - queue type traits, default is \p segmented_queue::traits.
+ \p segmented_queue::make_traits metafunction can be used to construct the
type traits.
The queue stores the pointers to enqueued items so no special node hooks are needed.
*/
- template <class GC, typename T, typename Traits = segmented_queue::type_traits >
+ template <class GC, typename T, typename Traits = segmented_queue::traits >
class SegmentedQueue
{
public:
- typedef GC gc ; ///< Garbage collector
- typedef T value_type ; ///< type of the value stored in the queue
- typedef Traits options ; ///< Queue's traits
+ typedef GC gc; ///< Garbage collector
+ typedef T value_type; ///< type of the value stored in the queue
+ typedef Traits traits; ///< Queue traits
- typedef typename options::disposer disposer ; ///< value disposer, called only in \p clear() when the element to be dequeued
- typedef typename options::allocator allocator ; ///< Allocator maintaining the segments
- typedef typename options::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
- typedef typename options::item_counter item_counter; ///< Item counting policy, see cds::opt::item_counter option setter
- typedef typename options::stat stat ; ///< Internal statistics policy
- typedef typename options::lock_type lock_type ; ///< Type of mutex for maintaining an internal list of allocated segments.
- typedef typename options::permutation_generator permutation_generator; ///< Random permutation generator for sequence [0, quasi-factor)
+ typedef typename traits::disposer disposer ; ///< value disposer, called only in \p clear() when the element to be dequeued
+ typedef typename traits::allocator allocator; ///< Allocator maintaining the segments
+ typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
+ typedef typename traits::item_counter item_counter; ///< Item counting policy, see cds::opt::item_counter option setter
+ typedef typename traits::stat stat; ///< Internal statistics policy
+ typedef typename traits::lock_type lock_type; ///< Type of mutex for maintaining an internal list of allocated segments.
+ typedef typename traits::permutation_generator permutation_generator; ///< Random permutation generator for sequence [0, quasi-factor)
- static const size_t m_nHazardPtrCount = 2 ; ///< Count of hazard pointer required for the algorithm
+ static const size_t c_nHazardPtrCount = 2 ; ///< Count of hazard pointer required for the algorithm
protected:
//@cond
// Segment cell. LSB is used as deleted mark
- typedef cds::details::marked_ptr< value_type, 1 > cell;
+ typedef cds::details::marked_ptr< value_type, 1 > regular_cell;
+ typedef atomics::atomic< regular_cell > atomic_cell;
+ typedef typename cds::opt::details::apply_padding< atomic_cell, traits::padding >::type cell;
// Segment
struct segment: public boost::intrusive::slist_base_hook<>
{
- atomics::atomic< cell > * cells; // Cell array of size \ref m_nQuasiFactor
- size_t version; // version tag (ABA prevention tag)
+ cell * cells; // Cell array of size \ref m_nQuasiFactor
+ size_t version; // version tag (ABA prevention tag)
// cell array is placed here in one continuous memory block
// Initializes the segment
- segment( size_t nCellCount )
+ explicit segment( size_t nCellCount )
// MSVC warning C4355: 'this': used in base member initializer list
- : cells( reinterpret_cast< atomics::atomic< cell > * >( this + 1 ))
+ : cells( reinterpret_cast< cell *>( this + 1 ))
, version( 0 )
{
init( nCellCount );
}
+ segment() = delete;
+
void init( size_t nCellCount )
{
- atomics::atomic< cell > * pLastCell = cells + nCellCount;
- for ( atomics::atomic< cell > * pCell = cells; pCell < pLastCell; ++pCell )
- pCell->store( cell(), atomics::memory_order_relaxed );
+ cell * pLastCell = cells + nCellCount;
+ for ( cell* pCell = cells; pCell < pLastCell; ++pCell )
+ pCell->data.store( regular_cell(), atomics::memory_order_relaxed );
atomics::atomic_thread_fence( memory_model::memory_order_release );
}
-
- private:
- segment(); //=delete
};
- typedef typename opt::details::alignment_setter< atomics::atomic<segment *>, options::alignment >::type aligned_segment_ptr;
+ typedef typename opt::details::alignment_setter< atomics::atomic<segment *>, traits::alignment >::type aligned_segment_ptr;
//@endcond
protected:
~segment_list()
{
- m_List.clear_and_dispose( gc_segment_disposer() );
+ m_List.clear_and_dispose( gc_segment_disposer());
}
segment * head( typename gc::Guard& guard )
bool populated( segment const& s ) const
{
// The lock should be held
- atomics::atomic< cell > const * pLastCell = s.cells + quasi_factor();
- for ( atomics::atomic< cell > const * pCell = s.cells; pCell < pLastCell; ++pCell ) {
- if ( !pCell->load( memory_model::memory_order_relaxed ).all() )
+ cell const * pLastCell = s.cells + quasi_factor();
+ for ( cell const * pCell = s.cells; pCell < pLastCell; ++pCell ) {
+ if ( !pCell->data.load( memory_model::memory_order_relaxed ).all())
return false;
}
return true;
bool exhausted( segment const& s ) const
{
// The lock should be held
- atomics::atomic< cell > const * pLastCell = s.cells + quasi_factor();
- for ( atomics::atomic< cell > const * pCell = s.cells; pCell < pLastCell; ++pCell ) {
- if ( !pCell->load( memory_model::memory_order_relaxed ).bits() )
+ cell const * pLastCell = s.cells + quasi_factor();
+ for ( cell const * pCell = s.cells; pCell < pLastCell; ++pCell ) {
+ if ( !pCell->data.load( memory_model::memory_order_relaxed ).bits())
return false;
}
return true;
if ( !m_List.empty() && ( pTail != &m_List.back() || get_version(pTail) != m_List.back().version )) {
m_pTail.store( &m_List.back(), memory_model::memory_order_relaxed );
- return guard.assign( &m_List.back() );
+ return guard.assign( &m_List.back());
}
- assert( m_List.empty() || populated( m_List.back() ));
+# ifdef _DEBUG
+ assert( m_List.empty() || populated( m_List.back()));
+# endif
segment * pNew = allocate_segment();
m_Stat.onSegmentCreated();
- if ( m_List.empty() )
- m_pHead.store( pNew, memory_model::memory_order_relaxed );
+ if ( m_List.empty())
+ m_pHead.store( pNew, memory_model::memory_order_release );
m_List.push_back( *pNew );
m_pTail.store( pNew, memory_model::memory_order_release );
return guard.assign( pNew );
{
scoped_lock l( m_Lock );
- if ( m_List.empty() ) {
+ if ( m_List.empty()) {
m_pTail.store( nullptr, memory_model::memory_order_relaxed );
m_pHead.store( nullptr, memory_model::memory_order_relaxed );
return guard.assign( nullptr );
if ( pHead != &m_List.front() || get_version(pHead) != m_List.front().version ) {
m_pHead.store( &m_List.front(), memory_model::memory_order_relaxed );
- return guard.assign( &m_List.front() );
+ return guard.assign( &m_List.front());
}
- assert( exhausted(m_List.front()) );
+# ifdef _DEBUG
+ assert( exhausted( m_List.front()));
+# endif
m_List.pop_front();
- if ( m_List.empty() ) {
+ if ( m_List.empty()) {
pRet = guard.assign( nullptr );
m_pTail.store( nullptr, memory_model::memory_order_relaxed );
}
else
- pRet = guard.assign( &m_List.front() );
+ pRet = guard.assign( &m_List.front());
m_pHead.store( pRet, memory_model::memory_order_release );
}
segment * allocate_segment()
{
- return segment_allocator().NewBlock( sizeof(segment) + sizeof(cell) * m_nQuasiFactor,
- quasi_factor() );
+ return segment_allocator().NewBlock( sizeof(segment) + sizeof(cell) * m_nQuasiFactor, quasi_factor());
}
static void free_segment( segment * pSegment )
assert( pTailSegment );
}
- permutation_generator gen( quasi_factor() );
+ permutation_generator gen( quasi_factor());
// First, increment item counter.
// We sure that the item will be enqueued
do {
typename permutation_generator::integer_type i = gen;
CDS_DEBUG_ONLY( ++nLoopCount );
- if ( pTailSegment->cells[i].load(memory_model::memory_order_relaxed).all() ) {
+ if ( pTailSegment->cells[i].data.load(memory_model::memory_order_relaxed).all()) {
// Cell is not empty, go next
m_Stat.onPushPopulated();
}
else {
// Empty cell found, try to enqueue here
- cell nullCell;
- if ( pTailSegment->cells[i].compare_exchange_strong( nullCell, cell( &val ),
+ regular_cell nullCell;
+ if ( pTailSegment->cells[i].data.compare_exchange_strong( nullCell, regular_cell( &val ),
memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
// Ok to push item
m_Stat.onPush();
return true;
}
- assert( nullCell.ptr() );
+ assert( nullCell.ptr());
m_Stat.onPushContended();
}
- } while ( gen.next() );
+ } while ( gen.next());
assert( nLoopCount == quasi_factor());
The disposer specified in \p Traits template argument is <b>not</b> called for returned item.
You should manually dispose the item:
- <code>
+ \code
struct my_disposer {
void operator()( foo * p )
{
// pItem is not longer needed and can be deleted
// Do it via gc::HP::retire
cds::gc::HP::template retire< my_disposer >( pItem );
- </code>
+ \endcode
*/
value_type * dequeue()
{
/// Clear the queue
/**
- The function repeatedly calls \ref dequeue until it returns \p nullptr.
+ The function repeatedly calls \p dequeue() until it returns \p nullptr.
The disposer specified in \p Traits template argument is called for each removed item.
*/
void clear()
{
- clear_with( disposer() );
+ clear_with( disposer());
}
/// Clear the queue
void clear_with( Disposer )
{
typename gc::Guard itemGuard;
- while ( do_dequeue( itemGuard ) ) {
- assert( itemGuard.template get<value_type>() );
- gc::template retire<Disposer>( itemGuard.template get<value_type>() );
+ while ( do_dequeue( itemGuard )) {
+ assert( itemGuard.template get<value_type>());
+ gc::template retire<Disposer>( itemGuard.template get<value_type>());
itemGuard.clear();
}
}
typename gc::Guard segmentGuard;
segment * pHeadSegment = m_SegmentList.head( segmentGuard );
- permutation_generator gen( quasi_factor() );
+ permutation_generator gen( quasi_factor());
while ( true ) {
if ( !pHeadSegment ) {
// Queue is empty
}
bool bHadNullValue = false;
- cell item;
+ regular_cell item;
CDS_DEBUG_ONLY( size_t nLoopCount = 0 );
do {
typename permutation_generator::integer_type i = gen;
// Guard the item
// In segmented queue the cell cannot be reused
// So no loop is needed here to protect the cell
- item = pHeadSegment->cells[i].load( memory_model::memory_order_relaxed );
- itemGuard.assign( item.ptr() );
+ item = pHeadSegment->cells[i].data.load( memory_model::memory_order_relaxed );
+ itemGuard.assign( item.ptr());
// Check if this cell is empty, which means an element
// can be enqueued to this cell in the future
- if ( !item.ptr() )
+ if ( !item.ptr())
bHadNullValue = true;
else {
// If the item is not deleted yet
- if ( !item.bits() ) {
+ if ( !item.bits()) {
// Try to mark the cell as deleted
- if ( pHeadSegment->cells[i].compare_exchange_strong( item, item | 1,
+ if ( pHeadSegment->cells[i].data.compare_exchange_strong( item, item | 1,
memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
--m_ItemCounter;
return true;
}
- assert( item.bits() );
+ assert( item.bits());
m_Stat.onPopContended();
}
}
- } while ( gen.next() );
+ } while ( gen.next());
- assert( nLoopCount == quasi_factor() );
+ assert( nLoopCount == quasi_factor());
// scanning the entire segment without finding a candidate to dequeue
// If there was an empty cell, the queue is considered empty
# pragma warning( pop )
#endif
-#endif // #ifndef __CDS_INTRUSIVE_SEGMENTED_QUEUE_H
+#endif // #ifndef CDSLIB_INTRUSIVE_SEGMENTED_QUEUE_H