-//$$CDS-header$$
+/*
+ This file is a part of libcds - Concurrent Data Structures library
-#ifndef __CDS_INTRUSIVE_SEGMENTED_QUEUE_H
-#define __CDS_INTRUSIVE_SEGMENTED_QUEUE_H
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSLIB_INTRUSIVE_SEGMENTED_QUEUE_H
+#define CDSLIB_INTRUSIVE_SEGMENTED_QUEUE_H
#include <mutex>
#include <cds/intrusive/details/base.h>
#include <cds/details/marked_ptr.h>
#include <cds/algo/int_algo.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
#include <cds/opt/permutation.h>
#include <boost/intrusive/slist.hpp>
typedef CDS_DEFAULT_ALLOCATOR allocator;
/// Lock type used to maintain an internal list of allocated segments
- typedef cds::lock::Spin lock_type;
+ typedef cds::sync::spin lock_type;
/// Random \ref cds::opt::permutation_generator "permutation generator" for sequence [0, quasi_factor)
typedef cds::opt::v::random2_permutation<int> permutation_generator;
- all other \p %segmented_queue::traits members left unchanged.
+ all other \p %segmented_queue::traits members are left unchanged.
\p Options are:
- - \p opt::disposer - the functor used for dispose removed items.
+ - \p opt::disposer - the functor used to dispose removed items.
- - \p opt::stat - internal statistics, possible type: \p segmented_queue::stat, \p segmented_queue::empty_stat (the default)
- - \p opt::item_counter - item counting feature. Note that \p atomicity::empty_item_counetr is not suitable
-   for segmented queue.
+ - \p opt::stat - internal statistics, possible types: \p segmented_queue::stat, \p segmented_queue::empty_stat (the default)
+ - \p opt::item_counter - item counting feature. Note that \p atomicity::empty_item_counter is not suitable
+   for a segmented queue; see the example below.
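+
+ A minimal sketch of a user-defined traits type (\p my_disposer and \p foo are
+ hypothetical names, used for illustration only):
+ \code
+ // my_disposer and foo are hypothetical, for illustration
+ struct my_disposer {
+     void operator()( foo * p ) { delete p; }
+ };
+
+ struct my_traits: public cds::intrusive::segmented_queue::traits {
+     typedef my_disposer             disposer; // called by clear() for each removed item
+     typedef segmented_queue::stat<> stat;     // turn on internal statistics
+ };
+ \endcode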
- typedef typename traits::lock_type lock_type; ///< Type of mutex for maintaining an internal list of allocated segments.
- typedef typename traits::permutation_generator permutation_generator; ///< Random permutation generator for sequence [0, quasi-factor)
+ typedef typename traits::lock_type lock_type; ///< Lock type for maintaining an internal list of allocated segments
+ typedef typename traits::permutation_generator permutation_generator; ///< Random permutation generator for the sequence [0, quasi_factor)
- static const size_t m_nHazardPtrCount = 2 ; ///< Count of hazard pointer required for the algorithm
+ static const size_t c_nHazardPtrCount = 2; ///< Count of hazard pointers required by the algorithm
protected:
//@cond
- // cell array is placed here in one continuous memory block
+ // cell array is placed here in one contiguous memory block
// Initializes the segment
- segment( size_t nCellCount )
+ explicit segment( size_t nCellCount )
// MSVC warning C4355: 'this': used in base member initializer list
: cells( reinterpret_cast< cell *>( this + 1 ))
, version( 0 )
~segment_list()
{
- m_List.clear_and_dispose( gc_segment_disposer() );
+ m_List.clear_and_dispose( gc_segment_disposer());
}
segment * head( typename gc::Guard& guard )
// The lock should be held
cell const * pLastCell = s.cells + quasi_factor();
for ( cell const * pCell = s.cells; pCell < pLastCell; ++pCell ) {
- if ( !pCell->data.load( memory_model::memory_order_relaxed ).all() )
+ if ( !pCell->data.load( memory_model::memory_order_relaxed ).all())
return false;
}
return true;
// The lock should be held
cell const * pLastCell = s.cells + quasi_factor();
for ( cell const * pCell = s.cells; pCell < pLastCell; ++pCell ) {
- if ( !pCell->data.load( memory_model::memory_order_relaxed ).bits() )
+ if ( !pCell->data.load( memory_model::memory_order_relaxed ).bits())
return false;
}
return true;
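+        // While m_Lock was being acquired, another thread may have appended a new
+        // tail segment, or the segment once pointed to by pTail may have been
+        // recycled at the same address; comparing both the pointer and its
+        // version detects such a stale pTail.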
if ( !m_List.empty() && ( pTail != &m_List.back() || get_version(pTail) != m_List.back().version )) {
m_pTail.store( &m_List.back(), memory_model::memory_order_relaxed );
- return guard.assign( &m_List.back() );
+ return guard.assign( &m_List.back());
}
- assert( m_List.empty() || populated( m_List.back() ));
+# ifdef _DEBUG
+ assert( m_List.empty() || populated( m_List.back()));
+# endif
segment * pNew = allocate_segment();
m_Stat.onSegmentCreated();
- if ( m_List.empty() )
- m_pHead.store( pNew, memory_model::memory_order_relaxed );
+ if ( m_List.empty())
+ m_pHead.store( pNew, memory_model::memory_order_release );
m_List.push_back( *pNew );
m_pTail.store( pNew, memory_model::memory_order_release );
return guard.assign( pNew );
{
scoped_lock l( m_Lock );
- if ( m_List.empty() ) {
+ if ( m_List.empty()) {
m_pTail.store( nullptr, memory_model::memory_order_relaxed );
m_pHead.store( nullptr, memory_model::memory_order_relaxed );
return guard.assign( nullptr );
if ( pHead != &m_List.front() || get_version(pHead) != m_List.front().version ) {
m_pHead.store( &m_List.front(), memory_model::memory_order_relaxed );
- return guard.assign( &m_List.front() );
+ return guard.assign( &m_List.front());
}
- assert( exhausted(m_List.front()) );
+# ifdef _DEBUG
+ assert( exhausted( m_List.front()));
+# endif
m_List.pop_front();
- if ( m_List.empty() ) {
+ if ( m_List.empty()) {
pRet = guard.assign( nullptr );
m_pTail.store( nullptr, memory_model::memory_order_relaxed );
}
else
- pRet = guard.assign( &m_List.front() );
+ pRet = guard.assign( &m_List.front());
m_pHead.store( pRet, memory_model::memory_order_release );
}
segment * allocate_segment()
{
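+            // The segment header and its cells live in one contiguous block;
+            // segment's constructor sets cells = (this + 1), so the cell array
+            // starts immediately past the header:
+            //
+            //   | segment header | cell[0] | cell[1] | ... | cell[QF-1] |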
- return segment_allocator().NewBlock( sizeof(segment) + sizeof(cell) * m_nQuasiFactor,
- quasi_factor() );
+ return segment_allocator().NewBlock( sizeof(segment) + sizeof(cell) * m_nQuasiFactor, quasi_factor());
}
static void free_segment( segment * pSegment )
assert( pTailSegment );
}
- permutation_generator gen( quasi_factor() );
+ permutation_generator gen( quasi_factor());
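+            // gen enumerates each index in [0, quasi_factor()) exactly once in
+            // pseudo-random order: conversion to integer_type yields the current
+            // index, and gen.next() returns false after the last one.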
// First, increment item counter.
- // We sure that the item will be enqueued
+ // We are sure that the item will be enqueued
do {
typename permutation_generator::integer_type i = gen;
CDS_DEBUG_ONLY( ++nLoopCount );
- if ( pTailSegment->cells[i].data.load(memory_model::memory_order_relaxed).all() ) {
+ if ( pTailSegment->cells[i].data.load(memory_model::memory_order_relaxed).all()) {
// Cell is not empty, go next
m_Stat.onPushPopulated();
}
m_Stat.onPush();
return true;
}
- assert( nullCell.ptr() );
+ assert( nullCell.ptr());
m_Stat.onPushContended();
}
- } while ( gen.next() );
+ } while ( gen.next());
assert( nLoopCount == quasi_factor());
- The disposer specified in \p Traits template argument is <b>not</b> called for returned item.
+ The disposer specified in the \p Traits template argument is <b>not</b> called for the returned item.
You should manually dispose the item:
- <code>
+ \code
struct my_disposer {
void operator()( foo * p )
{
- // pItem is not longer needed and can be deleted
+ // pItem is no longer needed and can be deleted
// Do it via gc::HP::retire
cds::gc::HP::template retire< my_disposer >( pItem );
- </code>
+ \endcode
*/
value_type * dequeue()
{
/// Clear the queue
/**
- The function repeatedly calls \ref dequeue until it returns \p nullptr.
+ The function repeatedly calls \p dequeue() until it returns \p nullptr.
- The disposer specified in \p Traits template argument is called for each removed item.
+ The disposer specified in the \p Traits template argument is called for each removed item.
*/
void clear()
{
- clear_with( disposer() );
+ clear_with( disposer());
}
/// Clear the queue
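+        /// A usage sketch (illustrative; \p myQueue and \p my_disposer are hypothetical):
+        /// \code
+        ///     // dequeue every item, reclaiming each one via my_disposer
+        ///     myQueue.clear_with( my_disposer());
+        /// \endcode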
void clear_with( Disposer )
{
typename gc::Guard itemGuard;
- while ( do_dequeue( itemGuard ) ) {
- assert( itemGuard.template get<value_type>() );
- gc::template retire<Disposer>( itemGuard.template get<value_type>() );
+ while ( do_dequeue( itemGuard )) {
+ assert( itemGuard.template get<value_type>());
+ gc::template retire<Disposer>( itemGuard.template get<value_type>());
itemGuard.clear();
}
}
typename gc::Guard segmentGuard;
segment * pHeadSegment = m_SegmentList.head( segmentGuard );
- permutation_generator gen( quasi_factor() );
+ permutation_generator gen( quasi_factor());
while ( true ) {
if ( !pHeadSegment ) {
// Queue is empty
- // In segmented queue the cell cannot be reused
+ // In a segmented queue a cell cannot be reused
// So no loop is needed here to protect the cell
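+                // A cell stores a marked pointer: the pointer part refers to the
+                // enqueued item, and bit 0, once set, permanently marks the item
+                // as dequeued.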
item = pHeadSegment->cells[i].data.load( memory_model::memory_order_relaxed );
- itemGuard.assign( item.ptr() );
+ itemGuard.assign( item.ptr());
// Check if this cell is empty, which means an element
// can be enqueued to this cell in the future
- if ( !item.ptr() )
+ if ( !item.ptr())
bHadNullValue = true;
else {
// If the item is not deleted yet
- if ( !item.bits() ) {
+ if ( !item.bits()) {
// Try to mark the cell as deleted
if ( pHeadSegment->cells[i].data.compare_exchange_strong( item, item | 1,
memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
return true;
}
- assert( item.bits() );
+ assert( item.bits());
m_Stat.onPopContended();
}
}
- } while ( gen.next() );
+ } while ( gen.next());
- assert( nLoopCount == quasi_factor() );
+ assert( nLoopCount == quasi_factor());
- // scanning the entire segment without finding a candidate to dequeue
+ // The entire segment was scanned without finding a candidate to dequeue.
// If there was an empty cell, the queue is considered empty
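+                // Otherwise every cell is occupied and marked as dequeued: the head
+                // segment is exhausted and the head can advance to the next segment.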
# pragma warning( pop )
#endif
-#endif // #ifndef __CDS_INTRUSIVE_SEGMENTED_QUEUE_H
+#endif // #ifndef CDSLIB_INTRUSIVE_SEGMENTED_QUEUE_H