-//$$CDS-header$$
+/*
+ This file is a part of libcds - Concurrent Data Structures library
-#ifndef __CDS_INTRUSIVE_MSQUEUE_H
-#define __CDS_INTRUSIVE_MSQUEUE_H
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSLIB_INTRUSIVE_MSQUEUE_H
+#define CDSLIB_INTRUSIVE_MSQUEUE_H
#include <type_traits>
#include <cds/intrusive/details/single_link_struct.h>
-#include <cds/cxx11_atomic.h>
+#include <cds/algo/atomic.h>
namespace cds { namespace intrusive {
- Tag - a \ref cds_intrusive_hook_tag "tag"
*/
template <class GC, typename Tag = opt::none >
- using node = cds::intrusive::single_link::node< GC, Tag > ;
+ using node = cds::intrusive::single_link::node< GC, Tag >;
/// Base hook
/**
counter_type m_DequeueRace ; ///< Count of dequeue race conditions encountered
counter_type m_AdvanceTailError ; ///< Count of "advance tail failed" events
counter_type m_BadTail ; ///< Count of events "Tail is not pointed to the last item in the queue"
+ counter_type m_EmptyDequeue ; ///< Count of dequeue from empty queue
/// Register enqueue call
void onEnqueue() { ++m_EnqueueCount; }
void onAdvanceTailFailed() { ++m_AdvanceTailError; }
/// Register event "Tail is not pointed to last item in the queue"
void onBadTail() { ++m_BadTail; }
+ /// Register dequeuing from empty queue
+ void onEmptyDequeue() { ++m_EmptyDequeue; }
//@cond
void reset()
m_DequeueRace.reset();
m_AdvanceTailError.reset();
m_BadTail.reset();
+ m_EmptyDequeue.reset();
}
stat& operator +=( stat const& s )
m_DequeueRace += s.m_DequeueRace.get();
m_AdvanceTailError += s.m_AdvanceTailError.get();
m_BadTail += s.m_BadTail.get();
+ m_EmptyDequeue += s.m_EmptyDequeue.get();
return *this;
}
/// Dummy queue statistics - no counting is performed, no overhead
struct empty_stat
{
    //@cond
    // All event hooks are const no-ops so the compiler can elide them entirely
    void onEnqueue() const {}
    void onDequeue() const {}
    void onEnqueueRace() const {}
    void onDequeueRace() const {}
    void onAdvanceTailFailed() const {}
    void onBadTail() const {}
    void onEmptyDequeue() const {}

    void reset() {}
    empty_stat& operator +=( empty_stat const& )
    {
        return *this;
    }
    //@endcond
};
- /// MSQueue default type traits
+ /// MSQueue default traits
struct traits
{
/// Back-off strategy
typedef msqueue::empty_stat stat;
/// C++ memory ordering model
- /**
+ /**
Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
 or \p opt::v::sequential_consistent (sequentially consistent memory model).
*/
/// Link checking, see \p cds::opt::link_checker
static CDS_CONSTEXPR const opt::link_check_type link_checker = opt::debug_check_link;
- /// Alignment for internal queue data. Default is \p opt::cache_line_alignment
- enum { alignment = opt::cache_line_alignment };
+ /// Padding for internal critical atomic data. Default is \p opt::cache_line_padding
+ enum { padding = opt::cache_line_padding };
};
/// Metafunction converting option list to \p msqueue::traits
/**
Supported \p Options are:
- - opt::hook - hook used. Possible hooks are: \p msqueue::base_hook, \p msqueue::member_hook, \p msqueue::traits_hook.
+ - \p opt::hook - hook used. Possible hooks are: \p msqueue::base_hook, \p msqueue::member_hook, \p msqueue::traits_hook.
If the option is not specified, \p %msqueue::base_hook<> is used.
- - opt::back_off - back-off strategy used, default is \p cds::backoff::empty.
- - opt::disposer - the functor used for dispose removed items. Default is \p opt::v::empty_disposer. This option is used
+ - \p opt::back_off - back-off strategy used, default is \p cds::backoff::empty.
+ - \p opt::disposer - the functor used for dispose removed items. Default is \p opt::v::empty_disposer. This option is used
when dequeuing.
- - opt::link_checker - the type of node's link fields checking. Default is \p opt::debug_check_link
- - opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter (item counting disabled)
+ - \p opt::link_checker - the type of node's link fields checking. Default is \p opt::debug_check_link
+ - \p opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter (item counting disabled)
To enable item counting use \p cds::atomicity::item_counter
- - opt::stat - the type to gather internal statistics.
+ - \p opt::stat - the type to gather internal statistics.
Possible statistics types are: \p msqueue::stat, \p msqueue::empty_stat, user-provided class that supports \p %msqueue::stat interface.
Default is \p %msqueue::empty_stat (internal statistics disabled).
- - opt::alignment - the alignment for internal queue data. Default is \p opt::cache_line_alignment
- - opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
+ - \p opt::padding - padding for internal critical atomic data. Default is \p opt::cache_line_padding
+ - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
 or \p opt::v::sequential_consistent (sequentially consistent memory model).
Example: declare \p %MSQueue with item counting and internal statistics
\code
- typedef cds::intrusive::MSQueue< cds::gc::HP, Foo,
+ typedef cds::intrusive::MSQueue< cds::gc::HP, Foo,
typename cds::intrusive::msqueue::make_traits<
 cds::intrusive::opt::hook< cds::intrusive::msqueue::base_hook< cds::opt::gc<cds::gc::HP> >>,
 cds::opt::item_counter< cds::atomicity::item_counter >,
typedef cds::intrusive::MSQueue< cds::gc::HP, Foo, myTraits > myQueue;
// Equivalent make_traits example:
- typedef cds::intrusive::MSQueue< cds::gc::HP, Foo,
- typename cds::intrusive::msqueue::make_traits<
+ typedef cds::intrusive::MSQueue< cds::gc::HP, Foo,
+ typename cds::intrusive::msqueue::make_traits<
cds::opt::stat< cds::intrusive::msqueue::stat<> >,
cds::opt::item_counter< cds::atomicity::item_counter >
>::type
// Example 2:
// MSQueue with Hazard Pointer garbage collector,
// member hook + item disposer + item counter,
- // without alignment of internal queue data
+ // without padding of internal queue data
// Use msqueue::make_traits
struct Bar
{
>
,ci::opt::disposer< fooDisposer >
,cds::opt::item_counter< cds::atomicity::item_counter >
- ,cds::opt::alignment< cds::opt::no_special_alignment >
+ ,cds::opt::padding< cds::opt::no_special_padding >
>::type
> barQueue;
\endcode
*/
- template <typename GC, typename T, typename Traits>
+ template <typename GC, typename T, typename Traits = msqueue::traits>
class MSQueue
{
public:
typedef MSQueue< GC2, T2, Traits2 > other; ///< Rebinding result
};
- static CDS_CONSTEXPR const size_t m_nHazardPtrCount = 2; ///< Count of hazard pointer required for the algorithm
+ static CDS_CONSTEXPR const size_t c_nHazardPtrCount = 2; ///< Count of hazard pointer required for the algorithm
protected:
//@cond
// GC and node_type::gc must be the same
static_assert((std::is_same<gc, typename node_type::gc>::value), "GC and node_type::gc must be the same");
- typedef intrusive::node_to_value<MSQueue> node_to_value;
- typedef typename opt::details::alignment_setter< typename node_type::atomic_node_ptr, traits::alignment >::type aligned_node_ptr;
- typedef typename opt::details::alignment_setter< node_type, traits::alignment >::type dummy_node_type;
+ typedef typename node_type::atomic_node_ptr atomic_node_ptr;
- aligned_node_ptr m_pHead ; ///< Queue's head pointer
- aligned_node_ptr m_pTail ; ///< Queue's tail pointer
- dummy_node_type m_Dummy ; ///< dummy node
- item_counter m_ItemCounter ; ///< Item counter
- stat m_Stat ; ///< Internal statistics
+ atomic_node_ptr m_pHead; ///< Queue's head pointer
+ typename opt::details::apply_padding< atomic_node_ptr, traits::padding >::padding_type pad1_;
+ atomic_node_ptr m_pTail; ///< Queue's tail pointer
+ typename opt::details::apply_padding< atomic_node_ptr, traits::padding >::padding_type pad2_;
+ node_type m_Dummy; ///< dummy node
+ typename opt::details::apply_padding< node_type, traits::padding >::padding_type pad3_;
+ item_counter m_ItemCounter; ///< Item counter
+ stat m_Stat; ///< Internal statistics
//@endcond
//@cond
node_type * h;
while ( true ) {
- h = res.guards.protect( 0, m_pHead, node_to_value() );
- pNext = h->m_pNext.load( memory_model::memory_order_relaxed );
- res.guards.assign( 1, node_to_value()( pNext ));
+ h = res.guards.protect( 0, m_pHead, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p );});
+ pNext = res.guards.protect( 1, h->m_pNext, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p );});
if ( m_pHead.load(memory_model::memory_order_acquire) != h )
continue;
- if ( pNext == nullptr )
- return false ; // empty queue
+ if ( pNext == nullptr ) {
+ m_Stat.onEmptyDequeue();
+ return false; // empty queue
+ }
node_type * t = m_pTail.load(memory_model::memory_order_acquire);
if ( h == t ) {
continue;
}
- if ( m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed ))
+ if ( m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
break;
m_Stat.onDequeueRace();
// Disposer thunk invoked by the GC when a retired node is reclaimed:
// clears the node's link field first, then applies the user-provided disposer
void operator()( value_type * p ) const
{
    assert( p != nullptr );
    MSQueue::clear_links( node_traits::to_node_ptr( p ));
    disposer()(p);
}
};
if ( p != &m_Dummy )
- gc::template retire<disposer_thunk>( node_traits::to_value_ptr( p ) );
+ gc::template retire<disposer_thunk>( node_traits::to_value_ptr( p ));
}
//@endcond
node_type * pHead = m_pHead.load(memory_model::memory_order_relaxed);
assert( pHead != nullptr );
- assert( pHead == m_pTail.load(memory_model::memory_order_relaxed) );
+ assert( pHead == m_pTail.load(memory_model::memory_order_relaxed));
m_pHead.store( nullptr, memory_model::memory_order_relaxed );
m_pTail.store( nullptr, memory_model::memory_order_relaxed );
node_type * t;
while ( true ) {
- t = guard.protect( m_pTail, node_to_value() );
+ t = guard.protect( m_pTail, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p );});
node_type * pNext = t->m_pNext.load(memory_model::memory_order_acquire);
if ( pNext != nullptr ) {
++m_ItemCounter;
m_Stat.onEnqueue();
- if ( !m_pTail.compare_exchange_strong( t, pNew, memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ))
+ if ( !m_pTail.compare_exchange_strong( t, pNew, memory_model::memory_order_release, atomics::memory_order_relaxed ))
m_Stat.onAdvanceTailFailed();
return true;
}
bool empty() const
{
typename gc::Guard guard;
- return guard.protect( m_pHead, node_to_value() )->m_pNext.load( memory_model::memory_order_relaxed ) == nullptr;
+ node_type * p = guard.protect( m_pHead, []( node_type * pNode ) -> value_type * { return node_traits::to_value_ptr( pNode );});
+ return p->m_pNext.load( memory_model::memory_order_relaxed ) == nullptr;
}
/// Clear the queue
*/
void clear()
{
- while ( dequeue() );
+ while ( dequeue());
}
/// Returns queue's item count
}} // namespace cds::intrusive
-#endif // #ifndef __CDS_INTRUSIVE_MSQUEUE_H
+#endif // #ifndef CDSLIB_INTRUSIVE_MSQUEUE_H