3 #ifndef __CDS_INTRUSIVE_BASKET_QUEUE_H
4 #define __CDS_INTRUSIVE_BASKET_QUEUE_H
6 #include <cds/intrusive/base.h>
7 #include <cds/details/marked_ptr.h>
8 #include <cds/intrusive/queue_stat.h>
9 #include <cds/intrusive/single_link_struct.h>
11 #include <cds/intrusive/details/dummy_node_holder.h>
13 #include <cds/details/std/type_traits.h>
15 namespace cds { namespace intrusive {
17 /// BasketQueue-related definitions
18 /** @ingroup cds_intrusive_helper
20 namespace basket_queue {
24 - GC - garbage collector used
25 - Tag - a tag used to distinguish between different implementation
27 template <class GC, typename Tag = opt::none>
28 struct node: public GC::container_node
30 typedef GC gc ; ///< Garbage collector
31 typedef Tag tag ; ///< tag
33 typedef cds::details::marked_ptr<node, 1> marked_ptr ; ///< marked pointer
34 typedef typename gc::template atomic_marked_ptr< marked_ptr> atomic_marked_ptr ; ///< atomic marked pointer specific for GC
36 /// Rebind node for other template parameters
37 template <class GC2, typename Tag2 = tag>
39 typedef node<GC2, Tag2> other ; ///< Rebinding result
42 atomic_marked_ptr m_pNext ; ///< pointer to the next node in the container
50 // Specialization for HRC GC
51 template <typename Tag>
52 struct node< gc::HRC, Tag>: public gc::HRC::container_node
54 typedef gc::HRC gc ; ///< Garbage collector
55 typedef Tag tag ; ///< tag
57 typedef cds::details::marked_ptr<node, 1> marked_ptr ; ///< marked pointer
58 typedef typename gc::template atomic_marked_ptr< marked_ptr> atomic_marked_ptr ; ///< atomic marked pointer specific for GC
60 atomic_marked_ptr m_pNext ; ///< pointer to the next node in the container
67 virtual void cleanUp( cds::gc::hrc::ThreadGC * pGC )
69 assert( pGC != nullptr );
70 typename gc::template GuardArray<2> aGuards( *pGC );
73 marked_ptr pNext = aGuards.protect( 0, m_pNext );
74 if ( pNext.ptr() && pNext->m_bDeleted.load(CDS_ATOMIC::memory_order_acquire) ) {
75 marked_ptr p = aGuards.protect( 1, pNext->m_pNext );
76 m_pNext.compare_exchange_strong( pNext, p, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed );
85 virtual void terminate( cds::gc::hrc::ThreadGC * pGC, bool bConcurrent )
88 marked_ptr pNext = m_pNext.load(CDS_ATOMIC::memory_order_relaxed);
89 do {} while ( !m_pNext.compare_exchange_weak( pNext, marked_ptr(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) );
92 m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed );
98 using single_link::default_hook;
101 template < typename HookType, CDS_DECL_OPTIONS2>
104 typedef typename opt::make_options< default_hook, CDS_OPTIONS2>::type options;
105 typedef typename options::gc gc;
106 typedef typename options::tag tag;
107 typedef node<gc, tag> node_type;
108 typedef HookType hook_type;
116 - opt::gc - garbage collector used.
119 template < CDS_DECL_OPTIONS2 >
120 struct base_hook: public hook< opt::base_hook_tag, CDS_OPTIONS2 >
125 \p MemberOffset defines offset in bytes of \ref node member into your structure.
126 Use \p offsetof macro to define \p MemberOffset
129 - opt::gc - garbage collector used.
132 template < size_t MemberOffset, CDS_DECL_OPTIONS2 >
133 struct member_hook: public hook< opt::member_hook_tag, CDS_OPTIONS2 >
136 static const size_t c_nMemberOffset = MemberOffset;
142 \p NodeTraits defines type traits for node.
143 See \ref node_traits for \p NodeTraits interface description
146 - opt::gc - garbage collector used.
149 template <typename NodeTraits, CDS_DECL_OPTIONS2 >
150 struct traits_hook: public hook< opt::traits_hook_tag, CDS_OPTIONS2 >
153 typedef NodeTraits node_traits;
158 #if defined(CDS_CXX11_TEMPLATE_ALIAS_SUPPORT) && !defined(CDS_DOXYGEN_INVOKED)
159 template < typename Node, opt::link_check_type LinkType > using get_link_checker = single_link::get_link_checker< Node, LinkType >;
161 /// Metafunction for selecting appropriate link checking policy
162 template < typename Node, opt::link_check_type LinkType >
163 struct get_link_checker: public single_link::get_link_checker< Node, LinkType >
168 /// Basket queue internal statistics. May be used for debugging or profiling
170 Basket queue statistics derives from cds::intrusive::queue_stat
171 and extends it by two additional fields specific for the algorithm.
173 template <typename Counter = cds::atomicity::event_counter >
174 struct stat: public cds::intrusive::queue_stat< Counter >
177 typedef cds::intrusive::queue_stat< Counter > base_class;
178 typedef typename base_class::counter_type counter_type;
181 counter_type m_TryAddBasket ; ///< Count of attempts adding new item to a basket (only for BasketQueue, for other queue this metric is not used)
182 counter_type m_AddBasketCount ; ///< Count of events "Enqueue a new item into basket" (only for BasketQueue, for other queue this metric is not used)
184 /// Register an attempt to add new item to basket
185 void onTryAddBasket() { ++m_TryAddBasket; }
186 /// Register event "Enqueue a new item into basket" (only for BasketQueue, for other queue this metric is not used)
187 void onAddBasket() { ++m_AddBasketCount; }
193 m_TryAddBasket.reset();
194 m_AddBasketCount.reset();
197 stat& operator +=( stat const& s )
199 base_class::operator +=( s );
200 m_TryAddBasket += s.m_TryAddBasket.get();
201 m_AddBasketCount += s.m_AddBasketCount.get();
207 /// Dummy basket queue statistics - no counting is performed. Support interface like \ref stat
208 struct dummy_stat: public cds::intrusive::queue_dummy_stat
211 void onTryAddBasket() {}
212 void onAddBasket() {}
215 dummy_stat& operator +=( dummy_stat const& )
222 } // namespace basket_queue
224 /// Basket lock-free queue (intrusive variant)
225 /** @ingroup cds_intrusive_queue
226 Implementation of basket queue algorithm.
229 [2007] Moshe Hoffman, Ori Shalev, Nir Shavit "The Baskets Queue"
233 In the "basket" approach, instead of
234 the traditional ordered list of nodes, the queue consists of an ordered list of groups
235 of nodes (logical baskets). The order of nodes in each basket need not be specified, and in
236 fact, it is easiest to maintain them in FIFO order. The baskets fulfill the following basic
238 - Each basket has a time interval in which all its nodes' enqueue operations overlap.
239 - The baskets are ordered by the order of their respective time intervals.
240 - For each basket, its nodes' dequeue operations occur after its time interval.
241 - The dequeue operations are performed according to the order of baskets.
243 Two properties define the FIFO order of nodes:
244 - The order of nodes in a basket is not specified.
245 - The order of nodes in different baskets is the FIFO-order of their respective baskets.
247 In algorithms such as the MS-queue or optimistic
248 queue, threads enqueue items by applying a Compare-and-swap (CAS) operation to the
249 queue's tail pointer, and all the threads that fail on a particular CAS operation (and also
250 the winner of that CAS) overlap in time. In particular, they share the time interval of
251 the CAS operation itself. Hence, all the threads that fail to CAS on the tail-node of
252 the queue may be inserted into the same basket. By integrating the basket-mechanism
253 as the back-off mechanism, the time usually spent on backing-off before trying to link
254 onto the new tail, can now be utilized to insert the failed operations into the basket,
255 allowing enqueues to complete sooner. In the meantime, the next successful CAS operations
256 by enqueues allow new baskets to be formed down the list, and these can be
257 filled concurrently. Moreover, the failed operations don't retry their link attempt on the
258 new tail, lowering the overall contention on it. This leads to a queue
259 algorithm that unlike all former concurrent queue algorithms requires virtually no tuning
260 of the backoff mechanisms to reduce contention, making the algorithm an attractive
261 out-of-the-box queue.
263 In order to enqueue, just as in MSQueue, a thread first tries to link the new node to
264 the last node. If it failed to do so, then another thread has already succeeded. Thus it
265 tries to insert the new node into the new basket that was created by the winner thread.
266 To dequeue a node, a thread first reads the head of the queue to obtain the
267 oldest basket. It may then dequeue any node in the oldest basket.
269 <b>Template arguments:</b>
270 - \p GC - garbage collector type: gc::HP, gc::HRC, gc::PTB
271 - \p T - type to be stored in the queue, should be convertible to \ref single_link::node
272 - \p Options - options
274 <b>Type of node</b>: \ref single_link::node
277 - opt::hook - hook used. Possible values are: basket_queue::base_hook, basket_queue::member_hook, basket_queue::traits_hook.
278 If the option is not specified, <tt>basket_queue::base_hook<></tt> is used.
279 For Gidenstam's gc::HRC, only basket_queue::base_hook is supported.
280 - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::empty is used.
281 - opt::disposer - the functor used for dispose removed items. Default is opt::v::empty_disposer. This option is used
282 in \ref dequeue function.
283 - opt::link_checker - the type of node's link fields checking. Default is \ref opt::debug_check_link
284 Note: for gc::HRC garbage collector, link checking policy is always selected as \ref opt::always_check_link.
285 - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter (no item counting feature)
286 - opt::stat - the type to gather internal statistics.
287 Possible option value are: \ref basket_queue::stat, \ref basket_queue::dummy_stat,
288 user-provided class that supports basket_queue::stat interface.
289 Default is \ref basket_queue::dummy_stat.
290 Generic option intrusive::queue_stat and intrusive::queue_dummy_stat are acceptable too, however,
291 they will be automatically converted to basket_queue::stat and basket_queue::dummy_stat
293 - opt::alignment - the alignment for internal queue data. Default is opt::cache_line_alignment
294 - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default)
295 or opt::v::sequential_consistent (sequentially consistent memory model).
297 Garbage collecting schema \p GC must be consistent with the basket_queue::node GC.
299 \par About item disposing
300 Like MSQueue, the Baskets queue algo has a key feature: even if the queue is empty it contains one item that is "dummy" one from
301 the standpoint of the algo. See \ref dequeue function doc for explanation.
305 #include <cds/intrusive/basket_queue.h>
306 #include <cds/gc/hp.h>
308 namespace ci = cds::intrusive;
309 typedef cds::gc::HP hp_gc;
311 // Basket queue with Hazard Pointer garbage collector, base hook + item disposer:
312 struct Foo: public ci::basket_queue::node< hp_gc >
318 // Disposer for Foo struct just deletes the object passed in
320 void operator()( Foo * p )
326 typedef ci::BasketQueue< hp_gc,
329 ci::basket_queue::base_hook< ci::opt::gc<hp_gc> >
331 ,ci::opt::disposer< fooDisposer >
334 // BasketQueue with Hazard Pointer garbage collector,
335 // member hook + item disposer + item counter,
336 // without alignment of internal queue data:
341 ci::basket_queue::node< hp_gc > hMember;
344 typedef ci::BasketQueue< hp_gc,
347 ci::basket_queue::member_hook<
348 offsetof(Bar, hMember)
352 ,ci::opt::disposer< fooDisposer >
353 ,cds::opt::item_counter< cds::atomicity::item_counter >
354 ,cds::opt::alignment< cds::opt::no_special_alignment >
358 template <typename GC, typename T, CDS_DECL_OPTIONS9>
362 struct default_options
364 typedef cds::backoff::empty back_off;
365 typedef basket_queue::base_hook<> hook;
366 typedef opt::v::empty_disposer disposer;
367 typedef atomicity::empty_item_counter item_counter;
368 typedef basket_queue::dummy_stat stat;
369 typedef opt::v::relaxed_ordering memory_model;
370 static const opt::link_check_type link_checker = opt::debug_check_link;
371 enum { alignment = opt::cache_line_alignment };
377 typedef typename opt::make_options<
378 typename cds::opt::find_type_traits< default_options, CDS_OPTIONS9 >::type
382 typedef typename std::conditional<
383 std::is_same<typename options::stat, cds::intrusive::queue_stat<> >::value
384 ,basket_queue::stat<>
385 ,typename std::conditional<
386 std::is_same<typename options::stat, cds::intrusive::queue_dummy_stat>::value
387 ,basket_queue::dummy_stat
388 ,typename options::stat
395 typedef T value_type ; ///< type of value stored in the queue
396 typedef typename options::hook hook ; ///< hook type
397 typedef typename hook::node_type node_type ; ///< node type
398 typedef typename options::disposer disposer ; ///< disposer used
399 typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits
400 typedef typename basket_queue::get_link_checker< node_type, options::link_checker >::type link_checker ; ///< link checker
402 typedef GC gc ; ///< Garbage collector
403 typedef typename options::back_off back_off ; ///< back-off strategy
404 typedef typename options::item_counter item_counter ; ///< Item counting policy used
405 #ifdef CDS_DOXYGEN_INVOKED
406 typedef typename options::stat stat ; ///< Internal statistics policy used
408 typedef stat_type_ stat;
410 typedef typename options::memory_model memory_model ; ///< Memory ordering. See cds::opt::memory_model option
412 /// Rebind template arguments
413 template <typename GC2, typename T2, CDS_DECL_OTHER_OPTIONS9>
415 typedef BasketQueue< GC2, T2, CDS_OTHER_OPTIONS9> other ; ///< Rebinding result
418 static const size_t m_nHazardPtrCount = 6 ; ///< Count of hazard pointer required for the algorithm
423 struct internal_disposer
425 void operator()( value_type * p )
427 assert( p != nullptr );
429 BasketQueue::clear_links( node_traits::to_node_ptr(p) );
434 typedef typename node_type::marked_ptr marked_ptr;
435 typedef typename node_type::atomic_marked_ptr atomic_marked_ptr;
437 typedef intrusive::node_to_value<BasketQueue> node_to_value;
438 typedef typename opt::details::alignment_setter< atomic_marked_ptr, options::alignment >::type aligned_node_ptr;
439 typedef typename opt::details::alignment_setter<
440 cds::intrusive::details::dummy_node< gc, node_type>,
442 >::type dummy_node_type;
446 aligned_node_ptr m_pHead ; ///< Queue's head pointer (aligned)
447 aligned_node_ptr m_pTail ; ///< Queue's tail pointer (aligned)
449 dummy_node_type m_Dummy ; ///< dummy node
450 item_counter m_ItemCounter ; ///< Item counter
451 stat m_Stat ; ///< Internal statistics
453 size_t const m_nMaxHops;
458 template <size_t Count>
// Loads p, publishes the pointee into guard slot idx, then re-reads p (acquire)
// to verify the pointer did not change between the load and the guard assignment —
// the usual hazard-pointer protect-and-verify step. NOTE(review): loop/return lines
// are elided in this listing; presumably it retries until validation succeeds — confirm.
459 static marked_ptr guard_node( typename gc::template GuardArray<Count>& g, size_t idx, atomic_marked_ptr const& p )
463 pg = p.load( memory_model::memory_order_relaxed );
464 g.assign( idx, node_traits::to_value_ptr( pg.ptr() ) );
465 if ( p.load( memory_model::memory_order_acquire) == pg ) {
// Single-guard overload of the same protect-and-verify pattern.
471 static marked_ptr guard_node( typename gc::Guard& g, atomic_marked_ptr const& p )
475 pg = p.load( memory_model::memory_order_relaxed );
476 g.assign( node_traits::to_value_ptr( pg.ptr() ) );
477 if ( p.load( memory_model::memory_order_acquire) == pg ) {
483 struct dequeue_result {
484 typename gc::template GuardArray<3> guards;
488 bool do_dequeue( dequeue_result& res, bool bDeque )
491 // If bDeque == false then the function is called from empty method and no real dequeuing operation is performed
500 h = guard_node( res.guards, 0, m_pHead );
501 t = guard_node( res.guards, 1, m_pTail );
502 pNext = guard_node( res.guards, 2, h->m_pNext );
504 if ( h == m_pHead.load( memory_model::memory_order_acquire ) ) {
505 if ( h.ptr() == t.ptr() ) {
510 typename gc::Guard g;
511 while ( pNext->m_pNext.load(memory_model::memory_order_relaxed).ptr() && m_pTail.load(memory_model::memory_order_relaxed) == t ) {
512 pNext = guard_node( g, pNext->m_pNext );
513 res.guards.assign( 2, g.template get<value_type>() );
517 m_pTail.compare_exchange_weak( t, marked_ptr(pNext.ptr()), memory_model::memory_order_release, memory_model::memory_order_relaxed );
520 marked_ptr iter( h );
523 typename gc::Guard g;
525 while ( pNext.ptr() && pNext.bits() && iter.ptr() != t.ptr() && m_pHead.load(memory_model::memory_order_relaxed) == h ) {
527 g.assign( res.guards.template get<value_type>(2) );
528 pNext = guard_node( res.guards, 2, pNext->m_pNext );
532 if ( m_pHead.load(memory_model::memory_order_relaxed) != h )
535 if ( iter.ptr() == t.ptr() )
536 free_chain( h, iter );
538 res.pNext = pNext.ptr();
540 if ( iter->m_pNext.compare_exchange_weak( pNext, marked_ptr( pNext.ptr(), 1 ), memory_model::memory_order_release, memory_model::memory_order_relaxed ) ) {
541 if ( hops >= m_nMaxHops )
542 free_chain( h, pNext );
552 m_Stat.onDequeueRace();
564 void free_chain( marked_ptr head, marked_ptr newHead )
566 // "head" and "newHead" are guarded
568 if ( m_pHead.compare_exchange_strong( head, marked_ptr(newHead.ptr()), memory_model::memory_order_release, memory_model::memory_order_relaxed ))
570 typename gc::template GuardArray<2> guards;
571 guards.assign( 0, node_traits::to_value_ptr(head.ptr()) );
572 while ( head.ptr() != newHead.ptr() ) {
573 marked_ptr pNext = guard_node( guards, 1, head->m_pNext );
574 assert( pNext.bits() != 0 );
575 dispose_node( head.ptr() );
576 guards.assign( 0, guards.template get<value_type>(1) );
582 static void clear_links( node_type * pNode )
584 pNode->m_pNext.store( marked_ptr( nullptr ), memory_model::memory_order_release );
587 void dispose_node( node_type * p )
589 if ( p != m_Dummy.get() ) {
590 gc::template retire<internal_disposer>( node_traits::to_value_ptr(p) );
598 /// Initializes empty queue
604 // GC and node_type::gc must be the same
605 static_assert(( std::is_same<gc, typename node_type::gc>::value ), "GC and node_type::gc must be the same");
607 // For cds::gc::HRC, only one base_hook is allowed
610 std::is_same<gc, cds::gc::HRC>::value,
611 std::is_same< typename hook::hook_type, opt::base_hook_tag >,
614 ), "For cds::gc::HRC, only base_hook is allowed");
616 // Head/tail initialization should be made via store call
617 // because of gc::HRC manages reference counting
618 m_pHead.store( marked_ptr(m_Dummy.get()), memory_model::memory_order_relaxed );
619 m_pTail.store( marked_ptr(m_Dummy.get()), memory_model::memory_order_relaxed );
622 /// Destructor clears the queue
624 Since the baskets queue contains at least one item even
625 if the queue is empty, the destructor may call item disposer.
631 node_type * pHead = m_pHead.load(memory_model::memory_order_relaxed).ptr();
632 assert( pHead != nullptr );
635 node_type * pNext = pHead->m_pNext.load( memory_model::memory_order_relaxed ).ptr();
637 node_type * p = pNext;
638 pNext = pNext->m_pNext.load( memory_model::memory_order_relaxed ).ptr();
639 p->m_pNext.store( marked_ptr(), memory_model::memory_order_relaxed );
642 pHead->m_pNext.store( marked_ptr(), memory_model::memory_order_relaxed );
643 //m_pTail.store( marked_ptr( pHead ), memory_model::memory_order_relaxed );
646 m_pHead.store( marked_ptr( nullptr ), memory_model::memory_order_relaxed );
647 m_pTail.store( marked_ptr( nullptr ), memory_model::memory_order_relaxed );
649 dispose_node( pHead );
652 /// Returns queue's item count
654 The value returned depends on opt::item_counter option. For atomicity::empty_item_counter,
655 this function always returns 0.
657 <b>Warning</b>: even if you use a real item counter and it returns 0, this does not mean that the queue
658 is empty. To check queue emptiness use \ref empty() method.
662 return m_ItemCounter.value();
665 /// Returns reference to internal statistics
666 const stat& statistics() const
671 /// Enqueues \p val value into the queue.
672 /** @anchor cds_intrusive_BasketQueue_enqueue
673 The function always returns \p true.
675 bool enqueue( value_type& val )
677 node_type * pNew = node_traits::to_node_ptr( val );
678 link_checker::is_empty( pNew );
680 typename gc::Guard guard;
685 t = guard_node( guard, m_pTail );
687 marked_ptr pNext = t->m_pNext.load(memory_model::memory_order_acquire );
689 if ( pNext.ptr() == nullptr ) {
690 pNew->m_pNext.store( marked_ptr(), memory_model::memory_order_release );
691 if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr(pNew), memory_model::memory_order_release, memory_model::memory_order_relaxed ) ) {
692 if ( !m_pTail.compare_exchange_strong( t, marked_ptr(pNew), memory_model::memory_order_release, memory_model::memory_order_relaxed ))
693 m_Stat.onAdvanceTailFailed();
697 // Try adding to basket
698 m_Stat.onTryAddBasket();
701 typename gc::Guard gNext;
704 pNext = guard_node( gNext, t->m_pNext );
707 if ( m_pTail.load(memory_model::memory_order_relaxed) == t
708 && t->m_pNext.load( memory_model::memory_order_relaxed) == pNext
712 pNew->m_pNext.store( pNext, memory_model::memory_order_release );
713 if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr( pNew ), memory_model::memory_order_release, memory_model::memory_order_relaxed )) {
714 m_Stat.onAddBasket();
721 // Tail is misplaced, advance it
723 typename gc::template GuardArray<2> g;
724 g.assign( 0, node_traits::to_value_ptr( pNext.ptr() ) );
725 if ( t->m_pNext.load( memory_model::memory_order_relaxed ) != pNext ) {
726 m_Stat.onEnqueueRace();
733 while ( (p = pNext->m_pNext.load( memory_model::memory_order_relaxed )).ptr() != nullptr )
735 bTailOk = m_pTail.load( memory_model::memory_order_relaxed ) == t;
739 g.assign( 1, node_traits::to_value_ptr( p.ptr() ));
740 if ( pNext->m_pNext.load(memory_model::memory_order_relaxed) != p )
743 g.assign( 0, g.template get<value_type>( 1 ) );
745 if ( !bTailOk || !m_pTail.compare_exchange_weak( t, marked_ptr( pNext.ptr() ), memory_model::memory_order_release, memory_model::memory_order_relaxed ))
746 m_Stat.onAdvanceTailFailed();
751 m_Stat.onEnqueueRace();
760 /// Dequeues a value from the queue
761 /** @anchor cds_intrusive_BasketQueue_dequeue
762 If the queue is empty the function returns \p NULL.
764 <b>Warning</b>: see MSQueue::dequeue note about item disposing
766 value_type * dequeue()
770 if ( do_dequeue( res, true ))
771 return node_traits::to_value_ptr( *res.pNext );
775 /// Synonym for \ref cds_intrusive_BasketQueue_enqueue "enqueue" function
776 bool push( value_type& val )
778 return enqueue( val );
781 /// Synonym for \ref cds_intrusive_BasketQueue_dequeue "dequeue" function
787 /// Checks if the queue is empty
789 Note that this function is not \p const.
790 The function is based on \ref dequeue algorithm
791 but really does not dequeue any item.
796 return !do_dequeue( res, false );
801 The function repeatedly calls \ref dequeue until it returns \p NULL.
802 The disposer defined in template \p Options is called for each item
803 that can be safely disposed.
811 }} // namespace cds::intrusive
813 #endif // #ifndef __CDS_INTRUSIVE_BASKET_QUEUE_H