Queue refactoring
author     khizmax <libcds.dev@gmail.com>
Mon, 15 Jun 2015 20:54:10 +0000 (23:54 +0300)
committer  khizmax <libcds.dev@gmail.com>
Mon, 15 Jun 2015 20:54:10 +0000 (23:54 +0300)
cds/compiler/feature_tsan.h
cds/intrusive/basket_queue.h
cds/intrusive/moir_queue.h
cds/intrusive/msqueue.h
cds/intrusive/optimistic_queue.h

diff --git a/cds/compiler/feature_tsan.h b/cds/compiler/feature_tsan.h
index f870ab584fac02c347a670123261fdaa847991a4..8ebcc857c43f34ea48064559d99f02b0c25e4b42 100644
--- a/cds/compiler/feature_tsan.h
+++ b/cds/compiler/feature_tsan.h
@@ -22,7 +22,7 @@
 #   define CDS_TSAN_ANNOTATE_IGNORE_RW_END          \
                                                     CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;\
                                                     CDS_TSAN_ANNOTATE_IGNORE_READS_END
-#   define CDS_TSAN_ANNOTATE_NEW_MEMORY( addr, sz ) AnnotateNewMemory( __FILE__, __LINE__, addr, sz )
+#   define CDS_TSAN_ANNOTATE_NEW_MEMORY( addr, sz ) AnnotateNewMemory( (char *) __FILE__, __LINE__, reinterpret_cast<void *>(addr), sz )
 
     // provided by TSan
     extern "C" {
diff --git a/cds/intrusive/basket_queue.h b/cds/intrusive/basket_queue.h
index f5d129a95274693655bb68c9211973a63ca02423..46796dc8fa1bb743cde56b4512eecc69ad0bc60c 100644
--- a/cds/intrusive/basket_queue.h
+++ b/cds/intrusive/basket_queue.h
@@ -434,7 +434,6 @@ namespace cds { namespace intrusive {
         typedef typename node_type::marked_ptr   marked_ptr;
         typedef typename node_type::atomic_marked_ptr atomic_marked_ptr;
 
-        typedef intrusive::node_to_value<BasketQueue> node_to_value;
         typedef typename opt::details::alignment_setter< atomic_marked_ptr, traits::alignment >::type aligned_node_ptr;
         typedef typename opt::details::alignment_setter< node_type, traits::alignment >::type dummy_node_type;
 
@@ -453,31 +452,6 @@ namespace cds { namespace intrusive {
 
         //@cond
 
-        template <size_t Count>
-        static marked_ptr guard_node( typename gc::template GuardArray<Count>& g, size_t idx, atomic_marked_ptr const& p )
-        {
-            marked_ptr pg;
-            while ( true ) {
-                pg = p.load( memory_model::memory_order_relaxed );
-                g.assign( idx, node_traits::to_value_ptr( pg.ptr() ) );
-                if ( p.load( memory_model::memory_order_acquire) == pg ) {
-                    return pg;
-                }
-            }
-        }
-
-        static marked_ptr guard_node( typename gc::Guard& g, atomic_marked_ptr const& p )
-        {
-            marked_ptr pg;
-            while ( true ) {
-                pg = p.load( memory_model::memory_order_relaxed );
-                g.assign( node_traits::to_value_ptr( pg.ptr() ) );
-                if ( p.load( memory_model::memory_order_acquire) == pg ) {
-                    return pg;
-                }
-            }
-        }
-
         struct dequeue_result {
             typename gc::template GuardArray<3>  guards;
             node_type * pNext;
@@ -495,9 +469,9 @@ namespace cds { namespace intrusive {
             marked_ptr pNext;
 
             while ( true ) {
-                h = guard_node( res.guards, 0, m_pHead );
-                t = guard_node( res.guards, 1, m_pTail );
-                pNext = guard_node( res.guards, 2, h->m_pNext );
+                h = res.guards.protect( 0, m_pHead, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr() );});
+                t = res.guards.protect( 1, m_pTail, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr() );});
+                pNext = res.guards.protect( 2, h->m_pNext, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr() );});
 
                 if ( h == m_pHead.load( memory_model::memory_order_acquire ) ) {
                     if ( h.ptr() == t.ptr() ) {
@@ -509,8 +483,8 @@ namespace cds { namespace intrusive {
                         {
                             typename gc::Guard g;
                             while ( pNext->m_pNext.load(memory_model::memory_order_relaxed).ptr() && m_pTail.load(memory_model::memory_order_relaxed) == t ) {
-                                pNext = guard_node( g, pNext->m_pNext );
-                                res.guards.assign( 2, g.template get<value_type>() );
+                                pNext = g.protect( pNext->m_pNext, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr() );});
+                                res.guards.copy( 2, g );
                             }
                         }
 
@@ -525,7 +499,7 @@ namespace cds { namespace intrusive {
                         while ( pNext.ptr() && pNext.bits() && iter.ptr() != t.ptr() && m_pHead.load(memory_model::memory_order_relaxed) == h ) {
                             iter = pNext;
                             g.assign( res.guards.template get<value_type>(2) );
-                            pNext = guard_node( res.guards, 2, pNext->m_pNext );
+                            pNext = res.guards.protect( 2, pNext->m_pNext, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr() );});
                             ++hops;
                         }
 
@@ -570,10 +544,10 @@ namespace cds { namespace intrusive {
                 typename gc::template GuardArray<2> guards;
                 guards.assign( 0, node_traits::to_value_ptr(head.ptr()) );
                 while ( head.ptr() != newHead.ptr() ) {
-                    marked_ptr pNext = guard_node( guards, 1, head->m_pNext );
+                    marked_ptr pNext = guards.protect( 1, head->m_pNext, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr() );});
                     assert( pNext.bits() != 0 );
                     dispose_node( head.ptr() );
-                    guards.assign( 0, guards.template get<value_type>(1) );
+                    guards.copy( 0, 1 );
                     head = pNext;
                 }
             }
@@ -653,7 +627,7 @@ namespace cds { namespace intrusive {
 
             marked_ptr t;
             while ( true ) {
-                t = guard_node( guard, m_pTail );
+                t = guard.protect( m_pTail, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr() );});
 
                 marked_ptr pNext = t->m_pNext.load(memory_model::memory_order_acquire );
 
@@ -672,7 +646,7 @@ namespace cds { namespace intrusive {
                     typename gc::Guard gNext;
 
                 try_again:
-                    pNext = guard_node( gNext, t->m_pNext );
+                    pNext = gNext.protect( t->m_pNext, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr() );});
 
                     // add to the basket
                     if ( m_pTail.load(memory_model::memory_order_relaxed) == t
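
In basket_queue.h the private guard_node() helpers and the node_to_value typedef are gone; the code now calls the guard's own protect() member and passes a lambda that converts the marked node pointer into the value pointer actually published in the hazard slot, and re-guarding one slot from another is done with GuardArray::copy instead of assign() + get(). A minimal sketch of the protect-with-converter idiom that guard_node() used to spell out by hand, with hypothetical Guard/Node/Value types rather than the libcds classes:

#include <atomic>
#include <cstdio>

struct Value { int data; };
struct Node  { Value value{}; std::atomic<Node *> next{ nullptr }; };

struct Guard {
    std::atomic<void *> hazard{ nullptr };   // slot scanned by reclaiming threads

    // Load the source pointer, publish the converted value pointer into the
    // hazard slot, then re-read the source to make sure it did not change
    // (and therefore could not have been retired) in between.
    template <typename T, typename Converter>
    T * protect( std::atomic<T *> const & src, Converter to_value )
    {
        T * p;
        do {
            p = src.load( std::memory_order_relaxed );
            hazard.store( to_value( p ), std::memory_order_release );
        } while ( src.load( std::memory_order_acquire ) != p );
        return p;
    }
};

int main()
{
    Node n;
    n.value.data = 42;
    std::atomic<Node *> head{ &n };

    Guard g;
    Node * h = g.protect( head, []( Node * p ) -> Value * { return p ? &p->value : nullptr; } );
    std::printf( "%d\n", h->value.data );
}

The lambda matters because the hazard slot must hold the pointer the reclaimer compares against, i.e. the value, while the caller keeps working with the (possibly marked) node pointer.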
diff --git a/cds/intrusive/moir_queue.h b/cds/intrusive/moir_queue.h
index 3f6c40c54dfea3d24a3c213d18e323afad18d506..0928dd220302d30c9eb4f332e0e10685c969691f 100644
--- a/cds/intrusive/moir_queue.h
+++ b/cds/intrusive/moir_queue.h
@@ -105,7 +105,6 @@ namespace cds { namespace intrusive {
     protected:
         //@cond
         typedef typename base_class::dequeue_result dequeue_result;
-        typedef typename base_class::node_to_value node_to_value;
 
         bool do_dequeue( dequeue_result& res )
         {
@@ -114,8 +113,8 @@ namespace cds { namespace intrusive {
             node_type * pNext;
             node_type * h;
             while ( true ) {
-                h = res.guards.protect( 0, base_class::m_pHead, node_to_value() );
-                pNext = res.guards.protect( 1, h->m_pNext, node_to_value() );
+                h = res.guards.protect( 0, base_class::m_pHead, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p );});
+                pNext = res.guards.protect( 1, h->m_pNext, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p );});
 
                 if ( pNext == nullptr ) {
                     base_class::m_Stat.onEmptyDequeue();
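
The moir_queue.h change is purely mechanical: the node_to_value functor object that was passed to protect() is replaced by an inline lambda doing the same conversion, so the typedef can be dropped. A small illustration of the equivalence, using hypothetical stand-ins for node_traits and the old functor:

#include <cassert>

struct value_type { int i; };
struct node_type  { value_type v; };

// Hypothetical stand-ins for node_traits::to_value_ptr and the old functor.
struct node_traits {
    static value_type * to_value_ptr( node_type * p ) { return p ? &p->v : nullptr; }
};
struct node_to_value {
    value_type * operator()( node_type * p ) const { return node_traits::to_value_ptr( p ); }
};

// Any callable converting node_type* to value_type* fits here,
// just as it does for a protect()-style template parameter.
template <typename F>
value_type * convert( node_type * p, F f ) { return f( p ); }

int main()
{
    node_type n{ { 7 } };
    value_type * a = convert( &n, node_to_value() );                    // old style: functor object
    value_type * b = convert( &n, []( node_type * p ) -> value_type * { // new style: lambda
        return node_traits::to_value_ptr( p );
    } );
    assert( a == b && a->i == 7 );
}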
diff --git a/cds/intrusive/msqueue.h b/cds/intrusive/msqueue.h
index 404cd1893c0c3d6ecad255522b086711c8ca8ed7..e9b30f7546668f91c78327bb1dfc564de85b5dc4 100644
--- a/cds/intrusive/msqueue.h
+++ b/cds/intrusive/msqueue.h
@@ -345,7 +345,6 @@ namespace cds { namespace intrusive {
         // GC and node_type::gc must be the same
         static_assert((std::is_same<gc, typename node_type::gc>::value), "GC and node_type::gc must be the same");
 
-        typedef intrusive::node_to_value<MSQueue> node_to_value;
         typedef typename opt::details::alignment_setter< typename node_type::atomic_node_ptr, traits::alignment >::type aligned_node_ptr;
         typedef typename opt::details::alignment_setter< node_type, traits::alignment >::type dummy_node_type;
 
@@ -371,9 +370,8 @@ namespace cds { namespace intrusive {
 
             node_type * h;
             while ( true ) {
-                h = res.guards.protect( 0, m_pHead, node_to_value() );
-                pNext = h->m_pNext.load( memory_model::memory_order_acquire );
-                res.guards.assign( 1, node_to_value()( pNext ));
+                h = res.guards.protect( 0, m_pHead, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p );});
+                pNext = res.guards.protect( 1, h->m_pNext, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p );});
                 if ( m_pHead.load(memory_model::memory_order_acquire) != h )
                     continue;
 
@@ -479,7 +477,7 @@ namespace cds { namespace intrusive {
 
             node_type * t;
             while ( true ) {
-                t = guard.protect( m_pTail, node_to_value() );
+                t = guard.protect( m_pTail, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p );});
 
                 node_type * pNext = t->m_pNext.load(memory_model::memory_order_acquire);
                 if ( pNext != nullptr ) {
@@ -556,7 +554,8 @@ namespace cds { namespace intrusive {
         bool empty() const
         {
             typename gc::Guard guard;
-            return guard.protect( m_pHead, node_to_value() )->m_pNext.load( memory_model::memory_order_relaxed ) == nullptr;
+            return guard.protect( m_pHead, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p );})
+                ->m_pNext.load( memory_model::memory_order_relaxed ) == nullptr;
         }
 
         /// Clear the queue
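
In msqueue.h the dequeue path used to load h->m_pNext with an acquire load and then copy the converted pointer into the guard with assign(); now that pair is folded into a single protect() call, which publishes the guard first and then re-reads the source until it is unchanged, so the returned pointer is already protected when it comes back (empty() gets the same treatment). A sketch contrasting the two shapes on a bare atomic pointer, with a hypothetical single-slot guard:

#include <atomic>

// Hypothetical single-slot guard, just to contrast the two ways of filling it.
struct guard_slot {
    std::atomic<void *> hazard{ nullptr };
};

struct node { std::atomic<node *> next{ nullptr }; };

// Old shape: read the pointer, then store it into the guard. Between the two
// statements the node could already have been unlinked and retired, so the
// caller must validate the surrounding state afterwards (as MSQueue did by
// re-checking m_pHead).
node * guard_two_step( guard_slot & g, std::atomic<node *> const & src )
{
    node * p = src.load( std::memory_order_acquire );
    g.hazard.store( p, std::memory_order_release );
    return p;
}

// New shape: publish first, then re-read and retry until the source still
// holds the published pointer; the returned pointer is already protected.
node * guard_protect( guard_slot & g, std::atomic<node *> const & src )
{
    node * p;
    do {
        p = src.load( std::memory_order_relaxed );
        g.hazard.store( p, std::memory_order_release );
    } while ( src.load( std::memory_order_acquire ) != p );
    return p;
}

int main()
{
    node n;
    std::atomic<node *> head{ &n };
    guard_slot g;
    return guard_two_step( g, head ) == guard_protect( g, head ) ? 0 : 1;
}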
diff --git a/cds/intrusive/optimistic_queue.h b/cds/intrusive/optimistic_queue.h
index 0f98f75f7988faceb9136e05257dca042703bfc7..65d883c3ab3a5a36338c513fc3c1b45992d14806 100644
--- a/cds/intrusive/optimistic_queue.h
+++ b/cds/intrusive/optimistic_queue.h
@@ -421,7 +421,6 @@ namespace cds { namespace intrusive {
 
     protected:
         //@cond
-        typedef intrusive::node_to_value<OptimisticQueue> node_to_value;
         typedef typename opt::details::alignment_setter< typename node_type::atomic_node_ptr, traits::alignment >::type aligned_node_ptr;
 
         // GC and node_type::gc must be the same
@@ -460,10 +459,10 @@ namespace cds { namespace intrusive {
             back_off bkoff;
 
             while ( true ) { // Try till success or empty
-                pHead = res.guards.protect( 0, m_pHead, node_to_value() );
-                pTail = res.guards.protect( 1, m_pTail, node_to_value() );
+                pHead = res.guards.protect( 0, m_pHead, [](node_type * p) -> value_type * {return node_traits::to_value_ptr(p);});
+                pTail = res.guards.protect( 1, m_pTail, [](node_type * p) -> value_type * {return node_traits::to_value_ptr(p);});
                 assert( pHead != nullptr );
-                pFirstNodePrev = res.guards.protect( 2, pHead->m_pPrev, node_to_value() );
+                pFirstNodePrev = res.guards.protect( 2, pHead->m_pPrev, [](node_type * p) -> value_type * {return node_traits::to_value_ptr(p);});
 
                 if ( pHead == m_pHead.load(memory_model::memory_order_relaxed)) {
                     if ( pTail != pHead ) {
@@ -510,7 +509,7 @@ namespace cds { namespace intrusive {
 
             pCurNode = pTail;
             while ( pCurNode != pHead ) { // While not at head
-                pCurNodeNext = guards.protect(0, pCurNode->m_pNext, node_to_value() );
+                pCurNodeNext = guards.protect(0, pCurNode->m_pNext, [](node_type * p) -> value_type * {return node_traits::to_value_ptr(p);} );
                 if ( pHead != m_pHead.load(memory_model::memory_order_relaxed) )
                     break;
                 pCurNodeNext->m_pPrev.store( pCurNode, memory_model::memory_order_release );
@@ -577,16 +576,16 @@ namespace cds { namespace intrusive {
             back_off bkoff;
 
             guards.assign( 1, &val );
-            node_type * pTail = guards.protect( 0, m_pTail, node_to_value() )  ;   // Read the tail
+            node_type * pTail = guards.protect( 0, m_pTail, [](node_type * p) -> value_type * {return node_traits::to_value_ptr(p);} );   // Read the tail
             while( true ) {
                 pNew->m_pNext.store( pTail, memory_model::memory_order_release );
-                if ( m_pTail.compare_exchange_strong( pTail, pNew, memory_model::memory_order_release, atomics::memory_order_relaxed ) ) {     // Try to CAS the tail
-                    pTail->m_pPrev.store( pNew, memory_model::memory_order_release )     ;           // Success, write prev
+                if ( m_pTail.compare_exchange_strong( pTail, pNew, memory_model::memory_order_release, atomics::memory_order_relaxed )) { // Try to CAS the tail
+                    pTail->m_pPrev.store( pNew, memory_model::memory_order_release ); // Success, write prev
                     ++m_ItemCounter;
                     m_Stat.onEnqueue();
                     break ;     // Enqueue done!
                 }
-                guards.assign( 0, node_traits::to_value_ptr( pTail ) )   ;  // pTail has been changed by CAS above
+                guards.assign( 0, node_traits::to_value_ptr( pTail ));  // pTail has been changed by CAS above
                 m_Stat.onEnqueueRace();
                 bkoff();
             }
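
The optimistic_queue.h hunks apply the same protect()-with-lambda rewrite, and the enqueue loop also loses some stray whitespace before its semicolons. For reference, a compressed sketch of that loop's control flow: link the new node behind the observed tail, try to CAS the tail, fix the prev pointer only after the CAS succeeds, and on failure re-protect the tail value the CAS handed back (the real code also invokes a back-off functor there). The types below are hypothetical simplifications; hazard management is reduced to one raw slot instead of a libcds guard array:

#include <atomic>
#include <cstdio>

struct node {
    std::atomic<node *> next{ nullptr };   // next points toward the head (older nodes)
    std::atomic<node *> prev{ nullptr };   // prev is fixed lazily, after the CAS
    int value = 0;
};

struct queue {
    std::atomic<node *> tail;
    std::atomic<void *> hazard{ nullptr };     // stand-in for guards.assign( 0, ... )

    explicit queue( node * dummy ) : tail( dummy ) {}

    void enqueue( node * pNew )
    {
        node * pTail = tail.load( std::memory_order_acquire );
        hazard.store( pTail, std::memory_order_release );          // protect the observed tail
        for ( ;; ) {
            pNew->next.store( pTail, std::memory_order_release );  // link new node behind the tail
            if ( tail.compare_exchange_strong( pTail, pNew,
                    std::memory_order_release, std::memory_order_relaxed )) {
                pTail->prev.store( pNew, std::memory_order_release ); // success: fix prev lazily
                return;
            }
            // CAS failed: pTail now holds the current tail, so re-protect it and retry.
            hazard.store( pTail, std::memory_order_release );
        }
    }
};

int main()
{
    node dummy, a, b;
    a.value = 1; b.value = 2;
    queue q( &dummy );
    q.enqueue( &a );
    q.enqueue( &b );
    std::printf( "tail=%d\n", q.tail.load()->value );
}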