Merge branch 'ldionne-ldionne-cmake' into dev
[libcds.git] / cds / intrusive / details / split_list_base.h
index fb0e230510f77a4191fa132ee4598a6a1549535f..8f2f0ed3a0bfaabcda602ccf5cb0f6c2556b9cf2 100644 (file)
@@ -39,6 +39,7 @@
 #include <cds/algo/bitop.h>
 #include <cds/opt/hash.h>
 #include <cds/intrusive/free_list_selector.h>
+#include <cds/details/size_t_cast.h>
 
 namespace cds { namespace intrusive {
 
@@ -59,7 +60,7 @@ namespace cds { namespace intrusive {
             }
 
             /// Initializes dummy node with \p nHash value
-            hash_node( size_t nHash )
+            explicit hash_node( size_t nHash )
                 : m_nHash( nHash )
             {
                 assert( is_dummy());
@@ -93,7 +94,7 @@ namespace cds { namespace intrusive {
             }
 
             /// Initializes dummy node with \p nHash value
-            node( size_t nHash )
+            explicit node( size_t nHash )
                 : hash_node( nHash )
             {
                 assert( is_dummy());
@@ -119,7 +120,7 @@ namespace cds { namespace intrusive {
             }
 
             /// Initializes dummy node with \p nHash value
-            node( size_t nHash )
+            explicit node( size_t nHash )
                 : hash_node( nHash )
             {
                 assert( is_dummy());
@@ -257,7 +258,7 @@ namespace cds { namespace intrusive {
                 the <tt>empty()</tt> member function depends on correct item counting.
                 Therefore, \p cds::atomicity::empty_item_counter is not allowed as a type of the option.
 
-                Default is \p cds::atomicity::item_counter.
+                Default is \p cds::atomicity::item_counter; to avoid false sharing, you may use \p atomicity::cache_friendly_item_counter.
             */
             typedef cds::atomicity::item_counter item_counter;
 
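To illustrate the recommendation added in the hunk above, here is a minimal traits sketch assuming the usual libcds option spelling; my_traits is a hypothetical name and not part of this patch:

    // Hedged sketch: pick the cache-friendly counter to avoid false sharing
    // between the item counter and adjacent hot data.
    struct my_traits: public cds::intrusive::split_list::traits
    {
        typedef cds::atomicity::cache_friendly_item_counter item_counter;
    };

The same selection can usually be expressed through split_list::make_traits with the cds::opt::item_counter option instead of deriving a traits struct.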
@@ -396,7 +397,16 @@ namespace cds { namespace intrusive {
 
             /// Auxiliary node type
             struct aux_node_type: public node_type, public free_list::node
-            {};
+            {
+#           ifdef CDS_DEBUG
+                atomics::atomic<bool> m_busy;
+
+                aux_node_type()
+                {
+                    m_busy.store( false, atomics::memory_order_release );
+                }
+#           endif
+            };
 
             typedef atomics::atomic<aux_node_type *> table_entry;  ///< Table entry type
             typedef cds::details::Allocator< table_entry, allocator > bucket_table_allocator; ///< Bucket table allocator
@@ -482,8 +492,10 @@ namespace cds { namespace intrusive {
                 if ( m_nAuxNodeAllocated.load( memory_model::memory_order_relaxed ) < capacity()) {
                     // alloc next free node from m_auxNode
                     size_t const idx = m_nAuxNodeAllocated.fetch_add( 1, memory_model::memory_order_relaxed );
-                    if ( idx < capacity())
+                    if ( idx < capacity()) {
+                        CDS_TSAN_ANNOTATE_NEW_MEMORY( &m_auxNode[idx], sizeof( aux_node_type ));
                         return new( &m_auxNode[idx] ) aux_node_type();
+                    }
                 }
 
                 // get from free-list
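The hunk above follows the bump-allocation idiom: reserve a slot index with fetch_add, placement-new into preallocated storage, and annotate the slot for TSan so the race detector treats it as fresh memory. A self-contained sketch of the same idiom, with made-up names and without the TSan macro:

    #include <atomic>
    #include <cstddef>
    #include <new>
    #include <type_traits>

    // Hedged sketch of a fixed-capacity bump allocator (names are illustrative, not part of libcds).
    template <typename T, std::size_t Capacity>
    class bump_pool {
        typename std::aligned_storage<sizeof( T ), alignof( T )>::type slots_[Capacity];
        std::atomic<std::size_t> allocated_{ 0 };
    public:
        T* allocate()
        {
            // fetch_add can overshoot Capacity under contention; the index check catches that
            std::size_t idx = allocated_.fetch_add( 1, std::memory_order_relaxed );
            if ( idx < Capacity )
                return new( &slots_[idx] ) T();  // construct in the reserved slot
            return nullptr;                      // exhausted: the real code falls back to a free-list
        }
    };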
@@ -555,8 +567,17 @@ namespace cds { namespace intrusive {
             typedef typename options::free_list free_list;
 
             /// Auxiliary node type
-            class aux_node_type: public node_type, public free_list::node
-            {};
+            struct aux_node_type: public node_type, public free_list::node
+            {
+#           ifdef CDS_DEBUG
+                atomics::atomic<bool> m_busy;
+
+                aux_node_type()
+                {
+                    m_busy.store( false, atomics::memory_order_release );
+                }
+#           endif
+            };
 
         protected:
             //@cond
@@ -569,9 +590,10 @@ namespace cds { namespace intrusive {
                 // aux_node_type     nodes[];
 
                 aux_node_segment()
-                    : aux_node_count(0)
-                    , next_segment( nullptr )
-                {}
+                    : next_segment( nullptr )
+                {
+                    aux_node_count.store( 0, atomics::memory_order_release );
+                }
 
                 aux_node_type* segment()
                 {
@@ -685,10 +707,12 @@ namespace cds { namespace intrusive {
                     assert( aux_segment != nullptr );
 
                     // try to allocate from current aux segment
-                    if ( aux_segment->aux_node_count.load( memory_model::memory_order_relaxed ) < m_metrics.nSegmentSize ) {
+                    if ( aux_segment->aux_node_count.load( memory_model::memory_order_acquire ) < m_metrics.nSegmentSize ) {
                         size_t idx = aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
-                        if ( idx < m_metrics.nSegmentSize )
+                        if ( idx < m_metrics.nSegmentSize ) {
+                            CDS_TSAN_ANNOTATE_NEW_MEMORY( aux_segment->segment() + idx, sizeof( aux_node_type ));
                             return new( aux_segment->segment() + idx ) aux_node_type();
+                        }
                     }
 
                     // try allocate from free-list
@@ -702,10 +726,11 @@ namespace cds { namespace intrusive {
                     aux_node_segment* new_aux_segment = allocate_aux_segment();
                     new_aux_segment->next_segment = aux_segment;
                     new_aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
-                    CDS_COMPILER_RW_BARRIER;
 
-                    if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_release, atomics::memory_order_acquire ))
+                    if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_release, atomics::memory_order_acquire )) {
+                        CDS_TSAN_ANNOTATE_NEW_MEMORY( new_aux_segment->segment(), sizeof( aux_node_type ));
                         return new( new_aux_segment->segment()) aux_node_type();
+                    }
 
                     free_aux_segment( new_aux_segment );
                 }
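The compare_exchange_strong above is a lock-free "grow by prepending a segment" step: build the new segment privately, publish it with a release CAS, and discard it if another thread won the race. A compact sketch of that step, with invented types and names:

    #include <atomic>

    // Hedged sketch of the publish-or-discard step (types and names are made up).
    struct segment {
        segment* next = nullptr;
        // ... aux node storage would follow here ...
    };

    segment* try_grow( std::atomic<segment*>& head, segment* expected )
    {
        segment* fresh = new segment;
        fresh->next = expected;
        // release on success so readers see a fully initialized segment,
        // acquire on failure so the caller retries against the up-to-date head
        if ( head.compare_exchange_strong( expected, fresh,
                 std::memory_order_release, std::memory_order_acquire ))
            return fresh;   // we published the new segment
        delete fresh;       // another thread grew the list first
        return nullptr;     // caller re-reads the head and retries the allocation
    }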
@@ -785,6 +810,7 @@ namespace cds { namespace intrusive {
             aux_node_segment* allocate_aux_segment()
             {
                 char* p = raw_allocator().allocate( sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
+                CDS_TSAN_ANNOTATE_NEW_MEMORY( p, sizeof( aux_node_segment ));
                 return new(p) aux_node_segment();
             }
 
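allocate_aux_segment() carves one raw block holding the segment header followed by nSegmentSize aux nodes; only the header is TSan-annotated here because each node slot is annotated when it is later constructed in place. A minimal sketch of the header-plus-trailing-array layout, with illustrative names:

    #include <cstddef>
    #include <new>

    struct node { /* ... */ };

    // Hedged sketch of the layout (assumes the default operator new alignment
    // is sufficient for node, as in the original code).
    struct segment_header {
        std::size_t allocated = 0;
        node* nodes() { return reinterpret_cast<node*>( this + 1 ); }  // slots start right after the header
    };

    segment_header* make_segment( std::size_t n )
    {
        char* raw = new char[sizeof( segment_header ) + sizeof( node ) * n];
        return new( raw ) segment_header();  // node slots stay raw until constructed in place later
    }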
@@ -1277,13 +1303,13 @@ namespace cds { namespace intrusive {
         template <typename BitReversalAlgo>
         static inline size_t regular_hash( size_t nHash )
         {
-            return BitReversalAlgo()( nHash ) | size_t(1);
+            return static_cast<size_t>( BitReversalAlgo()( cds::details::size_t_cast( nHash ))) | size_t(1);
         }
 
         template <typename BitReversalAlgo>
         static inline size_t dummy_hash( size_t nHash )
         {
-            return BitReversalAlgo()( nHash ) & ~size_t(1);
+            return static_cast<size_t>( BitReversalAlgo()( cds::details::size_t_cast( nHash ))) & ~size_t(1);
         }
         //@endcond
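For context on the two helpers above, this is the split-ordered-list convention they encode, as a worked example assuming 8-bit hashes for brevity: bit-reversing nHash = 0b00000101 yields 0b10100000; regular_hash() then sets bit 0 to mark a data node (0b10100001), while dummy_hash() clears bit 0 to mark a bucket (dummy) node (0b10100000). The cleared bit makes each dummy node sort immediately before all regular nodes of its bucket in the underlying ordered list. The new cds::details::size_t_cast call, paired with the static_cast back to size_t, appears to adapt the hash to the fixed-width integer type the bit-reversal algorithm operates on.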