3 #ifndef CDSLIB_INTRUSIVE_DETAILS_FELDMAN_HASHSET_BASE_H
4 #define CDSLIB_INTRUSIVE_DETAILS_FELDMAN_HASHSET_BASE_H
#include <cstring>  // std::memcmp
#include <memory.h> // memcmp, memcpy
#include <cds/intrusive/details/base.h>
#include <cds/opt/compare.h>
#include <cds/algo/atomic.h>
#include <cds/algo/split_bitstring.h>
#include <cds/details/marked_ptr.h>
#include <cds/urcu/options.h>
16 namespace cds { namespace intrusive {
18 /// FeldmanHashSet related definitions
19 /** @ingroup cds_intrusive_helper
21 namespace feldman_hashset {
/// Hash accessor option
/**
    @copydetails traits::hash_accessor
*/
template <typename Accessor>
struct hash_accessor {
    //@cond
    // Standard cds::opt option packing: injects the accessor type into the
    // traits class built by cds::opt::make_options
    template <typename Base> struct pack: public Base
    {
        typedef Accessor hash_accessor;
    };
    //@endcond
};
/// \p FeldmanHashSet internal statistics
/**
    Template argument \p EventCounter is the counter type for each event;
    counters are best-effort (no cross-counter atomicity is implied).
*/
template <typename EventCounter = cds::atomicity::event_counter>
    typedef EventCounter event_counter ; ///< Event counter type

    // --- insert / update / erase / find counters ---
    event_counter m_nInsertSuccess; ///< Number of success \p insert() operations
    event_counter m_nInsertFailed; ///< Number of failed \p insert() operations
    event_counter m_nInsertRetry; ///< Number of attempts to insert new item
    event_counter m_nUpdateNew; ///< Number of new item inserted for \p update()
    event_counter m_nUpdateExisting; ///< Number of existing item updates
    event_counter m_nUpdateFailed; ///< Number of failed \p update() call
    event_counter m_nUpdateRetry; ///< Number of attempts to update the item
    event_counter m_nEraseSuccess; ///< Number of successful \p erase(), \p unlink(), \p extract() operations
    event_counter m_nEraseFailed; ///< Number of failed \p erase(), \p unlink(), \p extract() operations
    event_counter m_nEraseRetry; ///< Number of attempts to \p erase() an item
    event_counter m_nFindSuccess; ///< Number of successful \p find() and \p get() operations
    event_counter m_nFindFailed; ///< Number of failed \p find() and \p get() operations

    // --- structural events of the multi-level array ---
    event_counter m_nExpandNodeSuccess; ///< Number of succeeded attempts converting data node to array node
    event_counter m_nExpandNodeFailed; ///< Number of failed attempts converting data node to array node
    event_counter m_nSlotChanged; ///< Number of array node slot changing by other thread during an operation
    event_counter m_nSlotConverting; ///< Number of events when we encounter a slot while it is converting to array node

    event_counter m_nArrayNodeCount; ///< Number of array nodes
    event_counter m_nHeight; ///< Current height of the tree (maximum observed level)

    // Event hooks called by the set implementation; each bumps one counter
    void onInsertSuccess() { ++m_nInsertSuccess; }
    void onInsertFailed() { ++m_nInsertFailed; }
    void onInsertRetry() { ++m_nInsertRetry; }
    void onUpdateNew() { ++m_nUpdateNew; }
    void onUpdateExisting() { ++m_nUpdateExisting; }
    void onUpdateFailed() { ++m_nUpdateFailed; }
    void onUpdateRetry() { ++m_nUpdateRetry; }
    void onEraseSuccess() { ++m_nEraseSuccess; }
    void onEraseFailed() { ++m_nEraseFailed; }
    void onEraseRetry() { ++m_nEraseRetry; }
    void onFindSuccess() { ++m_nFindSuccess; }
    void onFindFailed() { ++m_nFindFailed; }

    void onExpandNodeSuccess() { ++m_nExpandNodeSuccess; }
    void onExpandNodeFailed() { ++m_nExpandNodeFailed; }
    void onSlotChanged() { ++m_nSlotChanged; }
    void onSlotConverting() { ++m_nSlotConverting; }
    void onArrayNodeCreated() { ++m_nArrayNodeCount; }
    // Records the maximum height seen so far.
    // NOTE(review): non-atomic check-then-set; like the other counters this is
    // best-effort under concurrency - confirm event_counter semantics if exactness matters.
    void height( size_t h ) { if (m_nHeight < h ) m_nHeight = h; }
/// \p FeldmanHashSet empty internal statistics
/**
    Drop-in replacement for \p feldman_hashset::stat when statistics gathering
    is disabled: every hook is a const no-op that the optimizer removes entirely.
*/
struct empty_stat {
    //@cond
    void onInsertSuccess() const {}
    void onInsertFailed() const {}
    void onInsertRetry() const {}
    void onUpdateNew() const {}
    void onUpdateExisting() const {}
    void onUpdateFailed() const {}
    void onUpdateRetry() const {}
    void onEraseSuccess() const {}
    void onEraseFailed() const {}
    void onEraseRetry() const {}
    void onFindSuccess() const {}
    void onFindFailed() const {}

    void onExpandNodeSuccess() const {}
    void onExpandNodeFailed() const {}
    void onSlotChanged() const {}
    void onSlotConverting() const {}
    void onArrayNodeCreated() const {}
    void height(size_t) const {}
    //@endcond
};
/// \p FeldmanHashSet traits
    /// Mandatory functor to get hash value from data node
    /**
        It is the most important feature of \p FeldmanHashSet.
        That functor must return a reference to fixed-sized hash value of data node.
        The return value of that functor specifies the type of hash value.

        Example:
        \code
        typedef uint8_t hash_type[32]; // 256-bit hash type
        struct foo {
            hash_type hash; // 256-bit hash value
            // ... other fields
        };

        // Hash accessor
        struct foo_hash_accessor {
            hash_type const& operator()( foo const& d ) const
            {
                return d.hash;
            }
        };
        \endcode
    */
    typedef cds::opt::none hash_accessor;

    /// Disposer for removing data nodes
    typedef cds::intrusive::opt::v::empty_disposer disposer;

    /// Hash comparing functor
    /**
        No default functor is provided.
        If the option is not specified, the \p less option is used.
    */
    typedef cds::opt::none compare;

    /// Specifies binary predicate used for hash compare.
    /**
        If \p %less and \p %compare are not specified, \p memcmp() -like @ref bitwise_compare "bit-wise hash comparator" is used
        because the hash value is treated as fixed-sized bit-string.
    */
    typedef cds::opt::none less;

    /// Item counter
    /**
        The item counting is an important part of \p FeldmanHashSet algorithm:
        the \p empty() member function depends on correct item counting.
        Therefore, \p atomicity::empty_item_counter is not allowed as a type of the option.

        Default is \p atomicity::item_counter.
    */
    typedef cds::atomicity::item_counter item_counter;

    /// Array node allocator
    /**
        Allocator for array nodes. That allocator is used for creating \p headNode and \p arrayNode when the set grows.
        Default is \ref CDS_DEFAULT_ALLOCATOR
    */
    typedef CDS_DEFAULT_ALLOCATOR node_allocator;

    /// C++ memory ordering model
    /**
        Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
        or \p opt::v::sequential_consistent (sequentially consistent memory model).
    */
    typedef cds::opt::v::relaxed_ordering memory_model;

    /// Back-off strategy
    typedef cds::backoff::Default back_off;

    /// Internal statistics
    /**
        By default, internal statistics is disabled (\p feldman_hashset::empty_stat).
        Use \p feldman_hashset::stat to enable it.
    */
    typedef empty_stat stat;

    /// RCU deadlock checking policy (only for \ref cds_intrusive_FeldmanHashSet_rcu "RCU-based FeldmanHashSet")
    /**
        List of available policy see \p opt::rcu_check_deadlock
    */
    typedef cds::opt::v::rcu_throw_deadlock rcu_check_deadlock;
/// Metafunction converting option list to \p feldman_hashset::traits
/**
    Supported \p Options are:
    - \p feldman_hashset::hash_accessor - mandatory option, hash accessor functor.
        @copydetails traits::hash_accessor
    - \p opt::node_allocator - array node allocator.
        @copydetails traits::node_allocator
    - \p opt::compare - hash comparison functor. No default functor is provided.
        If the option is not specified, the \p opt::less is used.
    - \p opt::less - specifies binary predicate used for hash comparison.
        If the option is not specified, \p memcmp() -like bit-wise hash comparator is used
        because the hash value is treated as fixed-sized bit-string.
    - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used.
    - \p opt::disposer - the functor used for disposing removed data node. Default is \p opt::v::empty_disposer. Due the nature
        of GC schema the disposer may be called asynchronously.
    - \p opt::item_counter - the type of item counting feature.
        The item counting is an important part of \p FeldmanHashSet algorithm:
        the \p empty() member function depends on correct item counting.
        Therefore, \p atomicity::empty_item_counter is not allowed as a type of the option.
        Default is \p atomicity::item_counter.
    - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
        or \p opt::v::sequential_consistent (sequentially consistent memory model).
    - \p opt::stat - internal statistics. By default, it is disabled (\p feldman_hashset::empty_stat).
        To enable it use \p feldman_hashset::stat
    - \p opt::rcu_check_deadlock - a deadlock checking policy for \ref cds_intrusive_FeldmanHashSet_rcu "RCU-based FeldmanHashSet"
        Default is \p opt::v::rcu_throw_deadlock
*/
template <typename... Options>
# ifdef CDS_DOXYGEN_INVOKED
    typedef implementation_defined type ; ///< Metafunction result
    // Real implementation: find the base traits among Options, then apply the
    // remaining options on top of it via cds::opt::make_options
    typedef typename cds::opt::make_options<
        typename cds::opt::find_type_traits< traits, Options... >::type
/// Bit-wise memcmp-based comparator for hash value \p T
/**
    The hash value is treated as an opaque fixed-sized bit-string,
    so plain byte-wise comparison defines a total order over hashes.
*/
template <typename T>
struct bitwise_compare
{
    /// Compares \p lhs and \p rhs
    /**
        Returns:
        - <tt> < 0</tt> if <tt>lhs < rhs</tt>
        - <tt>0</tt> if <tt>lhs == rhs</tt>
        - <tt> > 0</tt> if <tt>lhs > rhs</tt>
    */
    int operator()( T const& lhs, T const& rhs ) const
    {
        // fix: use std::memcmp from the standard <cstring> header instead of
        // relying on the non-standard <memory.h> pulling memcmp into the global namespace
        return std::memcmp( &lhs, &rhs, sizeof( T ));
    }
};
/// One-level statistics, see \p FeldmanHashSet::get_level_statistics
struct level_statistics
{
    size_t array_node_count;    ///< Count of array node at the level
    size_t node_capacity;       ///< Array capacity (cells per array node at this level)

    size_t data_cell_count;     ///< The number of data cells in all array node at this level
    size_t array_cell_count;    ///< The number of array cells in all array node at this level
    size_t empty_cell_count;    ///< The number of empty cells in all array node at this level

    //@cond
    level_statistics()
        : array_node_count(0)
        , node_capacity(0)      // fix: was left uninitialized; zero-init so a freshly
                                // resized vector element never exposes garbage
        , data_cell_count(0)
        , array_cell_count(0)
        , empty_cell_count(0)
    {}
    //@endcond
};
// Splits a fixed-sized bit-string (the hash) into consecutive chunks;
// each chunk indexes one level of the multi-level array
template <typename HashType, typename UInt = size_t >
using hash_splitter = cds::algo::split_bitstring< HashType, UInt >;

    // Geometry of the multi-level array, computed once at construction
    size_t head_node_size;      // power-of-two
    size_t head_node_size_log;  // log2( head_node_size )
    size_t array_node_size;     // power-of-two
    size_t array_node_size_log; // log2( array_node_size )

    // Normalizes (head_bits, array_bits) so that the whole hash of hash_size bytes
    // is consumed by one head-level cut plus a whole number of array-level cuts
    static metrics make(size_t head_bits, size_t array_bits, size_t hash_size )
        size_t const hash_bits = hash_size * 8;

        // the head cut cannot be wider than the hash itself
        if (head_bits > hash_bits)
            head_bits = hash_bits;
        // widen the head cut so the remaining bits divide evenly into array_bits chunks
        if ((hash_bits - head_bits) % array_bits != 0)
            head_bits += (hash_bits - head_bits) % array_bits;

        assert((hash_bits - head_bits) % array_bits == 0);

        m.head_node_size_log = head_bits;
        m.head_node_size = size_t(1) << head_bits;
        m.array_node_size_log = array_bits;
        m.array_node_size = size_t(1) << array_bits;
308 } // namespace details
/// Multi-level array: the trie-like core structure shared by FeldmanHashSet implementations.
/// Each cell holds a marked pointer that is either empty, a data node (no flag bits),
/// a child array node (flag_array_node), or mid-conversion (flag_array_converting).
template <typename T, typename Traits>
class multilevel_array
    typedef T value_type;
    typedef Traits traits;
    typedef typename Traits::node_allocator node_allocator; ///< Array node allocator
    typedef typename traits::memory_model memory_model;     ///< C++ memory ordering model
    typedef typename traits::back_off back_off; ///< Backoff strategy
    typedef typename traits::stat stat; ///< Internal statistics type
    typedef typename traits::hash_accessor hash_accessor;   ///< Functor extracting the hash from a data node
    static_assert(!std::is_same< hash_accessor, cds::opt::none >::value, "hash_accessor functor must be specified");

    /// Hash type deduced from \p hash_accessor return type
    typedef typename std::decay<
        typename std::remove_reference<
            decltype(hash_accessor()(std::declval<value_type>()))
    //typedef typename std::result_of< hash_accessor( std::declval<value_type>()) >::type hash_type;
    static_assert(!std::is_pointer<hash_type>::value, "hash_accessor should return a reference to hash value");

    // Comparator: user-provided compare/less if set, else bit-wise memcmp over the hash
    typedef typename cds::opt::details::make_comparator_from<
        feldman_hashset::bitwise_compare< hash_type >
    >::type hash_comparator;

    typedef feldman_hashset::details::hash_splitter< hash_type > hash_splitter;

    // Low-order flag bits stored in a cell's marked pointer
        flag_array_converting = 1, ///< the cell is converting from data node to an array node
        flag_array_node = 2 ///< the cell is a pointer to an array node

    // Marked pointer carrying the two flag bits above (mask = 3)
    typedef cds::details::marked_ptr< value_type, 3 > node_ptr;
    typedef atomics::atomic< node_ptr > atomic_node_ptr;

    // One level of the trie; allocated with the flexible-array idiom, see alloc_array_node()
        array_node * const pParent; ///< parent array node
        size_t const idxParent; ///< index in parent array node
        atomic_node_ptr nodes[1]; ///< node array (over-allocated to the level's capacity)

        array_node(array_node * parent, size_t idx)

        array_node() = delete;
        array_node(array_node const&) = delete;
        array_node(array_node&&) = delete;

    typedef cds::details::Allocator< array_node, node_allocator > cxx_array_node_allocator;

    // Traversal cursor: consumes the hash chunk by chunk while descending the trie
    struct traverse_data {
        hash_splitter splitter; // yields the next hash chunk on each cut()

        traverse_data( hash_type const& hash, multilevel_array& arr )

        // Restarts the traversal from the head node
        void reset( multilevel_array& arr )
            nSlot = splitter.cut( arr.metrics().head_node_size_log );

    feldman_hashset::details::metrics const m_Metrics; // immutable array geometry

    /// Builds the geometry and allocates the head node
    multilevel_array(size_t head_bits, size_t array_bits )
        : m_Metrics(feldman_hashset::details::metrics::make(head_bits, array_bits, sizeof(hash_type)))
        , m_Head( alloc_head_node())

        free_array_node(m_Head);

    /// Descends toward the cell addressed by the hash; returns the slot value observed.
    /// A returned slot with zero flag bits is a data node pointer (possibly null) -
    /// the caller decides how to proceed.
    node_ptr traverse(traverse_data& pos)
            node_ptr slot = pos.pArr->nodes[pos.nSlot].load(memory_model::memory_order_acquire);
            if (slot.bits() == flag_array_node) {
                // array node, go down the tree
                assert(slot.ptr() != nullptr);
                assert( !pos.splitter.eos());
                pos.nSlot = pos.splitter.cut( metrics().array_node_size_log );
                assert( pos.nSlot < metrics().array_node_size );
                pos.pArr = to_array(slot.ptr());
            else if (slot.bits() == flag_array_converting) {
                // the slot is converting to array node right now by another thread
                stats().onSlotConverting();
                assert(slot.bits() == 0);

    size_t head_size() const
        return m_Metrics.head_node_size;

    size_t array_node_size() const
        return m_Metrics.array_node_size;

    /// Collects per-level statistics; walks the structure with relaxed loads,
    /// so the snapshot is approximate under concurrency - intended for monitoring
    void get_level_statistics(std::vector< feldman_hashset::level_statistics>& stat) const
        gather_level_statistics(stat, 0, m_Head, head_size());

    array_node * head() const

    feldman_hashset::details::metrics const& metrics() const

    // The function is not thread-safe. For use in dtor only
    // Destroy all array nodes
        destroy_array_nodes(m_Head, head_size());

    // Recursively frees child array nodes; data nodes are NOT disposed here
    void destroy_array_nodes(array_node * pArr, size_t nSize)
        for (atomic_node_ptr * p = pArr->nodes, *pLast = p + nSize; p != pLast; ++p) {
            node_ptr slot = p->load(memory_model::memory_order_relaxed);
            if (slot.bits() == flag_array_node) {
                destroy_array_nodes(to_array(slot.ptr()), array_node_size());
                free_array_node(to_array(slot.ptr()));
                p->store(node_ptr(), memory_model::memory_order_relaxed);

    // Flexible-array allocation: raw block sized for (nSize - 1) extra cells beyond
    // the one declared in array_node, then placement-new of the atomic cell array
    static array_node * alloc_array_node(size_t nSize, array_node * pParent, size_t idxParent)
        array_node * pNode = cxx_array_node_allocator().NewBlock(sizeof(array_node) + sizeof(atomic_node_ptr) * (nSize - 1), pParent, idxParent);
        new (pNode->nodes) atomic_node_ptr[nSize];

    array_node * alloc_head_node() const
        return alloc_array_node(head_size(), nullptr, 0);

    array_node * alloc_array_node(array_node * pParent, size_t idxParent) const
        return alloc_array_node(array_node_size(), pParent, idxParent);

    static void free_array_node(array_node * parr)
        cxx_array_node_allocator().Delete(parr);

    // Converter between data-node and array-node pointers: a cell stores an
    // array-node pointer reinterpreted as value_type* and tagged with flag_array_node
        converter(value_type * p)

        converter(array_node * p)

    static array_node * to_array(value_type * p)
        return converter(p).pArr;

    static value_type * to_node(array_node * p)
        return converter(p).pData;

    void gather_level_statistics(std::vector<feldman_hashset::level_statistics>& stat, size_t nLevel, array_node * pArr, size_t nSize) const
        if (stat.size() <= nLevel) {
            stat.resize(nLevel + 1);
            stat[nLevel].node_capacity = nSize; // every node at one level has the same capacity

        ++stat[nLevel].array_node_count;
        for (atomic_node_ptr * p = pArr->nodes, *pLast = p + nSize; p != pLast; ++p) {
            node_ptr slot = p->load(memory_model::memory_order_relaxed);
                ++stat[nLevel].array_cell_count;
                if (slot.bits() == flag_array_node)
                    gather_level_statistics(stat, nLevel + 1, to_array(slot.ptr()), array_node_size());
                ++stat[nLevel].data_cell_count;
                ++stat[nLevel].empty_cell_count;

    bool expand_slot( traverse_data& pos, node_ptr current)
        return expand_slot( pos.pArr, pos.nSlot, current, pos.splitter.bit_offset());

    // Converts a data-node slot into a child array node in three steps:
    // 1) CAS the slot to the "converting" mark, 2) move the existing data node
    // into the new child at the index given by its next hash chunk,
    // 3) CAS the slot to point at the published child array node.
    bool expand_slot(array_node * pParent, size_t idxParent, node_ptr current, size_t nOffset)
        assert(current.bits() == 0);
        assert(current.ptr());

        array_node * pArr = alloc_array_node(pParent, idxParent);

        node_ptr cur(current.ptr());
        atomic_node_ptr& slot = pParent->nodes[idxParent];
        if (!slot.compare_exchange_strong(cur, cur | flag_array_converting, memory_model::memory_order_release, atomics::memory_order_relaxed))
            // another thread changed the slot first - roll back our allocation
            stats().onExpandNodeFailed();
            free_array_node(pArr);

        // relocate the existing data node into the child array by its next hash chunk
        size_t idx = hash_splitter( hash_accessor()(*current.ptr()), nOffset ).cut( m_Metrics.array_node_size_log );
        pArr->nodes[idx].store(current, memory_model::memory_order_release);

        cur = cur | flag_array_converting;
            // publish the child array node, replacing the "converting" mark
            slot.compare_exchange_strong(cur, node_ptr(to_node(pArr), flag_array_node), memory_model::memory_order_release, atomics::memory_order_relaxed)
        stats().onExpandNodeSuccess();
        stats().onArrayNodeCreated();
588 } // namespace feldman_hashset
// Forward declaration; GC-specific implementations (HP, DHP, RCU) specialize this
// template in their own headers
template < class GC, typename T, class Traits = feldman_hashset::traits >
class FeldmanHashSet;
596 }} // namespace cds::intrusive
598 #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_FELDMAN_HASHSET_BASE_H