/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
31 #ifndef CDSLIB_INTRUSIVE_DETAILS_FELDMAN_HASHSET_BASE_H
32 #define CDSLIB_INTRUSIVE_DETAILS_FELDMAN_HASHSET_BASE_H
#include <memory.h> // memcmp, memcpy (legacy, kept for compatibility)
#include <cstring>  // std::memcmp
#include <type_traits>

#include <cds/intrusive/details/base.h>
#include <cds/opt/compare.h>
#include <cds/algo/atomic.h>
#include <cds/algo/split_bitstring.h>
#include <cds/details/marked_ptr.h>
#include <cds/urcu/options.h>
44 namespace cds { namespace intrusive {
46 /// FeldmanHashSet related definitions
/** @ingroup cds_intrusive_helper
*/
49 namespace feldman_hashset {
/// Hash accessor option
/**
    @copydetails traits::hash_accessor
*/
template <typename Accessor>
struct hash_accessor {
    //@cond
    template <typename Base> struct pack: public Base
    {
        typedef Accessor hash_accessor;
    };
    //@endcond
};

/// The size of hash value in bytes
/**
    @copydetails traits::hash_size
*/
template <size_t Size>
struct hash_size {
    //@cond
    template <typename Base> struct pack: public Base
    {
        enum: size_t {
            hash_size = Size
        };
    };
    //@endcond
};
80 /// \p FeldmanHashSet internal statistics
81 template <typename EventCounter = cds::atomicity::event_counter>
83 typedef EventCounter event_counter ; ///< Event counter type
85 event_counter m_nInsertSuccess; ///< Number of success \p insert() operations
86 event_counter m_nInsertFailed; ///< Number of failed \p insert() operations
87 event_counter m_nInsertRetry; ///< Number of attempts to insert new item
88 event_counter m_nUpdateNew; ///< Number of new item inserted for \p update()
89 event_counter m_nUpdateExisting; ///< Number of existing item updates
90 event_counter m_nUpdateFailed; ///< Number of failed \p update() call
91 event_counter m_nUpdateRetry; ///< Number of attempts to update the item
92 event_counter m_nEraseSuccess; ///< Number of successful \p erase(), \p unlink(), \p extract() operations
93 event_counter m_nEraseFailed; ///< Number of failed \p erase(), \p unlink(), \p extract() operations
94 event_counter m_nEraseRetry; ///< Number of attempts to \p erase() an item
95 event_counter m_nFindSuccess; ///< Number of successful \p find() and \p get() operations
96 event_counter m_nFindFailed; ///< Number of failed \p find() and \p get() operations
98 event_counter m_nExpandNodeSuccess; ///< Number of succeeded attempts converting data node to array node
99 event_counter m_nExpandNodeFailed; ///< Number of failed attempts converting data node to array node
100 event_counter m_nSlotChanged; ///< Number of array node slot changing by other thread during an operation
101 event_counter m_nSlotConverting; ///< Number of events when we encounter a slot while it is converting to array node
103 event_counter m_nArrayNodeCount; ///< Number of array nodes
104 event_counter m_nHeight; ///< Current height of the tree
107 void onInsertSuccess() { ++m_nInsertSuccess; }
108 void onInsertFailed() { ++m_nInsertFailed; }
109 void onInsertRetry() { ++m_nInsertRetry; }
110 void onUpdateNew() { ++m_nUpdateNew; }
111 void onUpdateExisting() { ++m_nUpdateExisting; }
112 void onUpdateFailed() { ++m_nUpdateFailed; }
113 void onUpdateRetry() { ++m_nUpdateRetry; }
114 void onEraseSuccess() { ++m_nEraseSuccess; }
115 void onEraseFailed() { ++m_nEraseFailed; }
116 void onEraseRetry() { ++m_nEraseRetry; }
117 void onFindSuccess() { ++m_nFindSuccess; }
118 void onFindFailed() { ++m_nFindFailed; }
120 void onExpandNodeSuccess() { ++m_nExpandNodeSuccess; }
121 void onExpandNodeFailed() { ++m_nExpandNodeFailed; }
122 void onSlotChanged() { ++m_nSlotChanged; }
123 void onSlotConverting() { ++m_nSlotConverting; }
124 void onArrayNodeCreated() { ++m_nArrayNodeCount; }
125 void height( size_t h ) { if (m_nHeight < h ) m_nHeight = h; }
/// \p FeldmanHashSet empty internal statistics
/**
    Every handler is a no-op; the optimizer removes all calls entirely.
*/
struct empty_stat {
    //@cond
    void onInsertSuccess()      const {}
    void onInsertFailed()       const {}
    void onInsertRetry()        const {}
    void onUpdateNew()          const {}
    void onUpdateExisting()     const {}
    void onUpdateFailed()       const {}
    void onUpdateRetry()        const {}
    void onEraseSuccess()       const {}
    void onEraseFailed()        const {}
    void onEraseRetry()         const {}
    void onFindSuccess()        const {}
    void onFindFailed()         const {}

    void onExpandNodeSuccess()  const {}
    void onExpandNodeFailed()   const {}
    void onSlotChanged()        const {}
    void onSlotConverting()     const {}
    void onArrayNodeCreated()   const {}
    void height( size_t )      const {}
    //@endcond
};
154 /// \p FeldmanHashSet traits
157 /// Mandatory functor to get hash value from data node
159 It is most-important feature of \p FeldmanHashSet.
160 That functor must return a reference to fixed-sized hash value of data node.
161 The return value of that functor specifies the type of hash value.
165 typedef uint8_t hash_type[32]; // 256-bit hash type
167 hash_type hash; // 256-bit hash value
172 struct foo_hash_accessor {
173 hash_type const& operator()( foo const& d ) const
180 typedef cds::opt::none hash_accessor;
182 /// The size of hash value in bytes
184 By default, the size of hash value is <tt>sizeof( hash_type )</tt>.
185 Sometimes it is not correct, for example, for that 6-byte struct \p static_assert will be thrown:
192 static_assert( sizeof( key_type ) == 6, "Key type size mismatch" );
194 For that case you can specify \p hash_size explicitly.
196 Value \p 0 means <tt>sizeof( hash_type )</tt>.
198 static CDS_CONSTEXPR size_t const hash_size = 0;
200 /// Disposer for removing data nodes
201 typedef cds::intrusive::opt::v::empty_disposer disposer;
203 /// Hash comparing functor
205 No default functor is provided.
206 If the option is not specified, the \p less option is used.
208 typedef cds::opt::none compare;
210 /// Specifies binary predicate used for hash compare.
212 If \p %less and \p %compare are not specified, \p memcmp() -like @ref bitwise_compare "bit-wise hash comparator" is used
213 because the hash value is treated as fixed-sized bit-string.
215 typedef cds::opt::none less;
219 The item counting is an important part of \p FeldmanHashSet algorithm:
220 the \p empty() member function depends on correct item counting.
221 Therefore, \p atomicity::empty_item_counter is not allowed as a type of the option.
223 Default is \p atomicity::item_counter.
225 typedef cds::atomicity::item_counter item_counter;
227 /// Array node allocator
229 Allocator for array nodes. The allocator is used for creating \p headNode and \p arrayNode when the set grows.
230 Default is \ref CDS_DEFAULT_ALLOCATOR
232 typedef CDS_DEFAULT_ALLOCATOR node_allocator;
234 /// C++ memory ordering model
236 Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
237 or \p opt::v::sequential_consistent (sequentially consisnent memory model).
239 typedef cds::opt::v::relaxed_ordering memory_model;
241 /// Back-off strategy
242 typedef cds::backoff::Default back_off;
244 /// Internal statistics
246 By default, internal statistics is disabled (\p feldman_hashset::empty_stat).
247 Use \p feldman_hashset::stat to enable it.
249 typedef empty_stat stat;
251 /// RCU deadlock checking policy (only for \ref cds_intrusive_FeldmanHashSet_rcu "RCU-based FeldmanHashSet")
253 List of available policy see \p opt::rcu_check_deadlock
255 typedef cds::opt::v::rcu_throw_deadlock rcu_check_deadlock;
258 /// Metafunction converting option list to \p feldman_hashset::traits
260 Supported \p Options are:
261 - \p feldman_hashset::hash_accessor - mandatory option, hash accessor functor.
262 @copydetails traits::hash_accessor
263 - \p feldman_hashset::hash_size - the size of hash value in bytes.
264 @copydetails traits::hash_size
265 - \p opt::node_allocator - array node allocator.
266 @copydetails traits::node_allocator
267 - \p opt::compare - hash comparison functor. No default functor is provided.
268 If the option is not specified, the \p opt::less is used.
269 - \p opt::less - specifies binary predicate used for hash comparison.
270 If the option is not specified, \p memcmp() -like bit-wise hash comparator is used
271 because the hash value is treated as fixed-sized bit-string.
272 - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used.
273 - \p opt::disposer - the functor used for disposing removed data node. Default is \p opt::v::empty_disposer. Due the nature
274 of GC schema the disposer may be called asynchronously.
275 - \p opt::item_counter - the type of item counting feature.
276 The item counting is an important part of \p FeldmanHashSet algorithm:
277 the \p empty() member function depends on correct item counting.
278 Therefore, \p atomicity::empty_item_counter is not allowed as a type of the option.
279 Default is \p atomicity::item_counter.
280 - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
281 or \p opt::v::sequential_consistent (sequentially consisnent memory model).
282 - \p opt::stat - internal statistics. By default, it is disabled (\p feldman_hashset::empty_stat).
283 To enable it use \p feldman_hashset::stat
284 - \p opt::rcu_check_deadlock - a deadlock checking policy for \ref cds_intrusive_FeldmanHashSet_rcu "RCU-based FeldmanHashSet"
285 Default is \p opt::v::rcu_throw_deadlock
287 template <typename... Options>
290 # ifdef CDS_DOXYGEN_INVOKED
291 typedef implementation_defined type ; ///< Metafunction result
293 typedef typename cds::opt::make_options<
294 typename cds::opt::find_type_traits< traits, Options... >::type
/// Bit-wise memcmp-based comparator for hash value \p T
template <typename T>
struct bitwise_compare
{
    /// Compares \p lhs and \p rhs byte-wise
    /**
        Returns:
        - <tt> < 0</tt> if <tt>lhs < rhs</tt>
        - <tt>0</tt> if <tt>lhs == rhs</tt>
        - <tt> > 0</tt> if <tt>lhs > rhs</tt>
    */
    int operator()( T const& lhs, T const& rhs ) const
    {
        // Use std::memcmp from the standard <cstring> header instead of
        // relying on ::memcmp pulled in by the non-standard <memory.h>
        return std::memcmp( &lhs, &rhs, sizeof( T ));
    }
};
/// One-level statistics, see \p FeldmanHashSet::get_level_statistics
struct level_statistics
{
    size_t array_node_count;    ///< Count of array node at the level
    size_t node_capacity;       ///< Array capacity

    size_t data_cell_count;     ///< The number of data cells in all array node at this level
    size_t array_cell_count;    ///< The number of array cells in all array node at this level
    size_t empty_cell_count;    ///< The number of empty cells in all array node at this level

    //@cond
    level_statistics()
        : array_node_count(0)
        , node_capacity(0)      // zero-init: previously left indeterminate until set by the caller
        , data_cell_count(0)
        , array_cell_count(0)
        , empty_cell_count(0)
    {}
    //@endcond
};
/// Bit-string splitter for a fixed-size hash value
// Cuts the hash into successive bit chunks that serve as per-level array indexes.
template <typename HashType, size_t HashSize >
using hash_splitter = cds::algo::split_bitstring< HashType, HashSize >;
/// Geometry of the multi-level array: head/array node sizes and their log2
struct metrics {
    size_t head_node_size;      // power-of-two
    size_t head_node_size_log;  // log2( head_node_size )
    size_t array_node_size;     // power-of-two
    size_t array_node_size_log; // log2( array_node_size )

    /// Computes consistent geometry from requested bit counts and hash size (bytes)
    static metrics make(size_t head_bits, size_t array_bits, size_t hash_size )
    {
        size_t const hash_bits = hash_size * 8;

        // clamp to sane minima
        if (array_bits < 2)
            array_bits = 2;
        if (head_bits < 4)
            head_bits = 4;
        // the head cannot consume more bits than the hash provides
        if (head_bits > hash_bits)
            head_bits = hash_bits;
        // grow the head so remaining bits divide evenly into array-node chunks
        if ((hash_bits - head_bits) % array_bits != 0)
            head_bits += (hash_bits - head_bits) % array_bits;

        assert((hash_bits - head_bits) % array_bits == 0);

        metrics result;
        result.head_node_size_log = head_bits;
        result.head_node_size = size_t(1) << head_bits;
        result.array_node_size_log = array_bits;
        result.array_node_size = size_t(1) << array_bits;
        return result;
    }
};
372 } // namespace details
376 template <typename T, typename Traits>
377 class multilevel_array
380 typedef T value_type;
381 typedef Traits traits;
382 typedef typename Traits::node_allocator node_allocator;
383 typedef typename traits::memory_model memory_model;
384 typedef typename traits::back_off back_off; ///< Backoff strategy
385 typedef typename traits::stat stat; ///< Internal statistics type
387 typedef typename traits::hash_accessor hash_accessor;
388 static_assert(!std::is_same< hash_accessor, cds::opt::none >::value, "hash_accessor functor must be specified");
390 /// Hash type deduced from \p hash_accessor return type
391 typedef typename std::decay<
392 typename std::remove_reference<
393 decltype(hash_accessor()(std::declval<value_type>()))
396 static_assert(!std::is_pointer<hash_type>::value, "hash_accessor should return a reference to hash value");
398 typedef typename cds::opt::details::make_comparator_from<
401 feldman_hashset::bitwise_compare< hash_type >
402 >::type hash_comparator;
404 /// The size of hash_type in bytes, see \p traits::hash_size for explanation
405 static CDS_CONSTEXPR size_t const c_hash_size = traits::hash_size == 0 ? sizeof( hash_type ) : static_cast<size_t>( traits::hash_size );
407 typedef feldman_hashset::details::hash_splitter< hash_type, c_hash_size > hash_splitter;
410 flag_array_converting = 1, ///< the cell is converting from data node to an array node
411 flag_array_node = 2 ///< the cell is a pointer to an array node
416 typedef cds::details::marked_ptr< value_type, 3 > node_ptr;
417 typedef atomics::atomic< node_ptr > atomic_node_ptr;
420 array_node * const pParent; ///< parent array node
421 size_t const idxParent; ///< index in parent array node
422 atomic_node_ptr nodes[1]; ///< node array
424 array_node(array_node * parent, size_t idx)
429 array_node() = delete;
430 array_node(array_node const&) = delete;
431 array_node(array_node&&) = delete;
434 typedef cds::details::Allocator< array_node, node_allocator > cxx_array_node_allocator;
436 struct traverse_data {
437 hash_splitter splitter;
442 traverse_data( hash_type const& hash, multilevel_array& arr )
448 void reset( multilevel_array& arr )
452 nSlot = splitter.cut( arr.metrics().head_node_size_log );
458 feldman_hashset::details::metrics const m_Metrics;
463 multilevel_array(size_t head_bits, size_t array_bits )
464 : m_Metrics(feldman_hashset::details::metrics::make( head_bits, array_bits, c_hash_size ))
465 , m_Head( alloc_head_node())
471 free_array_node(m_Head);
474 node_ptr traverse(traverse_data& pos)
478 node_ptr slot = pos.pArr->nodes[pos.nSlot].load(memory_model::memory_order_acquire);
479 if (slot.bits() == flag_array_node) {
480 // array node, go down the tree
481 assert(slot.ptr() != nullptr);
482 assert( !pos.splitter.eos());
483 pos.nSlot = pos.splitter.cut( metrics().array_node_size_log );
484 assert( pos.nSlot < metrics().array_node_size );
485 pos.pArr = to_array(slot.ptr());
488 else if (slot.bits() == flag_array_converting) {
489 // the slot is converting to array node right now
491 stats().onSlotConverting();
495 assert(slot.bits() == 0);
501 size_t head_size() const
503 return m_Metrics.head_node_size;
506 size_t array_node_size() const
508 return m_Metrics.array_node_size;
511 void get_level_statistics(std::vector< feldman_hashset::level_statistics>& stat) const
514 gather_level_statistics(stat, 0, m_Head, head_size());
518 array_node * head() const
528 feldman_hashset::details::metrics const& metrics() const
535 // The function is not thread-safe. For use in dtor only
536 // Destroy all array nodes
537 destroy_array_nodes(m_Head, head_size());
540 void destroy_array_nodes(array_node * pArr, size_t nSize)
542 for (atomic_node_ptr * p = pArr->nodes, *pLast = p + nSize; p != pLast; ++p) {
543 node_ptr slot = p->load(memory_model::memory_order_relaxed);
544 if (slot.bits() == flag_array_node) {
545 destroy_array_nodes(to_array(slot.ptr()), array_node_size());
546 free_array_node(to_array(slot.ptr()));
547 p->store(node_ptr(), memory_model::memory_order_relaxed);
552 static array_node * alloc_array_node(size_t nSize, array_node * pParent, size_t idxParent)
554 array_node * pNode = cxx_array_node_allocator().NewBlock(sizeof(array_node) + sizeof(atomic_node_ptr) * (nSize - 1), pParent, idxParent);
555 new (pNode->nodes) atomic_node_ptr[nSize];
559 array_node * alloc_head_node() const
561 return alloc_array_node(head_size(), nullptr, 0);
564 array_node * alloc_array_node(array_node * pParent, size_t idxParent) const
566 return alloc_array_node(array_node_size(), pParent, idxParent);
569 static void free_array_node(array_node * parr)
571 cxx_array_node_allocator().Delete(parr);
578 converter(value_type * p)
582 converter(array_node * p)
587 static array_node * to_array(value_type * p)
589 return converter(p).pArr;
591 static value_type * to_node(array_node * p)
593 return converter(p).pData;
596 void gather_level_statistics(std::vector<feldman_hashset::level_statistics>& stat, size_t nLevel, array_node * pArr, size_t nSize) const
598 if (stat.size() <= nLevel) {
599 stat.resize(nLevel + 1);
600 stat[nLevel].node_capacity = nSize;
603 ++stat[nLevel].array_node_count;
604 for (atomic_node_ptr * p = pArr->nodes, *pLast = p + nSize; p != pLast; ++p) {
605 node_ptr slot = p->load(memory_model::memory_order_relaxed);
607 ++stat[nLevel].array_cell_count;
608 if (slot.bits() == flag_array_node)
609 gather_level_statistics(stat, nLevel + 1, to_array(slot.ptr()), array_node_size());
612 ++stat[nLevel].data_cell_count;
614 ++stat[nLevel].empty_cell_count;
618 bool expand_slot( traverse_data& pos, node_ptr current)
620 return expand_slot( pos.pArr, pos.nSlot, current, pos.splitter.bit_offset());
624 bool expand_slot(array_node * pParent, size_t idxParent, node_ptr current, size_t nOffset)
626 assert(current.bits() == 0);
627 assert(current.ptr());
629 array_node * pArr = alloc_array_node(pParent, idxParent);
631 node_ptr cur(current.ptr());
632 atomic_node_ptr& slot = pParent->nodes[idxParent];
633 if (!slot.compare_exchange_strong(cur, cur | flag_array_converting, memory_model::memory_order_release, atomics::memory_order_relaxed))
635 stats().onExpandNodeFailed();
636 free_array_node(pArr);
640 size_t idx = hash_splitter( hash_accessor()(*current.ptr()), nOffset ).cut( m_Metrics.array_node_size_log );
641 pArr->nodes[idx].store(current, memory_model::memory_order_release);
643 cur = cur | flag_array_converting;
645 slot.compare_exchange_strong(cur, node_ptr(to_node(pArr), flag_array_node), memory_model::memory_order_release, atomics::memory_order_relaxed)
648 stats().onExpandNodeSuccess();
649 stats().onArrayNodeCreated();
654 } // namespace feldman_hashset
// Forward declaration
// NOTE(review): the definition is not in this header — presumably supplied by
// GC-specific FeldmanHashSet headers elsewhere in the library; confirm.
template < class GC, typename T, class Traits = feldman_hashset::traits >
class FeldmanHashSet;
662 }} // namespace cds::intrusive
664 #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_FELDMAN_HASHSET_BASE_H