2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_INTRUSIVE_DETAILS_FELDMAN_HASHSET_BASE_H
32 #define CDSLIB_INTRUSIVE_DETAILS_FELDMAN_HASHSET_BASE_H
34 #include <memory.h> // memcmp, memcpy
35 #include <type_traits>
37 #include <cds/intrusive/details/base.h>
38 #include <cds/opt/compare.h>
39 #include <cds/algo/atomic.h>
40 #include <cds/algo/split_bitstring.h>
41 #include <cds/details/marked_ptr.h>
42 #include <cds/urcu/options.h>
44 namespace cds { namespace intrusive {
46 /// FeldmanHashSet related definitions
47 /** @ingroup cds_intrusive_helper
49 namespace feldman_hashset {
/// Hash accessor option
/**
    @copydetails traits::hash_accessor
*/
template <typename Accessor>
struct hash_accessor {
    //@cond
    // Option pack: injects \p Accessor into the option-combined traits as \p hash_accessor
    template <typename Base> struct pack: public Base
    {
        typedef Accessor hash_accessor;
    };
    //@endcond
};
64 /// \p FeldmanHashSet internal statistics
65 template <typename EventCounter = cds::atomicity::event_counter>
67 typedef EventCounter event_counter ; ///< Event counter type
69 event_counter m_nInsertSuccess; ///< Number of success \p insert() operations
70 event_counter m_nInsertFailed; ///< Number of failed \p insert() operations
71 event_counter m_nInsertRetry; ///< Number of attempts to insert new item
72 event_counter m_nUpdateNew; ///< Number of new item inserted for \p update()
73 event_counter m_nUpdateExisting; ///< Number of existing item updates
74 event_counter m_nUpdateFailed; ///< Number of failed \p update() call
75 event_counter m_nUpdateRetry; ///< Number of attempts to update the item
76 event_counter m_nEraseSuccess; ///< Number of successful \p erase(), \p unlink(), \p extract() operations
77 event_counter m_nEraseFailed; ///< Number of failed \p erase(), \p unlink(), \p extract() operations
78 event_counter m_nEraseRetry; ///< Number of attempts to \p erase() an item
79 event_counter m_nFindSuccess; ///< Number of successful \p find() and \p get() operations
80 event_counter m_nFindFailed; ///< Number of failed \p find() and \p get() operations
82 event_counter m_nExpandNodeSuccess; ///< Number of succeeded attempts converting data node to array node
83 event_counter m_nExpandNodeFailed; ///< Number of failed attempts converting data node to array node
84 event_counter m_nSlotChanged; ///< Number of array node slot changing by other thread during an operation
85 event_counter m_nSlotConverting; ///< Number of events when we encounter a slot while it is converting to array node
87 event_counter m_nArrayNodeCount; ///< Number of array nodes
88 event_counter m_nHeight; ///< Current height of the tree
91 void onInsertSuccess() { ++m_nInsertSuccess; }
92 void onInsertFailed() { ++m_nInsertFailed; }
93 void onInsertRetry() { ++m_nInsertRetry; }
94 void onUpdateNew() { ++m_nUpdateNew; }
95 void onUpdateExisting() { ++m_nUpdateExisting; }
96 void onUpdateFailed() { ++m_nUpdateFailed; }
97 void onUpdateRetry() { ++m_nUpdateRetry; }
98 void onEraseSuccess() { ++m_nEraseSuccess; }
99 void onEraseFailed() { ++m_nEraseFailed; }
100 void onEraseRetry() { ++m_nEraseRetry; }
101 void onFindSuccess() { ++m_nFindSuccess; }
102 void onFindFailed() { ++m_nFindFailed; }
104 void onExpandNodeSuccess() { ++m_nExpandNodeSuccess; }
105 void onExpandNodeFailed() { ++m_nExpandNodeFailed; }
106 void onSlotChanged() { ++m_nSlotChanged; }
107 void onSlotConverting() { ++m_nSlotConverting; }
108 void onArrayNodeCreated() { ++m_nArrayNodeCount; }
109 void height( size_t h ) { if (m_nHeight < h ) m_nHeight = h; }
/// \p FeldmanHashSet empty internal statistics
/**
    Every event hook is an empty inline function, so statistics gathering
    adds no cost when disabled (this is the default \p traits::stat).
*/
struct empty_stat {
    //@cond
    void onInsertSuccess()      const {}
    void onInsertFailed()       const {}
    void onInsertRetry()        const {}
    void onUpdateNew()          const {}
    void onUpdateExisting()     const {}
    void onUpdateFailed()       const {}
    void onUpdateRetry()        const {}
    void onEraseSuccess()       const {}
    void onEraseFailed()        const {}
    void onEraseRetry()         const {}
    void onFindSuccess()        const {}
    void onFindFailed()         const {}

    void onExpandNodeSuccess()  const {}
    void onExpandNodeFailed()   const {}
    void onSlotChanged()        const {}
    void onSlotConverting()     const {}
    void onArrayNodeCreated()   const {}
    void height( size_t )      const {}
    //@endcond
};
138 /// \p FeldmanHashSet traits
141 /// Mandatory functor to get hash value from data node
143 It is most-important feature of \p FeldmanHashSet.
144 That functor must return a reference to fixed-sized hash value of data node.
145 The return value of that functor specifies the type of hash value.
149 typedef uint8_t hash_type[32]; // 256-bit hash type
151 hash_type hash; // 256-bit hash value
156 struct foo_hash_accessor {
157 hash_type const& operator()( foo const& d ) const
164 typedef cds::opt::none hash_accessor;
166 /// Disposer for removing data nodes
167 typedef cds::intrusive::opt::v::empty_disposer disposer;
169 /// Hash comparing functor
171 No default functor is provided.
172 If the option is not specified, the \p less option is used.
174 typedef cds::opt::none compare;
176 /// Specifies binary predicate used for hash compare.
178 If \p %less and \p %compare are not specified, \p memcmp() -like @ref bitwise_compare "bit-wise hash comparator" is used
179 because the hash value is treated as fixed-sized bit-string.
181 typedef cds::opt::none less;
185 The item counting is an important part of \p FeldmanHashSet algorithm:
186 the \p empty() member function depends on correct item counting.
187 Therefore, \p atomicity::empty_item_counter is not allowed as a type of the option.
189 Default is \p atomicity::item_counter.
191 typedef cds::atomicity::item_counter item_counter;
193 /// Array node allocator
195 Allocator for array nodes. That allocator is used for creating \p headNode and \p arrayNode when the set grows.
196 Default is \ref CDS_DEFAULT_ALLOCATOR
198 typedef CDS_DEFAULT_ALLOCATOR node_allocator;
200 /// C++ memory ordering model
202 Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
203 or \p opt::v::sequential_consistent (sequentially consisnent memory model).
205 typedef cds::opt::v::relaxed_ordering memory_model;
207 /// Back-off strategy
208 typedef cds::backoff::Default back_off;
210 /// Internal statistics
212 By default, internal statistics is disabled (\p feldman_hashset::empty_stat).
213 Use \p feldman_hashset::stat to enable it.
215 typedef empty_stat stat;
217 /// RCU deadlock checking policy (only for \ref cds_intrusive_FeldmanHashSet_rcu "RCU-based FeldmanHashSet")
219 List of available policy see \p opt::rcu_check_deadlock
221 typedef cds::opt::v::rcu_throw_deadlock rcu_check_deadlock;
224 /// Metafunction converting option list to \p feldman_hashset::traits
226 Supported \p Options are:
227 - \p feldman_hashset::hash_accessor - mandatory option, hash accessor functor.
228 @copydetails traits::hash_accessor
229 - \p opt::node_allocator - array node allocator.
230 @copydetails traits::node_allocator
231 - \p opt::compare - hash comparison functor. No default functor is provided.
232 If the option is not specified, the \p opt::less is used.
233 - \p opt::less - specifies binary predicate used for hash comparison.
234 If the option is not specified, \p memcmp() -like bit-wise hash comparator is used
235 because the hash value is treated as fixed-sized bit-string.
236 - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used.
237 - \p opt::disposer - the functor used for disposing removed data node. Default is \p opt::v::empty_disposer. Due the nature
238 of GC schema the disposer may be called asynchronously.
239 - \p opt::item_counter - the type of item counting feature.
240 The item counting is an important part of \p FeldmanHashSet algorithm:
241 the \p empty() member function depends on correct item counting.
242 Therefore, \p atomicity::empty_item_counter is not allowed as a type of the option.
243 Default is \p atomicity::item_counter.
244 - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
245 or \p opt::v::sequential_consistent (sequentially consisnent memory model).
246 - \p opt::stat - internal statistics. By default, it is disabled (\p feldman_hashset::empty_stat).
247 To enable it use \p feldman_hashset::stat
248 - \p opt::rcu_check_deadlock - a deadlock checking policy for \ref cds_intrusive_FeldmanHashSet_rcu "RCU-based FeldmanHashSet"
249 Default is \p opt::v::rcu_throw_deadlock
251 template <typename... Options>
254 # ifdef CDS_DOXYGEN_INVOKED
255 typedef implementation_defined type ; ///< Metafunction result
257 typedef typename cds::opt::make_options<
258 typename cds::opt::find_type_traits< traits, Options... >::type
/// Bit-wise memcmp-based comparator for hash value \p T
template <typename T>
struct bitwise_compare
{
    /// Compares \p lhs and \p rhs as opaque fixed-size byte strings
    /**
        Returns:
        - <tt> < 0</tt> if <tt>lhs < rhs</tt>
        - <tt>0</tt> if <tt>lhs == rhs</tt>
        - <tt> > 0</tt> if <tt>lhs > rhs</tt>
    */
    int operator()( T const& lhs, T const& rhs ) const
    {
        void const* pLeft  = &lhs;
        void const* pRight = &rhs;
        return memcmp( pLeft, pRight, sizeof( T ));
    }
};
/// One-level statistics, see \p FeldmanHashSet::get_level_statistics
/**
    All counters are zero-initialized via default member initializers so a
    freshly-constructed object is fully determinate (the previous constructor
    left \p node_capacity — and, in this excerpt, \p data_cell_count —
    uninitialized).
*/
struct level_statistics
{
    size_t array_node_count = 0;  ///< Count of array node at the level
    size_t node_capacity = 0;     ///< Array capacity (filled in by the statistics gatherer)
    size_t data_cell_count = 0;   ///< The number of data cells in all array node at this level
    size_t array_cell_count = 0;  ///< The number of array cells in all array node at this level
    size_t empty_cell_count = 0;  ///< The number of empty cells in all array node at this level
};
/// Hash splitter: cuts a fixed-size hash value into successive bit-field chunks
/// (used below to derive the head-node index and the per-level array-node indexes)
template <typename HashType, typename UInt = size_t >
using hash_splitter = cds::algo::split_bitstring< HashType, UInt >;
/// Geometry of the multi-level array: head node and array node sizes
struct metrics {
    size_t head_node_size;      ///< Size of the head node (power-of-two)
    size_t head_node_size_log;  ///< log2( head_node_size )
    size_t array_node_size;     ///< Size of non-head array nodes (power-of-two)
    size_t array_node_size_log; ///< log2( array_node_size )

    /// Computes the metrics for a \p hash_size -byte hash value
    /**
        \p head_bits - requested log2 of the head node size;
        \p array_bits - requested log2 of the array node size;
        \p hash_size - size of the hash value in bytes.

        The arguments are clamped to sane minima (avoids a modulo-by-zero
        for degenerate input), and \p head_bits is then adjusted so that the
        hash bits remaining after the head index divide evenly into
        \p array_bits -sized chunks.
    */
    static metrics make( size_t head_bits, size_t array_bits, size_t hash_size )
    {
        size_t const hash_bits = hash_size * 8;

        // Clamp degenerate arguments
        if ( array_bits < 2 )
            array_bits = 2;
        if ( head_bits < 4 )
            head_bits = 4;
        if ( head_bits > hash_bits )
            head_bits = hash_bits;
        // Grow head_bits so the tail of the hash is a multiple of array_bits
        if ( ( hash_bits - head_bits ) % array_bits != 0 )
            head_bits += ( hash_bits - head_bits ) % array_bits;

        assert( ( hash_bits - head_bits ) % array_bits == 0 );

        metrics m;
        m.head_node_size_log = head_bits;
        m.head_node_size = size_t(1) << head_bits;
        m.array_node_size_log = array_bits;
        m.array_node_size = size_t(1) << array_bits;
        return m;
    }
};
336 } // namespace details
/// Multi-level (Feldman) array of nodes — the internal tree underlying \p FeldmanHashSet
/**
    \p T is the data node type, \p Traits is a \p feldman_hashset::traits -like class.
    Each cell of an array node holds a marked pointer: an unmarked pointer is a data
    node, \p flag_array_node marks a pointer to a child array node, and
    \p flag_array_converting marks a cell that another thread is converting from a
    data node into a child array node.

    NOTE(review): this excerpt appears to have lines elided (some braces and
    declarations are missing); the comments document only the code visible here.
*/
template <typename T, typename Traits>
class multilevel_array
    typedef T value_type;                                   ///< Data node type
    typedef Traits traits;                                  ///< Traits class
    typedef typename Traits::node_allocator node_allocator; ///< Array node allocator
    typedef typename traits::memory_model memory_model;     ///< C++ memory ordering model
    typedef typename traits::back_off back_off; ///< Backoff strategy
    typedef typename traits::stat stat; ///< Internal statistics type
    typedef typename traits::hash_accessor hash_accessor;   ///< Functor extracting the hash from a data node
    static_assert(!std::is_same< hash_accessor, cds::opt::none >::value, "hash_accessor functor must be specified");

    /// Hash type deduced from \p hash_accessor return type
    typedef typename std::decay<
        typename std::remove_reference<
            decltype(hash_accessor()(std::declval<value_type>()))
    //typedef typename std::result_of< hash_accessor( std::declval<value_type>()) >::type hash_type;
    static_assert(!std::is_pointer<hash_type>::value, "hash_accessor should return a reference to hash value");

    // Hash comparator: the user-provided compare/less if specified,
    // bit-wise memcmp comparison otherwise
    typedef typename cds::opt::details::make_comparator_from<
        feldman_hashset::bitwise_compare< hash_type >
    >::type hash_comparator;

    // Splits the hash value into per-level indexes
    typedef feldman_hashset::details::hash_splitter< hash_type > hash_splitter;

    // Cell state is encoded in the low bits of the marked pointer:
    flag_array_converting = 1, ///< the cell is converting from data node to an array node
    flag_array_node = 2 ///< the cell is a pointer to an array node

    typedef cds::details::marked_ptr< value_type, 3 > node_ptr; ///< cell pointer with 2 flag bits
    typedef atomics::atomic< node_ptr > atomic_node_ptr;        ///< atomic cell type

    // Array node. Allocated as a raw block with (size - 1) extra cells
    // appended to the one-element \p nodes tail (flexible-array idiom).
    array_node * const pParent; ///< parent array node
    size_t const idxParent; ///< index in parent array node
    atomic_node_ptr nodes[1]; ///< node array
    array_node(array_node * parent, size_t idx)
    // array nodes are neither default-constructible nor copyable/movable
    array_node() = delete;
    array_node(array_node const&) = delete;
    array_node(array_node&&) = delete;

    // Allocator wrapper producing raw array_node blocks
    typedef cds::details::Allocator< array_node, node_allocator > cxx_array_node_allocator;

    /// Traversal cursor: current array node, slot index, and remaining hash bits
    struct traverse_data {
        hash_splitter splitter; ///< not-yet-consumed bits of the hash value
        traverse_data( hash_type const& hash, multilevel_array& arr )
        /// Restarts traversal from the head node
        void reset( multilevel_array& arr )
            nSlot = splitter.cut( arr.metrics().head_node_size_log ); // head-level index

    feldman_hashset::details::metrics const m_Metrics; ///< head/array node geometry

    /// Constructor: computes the metrics for this hash type and allocates the head node
    multilevel_array(size_t head_bits, size_t array_bits )
        : m_Metrics(feldman_hashset::details::metrics::make(head_bits, array_bits, sizeof(hash_type)))
        , m_Head( alloc_head_node())
    // Destructor: the head node itself is freed last
    free_array_node(m_Head);

    /// Moves \p pos one level down the tree; returns the cell reached
    node_ptr traverse(traverse_data& pos)
        node_ptr slot = pos.pArr->nodes[pos.nSlot].load(memory_model::memory_order_acquire);
        if (slot.bits() == flag_array_node) {
            // array node, go down the tree
            assert(slot.ptr() != nullptr);
            assert( !pos.splitter.eos());
            pos.nSlot = pos.splitter.cut( metrics().array_node_size_log ); // next-level index
            assert( pos.nSlot < metrics().array_node_size );
            pos.pArr = to_array(slot.ptr());
        else if (slot.bits() == flag_array_converting) {
            // the slot is converting to array node right now
            // another thread is mid-expansion: record the event and retry
            stats().onSlotConverting();
        // unmarked cell: either empty or a data node — hand it to the caller
        assert(slot.bits() == 0);

    /// Number of cells in the head node
    size_t head_size() const
        return m_Metrics.head_node_size;

    /// Number of cells in a non-head array node
    size_t array_node_size() const
        return m_Metrics.array_node_size;

    /// Collects per-level statistics into \p stat (relaxed loads — a best-effort snapshot)
    void get_level_statistics(std::vector< feldman_hashset::level_statistics>& stat) const
        gather_level_statistics(stat, 0, m_Head, head_size());

    /// Returns the head array node
    array_node * head() const

    /// Returns the computed geometry metrics
    feldman_hashset::details::metrics const& metrics() const

    // The function is not thread-safe. For use in dtor only
    // Destroy all array nodes
    destroy_array_nodes(m_Head, head_size());

    /// Recursively frees child array nodes of \p pArr.
    /// Only array nodes are freed here; data nodes are not disposed by this function.
    void destroy_array_nodes(array_node * pArr, size_t nSize)
        for (atomic_node_ptr * p = pArr->nodes, *pLast = p + nSize; p != pLast; ++p) {
            node_ptr slot = p->load(memory_model::memory_order_relaxed);
            if (slot.bits() == flag_array_node) {
                destroy_array_nodes(to_array(slot.ptr()), array_node_size());
                free_array_node(to_array(slot.ptr()));
                p->store(node_ptr(), memory_model::memory_order_relaxed);

    /// Allocates a raw array node of \p nSize cells and constructs the cell array in place
    static array_node * alloc_array_node(size_t nSize, array_node * pParent, size_t idxParent)
        array_node * pNode = cxx_array_node_allocator().NewBlock(sizeof(array_node) + sizeof(atomic_node_ptr) * (nSize - 1), pParent, idxParent);
        new (pNode->nodes) atomic_node_ptr[nSize]; // placement-new over the flexible tail

    /// Allocates the head node (it has no parent)
    array_node * alloc_head_node() const
        return alloc_array_node(head_size(), nullptr, 0);

    /// Allocates a child array node under \p pParent at \p idxParent
    array_node * alloc_array_node(array_node * pParent, size_t idxParent) const
        return alloc_array_node(array_node_size(), pParent, idxParent);

    /// Frees a node allocated by \p alloc_array_node()
    static void free_array_node(array_node * parr)
        cxx_array_node_allocator().Delete(parr);

    // Pointer-punning helper between data-node and array-node pointers
    // (presumably a union — its declaration is elided in this excerpt)
    converter(value_type * p)
    converter(array_node * p)

    static array_node * to_array(value_type * p)
        return converter(p).pArr;
    static value_type * to_node(array_node * p)
        return converter(p).pData;

    /// Walks the tree accumulating per-level cell statistics (relaxed loads)
    void gather_level_statistics(std::vector<feldman_hashset::level_statistics>& stat, size_t nLevel, array_node * pArr, size_t nSize) const
        if (stat.size() <= nLevel) {
            stat.resize(nLevel + 1);
            stat[nLevel].node_capacity = nSize;
        ++stat[nLevel].array_node_count;
        for (atomic_node_ptr * p = pArr->nodes, *pLast = p + nSize; p != pLast; ++p) {
            node_ptr slot = p->load(memory_model::memory_order_relaxed);
            ++stat[nLevel].array_cell_count;
            if (slot.bits() == flag_array_node)
                gather_level_statistics(stat, nLevel + 1, to_array(slot.ptr()), array_node_size());
            ++stat[nLevel].data_cell_count;
            ++stat[nLevel].empty_cell_count;

    /// Expands the slot at \p pos: replaces a data node by a new child array node
    bool expand_slot( traverse_data& pos, node_ptr current)
        return expand_slot( pos.pArr, pos.nSlot, current, pos.splitter.bit_offset());

    // Expansion worker. Two-phase CAS protocol:
    //   1) CAS the slot from the plain data pointer to "converting";
    //   2) move the data node into the new child array, then CAS the slot
    //      from "converting" to the child array-node pointer.
    bool expand_slot(array_node * pParent, size_t idxParent, node_ptr current, size_t nOffset)
        assert(current.bits() == 0);
        assert(current.ptr());
        array_node * pArr = alloc_array_node(pParent, idxParent);
        node_ptr cur(current.ptr());
        atomic_node_ptr& slot = pParent->nodes[idxParent];
        // Phase 1: if the slot changed under us, roll back and free the new node
        if (!slot.compare_exchange_strong(cur, cur | flag_array_converting, memory_model::memory_order_release, atomics::memory_order_relaxed))
            stats().onExpandNodeFailed();
            free_array_node(pArr);
        // Place the existing data node into the child array at the index given
        // by the next hash chunk after nOffset bits
        size_t idx = hash_splitter( hash_accessor()(*current.ptr()), nOffset ).cut( m_Metrics.array_node_size_log );
        pArr->nodes[idx].store(current, memory_model::memory_order_release);
        // Phase 2: publish the child array node in the parent slot
        cur = cur | flag_array_converting;
        slot.compare_exchange_strong(cur, node_ptr(to_node(pArr), flag_array_node), memory_model::memory_order_release, atomics::memory_order_relaxed)
        stats().onExpandNodeSuccess();
        stats().onArrayNodeCreated();
616 } // namespace feldman_hashset
// Forward declaration
// (GC is presumably the garbage-collector policy; the per-GC definitions
// live in other headers — TODO confirm)
template < class GC, typename T, class Traits = feldman_hashset::traits >
class FeldmanHashSet;
624 }} // namespace cds::intrusive
626 #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_FELDMAN_HASHSET_BASE_H