2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_INTRUSIVE_FELDMAN_HASHSET_RCU_H
32 #define CDSLIB_INTRUSIVE_FELDMAN_HASHSET_RCU_H
34 #include <functional> // std::ref
35 #include <iterator> // std::iterator_traits
38 #include <cds/intrusive/details/feldman_hashset_base.h>
39 #include <cds/details/allocator.h>
40 #include <cds/urcu/details/check_deadlock.h>
41 #include <cds/urcu/exempt_ptr.h>
42 #include <cds/intrusive/details/raw_ptr_disposer.h>
45 namespace cds { namespace intrusive {
46 /// Intrusive hash set based on multi-level array, \ref cds_urcu_desc "RCU" specialization
47 /** @ingroup cds_intrusive_map
48 @anchor cds_intrusive_FeldmanHashSet_rcu
51 - [2013] Steven Feldman, Pierre LaBorde, Damian Dechev "Concurrent Multi-level Arrays:
52 Wait-free Extensible Hash Maps"
54 See algorithm short description @ref cds_intrusive_FeldmanHashSet_hp "here"
56 @note Two important things you should keep in mind when you're using \p %FeldmanHashSet:
57 - all keys must be fixed-size. It means that you cannot use \p std::string as a key for \p %FeldmanHashSet.
58 Instead, for the strings you should use well-known hashing algorithms like <a href="https://en.wikipedia.org/wiki/Secure_Hash_Algorithm">SHA1, SHA2</a>,
59 <a href="https://en.wikipedia.org/wiki/MurmurHash">MurmurHash</a>, <a href="https://en.wikipedia.org/wiki/CityHash">CityHash</a>
60 or its successor <a href="https://code.google.com/p/farmhash/">FarmHash</a> and so on, which
61 converts variable-length strings to fixed-length bit-strings, and use that hash as a key in \p %FeldmanHashSet.
62 - \p %FeldmanHashSet uses a perfect hashing. It means that if two different keys, for example, of type \p std::string,
63 have identical hash then you cannot insert both of those keys in the set. \p %FeldmanHashSet does not maintain the key,
64 it maintains its fixed-size hash value.
67 - \p RCU - one of \ref cds_urcu_gc "RCU type"
68 - \p T - a value type to be stored in the set
69 - \p Traits - type traits, the structure based on \p feldman_hashset::traits or result of \p feldman_hashset::make_traits metafunction.
70 \p Traits is the mandatory argument because it has one mandatory type - an @ref feldman_hashset::traits::hash_accessor "accessor"
71 to hash value of \p T. The set algorithm does not calculate that hash value.
73 @note Before including <tt><cds/intrusive/feldman_hashset_rcu.h></tt> you should include appropriate RCU header file,
74 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
76 The set supports @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional thread-safe iterators"
78 with some restrictions.
// --- RCU specialization of FeldmanHashSet: wait-free multi-level hash array ---
// Derives (protected) from multilevel_array<T, Traits>, which owns the tree of
// array nodes; this class adds the RCU-aware insert/erase/extract/find API.
// NOTE(review): this chunk is a sampled/truncated view of the file — the full
// template<...> parameter list, access specifiers and many lines are missing.
83 #ifdef CDS_DOXYGEN_INVOKED
84 class Traits = feldman_hashset::traits
89 class FeldmanHashSet< cds::urcu::gc< RCU >, T, Traits >: protected feldman_hashset::multilevel_array<T, Traits>
92 typedef feldman_hashset::multilevel_array<T, Traits> base_class;
96 typedef cds::urcu::gc< RCU > gc; ///< RCU garbage collector
97 typedef T value_type; ///< type of value stored in the set
98 typedef Traits traits; ///< Traits template parameter
100 typedef typename traits::hash_accessor hash_accessor; ///< Hash accessor functor
101 typedef typename base_class::hash_type hash_type; ///< Hash type deduced from \p hash_accessor return type
102 typedef typename traits::disposer disposer; ///< data node disposer
103 typedef typename base_class::hash_comparator hash_comparator; ///< hash compare functor based on \p traits::compare and \p traits::less options
105 typedef typename traits::item_counter item_counter; ///< Item counter type
106 typedef typename traits::node_allocator node_allocator; ///< Array node allocator
107 typedef typename traits::memory_model memory_model; ///< Memory model
108 typedef typename traits::back_off back_off; ///< Backoff strategy
109 typedef typename traits::stat stat; ///< Internal statistics type
110 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
111 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
112 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking
114 using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, disposer, void >; ///< pointer to extracted node
116 /// The size of hash_type in bytes, see \p feldman_hashset::traits::hash_size for explanation
117 static CDS_CONSTEXPR size_t const c_hash_size = base_class::c_hash_size;
120 typedef feldman_hashset::level_statistics level_statistics;
// Internal types and helpers re-exported from the multilevel_array base.
125 typedef typename base_class::node_ptr node_ptr;
126 typedef typename base_class::atomic_node_ptr atomic_node_ptr;
127 typedef typename base_class::array_node array_node;
128 typedef typename base_class::traverse_data traverse_data;
130 using base_class::to_array;
131 using base_class::to_node;
132 using base_class::stats;
133 using base_class::head;
134 using base_class::metrics;
// Combines the RCU type with traits::rcu_check_deadlock; check() is invoked
// before each operation that may call synchronize() (unlink/erase/extract).
136 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy;
// Item count; accuracy depends on the traits::item_counter type chosen.
141 item_counter m_ItemCounter; ///< Item counter
145 /// Creates empty set
147 @param head_bits: 2<sup>head_bits</sup> specifies the size of head array, minimum is 4.
148 @param array_bits: 2<sup>array_bits</sup> specifies the size of array node, minimum is 2.
150 Equation for \p head_bits and \p array_bits:
152 sizeof(hash_type) * 8 == head_bits + N * array_bits
154 where \p N is multi-level array depth.
// Constructs an empty set. head_bits/array_bits are log2 sizes: the head array
// has 2^head_bits slots (default 256), each array node 2^array_bits (default 16).
// Sizing is delegated entirely to the multilevel_array base.
156 FeldmanHashSet(size_t head_bits = 8, size_t array_bits = 4)
157 : base_class(head_bits, array_bits)
160 /// Destructs the set and frees all data
168 The function inserts \p val in the set if it does not contain
169 an item with that hash.
171 Returns \p true if \p val is placed into the set, \p false otherwise.
173 The function locks RCU internally.
// Inserts \p val if no item with the same hash exists; thin wrapper that
// delegates to the functor overload with a no-op functor.
175 bool insert( value_type& val )
177 return insert( val, [](value_type&) {} );
182 This function is intended for derived non-intrusive containers.
184 The function allows to split creating of new item into two part:
185 - create item with key only
186 - insert new item into the set
187 - if inserting is success, calls \p f functor to initialize \p val.
189 The functor signature is:
191 void func( value_type& val );
193 where \p val is the item inserted.
195 The user-defined functor is called only if the inserting is success.
197 The function locks RCU internally.
198 @warning See \ref cds_intrusive_item_creating "insert item troubleshooting".
// Inserts \p val; on success calls f(val) (intended for derived non-intrusive
// containers to finish item initialization).
// NOTE(review): the body is truncated in this chunk — the RCU lock scope, the
// retry loop, the `cmp` comparator declaration, `pNull`, returns and several
// braces are not visible. Comments below describe only the visible lines.
200 template <typename Func>
201 bool insert( value_type& val, Func f )
203 hash_type const& hash = hash_accessor()( val );
204 traverse_data pos( hash, *this );
// Walk the multi-level array to the slot for this hash.
210 node_ptr slot = base_class::traverse( pos );
211 assert(slot.bits() == 0);
// Re-check the slot still holds the value traverse() saw.
213 if ( pos.pArr->nodes[pos.nSlot].load(memory_model::memory_order_acquire) == slot) {
215 if ( cmp( hash, hash_accessor()(*slot.ptr())) == 0 ) {
216 // the item with that hash value already exists
217 stats().onInsertFailed();
// Different hash occupies the slot: split it into a child array node and retry.
221 // the slot must be expanded
222 base_class::expand_slot( pos, slot );
225 // the slot is empty, try to insert data node
// CAS empty -> &val publishes the node (release on success).
227 if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong(pNull, node_ptr(&val), memory_model::memory_order_release, atomics::memory_order_relaxed))
229 // the new data node has been inserted
232 stats().onInsertSuccess();
233 stats().height( pos.nHeight );
237 // insert failed - slot has been changed by another thread
239 stats().onInsertRetry();
// Slot changed between traverse() and the re-check: retry from traversal.
243 stats().onSlotChanged();
249 Performs inserting or updating the item with hash value equal to \p val.
250 - If hash value is found then existing item is replaced with \p val, old item is disposed
251 with \p Traits::disposer. Note that the disposer is called by \p GC asynchronously.
252 The function returns <tt> std::pair<true, false> </tt>
253 - If hash value is not found and \p bInsert is \p true then \p val is inserted,
254 the function returns <tt> std::pair<true, true> </tt>
255 - If hash value is not found and \p bInsert is \p false then the set is unchanged,
256 the function returns <tt> std::pair<false, false> </tt>
258 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successful
259 (i.e. the item has been inserted or updated),
260 \p second is \p true if new item has been added or \p false if the set contains that hash.
262 The function locks RCU internally.
// Insert-or-replace by hash; delegates to do_update() with a no-op functor.
// Returns {success, inserted-as-new}; see do_update() for the full contract.
264 std::pair<bool, bool> update( value_type& val, bool bInsert = true )
266 return do_update(val, [](value_type&, value_type *) {}, bInsert );
269 /// Unlinks the item \p val from the set
271 The function searches the item \p val in the set and unlink it
272 if it is found and its address is equal to <tt>&val</tt>.
274 The function returns \p true if success and \p false otherwise.
276 RCU should not be locked. The function locks RCU internally.
// Unlinks \p val only if the stored item is the very same object (address
// compare in the predicate). RCU must NOT be held by the caller.
// NOTE(review): truncated here — the RCU lock scope around do_erase, the
// declaration of `p` and the return are not visible in this chunk.
278 bool unlink( value_type const& val )
280 check_deadlock_policy::check();
// Match by identity, not just by hash.
282 auto pred = [&val](value_type const& item) -> bool { return &item == &val; };
286 p = do_erase( hash_accessor()( val ), std::ref( pred ));
// Defer disposal of the unlinked node until a grace period elapses.
289 gc::template retire_ptr<disposer>( p );
295 /// Deletes the item from the set
297 The function searches \p hash in the set,
298 unlinks the item found, and returns \p true.
299 If that item is not found the function returns \p false.
301 The \ref disposer specified in \p Traits is called by garbage collector \p GC asynchronously.
303 RCU should not be locked. The function locks RCU internally.
// Erases the item with the given hash; delegates to the functor overload
// with a no-op functor.
305 bool erase( hash_type const& hash )
307 return erase(hash, [](value_type const&) {} );
310 /// Deletes the item from the set
312 The function searches \p hash in the set,
313 call \p f functor with item found, and unlinks it from the set.
314 The \ref disposer specified in \p Traits is called
315 by garbage collector \p GC asynchronously.
317 The \p Func interface is
320 void operator()( value_type& item );
324 If \p hash is not found the function returns \p false.
326 RCU should not be locked. The function locks RCU internally.
// Erases the item with \p hash, calling f(item) before retiring it.
// NOTE(review): truncated — the RCU lock scope, declaration of `p`, the call
// to `f`, and returns are not visible in this chunk.
328 template <typename Func>
329 bool erase( hash_type const& hash, Func f )
331 check_deadlock_policy::check();
// Unconditional predicate: erase whatever item matches the hash.
337 p = do_erase( hash, []( value_type const&) -> bool { return true; } );
// NOTE(review): stale comment below — this is the RCU specialization; `p` is
// protected by the RCU critical section, not by a hazard pointer.
340 // p is guarded by HP
343 gc::template retire_ptr<disposer>(p);
349 /// Extracts the item with specified \p hash
351 The function searches \p hash in the set,
352 unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
353 If the item with hash equal to \p hash is not found the function returns an empty \p exempt_ptr.
355 RCU \p synchronize method can be called. RCU should NOT be locked.
356 The function does not call the disposer for the item found.
357 The disposer will be implicitly invoked when the returned object is destroyed or when
358 its \p release() member function is called.
361 typedef cds::intrusive::FeldmanHashSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > set_type;
365 typename set_type::exempt_ptr ep( theSet.extract( 5 ));
370 // Dispose returned item.
// Unlinks the item with \p hash and hands ownership to the caller via
// exempt_ptr; the disposer runs when the exempt_ptr is released/destroyed.
// NOTE(review): the RCU lock scope around do_erase is not visible in this chunk.
375 exempt_ptr extract( hash_type const& hash )
377 check_deadlock_policy::check();
382 p = do_erase( hash, []( value_type const&) -> bool {return true;} );
// An empty exempt_ptr is returned when nothing was erased (p == nullptr).
384 return exempt_ptr( p );
387 /// Finds an item by its \p hash
389 The function searches the item by \p hash and calls the functor \p f for item found.
390 The interface of \p Func functor is:
393 void operator()( value_type& item );
396 where \p item is the item found.
398 The functor may change non-key fields of \p item. Note that the functor is only guarantee
399 that \p item cannot be disposed during the functor is executing.
400 The functor does not serialize simultaneous access to the set's \p item. If such access is
401 possible you must provide your own synchronization schema on item level to prevent unsafe item modifications.
403 The function returns \p true if \p hash is found, \p false otherwise.
405 The function applies RCU lock internally.
// Finds the item with \p hash and calls f(item) under the RCU lock.
// NOTE(review): truncated — the rcu_lock scope, the call to `f(*p)` and the
// return are not visible in this chunk.
407 template <typename Func>
408 bool find( hash_type const& hash, Func f )
412 value_type * p = search( hash );
420 /// Checks whether the set contains \p hash
422 The function searches the item by its \p hash
423 and returns \p true if it is found, or \p false otherwise.
425 The function applies RCU lock internally.
// Membership test: delegates to find() with a no-op functor.
427 bool contains( hash_type const& hash )
429 return find( hash, [](value_type&) {} );
432 /// Finds an item by its \p hash and returns the item found
434 The function searches the item by its \p hash
435 and returns the pointer to the item found.
436 If \p hash is not found the function returns \p nullptr.
438 RCU should be locked before the function invocation.
439 Returned pointer is valid only while RCU is locked.
443 typedef cds::intrusive::FeldmanHashSet< your_template_params > my_set;
450 foo * p = theSet.get( 5 );
// Returns a raw pointer to the item with \p hash, or nullptr if absent.
// Precondition (asserted): the caller already holds the RCU lock; the
// pointer is valid only while that lock is held.
458 value_type * get( hash_type const& hash )
460 assert( gc::is_locked());
461 return search( hash );
464 /// Clears the set (non-atomic)
466 The function unlink all data node from the set.
467 The function is not atomic but is thread-safe.
468 After \p %clear() the set may not be empty because another threads may insert items.
470 For each item the \p disposer is called after unlinking.
// Body of clear(): recursively unlinks every data node starting at the head
// array; per-item disposal is deferred via RCU inside clear_array().
// NOTE(review): the signatures of clear(), empty() and size() are not visible
// in this truncated chunk — only their documentation and body fragments are.
474 clear_array( head(), head_size());
477 /// Checks if the set is empty
479 Emptiness is checked by item counting: if item count is zero then the set is empty.
480 Thus, the correct item counting feature is an important part of the set implementation.
487 /// Returns item count in the set
490 return m_ItemCounter;
493 /// Returns const reference to internal statistics
494 stat const& statistics() const
499 /// Returns the size of head node
500 using base_class::head_size;
502 /// Returns the size of the array node
503 using base_class::array_node_size;
505 /// Collects tree level statistics into \p stat
507 The function traverses the set and collects statistics for each level of the tree
508 into \p feldman_hashset::level_statistics struct. The element of \p stat[i]
509 represents statistics for level \p i, level 0 is head array.
510 The function is thread-safe and may be called in multi-threaded environment.
512 Result can be useful for estimating efficiency of hash functor you use.
// Collects per-level tree statistics (stat[0] = head array); simply forwards
// to the multilevel_array base implementation.
514 void get_level_statistics(std::vector<feldman_hashset::level_statistics>& stat) const
516 base_class::get_level_statistics(stat);
// --- iterator_base: shared machinery for (reverse) bidirectional iterators ---
// Holds the current array node, the slot index inside it, a cached value
// pointer, and the owning set. Equality compares (node, index, set) only.
// NOTE(review): this chunk is truncated — the `class iterator_base` header,
// constructor bodies, the forward()/backward() signatures and their loop
// constructs are not visible; comments mark where each part begins.
523 friend class FeldmanHashSet;
526 array_node * m_pNode; ///< current array node
527 size_t m_idx; ///< current position in m_pNode
528 value_type * m_pValue; ///< current value
529 FeldmanHashSet const* m_set; ///< Hash set
532 iterator_base() CDS_NOEXCEPT
539 iterator_base(iterator_base const& rhs) CDS_NOEXCEPT
540 : m_pNode(rhs.m_pNode)
542 , m_pValue(rhs.m_pValue)
546 iterator_base& operator=(iterator_base const& rhs) CDS_NOEXCEPT
548 m_pNode = rhs.m_pNode;
550 m_pValue = rhs.m_pValue;
555 iterator_base& operator++()
561 iterator_base& operator--()
// Iterators are equal iff they reference the same slot of the same set;
// deliberately does NOT compare m_pValue (the cell's content may change).
567 bool operator ==(iterator_base const& rhs) const CDS_NOEXCEPT
569 return m_pNode == rhs.m_pNode && m_idx == rhs.m_idx && m_set == rhs.m_set;
572 bool operator !=(iterator_base const& rhs) const CDS_NOEXCEPT
574 return !(*this == rhs);
// Protected ctors used by init_begin()/init_end() and friends; the extra
// bool parameter distinguishes the "no initial seek" variant.
578 iterator_base(FeldmanHashSet const& set, array_node * pNode, size_t idx, bool)
585 iterator_base(FeldmanHashSet const& set, array_node * pNode, size_t idx)
594 value_type * pointer() const CDS_NOEXCEPT
// --- forward(): advance to the next occupied slot (entry line not visible) ---
601 assert(m_set != nullptr);
602 assert(m_pNode != nullptr);
604 size_t const arrayNodeSize = m_set->array_node_size();
605 size_t const headSize = m_set->head_size();
606 array_node * pNode = m_pNode;
607 size_t idx = m_idx + 1;
// Only the head array has no parent; it may differ in size from inner nodes.
608 size_t nodeSize = m_pNode->pParent ? arrayNodeSize : headSize;
611 if (idx < nodeSize) {
612 node_ptr slot = pNode->nodes[idx].load(memory_model::memory_order_acquire);
613 if (slot.bits() == base_class::flag_array_node ) {
614 // array node, go down the tree
615 assert(slot.ptr() != nullptr);
616 pNode = to_array(slot.ptr());
618 nodeSize = arrayNodeSize;
620 else if (slot.bits() == base_class::flag_array_converting ) {
621 // the slot is converting to array node right now - skip the node
629 m_pValue = slot.ptr();
// End of current node: pop back up to the parent and continue after our slot.
637 if (pNode->pParent) {
638 idx = pNode->idxParent + 1;
639 pNode = pNode->pParent;
640 nodeSize = pNode->pParent ? arrayNodeSize : headSize;
// Reached past the head array: this is the end() position.
644 assert(pNode == m_set->head());
645 assert(idx == headSize);
// --- backward(): mirror of forward(), scanning toward lower indices ---
657 assert(m_set != nullptr);
658 assert(m_pNode != nullptr);
660 size_t const arrayNodeSize = m_set->array_node_size();
661 size_t const headSize = m_set->head_size();
// endIdx is size_t(-1): the one-before-begin sentinel index.
662 size_t const endIdx = size_t(0) - 1;
664 array_node * pNode = m_pNode;
665 size_t idx = m_idx - 1;
666 size_t nodeSize = m_pNode->pParent ? arrayNodeSize : headSize;
670 node_ptr slot = pNode->nodes[idx].load(memory_model::memory_order_acquire);
671 if (slot.bits() == base_class::flag_array_node ) {
672 // array node, go down the tree
673 assert(slot.ptr() != nullptr);
674 pNode = to_array(slot.ptr());
675 nodeSize = arrayNodeSize;
678 else if (slot.bits() == base_class::flag_array_converting ) {
679 // the slot is converting to array node right now - skip the node
687 m_pValue = slot.ptr();
695 if (pNode->pParent) {
696 idx = pNode->idxParent - 1;
697 pNode = pNode->pParent;
698 nodeSize = pNode->pParent ? arrayNodeSize : headSize;
702 assert(pNode == m_set->head());
703 assert(idx == endIdx);
// Factories used by derived containers to build iterators of any flavor.
// begin/rend use index size_t(-1) (one-before-first); end/rbegin use
// head_size() (one-past-last). The trailing bool selects "no initial seek".
714 template <class Iterator>
715 Iterator init_begin() const
717 return Iterator(*this, head(), size_t(0) - 1);
720 template <class Iterator>
721 Iterator init_end() const
723 return Iterator(*this, head(), head_size(), false);
726 template <class Iterator>
727 Iterator init_rbegin() const
729 return Iterator(*this, head(), head_size());
732 template <class Iterator>
733 Iterator init_rend() const
735 return Iterator(*this, head(), size_t(0) - 1, false);
738 /// Bidirectional iterator class
// Thin façade over iterator_base: forwards ++/--/==/!= and exposes const or
// mutable access to the value depending on IsConst.
// NOTE(review): truncated chunk — member-function bodies are missing their
// braces/returns (e.g. operator* visibly lacks its return and null check).
739 template <bool IsConst>
740 class bidirectional_iterator : protected iterator_base
742 friend class FeldmanHashSet;
745 static CDS_CONSTEXPR bool const c_bConstantIterator = IsConst;
748 typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer
749 typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference
752 bidirectional_iterator() CDS_NOEXCEPT
755 bidirectional_iterator(bidirectional_iterator const& rhs) CDS_NOEXCEPT
759 bidirectional_iterator& operator=(bidirectional_iterator const& rhs) CDS_NOEXCEPT
761 iterator_base::operator=(rhs);
765 bidirectional_iterator& operator++()
767 iterator_base::operator++();
771 bidirectional_iterator& operator--()
773 iterator_base::operator--();
777 value_ptr operator ->() const CDS_NOEXCEPT
779 return iterator_base::pointer();
782 value_ref operator *() const CDS_NOEXCEPT
784 value_ptr p = iterator_base::pointer();
// Cross-constness comparison: iterator may be compared with const_iterator.
789 template <bool IsConst2>
790 bool operator ==(bidirectional_iterator<IsConst2> const& rhs) const CDS_NOEXCEPT
792 return iterator_base::operator==(rhs);
795 template <bool IsConst2>
796 bool operator !=(bidirectional_iterator<IsConst2> const& rhs) const CDS_NOEXCEPT
798 return !(*this == rhs);
// Private ctors invoked by FeldmanHashSet's begin()/end() family.
802 bidirectional_iterator(FeldmanHashSet& set, array_node * pNode, size_t idx, bool)
803 : iterator_base(set, pNode, idx, false)
806 bidirectional_iterator(FeldmanHashSet& set, array_node * pNode, size_t idx)
807 : iterator_base(set, pNode, idx)
811 /// Reverse bidirectional iterator
// Reverse counterpart: operator++ moves backward and operator-- forward by
// swapping the iterator_base calls.
// NOTE(review): unlike the forward iterator (see operator!= there), the
// comparison operators below are missing CDS_NOEXCEPT and operator!= is
// missing `const` — looks like an upstream inconsistency; confirm before use
// with const iterators.
812 template <bool IsConst>
813 class reverse_bidirectional_iterator : public iterator_base
815 friend class FeldmanHashSet;
818 typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer
819 typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference
822 reverse_bidirectional_iterator() CDS_NOEXCEPT
826 reverse_bidirectional_iterator(reverse_bidirectional_iterator const& rhs) CDS_NOEXCEPT
830 reverse_bidirectional_iterator& operator=(reverse_bidirectional_iterator const& rhs) CDS_NOEXCEPT
832 iterator_base::operator=(rhs);
// Reversed direction: ++ walks backward through the array.
836 reverse_bidirectional_iterator& operator++()
838 iterator_base::operator--();
842 reverse_bidirectional_iterator& operator--()
844 iterator_base::operator++();
848 value_ptr operator ->() const CDS_NOEXCEPT
850 return iterator_base::pointer();
853 value_ref operator *() const CDS_NOEXCEPT
855 value_ptr p = iterator_base::pointer();
860 template <bool IsConst2>
861 bool operator ==(reverse_bidirectional_iterator<IsConst2> const& rhs) const
863 return iterator_base::operator==(rhs);
866 template <bool IsConst2>
867 bool operator !=(reverse_bidirectional_iterator<IsConst2> const& rhs)
869 return !(*this == rhs);
// Private ctors; the index-only variant seeks to the first occupied slot
// via backward() after construction.
873 reverse_bidirectional_iterator(FeldmanHashSet& set, array_node * pNode, size_t idx, bool)
874 : iterator_base(set, pNode, idx, false)
877 reverse_bidirectional_iterator(FeldmanHashSet& set, array_node * pNode, size_t idx)
878 : iterator_base(set, pNode, idx, false)
880 iterator_base::backward();
// Public iterator aliases. The CDS_DOXYGEN_INVOKED branch shows opaque names
// to the documentation generator; real builds use the class templates above.
// NOTE(review): the `#else` / `#endif` lines of this conditional are not
// visible in this truncated chunk.
886 #ifdef CDS_DOXYGEN_INVOKED
887 typedef implementation_defined iterator; ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional iterator" type
888 typedef implementation_defined const_iterator; ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional const iterator" type
889 typedef implementation_defined reverse_iterator; ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional reverse iterator" type
890 typedef implementation_defined const_reverse_iterator; ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional reverse const iterator" type
892 typedef bidirectional_iterator<false> iterator;
893 typedef bidirectional_iterator<true> const_iterator;
894 typedef reverse_bidirectional_iterator<false> reverse_iterator;
895 typedef reverse_bidirectional_iterator<true> const_reverse_iterator;
898 ///@name Thread-safe iterators
899 /** @anchor cds_intrusive_FeldmanHashSet_rcu_iterators
900 The set supports thread-safe iterators: you may iterate over the set in multi-threaded environment
901 under explicit RCU lock.
903 RCU lock requirement means that inserting or searching is allowed for iterating thread
904 but you must not erase the items from the set because erasing under RCU lock can lead
905 to a deadlock. However, another thread can call \p erase() safely while your thread is iterating.
907 A typical example is:
912 uint32_t payload; // only for example
914 struct set_traits: cds::intrusive::feldman_hashset::traits
916 struct hash_accessor {
917 uint32_t operator()( foo const& src ) const
924 typedef cds::urcu::gc< cds::urcu::general_buffered<>> rcu;
925 typedef cds::intrusive::FeldmanHashSet< rcu, foo, set_traits > set_type;
931 // iterate over the set
934 typename set_type::rcu_lock l; // scoped RCU lock
937 for ( auto i = s.begin(); i != s.end(); ++i ) {
938 // deal with i. Remember, erasing is prohibited here!
941 } // at this point RCU lock is released
944 Each iterator object supports the common interface:
945 - dereference operators:
947 value_type [const] * operator ->() noexcept
948 value_type [const] & operator *() noexcept
950 - pre-increment and pre-decrement. Post-operators are not supported
951 - equality operators <tt>==</tt> and <tt>!=</tt>.
952 Iterators are equal iff they point to the same cell of the same array node.
953 Note that for two iterators \p it1 and \p it2 the condition <tt> it1 == it2 </tt>
954 does not entail <tt> &(*it1) == &(*it2) </tt>: welcome to concurrent containers
956 @note It is possible the item can be iterated more than once, for example, if an iterator points to the item
957 in an array node that is being split.
// Iterator accessors. begin-family seeds at index size_t(-1) and seeks to the
// first occupied slot; end-family uses head_size() with the "no seek" flag.
// NOTE(review): cbegin()/cend()/crbegin()/crend() are declared non-const here
// (no trailing const) — a const set can only use begin()/end()/rbegin()/rend();
// confirm against upstream whether that is intentional.
961 /// Returns an iterator to the beginning of the set
964 return iterator(*this, head(), size_t(0) - 1);
967 /// Returns an const iterator to the beginning of the set
968 const_iterator begin() const
970 return const_iterator(*this, head(), size_t(0) - 1);
973 /// Returns an const iterator to the beginning of the set
974 const_iterator cbegin()
976 return const_iterator(*this, head(), size_t(0) - 1);
979 /// Returns an iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior.
982 return iterator(*this, head(), head_size(), false);
985 /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior.
986 const_iterator end() const
988 return const_iterator(*this, head(), head_size(), false);
991 /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior.
992 const_iterator cend()
994 return const_iterator(*this, head(), head_size(), false);
997 /// Returns a reverse iterator to the first element of the reversed set
998 reverse_iterator rbegin()
1000 return reverse_iterator(*this, head(), head_size());
1003 /// Returns a const reverse iterator to the first element of the reversed set
1004 const_reverse_iterator rbegin() const
1006 return const_reverse_iterator(*this, head(), head_size());
1009 /// Returns a const reverse iterator to the first element of the reversed set
1010 const_reverse_iterator crbegin()
1012 return const_reverse_iterator(*this, head(), head_size());
1015 /// Returns a reverse iterator to the element following the last element of the reversed set
1017 It corresponds to the element preceding the first element of the non-reversed container.
1018 This element acts as a placeholder, attempting to access it results in undefined behavior.
1020 reverse_iterator rend()
1022 return reverse_iterator(*this, head(), size_t(0) - 1, false);
1025 /// Returns a const reverse iterator to the element following the last element of the reversed set
1027 It corresponds to the element preceding the first element of the non-reversed container.
1028 This element acts as a placeholder, attempting to access it results in undefined behavior.
1030 const_reverse_iterator rend() const
1032 return const_reverse_iterator(*this, head(), size_t(0) - 1, false);
1035 /// Returns a const reverse iterator to the element following the last element of the reversed set
1037 It corresponds to the element preceding the first element of the non-reversed container.
1038 This element acts as a placeholder, attempting to access it results in undefined behavior.
1040 const_reverse_iterator crend()
1042 return const_reverse_iterator(*this, head(), size_t(0) - 1, false);
// Core of update(): insert-or-replace keyed by hash. On replace, f(new, old)
// is called and the old node is retired AFTER leaving the RCU critical section
// (see the update_existing_done label). Returns {success, inserted-as-new}.
// NOTE(review): truncated — the RCU lock scope, the retry loop, the `bInsert`
// check, and the declarations of `pNull`/`pOld` are not visible in this chunk.
1048 template <typename Func>
1049 std::pair<bool, bool> do_update(value_type& val, Func f, bool bInsert = true)
1051 hash_type const& hash = hash_accessor()(val);
1052 traverse_data pos( hash, *this );
1053 hash_comparator cmp;
1059 node_ptr slot = base_class::traverse( pos );
1060 assert(slot.bits() == 0);
// Re-validate the slot before acting on the traversal result.
1063 if ( pos.pArr->nodes[pos.nSlot].load(memory_model::memory_order_acquire) == slot) {
1065 if ( cmp( hash, hash_accessor()(*slot.ptr())) == 0 ) {
1066 // the item with that hash value already exists
1067 // Replace it with val
// Same object already stored: nothing to do, report "updated existing".
1068 if ( slot.ptr() == &val ) {
1069 stats().onUpdateExisting();
1070 return std::make_pair(true, false);
// CAS old -> new; the displaced node must be retired outside the RCU lock.
1073 if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong(slot, node_ptr(&val), memory_model::memory_order_release, atomics::memory_order_relaxed)) {
1074 // slot can be disposed
1075 f( val, slot.ptr());
1077 stats().onUpdateExisting();
1078 goto update_existing_done;
1081 stats().onUpdateRetry();
1085 // the slot must be expanded
1086 base_class::expand_slot( pos, slot );
1089 stats().onUpdateFailed();
1090 return std::make_pair(false, false);
1095 // the slot is empty, try to insert data node
1098 if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong(pNull, node_ptr(&val), memory_model::memory_order_release, atomics::memory_order_relaxed))
1100 // the new data node has been inserted
1103 stats().onUpdateNew();
1104 stats().height( pos.nHeight );
1105 return std::make_pair(true, true);
// Presumably the bInsert == false path — confirm once the full file is visible.
1109 stats().onUpdateFailed();
1110 return std::make_pair(false, false);
1113 // insert failed - slot has been changed by another thread
1115 stats().onUpdateRetry();
1119 stats().onSlotChanged();
1123 // retire_ptr must be called only outside of RCU lock
1124 update_existing_done:
1126 gc::template retire_ptr<disposer>(pOld);
1127 return std::make_pair(true, false);
// Core removal: finds the slot for \p hash, and if the stored item matches the
// hash AND satisfies \p pred, CASes the slot to nullptr. Returns the removed
// item for the caller to retire; precondition (asserted): RCU lock is held.
// NOTE(review): truncated — the retry loop and the return statements are not
// visible in this chunk.
1130 template <typename Predicate>
1131 value_type * do_erase( hash_type const& hash, Predicate pred)
1133 assert(gc::is_locked());
1134 traverse_data pos( hash, *this );
1135 hash_comparator cmp;
1138 node_ptr slot = base_class::traverse( pos );
1139 assert( slot.bits() == 0 );
// Re-validate the slot contents before attempting removal.
1141 if ( pos.pArr->nodes[pos.nSlot].load( memory_model::memory_order_acquire ) == slot ) {
1143 if ( cmp( hash, hash_accessor()(*slot.ptr())) == 0 && pred( *slot.ptr())) {
1144 // item found - replace it with nullptr
1145 if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong( slot, node_ptr( nullptr ), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
1147 stats().onEraseSuccess();
// CAS lost to a concurrent writer: retry the operation.
1151 stats().onEraseRetry();
1154 stats().onEraseFailed();
1158 // the slot is empty
1159 stats().onEraseFailed();
// Slot changed between traverse() and re-check: retry from traversal.
1164 stats().onSlotChanged();
// Lookup primitive shared by find()/get(): traverses to the slot for \p hash
// and compares hashes. Precondition (asserted): RCU lock is held.
// NOTE(review): truncated — the retry loop and return statements are not
// visible in this chunk.
1168 value_type * search(hash_type const& hash )
1170 assert( gc::is_locked());
1171 traverse_data pos( hash, *this );
1172 hash_comparator cmp;
1175 node_ptr slot = base_class::traverse( pos );
1176 assert( slot.bits() == 0 );
1178 if ( pos.pArr->nodes[pos.nSlot].load( memory_model::memory_order_acquire ) != slot ) {
1179 // slot value has been changed - retry
1180 stats().onSlotChanged();
// Non-null slot with an equal hash is a hit.
1183 else if ( slot.ptr() && cmp( hash, hash_accessor()(*slot.ptr())) == 0 ) {
1185 stats().onFindSuccess();
1188 stats().onFindFailed();
// Recursive worker for clear(): walks every slot of \p pArrNode, descends into
// child array nodes, waits out slots being converted, and CASes data slots to
// empty, retiring each removed item via the disposer (deferred by RCU).
// NOTE(review): truncated — the back-off object and several braces/loop
// constructs are not visible in this chunk.
1197 void clear_array(array_node * pArrNode, size_t nSize)
1202 for (atomic_node_ptr * pArr = pArrNode->nodes, *pLast = pArr + nSize; pArr != pLast; ++pArr) {
1204 node_ptr slot = pArr->load(memory_model::memory_order_acquire);
1205 if (slot.bits() == base_class::flag_array_node ) {
1206 // array node, go down the tree
1207 assert(slot.ptr() != nullptr);
1208 clear_array(to_array(slot.ptr()), array_node_size());
1211 else if (slot.bits() == base_class::flag_array_converting ) {
1212 // the slot is converting to array node right now
// Spin until the conversion finishes, then recurse into the new array node.
1213 while ((slot = pArr->load(memory_model::memory_order_acquire)).bits() == base_class::flag_array_converting ) {
1215 stats().onSlotConverting();
1219 assert(slot.ptr() != nullptr);
1220 assert(slot.bits() == base_class::flag_array_node );
1221 clear_array(to_array(slot.ptr()), array_node_size());
// Plain data slot: unlink it and defer disposal past the grace period.
1226 if (pArr->compare_exchange_strong(slot, node_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed)) {
1228 gc::template retire_ptr<disposer>(slot.ptr());
1230 stats().onEraseSuccess();
1241 }} // namespace cds::intrusive
1243 #endif // #ifndef CDSLIB_INTRUSIVE_FELDMAN_HASHSET_RCU_H