2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_INTRUSIVE_FELDMAN_HASHSET_RCU_H
32 #define CDSLIB_INTRUSIVE_FELDMAN_HASHSET_RCU_H
34 #include <functional> // std::ref
35 #include <iterator> // std::iterator_traits
38 #include <cds/intrusive/details/feldman_hashset_base.h>
39 #include <cds/details/allocator.h>
40 #include <cds/urcu/details/check_deadlock.h>
41 #include <cds/urcu/exempt_ptr.h>
42 #include <cds/intrusive/details/raw_ptr_disposer.h>
45 namespace cds { namespace intrusive {
46 /// Intrusive hash set based on multi-level array, \ref cds_urcu_desc "RCU" specialization
47 /** @ingroup cds_intrusive_map
48 @anchor cds_intrusive_FeldmanHashSet_rcu
51 - [2013] Steven Feldman, Pierre LaBorde, Damian Dechev "Concurrent Multi-level Arrays:
52 Wait-free Extensible Hash Maps"
54 See algorithm short description @ref cds_intrusive_FeldmanHashSet_hp "here"
56 @note Two important things you should keep in mind when you're using \p %FeldmanHashSet:
57 - all keys must be fixed-size. It means that you cannot use \p std::string as a key for \p %FeldmanHashSet.
58 Instead, for the strings you should use well-known hashing algorithms like <a href="https://en.wikipedia.org/wiki/Secure_Hash_Algorithm">SHA1, SHA2</a>,
59 <a href="https://en.wikipedia.org/wiki/MurmurHash">MurmurHash</a>, <a href="https://en.wikipedia.org/wiki/CityHash">CityHash</a>
60 or its successor <a href="https://code.google.com/p/farmhash/">FarmHash</a> and so on, which
61 converts variable-length strings to fixed-length bit-strings, and use that hash as a key in \p %FeldmanHashSet.
62 - \p %FeldmanHashSet uses a perfect hashing. It means that if two different keys, for example, of type \p std::string,
63 have identical hash then you cannot insert both that keys in the set. \p %FeldmanHashSet does not maintain the key,
64 it maintains its fixed-size hash value.
67 - \p RCU - one of \ref cds_urcu_gc "RCU type"
68 - \p T - a value type to be stored in the set
69 - \p Traits - type traits, the structure based on \p feldman_hashset::traits or result of \p feldman_hashset::make_traits metafunction.
70 \p Traits is the mandatory argument because it has one mandatory type - an @ref feldman_hashset::traits::hash_accessor "accessor"
71 to hash value of \p T. The set algorithm does not calculate that hash value.
73 @note Before including <tt><cds/intrusive/feldman_hashset_rcu.h></tt> you should include appropriate RCU header file,
74 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
76 The set supports @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional thread-safe iterators"
78 with some restrictions.
83 #ifdef CDS_DOXYGEN_INVOKED
84 class Traits = feldman_hashset::traits
89 class FeldmanHashSet< cds::urcu::gc< RCU >, T, Traits >: protected feldman_hashset::multilevel_array<T, Traits>
92 typedef feldman_hashset::multilevel_array<T, Traits> base_class;
96 typedef cds::urcu::gc< RCU > gc; ///< RCU garbage collector
97 typedef T value_type; ///< type of value stored in the set
98 typedef Traits traits; ///< Traits template parameter
100 typedef typename traits::hash_accessor hash_accessor; ///< Hash accessor functor
101 typedef typename base_class::hash_type hash_type; ///< Hash type deduced from \p hash_accessor return type
102 typedef typename traits::disposer disposer; ///< data node disposer
103 typedef typename base_class::hash_comparator hash_comparator; ///< hash compare functor based on \p traits::compare and \p traits::less options
105 typedef typename traits::item_counter item_counter; ///< Item counter type
106 typedef typename traits::node_allocator node_allocator; ///< Array node allocator
107 typedef typename traits::memory_model memory_model; ///< Memory model
108 typedef typename traits::back_off back_off; ///< Backoff strategy
109 typedef typename traits::stat stat; ///< Internal statistics type
110 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
111 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
112 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking
114 using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, disposer, void >; ///< pointer to extracted node
117 typedef feldman_hashset::level_statistics level_statistics;
122 typedef typename base_class::node_ptr node_ptr;
123 typedef typename base_class::atomic_node_ptr atomic_node_ptr;
124 typedef typename base_class::array_node array_node;
125 typedef typename base_class::traverse_data traverse_data;
127 using base_class::to_array;
128 using base_class::to_node;
129 using base_class::stats;
130 using base_class::head;
131 using base_class::metrics;
133 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy;
138 item_counter m_ItemCounter; ///< Item counter
142 /// Creates empty set
144 @param head_bits: 2<sup>head_bits</sup> specifies the size of head array, minimum is 4.
145 @param array_bits: 2<sup>array_bits</sup> specifies the size of array node, minimum is 2.
147 Equation for \p head_bits and \p array_bits:
149 sizeof(hash_type) * 8 == head_bits + N * array_bits
151 where \p N is multi-level array depth.
153 FeldmanHashSet(size_t head_bits = 8, size_t array_bits = 4)
154 : base_class(head_bits, array_bits)
157 /// Destructs the set and frees all data
165 The function inserts \p val in the set if it does not contain
166 an item with that hash.
168 Returns \p true if \p val is placed into the set, \p false otherwise.
170 The function locks RCU internally.
172 bool insert( value_type& val )
174 return insert( val, [](value_type&) {} );
179 This function is intended for derived non-intrusive containers.
181 The function allows to split creating of new item into two part:
182 - create item with key only
183 - insert new item into the set
184 - if inserting is success, calls \p f functor to initialize \p val.
186 The functor signature is:
188 void func( value_type& val );
190 where \p val is the item inserted.
192 The user-defined functor is called only if the inserting is success.
194 The function locks RCU internally.
195 @warning See \ref cds_intrusive_item_creating "insert item troubleshooting".
197 template <typename Func>
198 bool insert( value_type& val, Func f )
200 hash_type const& hash = hash_accessor()( val );
201 traverse_data pos( hash, *this );
207 node_ptr slot = base_class::traverse( pos );
208 assert(slot.bits() == 0);
210 if ( pos.pArr->nodes[pos.nSlot].load(memory_model::memory_order_acquire) == slot) {
212 if ( cmp( hash, hash_accessor()(*slot.ptr())) == 0 ) {
213 // the item with that hash value already exists
214 stats().onInsertFailed();
218 // the slot must be expanded
219 base_class::expand_slot( pos, slot );
222 // the slot is empty, try to insert data node
224 if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong(pNull, node_ptr(&val), memory_model::memory_order_release, atomics::memory_order_relaxed))
226 // the new data node has been inserted
229 stats().onInsertSuccess();
230 stats().height( pos.nHeight );
234 // insert failed - slot has been changed by another thread
236 stats().onInsertRetry();
240 stats().onSlotChanged();
246 Performs inserting or updating the item with hash value equal to \p val.
247 - If hash value is found then existing item is replaced with \p val, old item is disposed
248 with \p Traits::disposer. Note that the disposer is called by \p GC asynchronously.
249 The function returns <tt> std::pair<true, false> </tt>
250 - If hash value is not found and \p bInsert is \p true then \p val is inserted,
251 the function returns <tt> std::pair<true, true> </tt>
252 - If hash value is not found and \p bInsert is \p false then the set is unchanged,
253 the function returns <tt> std::pair<false, false> </tt>
255 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successful
256 (i.e. the item has been inserted or updated),
257 \p second is \p true if new item has been added or \p false if the set contains that hash.
259 The function locks RCU internally.
261 std::pair<bool, bool> update( value_type& val, bool bInsert = true )
263 return do_update(val, [](value_type&, value_type *) {}, bInsert );
266 /// Unlinks the item \p val from the set
268 The function searches the item \p val in the set and unlink it
269 if it is found and its address is equal to <tt>&val</tt>.
271 The function returns \p true if success and \p false otherwise.
273 RCU should not be locked. The function locks RCU internally.
275 bool unlink( value_type const& val )
277 check_deadlock_policy::check();
279 auto pred = [&val](value_type const& item) -> bool { return &item == &val; };
283 p = do_erase( hash_accessor()( val ), std::ref( pred ));
286 gc::template retire_ptr<disposer>( p );
292 /// Deletes the item from the set
294 The function searches \p hash in the set,
295 unlinks the item found, and returns \p true.
296 If that item is not found the function returns \p false.
298 The \ref disposer specified in \p Traits is called by garbage collector \p GC asynchronously.
300 RCU should not be locked. The function locks RCU internally.
302 bool erase( hash_type const& hash )
304 return erase(hash, [](value_type const&) {} );
307 /// Deletes the item from the set
309 The function searches \p hash in the set,
310 call \p f functor with item found, and unlinks it from the set.
311 The \ref disposer specified in \p Traits is called
312 by garbage collector \p GC asynchronously.
314 The \p Func interface is
317 void operator()( value_type& item );
321 If \p hash is not found the function returns \p false.
323 RCU should not be locked. The function locks RCU internally.
325 template <typename Func>
326 bool erase( hash_type const& hash, Func f )
328 check_deadlock_policy::check();
334 p = do_erase( hash, []( value_type const&) -> bool { return true; } );
337 // p is guarded by HP
340 gc::template retire_ptr<disposer>(p);
346 /// Extracts the item with specified \p hash
348 The function searches \p hash in the set,
349 unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
350 If the item with key equal to \p key is not found the function returns an empty \p exempt_ptr.
352 RCU \p synchronize method can be called. RCU should NOT be locked.
353 The function does not call the disposer for the item found.
354 The disposer will be implicitly invoked when the returned object is destroyed or when
355 its \p release() member function is called.
358 typedef cds::intrusive::FeldmanHashSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > set_type;
362 typename set_type::exempt_ptr ep( theSet.extract( 5 ));
367 // Dispose returned item.
372 exempt_ptr extract( hash_type const& hash )
374 check_deadlock_policy::check();
379 p = do_erase( hash, []( value_type const&) -> bool {return true;} );
381 return exempt_ptr( p );
384 /// Finds an item by it's \p hash
386 The function searches the item by \p hash and calls the functor \p f for item found.
387 The interface of \p Func functor is:
390 void operator()( value_type& item );
393 where \p item is the item found.
395 The functor may change non-key fields of \p item. Note that the functor is only guarantee
396 that \p item cannot be disposed during the functor is executing.
397 The functor does not serialize simultaneous access to the set's \p item. If such access is
398 possible you must provide your own synchronization schema on item level to prevent unsafe item modifications.
400 The function returns \p true if \p hash is found, \p false otherwise.
402 The function applies RCU lock internally.
404 template <typename Func>
405 bool find( hash_type const& hash, Func f )
409 value_type * p = search( hash );
417 /// Checks whether the set contains \p hash
419 The function searches the item by its \p hash
420 and returns \p true if it is found, or \p false otherwise.
422 The function applies RCU lock internally.
424 bool contains( hash_type const& hash )
426 return find( hash, [](value_type&) {} );
429 /// Finds an item by it's \p hash and returns the item found
431 The function searches the item by its \p hash
432 and returns the pointer to the item found.
433 If \p hash is not found the function returns \p nullptr.
435 RCU should be locked before the function invocation.
436 Returned pointer is valid only while RCU is locked.
440 typedef cds::intrusive::FeldmanHashSet< your_template_params > my_set;
447 foo * p = theSet.get( 5 );
455 value_type * get( hash_type const& hash )
457 assert( gc::is_locked());
458 return search( hash );
461 /// Clears the set (non-atomic)
463 The function unlink all data node from the set.
464 The function is not atomic but is thread-safe.
465 After \p %clear() the set may not be empty because another threads may insert items.
467 For each item the \p disposer is called after unlinking.
471 clear_array( head(), head_size());
474 /// Checks if the set is empty
476 Emptiness is checked by item counting: if item count is zero then the set is empty.
477 Thus, the correct item counting feature is an important part of the set implementation.
484 /// Returns item count in the set
487 return m_ItemCounter;
490 /// Returns const reference to internal statistics
491 stat const& statistics() const
496 /// Returns the size of head node
497 using base_class::head_size;
499 /// Returns the size of the array node
500 using base_class::array_node_size;
502 /// Collects tree level statistics into \p stat
504 The function traverses the set and collects statistics for each level of the tree
505 into \p feldman_hashset::level_statistics struct. The element of \p stat[i]
506 represents statistics for level \p i, level 0 is head array.
507 The function is thread-safe and may be called in multi-threaded environment.
509 Result can be useful for estimating efficiency of hash functor you use.
511 void get_level_statistics(std::vector<feldman_hashset::level_statistics>& stat) const
513 base_class::get_level_statistics(stat);
// NOTE(review): this region is a truncated fragment of the internal
// iterator_base class. Many lines are missing from this view (the class
// head, the 'forward()'/'backward()' signatures, loop headers and closing
// braces), so only comments are added here; the code is left byte-identical.
520 friend class FeldmanHashSet;
// Iterator position state: current array node, slot index within it,
// cached data pointer, and the owning set.
523 array_node * m_pNode; ///< current array node
524 size_t m_idx; ///< current position in m_pNode
525 value_type * m_pValue; ///< current value
526 FeldmanHashSet const* m_set; ///< Hash set
// Default ctor — presumably zero-initializes the position; body not visible here.
529 iterator_base() CDS_NOEXCEPT
// Copy ctor: member-wise copy of the position (m_idx initializer line is
// missing from this view).
536 iterator_base(iterator_base const& rhs) CDS_NOEXCEPT
537 : m_pNode(rhs.m_pNode)
539 , m_pValue(rhs.m_pValue)
// Copy assignment: member-wise copy (m_idx/m_set assignments not visible).
543 iterator_base& operator=(iterator_base const& rhs) CDS_NOEXCEPT
545 m_pNode = rhs.m_pNode;
547 m_pValue = rhs.m_pValue;
// Pre-increment/decrement — presumably delegate to forward()/backward() below.
552 iterator_base& operator++()
558 iterator_base& operator--()
// Iterators are equal iff they reference the same slot of the same array
// node of the same set; the cached value pointer is intentionally ignored.
564 bool operator ==(iterator_base const& rhs) const CDS_NOEXCEPT
566 return m_pNode == rhs.m_pNode && m_idx == rhs.m_idx && m_set == rhs.m_set;
569 bool operator !=(iterator_base const& rhs) const CDS_NOEXCEPT
571 return !(*this == rhs);
// Internal constructors: the bool-tagged one positions the iterator "as is"
// (used for end/rend); the other presumably advances to the first data node.
575 iterator_base(FeldmanHashSet const& set, array_node * pNode, size_t idx, bool)
582 iterator_base(FeldmanHashSet const& set, array_node * pNode, size_t idx)
// Returns the cached data pointer for dereference operators.
591 value_type * pointer() const CDS_NOEXCEPT
// ---- forward(): advance to the next data slot (signature line missing) ----
598 assert(m_set != nullptr);
599 assert(m_pNode != nullptr);
601 size_t const arrayNodeSize = m_set->array_node_size();
602 size_t const headSize = m_set->head_size();
603 array_node * pNode = m_pNode;
604 size_t idx = m_idx + 1;
// Only the head array has no parent; its size differs from inner nodes.
605 size_t nodeSize = m_pNode->pParent ? arrayNodeSize : headSize;
608 if (idx < nodeSize) {
609 node_ptr slot = pNode->nodes[idx].load(memory_model::memory_order_acquire);
610 if (slot.bits() == base_class::flag_array_node ) {
611 // array node, go down the tree
612 assert(slot.ptr() != nullptr);
613 pNode = to_array(slot.ptr());
615 nodeSize = arrayNodeSize;
617 else if (slot.bits() == base_class::flag_array_converting ) {
618 // the slot is converting to array node right now - skip the node
// Data slot found: cache it as the current value.
626 m_pValue = slot.ptr();
// Current node exhausted: climb to the parent and continue after our slot.
634 if (pNode->pParent) {
635 idx = pNode->idxParent + 1;
636 pNode = pNode->pParent;
637 nodeSize = pNode->pParent ? arrayNodeSize : headSize;
// Reached past the head array: this is the end() position.
641 assert(pNode == m_set->head());
642 assert(idx == headSize);
// ---- backward(): move to the previous data slot (signature line missing) ----
654 assert(m_set != nullptr);
655 assert(m_pNode != nullptr);
657 size_t const arrayNodeSize = m_set->array_node_size();
658 size_t const headSize = m_set->head_size();
// endIdx is size_t(-1): the one-before-begin sentinel index.
659 size_t const endIdx = size_t(0) - 1;
661 array_node * pNode = m_pNode;
662 size_t idx = m_idx - 1;
663 size_t nodeSize = m_pNode->pParent ? arrayNodeSize : headSize;
667 node_ptr slot = pNode->nodes[idx].load(memory_model::memory_order_acquire);
668 if (slot.bits() == base_class::flag_array_node ) {
669 // array node, go down the tree
670 assert(slot.ptr() != nullptr);
671 pNode = to_array(slot.ptr());
672 nodeSize = arrayNodeSize;
675 else if (slot.bits() == base_class::flag_array_converting ) {
676 // the slot is converting to array node right now - skip the node
684 m_pValue = slot.ptr();
// Climb to the parent and continue before our slot.
692 if (pNode->pParent) {
693 idx = pNode->idxParent - 1;
694 pNode = pNode->pParent;
695 nodeSize = pNode->pParent ? arrayNodeSize : headSize;
// Reached before the head array: this is the rend() position.
699 assert(pNode == m_set->head());
700 assert(idx == endIdx);
711 template <class Iterator>
712 Iterator init_begin() const
714 return Iterator(*this, head(), size_t(0) - 1);
717 template <class Iterator>
718 Iterator init_end() const
720 return Iterator(*this, head(), head_size(), false);
723 template <class Iterator>
724 Iterator init_rbegin() const
726 return Iterator(*this, head(), head_size());
729 template <class Iterator>
730 Iterator init_rend() const
732 return Iterator(*this, head(), size_t(0) - 1, false);
735 /// Bidirectional iterator class
736 template <bool IsConst>
737 class bidirectional_iterator : protected iterator_base
739 friend class FeldmanHashSet;
742 static CDS_CONSTEXPR bool const c_bConstantIterator = IsConst;
745 typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer
746 typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference
749 bidirectional_iterator() CDS_NOEXCEPT
752 bidirectional_iterator(bidirectional_iterator const& rhs) CDS_NOEXCEPT
756 bidirectional_iterator& operator=(bidirectional_iterator const& rhs) CDS_NOEXCEPT
758 iterator_base::operator=(rhs);
762 bidirectional_iterator& operator++()
764 iterator_base::operator++();
768 bidirectional_iterator& operator--()
770 iterator_base::operator--();
774 value_ptr operator ->() const CDS_NOEXCEPT
776 return iterator_base::pointer();
779 value_ref operator *() const CDS_NOEXCEPT
781 value_ptr p = iterator_base::pointer();
786 template <bool IsConst2>
787 bool operator ==(bidirectional_iterator<IsConst2> const& rhs) const CDS_NOEXCEPT
789 return iterator_base::operator==(rhs);
792 template <bool IsConst2>
793 bool operator !=(bidirectional_iterator<IsConst2> const& rhs) const CDS_NOEXCEPT
795 return !(*this == rhs);
799 bidirectional_iterator(FeldmanHashSet& set, array_node * pNode, size_t idx, bool)
800 : iterator_base(set, pNode, idx, false)
803 bidirectional_iterator(FeldmanHashSet& set, array_node * pNode, size_t idx)
804 : iterator_base(set, pNode, idx)
808 /// Reverse bidirectional iterator
809 template <bool IsConst>
810 class reverse_bidirectional_iterator : public iterator_base
812 friend class FeldmanHashSet;
815 typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer
816 typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference
819 reverse_bidirectional_iterator() CDS_NOEXCEPT
823 reverse_bidirectional_iterator(reverse_bidirectional_iterator const& rhs) CDS_NOEXCEPT
827 reverse_bidirectional_iterator& operator=(reverse_bidirectional_iterator const& rhs) CDS_NOEXCEPT
829 iterator_base::operator=(rhs);
833 reverse_bidirectional_iterator& operator++()
835 iterator_base::operator--();
839 reverse_bidirectional_iterator& operator--()
841 iterator_base::operator++();
845 value_ptr operator ->() const CDS_NOEXCEPT
847 return iterator_base::pointer();
850 value_ref operator *() const CDS_NOEXCEPT
852 value_ptr p = iterator_base::pointer();
857 template <bool IsConst2>
858 bool operator ==(reverse_bidirectional_iterator<IsConst2> const& rhs) const
860 return iterator_base::operator==(rhs);
863 template <bool IsConst2>
864 bool operator !=(reverse_bidirectional_iterator<IsConst2> const& rhs)
866 return !(*this == rhs);
870 reverse_bidirectional_iterator(FeldmanHashSet& set, array_node * pNode, size_t idx, bool)
871 : iterator_base(set, pNode, idx, false)
874 reverse_bidirectional_iterator(FeldmanHashSet& set, array_node * pNode, size_t idx)
875 : iterator_base(set, pNode, idx, false)
877 iterator_base::backward();
883 #ifdef CDS_DOXYGEN_INVOKED
884 typedef implementation_defined iterator; ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional iterator" type
885 typedef implementation_defined const_iterator; ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional const iterator" type
886 typedef implementation_defined reverse_iterator; ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional reverse iterator" type
887 typedef implementation_defined const_reverse_iterator; ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional reverse const iterator" type
889 typedef bidirectional_iterator<false> iterator;
890 typedef bidirectional_iterator<true> const_iterator;
891 typedef reverse_bidirectional_iterator<false> reverse_iterator;
892 typedef reverse_bidirectional_iterator<true> const_reverse_iterator;
895 ///@name Thread-safe iterators
896 /** @anchor cds_intrusive_FeldmanHashSet_rcu_iterators
897 The set supports thread-safe iterators: you may iterate over the set in multi-threaded environment
898 under explicit RCU lock.
900 RCU lock requirement means that inserting or searching is allowed for iterating thread
901 but you must not erase the items from the set because erasing under RCU lock can lead
902 to a deadlock. However, another thread can call \p erase() safely while your thread is iterating.
904 A typical example is:
909 uint32_t payload; // only for example
911 struct set_traits: cds::intrusive::feldman_hashset::traits
913 struct hash_accessor {
914 uint32_t operator()( foo const& src ) const
921 typedef cds::urcu::gc< cds::urcu::general_buffered<>> rcu;
922 typedef cds::intrusive::FeldmanHashSet< rcu, foo, set_traits > set_type;
928 // iterate over the set
931 typename set_type::rcu_lock l; // scoped RCU lock
934 for ( auto i = s.begin(); i != s.end(); ++i ) {
935 // deal with i. Remember, erasing is prohibited here!
938 } // at this point RCU lock is released
941 Each iterator object supports the common interface:
942 - dereference operators:
944 value_type [const] * operator ->() noexcept
945 value_type [const] & operator *() noexcept
947 - pre-increment and pre-decrement. Post-operators is not supported
948 - equality operators <tt>==</tt> and <tt>!=</tt>.
949 Iterators are equal iff they point to the same cell of the same array node.
950 Note that for two iterators \p it1 and \p it2 the condition <tt> it1 == it2 </tt>
951 does not entail <tt> &(*it1) == &(*it2) </tt>: welcome to concurrent containers
953 @note It is possible the item can be iterated more that once, for example, if an iterator points to the item
954 in an array node that is being splitted.
958 /// Returns an iterator to the beginning of the set
961 return iterator(*this, head(), size_t(0) - 1);
964 /// Returns an const iterator to the beginning of the set
965 const_iterator begin() const
967 return const_iterator(*this, head(), size_t(0) - 1);
970 /// Returns an const iterator to the beginning of the set
971 const_iterator cbegin()
973 return const_iterator(*this, head(), size_t(0) - 1);
976 /// Returns an iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior.
979 return iterator(*this, head(), head_size(), false);
982 /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior.
983 const_iterator end() const
985 return const_iterator(*this, head(), head_size(), false);
988 /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior.
989 const_iterator cend()
991 return const_iterator(*this, head(), head_size(), false);
994 /// Returns a reverse iterator to the first element of the reversed set
995 reverse_iterator rbegin()
997 return reverse_iterator(*this, head(), head_size());
1000 /// Returns a const reverse iterator to the first element of the reversed set
1001 const_reverse_iterator rbegin() const
1003 return const_reverse_iterator(*this, head(), head_size());
1006 /// Returns a const reverse iterator to the first element of the reversed set
1007 const_reverse_iterator crbegin()
1009 return const_reverse_iterator(*this, head(), head_size());
1012 /// Returns a reverse iterator to the element following the last element of the reversed set
1014 It corresponds to the element preceding the first element of the non-reversed container.
1015 This element acts as a placeholder, attempting to access it results in undefined behavior.
1017 reverse_iterator rend()
1019 return reverse_iterator(*this, head(), size_t(0) - 1, false);
1022 /// Returns a const reverse iterator to the element following the last element of the reversed set
1024 It corresponds to the element preceding the first element of the non-reversed container.
1025 This element acts as a placeholder, attempting to access it results in undefined behavior.
1027 const_reverse_iterator rend() const
1029 return const_reverse_iterator(*this, head(), size_t(0) - 1, false);
1032 /// Returns a const reverse iterator to the element following the last element of the reversed set
1034 It corresponds to the element preceding the first element of the non-reversed container.
1035 This element acts as a placeholder, attempting to access it results in undefined behavior.
1037 const_reverse_iterator crend()
1039 return const_reverse_iterator(*this, head(), size_t(0) - 1, false);
1045 template <typename Func>
1046 std::pair<bool, bool> do_update(value_type& val, Func f, bool bInsert = true)
1048 hash_type const& hash = hash_accessor()(val);
1049 traverse_data pos( hash, *this );
1050 hash_comparator cmp;
1056 node_ptr slot = base_class::traverse( pos );
1057 assert(slot.bits() == 0);
1060 if ( pos.pArr->nodes[pos.nSlot].load(memory_model::memory_order_acquire) == slot) {
1062 if ( cmp( hash, hash_accessor()(*slot.ptr())) == 0 ) {
1063 // the item with that hash value already exists
1064 // Replace it with val
1065 if ( slot.ptr() == &val ) {
1066 stats().onUpdateExisting();
1067 return std::make_pair(true, false);
1070 if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong(slot, node_ptr(&val), memory_model::memory_order_release, atomics::memory_order_relaxed)) {
1071 // slot can be disposed
1072 f( val, slot.ptr());
1074 stats().onUpdateExisting();
1075 goto update_existing_done;
1078 stats().onUpdateRetry();
1082 // the slot must be expanded
1083 base_class::expand_slot( pos, slot );
1086 stats().onUpdateFailed();
1087 return std::make_pair(false, false);
1092 // the slot is empty, try to insert data node
1095 if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong(pNull, node_ptr(&val), memory_model::memory_order_release, atomics::memory_order_relaxed))
1097 // the new data node has been inserted
1100 stats().onUpdateNew();
1101 stats().height( pos.nHeight );
1102 return std::make_pair(true, true);
1106 stats().onUpdateFailed();
1107 return std::make_pair(false, false);
1110 // insert failed - slot has been changed by another thread
1112 stats().onUpdateRetry();
1116 stats().onSlotChanged();
1120 // retire_ptr must be called only outside of RCU lock
1121 update_existing_done:
1123 gc::template retire_ptr<disposer>(pOld);
1124 return std::make_pair(true, false);
1127 template <typename Predicate>
1128 value_type * do_erase( hash_type const& hash, Predicate pred)
1130 assert(gc::is_locked());
1131 traverse_data pos( hash, *this );
1132 hash_comparator cmp;
1135 node_ptr slot = base_class::traverse( pos );
1136 assert( slot.bits() == 0 );
1138 if ( pos.pArr->nodes[pos.nSlot].load( memory_model::memory_order_acquire ) == slot ) {
1140 if ( cmp( hash, hash_accessor()(*slot.ptr())) == 0 && pred( *slot.ptr())) {
1141 // item found - replace it with nullptr
1142 if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong( slot, node_ptr( nullptr ), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
1144 stats().onEraseSuccess();
1148 stats().onEraseRetry();
1151 stats().onEraseFailed();
1155 // the slot is empty
1156 stats().onEraseFailed();
1161 stats().onSlotChanged();
1165 value_type * search(hash_type const& hash )
1167 assert( gc::is_locked());
1168 traverse_data pos( hash, *this );
1169 hash_comparator cmp;
1172 node_ptr slot = base_class::traverse( pos );
1173 assert( slot.bits() == 0 );
1175 if ( pos.pArr->nodes[pos.nSlot].load( memory_model::memory_order_acquire ) != slot ) {
1176 // slot value has been changed - retry
1177 stats().onSlotChanged();
1180 else if ( slot.ptr() && cmp( hash, hash_accessor()(*slot.ptr())) == 0 ) {
1182 stats().onFindSuccess();
1185 stats().onFindFailed();
1194 void clear_array(array_node * pArrNode, size_t nSize)
1199 for (atomic_node_ptr * pArr = pArrNode->nodes, *pLast = pArr + nSize; pArr != pLast; ++pArr) {
1201 node_ptr slot = pArr->load(memory_model::memory_order_acquire);
1202 if (slot.bits() == base_class::flag_array_node ) {
1203 // array node, go down the tree
1204 assert(slot.ptr() != nullptr);
1205 clear_array(to_array(slot.ptr()), array_node_size());
1208 else if (slot.bits() == base_class::flag_array_converting ) {
1209 // the slot is converting to array node right now
1210 while ((slot = pArr->load(memory_model::memory_order_acquire)).bits() == base_class::flag_array_converting ) {
1212 stats().onSlotConverting();
1216 assert(slot.ptr() != nullptr);
1217 assert(slot.bits() == base_class::flag_array_node );
1218 clear_array(to_array(slot.ptr()), array_node_size());
1223 if (pArr->compare_exchange_strong(slot, node_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed)) {
1225 gc::template retire_ptr<disposer>(slot.ptr());
1227 stats().onEraseSuccess();
1238 }} // namespace cds::intrusive
1240 #endif // #ifndef CDSLIB_INTRUSIVE_FELDMAN_HASHSET_RCU_H