Removed trailing spaces
[libcds.git] / cds / intrusive / skip_list_rcu.h
index f08df91268584eb29c1f0f07165348899e04ee82..d36df9ffa48f609a4bc365f9b72d4c7c115743a5 100644 (file)
@@ -1,4 +1,32 @@
-//$$CDS-header$$
+/*
+    This file is a part of libcds - Concurrent Data Structures library
+
+    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+    Source code repo: http://github.com/khizmax/libcds/
+    Download: http://sourceforge.net/projects/libcds/files/
+    
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice, this
+      list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.     
+*/
 
 #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H
 #define CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H
@@ -10,7 +38,8 @@
 #include <cds/urcu/details/check_deadlock.h>
 #include <cds/details/binary_functor_wrapper.h>
 #include <cds/urcu/exempt_ptr.h>
-
+#include <cds/urcu/raw_ptr.h>
+#include <cds/intrusive/details/raw_ptr_disposer.h>
 
 namespace cds { namespace intrusive {
 
@@ -100,7 +129,15 @@ namespace cds { namespace intrusive {
                 assert( nLevel < height() );
                 assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) );
 
-                return nLevel ? m_arrNext[nLevel - 1] : m_pNext;
+#           ifdef CDS_THREAD_SANITIZER_ENABLED
+                // TSan false positive: m_arrNext is a read-only array
+                CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
+                atomic_marked_ptr& r = nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
+                CDS_TSAN_ANNOTATE_IGNORE_READS_END;
+                return r;
+#           else
+                return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
+#           endif
             }
 
             /// Access to element of next pointer array (const version)
@@ -109,7 +146,15 @@ namespace cds { namespace intrusive {
                 assert( nLevel < height() );
                 assert( nLevel == 0 || nLevel > 0 && m_arrNext != nullptr );
 
-                return nLevel ? m_arrNext[nLevel - 1] : m_pNext;
+#           ifdef CDS_THREAD_SANITIZER_ENABLED
+                // TSan false positive: m_arrNext is a read-only array
+                CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
+                atomic_marked_ptr& r = nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
+                CDS_TSAN_ANNOTATE_IGNORE_READS_END;
+                return r;
+#           else
+                return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
+#           endif
             }
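The two accessors above bracket the read of the immutable m_arrNext pointer array with CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN/END so ThreadSanitizer does not report the benign race. A minimal sketch of the same idiom, with illustrative names and assuming (as the surrounding #ifdef suggests) that the annotation macros are only needed when CDS_THREAD_SANITIZER_ENABLED is defined:

\code
    // Hedged sketch (not libcds code): bracket a benign read with the
    // ignore-reads annotations so ThreadSanitizer does not flag it.
    int* lookup_slot( int* const* table, unsigned idx )
    {
    #   ifdef CDS_THREAD_SANITIZER_ENABLED
        // 'table' is initialized once and never written again, so the
        // unsynchronized read is benign; tell TSan to ignore it
        CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
        int* slot = table[ idx ];
        CDS_TSAN_ANNOTATE_IGNORE_READS_END;
        return slot;
    #   else
        return table[ idx ];
    #   endif
    }
\endcode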
 
             /// Access to element of next pointer array (same as \ref next function)
@@ -173,9 +218,6 @@ namespace cds { namespace intrusive {
         protected:
             void next()
             {
-                // RCU should be locked before iterating!!!
-                assert( gc::is_locked() );
-
                 back_off bkoff;
 
                 for (;;) {
@@ -208,9 +250,6 @@ namespace cds { namespace intrusive {
             iterator( node_type& refHead )
                 : m_pNode( nullptr )
             {
-                // RCU should be locked before iterating!!!
-                assert( gc::is_locked() );
-
                 back_off bkoff;
 
                 for (;;) {
@@ -234,17 +273,11 @@ namespace cds { namespace intrusive {
         public:
             iterator()
                 : m_pNode( nullptr )
-            {
-                // RCU should be locked before iterating!!!
-                assert( gc::is_locked() );
-            }
+            {}
 
             iterator( iterator const& s)
                 : m_pNode( s.m_pNode )
-            {
-                // RCU should be locked before iterating!!!
-                assert( gc::is_locked() );
-            }
+            {}
 
             value_type * operator ->() const
             {
@@ -550,6 +583,39 @@ namespace cds { namespace intrusive {
 
         typedef std::unique_ptr< node_type, typename node_builder::node_disposer >    scoped_node_ptr;
 
+        static void dispose_node( value_type * pVal )
+        {
+            assert( pVal );
+
+            typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal) );
+            disposer()( pVal );
+        }
+
+        struct node_disposer
+        {
+            void operator()( value_type * pVal )
+            {
+                dispose_node( pVal );
+            }
+        };
+
+        static void dispose_chain( node_type * pChain )
+        {
+            if ( pChain ) {
+                assert( !gc::is_locked() );
+
+                auto f = [&pChain]() -> cds::urcu::retired_ptr {
+                    node_type * p = pChain;
+                    if ( p ) {
+                        pChain = p->m_pDelChain;
+                        return cds::urcu::make_retired_ptr<node_disposer>( node_traits::to_value_ptr( p ));
+                    }
+                    return cds::urcu::make_retired_ptr<node_disposer>( static_cast<value_type *>(nullptr));
+                };
+                gc::batch_retire(std::ref(f));
+            }
+        }
+
         struct position {
             node_type *   pPrev[ c_nMaxHeight ];
             node_type *   pSucc[ c_nMaxHeight ];
@@ -561,12 +627,20 @@ namespace cds { namespace intrusive {
             position()
                 : pDelChain( nullptr )
             {}
-#       ifdef _DEBUG
+
             ~position()
             {
-                assert( pDelChain == nullptr );
+                dispose_chain( pDelChain );
+            }
+
+            void dispose( node_type * p )
+            {
+                assert( p != nullptr );
+                assert( p->m_pDelChain == nullptr );
+
+                p->m_pDelChain = pDelChain;
+                pDelChain = p;
             }
-#       endif
         };
 
         typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock>   check_deadlock_policy;
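These helpers replace the former link_for_remove()/defer_chain()/dispose_deferred() machinery: nodes that are logically removed while the RCU lock is held are threaded onto pos.pDelChain via position::dispose(), and the position destructor hands the whole chain to gc::batch_retire() once the caller has left the RCU critical section; the functor passed to batch_retire() yields one cds::urcu::retired_ptr per call and terminates the chain with a null pointer. A minimal sketch of this deferred-disposal pattern, with simplified types that are not the libcds API:

\code
    // Hedged sketch of the deferred-disposal pattern (illustrative types only).
    struct Node { Node* pDelChain = nullptr; /* payload */ };

    struct removal_chain {
        Node* head = nullptr;

        void dispose( Node* p )          // analogue of position::dispose()
        {
            p->pDelChain = head;
            head = p;
        }

        ~removal_chain()                 // analogue of position's destructor; the caller
        {                                // destroys it after the RCU lock is released
            while ( Node* p = head ) {
                head = p->pDelChain;
                retire( p );             // hand the node to the reclamation scheme
            }
        }

        static void retire( Node* )      // placeholder for gc::batch_retire() + disposer
        {}
    };
\endcode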
@@ -595,26 +669,25 @@ namespace cds { namespace intrusive {
         {
             return node_builder::make_tower( v, m_RandomLevelGen );
         }
+        //@endcond
 
-        static void dispose_node( value_type * pVal )
-        {
-            assert( pVal );
-
-            typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal) );
-            disposer()( pVal );
-        }
+    public:
+        using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, node_disposer, void >; ///< pointer to extracted node
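\p exempt_ptr is the ownership handle returned by the set's \p extract() family. A hedged usage sketch, assuming the usual libcds contract that extraction is performed outside the RCU critical section and that the node is retired when the \p exempt_ptr is released or destroyed; \p my_set, the key value and \p process() are illustrative assumptions:

\code
    // Hedged sketch: 'skip_list' is a SkipListSet typedef as in the get() example
    // further below; RCU must NOT be locked around extract().
    skip_list::exempt_ptr xp = my_set.extract( 42 );
    if ( xp ) {
        process( *xp );   // 'process' is a hypothetical user function; the node is already unlinked
    }
    // xp's destructor (or xp.release()) passes the node to the RCU disposer
\endcode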
 
-        struct node_disposer
-        {
-            void operator()( value_type * pVal )
+    private:
+        //@cond
+        struct chain_disposer {
+            void operator()( node_type * pChain ) const
             {
-                dispose_node( pVal );
+                dispose_chain( pChain );
             }
         };
+        typedef cds::intrusive::details::raw_ptr_disposer< gc, node_type, chain_disposer> raw_ptr_disposer;
         //@endcond
 
     public:
-        using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, node_disposer, void >; ///< pointer to extracted node
+        /// Result of \p get(), \p get_with() functions - pointer to the node found
+        typedef cds::urcu::raw_ptr< gc, value_type, raw_ptr_disposer > raw_ptr;
 
     protected:
         //@cond
@@ -640,7 +713,7 @@ namespace cds { namespace intrusive {
             for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
 
                 while ( true ) {
-                    pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed );
+                    pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
                     if ( pCur.bits() ) {
                         // pCur.bits() means that pPred is logically deleted
                         goto retry;
@@ -652,26 +725,26 @@ namespace cds { namespace intrusive {
                     }
 
                     // pSucc contains deletion mark for pCur
-                    pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed );
+                    pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
 
-                    if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() )
+                    if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
                         goto retry;
 
                     if ( pSucc.bits() ) {
                         // pCur is marked, i.e. logically deleted.
                         marked_node_ptr p( pCur.ptr() );
+#                   ifdef _DEBUG
+                        if ( nLevel == 0 )
+                            pCur->m_bUnlinked = true;
+#                   endif
                         if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
                              memory_model::memory_order_release, atomics::memory_order_relaxed ))
                         {
                             if ( nLevel == 0 ) {
-#                       ifdef _DEBUG
-                                pCur->m_bUnlinked = true;
-#                       endif
-
                                 if ( !is_extracted( pSucc )) {
                                     // We cannot free the node at this moment since RCU is locked
                                     // Link deleted nodes to a chain to free later
-                                    link_for_remove( pos, pCur.ptr() );
+                                    pos.dispose( pCur.ptr() );
                                     m_Stat.onEraseWhileFind();
                                 }
                                 else {
@@ -718,7 +791,7 @@ namespace cds { namespace intrusive {
 
             for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
 
-                pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed );
+                pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
                 // pCur.bits() means that pPred is logically deleted
                 // head cannot be deleted
                 assert( pCur.bits() == 0 );
@@ -726,26 +799,26 @@ namespace cds { namespace intrusive {
                 if ( pCur.ptr() ) {
 
                     // pSucc contains deletion mark for pCur
-                    pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed );
+                    pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
 
-                    if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() )
+                    if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
                         goto retry;
 
                     if ( pSucc.bits() ) {
                         // pCur is marked, i.e. logically deleted.
+#                   ifdef _DEBUG
+                        if ( nLevel == 0 )
+                            pCur->m_bUnlinked = true;
+#                   endif
                         marked_node_ptr p( pCur.ptr() );
                         if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
                             memory_model::memory_order_release, atomics::memory_order_relaxed ))
                         {
                             if ( nLevel == 0 ) {
-#                       ifdef _DEBUG
-                                pCur->m_bUnlinked = true;
-#                       endif
-
                                 if ( !is_extracted( pSucc )) {
                                     // We cannot free the node at this moment since RCU is locked
                                     // Link deleted nodes to a chain to free later
-                                    link_for_remove( pos, pCur.ptr() );
+                                    pos.dispose( pCur.ptr() );
                                     m_Stat.onEraseWhileFind();
                                 }
                                 else {
@@ -772,13 +845,13 @@ namespace cds { namespace intrusive {
             marked_node_ptr pSucc;
             marked_node_ptr pCur;
 
-retry:
+        retry:
             pPred = m_Head.head();
 
             for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
 
                 while ( true ) {
-                    pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed );
+                    pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
                     if ( pCur.bits() ) {
                         // pCur.bits() means that pPred is logically deleted
                         goto retry;
@@ -790,26 +863,26 @@ retry:
                     }
 
                     // pSucc contains deletion mark for pCur
-                    pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed );
+                    pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
 
-                    if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() )
+                    if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
                         goto retry;
 
                     if ( pSucc.bits() ) {
                         // pCur is marked, i.e. logically deleted.
+#                   ifdef _DEBUG
+                        if ( nLevel == 0 )
+                            pCur->m_bUnlinked = true;
+#                   endif
                         marked_node_ptr p( pCur.ptr() );
                         if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
                             memory_model::memory_order_release, atomics::memory_order_relaxed ))
                         {
                             if ( nLevel == 0 ) {
-#                       ifdef _DEBUG
-                                pCur->m_bUnlinked = true;
-#                       endif
-
                                 if ( !is_extracted( pSucc )) {
                                     // We cannot free the node at this moment since RCU is locked
                                     // Link deleted nodes to a chain to free later
-                                    link_for_remove( pos, pCur.ptr() );
+                                    pos.dispose( pCur.ptr() );
                                     m_Stat.onEraseWhileFind();
                                 }
                                 else {
@@ -882,14 +955,6 @@ retry:
             return true;
         }
 
-        static void link_for_remove( position& pos, node_type * pDel )
-        {
-            assert( pDel->m_pDelChain == nullptr );
-
-            pDel->m_pDelChain = pos.pDelChain;
-            pos.pDelChain = pDel;
-        }
-
         template <typename Func>
         bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract )
         {
@@ -947,14 +1012,14 @@ retry:
                     if ( !bExtract ) {
                         // We cannot free the node at this moment since RCU is locked
                         // Link deleted nodes to a chain to free later
-                        link_for_remove( pos, pDel );
+                        pos.dispose( pDel );
                         m_Stat.onFastErase();
                     }
                     else
                         m_Stat.onFastExtract();
-
                     return true;
                 }
+                m_Stat.onEraseRetry();
             }
         }
 
@@ -1033,32 +1098,37 @@ retry:
         bool do_find_with( Q& val, Compare cmp, Func f )
         {
             position pos;
+            return do_find_with( val, cmp, f, pos );
+        }
+
+        template <typename Q, typename Compare, typename Func>
+        bool do_find_with( Q& val, Compare cmp, Func f, position& pos )
+        {
             bool bRet;
 
-            rcu_lock l;
+            {
+                rcu_lock l;
 
-            switch ( find_fastpath( val, cmp, f )) {
-            case find_fastpath_found:
-                m_Stat.onFindFastSuccess();
-                return true;
-            case find_fastpath_not_found:
-                m_Stat.onFindFastFailed();
-                return false;
-            default:
-                break;
-            }
+                switch ( find_fastpath( val, cmp, f )) {
+                case find_fastpath_found:
+                    m_Stat.onFindFastSuccess();
+                    return true;
+                case find_fastpath_not_found:
+                    m_Stat.onFindFastFailed();
+                    return false;
+                default:
+                    break;
+                }
 
-            if ( find_slowpath( val, cmp, f, pos )) {
-                m_Stat.onFindSlowSuccess();
-                bRet = true;
-            }
-            else {
-                m_Stat.onFindSlowFailed();
-                bRet = false;
+                if ( find_slowpath( val, cmp, f, pos )) {
+                    m_Stat.onFindSlowSuccess();
+                    bRet = true;
+                }
+                else {
+                    m_Stat.onFindSlowFailed();
+                    bRet = false;
+                }
             }
-
-            defer_chain( pos );
-
             return bRet;
         }
 
@@ -1095,17 +1165,15 @@ retry:
                 }
             }
 
-            dispose_chain( pos );
             return bRet;
         }
 
         template <typename Q, typename Compare>
-        value_type * do_extract_key( Q const& key, Compare cmp )
+        value_type * do_extract_key( Q const& key, Compare cmp, position& pos )
         {
             // RCU should be locked!!!
             assert( gc::is_locked() );
 
-            position pos;
             node_type * pDel;
 
             if ( !find_position( key, pos, cmp, false ) ) {
@@ -1129,7 +1197,6 @@ retry:
                 }
             }
 
-            defer_chain( pos );
             return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
         }
 
@@ -1138,12 +1205,12 @@ retry:
         {
             check_deadlock_policy::check();
             value_type * pDel = nullptr;
+            position pos;
             {
                 rcu_lock l;
-                pDel = do_extract_key( key, key_comparator() );
+                pDel = do_extract_key( key, key_comparator(), pos );
             }
 
-            dispose_deferred();
             return pDel;
         }
 
@@ -1153,13 +1220,12 @@ retry:
             CDS_UNUSED(pred);
             check_deadlock_policy::check();
             value_type * pDel = nullptr;
-
+            position pos;
             {
                 rcu_lock l;
-                pDel = do_extract_key( key, cds::opt::details::make_comparator_from_less<Less>() );
+                pDel = do_extract_key( key, cds::opt::details::make_comparator_from_less<Less>(), pos );
             }
 
-            dispose_deferred();
             return pDel;
         }
 
@@ -1191,11 +1257,8 @@ retry:
                         pDel = nullptr;
                     }
                 }
-
-                defer_chain( pos );
             }
 
-            dispose_deferred();
             return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
         }
 
@@ -1227,11 +1290,8 @@ retry:
                         pDel = nullptr;
                     }
                 }
-
-                defer_chain( pos );
             }
 
-            dispose_deferred();
             return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
         }
 
@@ -1241,83 +1301,6 @@ retry:
             if ( nCur < nHeight )
                 m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed );
         }
-
-        class deferred_list_iterator
-        {
-            node_type * pCur;
-        public:
-            explicit deferred_list_iterator( node_type * p )
-                : pCur(p)
-            {}
-            deferred_list_iterator()
-                : pCur( nullptr )
-            {}
-
-            cds::urcu::retired_ptr operator *() const
-            {
-                return cds::urcu::retired_ptr( node_traits::to_value_ptr(pCur), dispose_node );
-            }
-
-            void operator ++()
-            {
-                pCur = pCur->m_pDelChain;
-            }
-
-            bool operator ==( deferred_list_iterator const& i ) const
-            {
-                return pCur == i.pCur;
-            }
-            bool operator !=( deferred_list_iterator const& i ) const
-            {
-                return !operator ==( i );
-            }
-        };
-
-        void dispose_chain( node_type * pHead )
-        {
-            // RCU should NOT be locked
-            check_deadlock_policy::check();
-
-            gc::batch_retire( deferred_list_iterator( pHead ), deferred_list_iterator() );
-        }
-
-        void dispose_chain( position& pos )
-        {
-            // RCU should NOT be locked
-            check_deadlock_policy::check();
-
-            // Delete local chain
-            if ( pos.pDelChain ) {
-                dispose_chain( pos.pDelChain );
-                pos.pDelChain = nullptr;
-            }
-
-            // Delete deferred chain
-            dispose_deferred();
-        }
-
-        void dispose_deferred()
-        {
-            dispose_chain( m_pDeferredDelChain.exchange( nullptr, memory_model::memory_order_acq_rel ) );
-        }
-
-        void defer_chain( position& pos )
-        {
-            if ( pos.pDelChain ) {
-                node_type * pHead = pos.pDelChain;
-                node_type * pTail = pHead;
-                while ( pTail->m_pDelChain )
-                    pTail = pTail->m_pDelChain;
-
-                node_type * pDeferList = m_pDeferredDelChain.load( memory_model::memory_order_relaxed );
-                do {
-                    pTail->m_pDelChain = pDeferList;
-                } while ( !m_pDeferredDelChain.compare_exchange_weak( pDeferList, pHead, memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ));
-
-                pos.pDelChain = nullptr;
-            }
-        }
-
         //@endcond
 
     public:
@@ -1340,7 +1323,17 @@ retry:
         }
 
     public:
-        /// Iterator type
+    ///@name Forward iterators (thread-safe under RCU lock)
+    //@{
+        /// Forward iterator
+        /**
+            The forward iterator has some features:
+            - it has no post-increment operator
+            - it must not outlive the RCU critical section in which it was obtained
+
+            You may safely use iterators in a multi-threaded environment only under RCU lock
+            (see the sketch after this group). Otherwise, a crash is possible if another thread
+            deletes the element the iterator points to.
+        */
         typedef skip_list::details::iterator< gc, node_traits, back_off, false >  iterator;
 
         /// Const iterator type
@@ -1381,6 +1374,7 @@ retry:
         {
             return const_iterator();
         }
+    //@}
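A traversal sketch for the iterator group above; \p my_set is an illustrative instance and \p skip_list is the set typedef used in the \p get() example further below. The whole traversal must stay inside a single RCU critical section:

\code
    {
        skip_list::rcu_lock guard;   // hold the RCU lock for the whole traversal
        for ( skip_list::iterator it = my_set.begin(), itEnd = my_set.end(); it != itEnd; ++it ) {
            // *it may be read here; do not keep pointers to elements beyond the guard's scope
        }
    }
\endcode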
 
     public:
         /// Inserts new node
@@ -1468,16 +1462,15 @@ retry:
                 }
             }
 
-            dispose_chain( pos );
-
             return bRet;
         }
 
-        /// Ensures that the \p val exists in the set
+        /// Updates the node
         /**
             The operation performs inserting or changing data with lock-free manner.
 
-            If the item \p val is not found in the set, then \p val is inserted into the set.
+            If the item \p val is not found in the set, then \p val is inserted into the set
+            iff \p bInsert is \p true.
             Otherwise, the functor \p func is called with item found.
             The functor signature is:
             \code
@@ -1486,7 +1479,7 @@ retry:
             with arguments:
             - \p bNew - \p true if the item has been inserted, \p false otherwise
             - \p item - item of the set
-            - \p val - argument \p val passed into the \p %ensure() function
+            - \p val - argument \p val passed into the \p %update() function
             If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
             refer to the same thing.
 
@@ -1495,14 +1488,15 @@ retry:
 
             RCU \p synchronize method can be called. RCU should not be locked.
 
-            Returns std::pair<bool, bool> where \p first is \p true if operation is successfull,
+            Returns std::pair<bool, bool> where \p first is \p true if the operation is successful,
+            i.e. the node has been inserted or updated,
             \p second is \p true if new item has been added or \p false if the item with \p key
-            already is in the set.
+            already exists.
 
             @warning See \ref cds_intrusive_item_creating "insert item troubleshooting"
         */
         template <typename Func>
-        std::pair<bool, bool> ensure( value_type& val, Func func )
+        std::pair<bool, bool> update( value_type& val, Func func, bool bInsert = true )
         {
             check_deadlock_policy::check();
 
@@ -1526,7 +1520,13 @@ retry:
                             scp.release();
 
                         func( false, *node_traits::to_value_ptr(pos.pCur), val );
-                        m_Stat.onEnsureExist();
+                        m_Stat.onUpdateExist();
+                        break;
+                    }
+
+                    if ( !bInsert ) {
+                        scp.release();
+                        bRet.first = false;
                         break;
                     }
 
@@ -1546,16 +1546,22 @@ retry:
                     ++m_ItemCounter;
                     scp.release();
                     m_Stat.onAddNode( nHeight );
-                    m_Stat.onEnsureNew();
+                    m_Stat.onUpdateNew();
                     bRet.second = true;
                     break;
                 }
             }
 
-            dispose_chain( pos );
-
             return bRet;
         }
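A hedged usage sketch for \p update(); \p my_set, \p my_item and \p foo follow the naming of the \p get() example further below, and the functor signature is the one documented above:

\code
    // Hedged sketch: insert-or-update with an intrusive element owned by the caller.
    std::pair<bool, bool> res = my_set.update( my_item,
        []( bool bNew, foo& item, foo& val ) {
            // bNew == true : 'item' has just been inserted and is 'val' ('my_item') itself
            // bNew == false: 'item' is the element that was already in the set
        });
    // res.first  - the element was inserted or updated
    // res.second - a new element was inserted

    // With bInsert == false the call never inserts; it only updates an existing element:
    res = my_set.update( my_item, []( bool, foo&, foo& ) {}, false );
\endcode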
+        //@cond
+        template <typename Func>
+        CDS_DEPRECATED("ensure() is deprecated, use update()")
+        std::pair<bool, bool> ensure( value_type& val, Func func )
+        {
+            return update( val, func, true );
+        }
+        //@endcond
 
         /// Unlinks the item \p val from the set
         /**
@@ -1582,7 +1588,7 @@ retry:
             bool bRet;
 
             {
-                rcu_lock rcuLock;
+                rcu_lock l;
 
                 if ( !find_position( val, pos, key_comparator(), false ) ) {
                     m_Stat.onUnlinkFailed();
@@ -1607,8 +1613,6 @@ retry:
                 }
             }
 
-            dispose_chain( pos );
-
             return bRet;
         }
 
@@ -1648,8 +1652,7 @@ retry:
 
         /// Extracts the item from the set with comparing functor \p pred
         /**
-            The function is an analog of \ref cds_intrusive_SkipListSet_rcu_extract "extract(exempt_ptr&, Q const&)"
-            but \p pred predicate is used for key comparing.
+            The function is an analog of \p extract(Q const&) but \p pred is used for key comparison.
             \p Less has the interface like \p std::less.
             \p pred must imply the same element order as the comparator used for building the set.
         */
@@ -1855,37 +1858,52 @@ retry:
         }
         //@endcond
 
-        /// Finds \p key
-        /** @anchor cds_intrusive_SkipListSet_rcu_find_val
+        /// Checks whether the set contains \p key
+        /**
             The function searches the item with key equal to \p key
             and returns \p true if it is found, and \p false otherwise.
 
             The function applies RCU lock internally.
         */
         template <typename Q>
-        bool find( Q const& key )
+        bool contains( Q const& key )
         {
             return do_find_with( key, key_comparator(), [](value_type& , Q const& ) {} );
         }
+        //@cond
+        template <typename Q>
+        CDS_DEPRECATED("deprecated, use contains()")
+        bool find( Q const& key )
+        {
+            return contains( key );
+        }
+        //@endcond
 
-        /// Finds \p key with comparing functor \p pred
+        /// Checks whether the set contains \p key using \p pred predicate for searching
         /**
-            The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_val "find(Q const&)"
-            but \p pred is used for key compare.
+            The function is similar to <tt>contains( key )</tt> but \p pred is used for key comparison.
             \p Less functor has the interface like \p std::less.
-            \p pred must imply the same element order as the comparator used for building the set.
+            \p Less must imply the same element order as the comparator used for building the set.
         */
         template <typename Q, typename Less>
-        bool find_with( Q const& key, Less pred )
+        bool contains( Q const& key, Less pred )
         {
             CDS_UNUSED( pred );
             return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type& , Q const& ) {} );
         }
+        //@cond
+        template <typename Q, typename Less>
+        CDS_DEPRECATED("deprecated, use contains()")
+        bool find_with( Q const& key, Less pred )
+        {
+            return contains( key, pred );
+        }
+        //@endcond
 
         /// Finds \p key and return the item found
         /** \anchor cds_intrusive_SkipListSet_rcu_get
-            The function searches the item with key equal to \p key and returns the pointer to item found.
-            If \p key is not found it returns \p nullptr.
+            The function searches the item with key equal to \p key and returns a \p raw_ptr object pointing to the item found.
+            If \p key is not found it returns an empty \p raw_ptr.
 
             Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
 
@@ -1895,31 +1913,31 @@ retry:
             typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
             skip_list theList;
             // ...
+            typename skip_list::raw_ptr pVal;
             {
                 // Lock RCU
                 skip_list::rcu_lock lock;
 
-                foo * pVal = theList.get( 5 );
+                pVal = theList.get( 5 );
                 if ( pVal ) {
                     // Deal with pVal
                     //...
                 }
             }
-            // Unlock RCU by rcu_lock destructor
-            // pVal can be retired by disposer at any time after RCU has been unlocked
+            // You can manually release pVal after the RCU-locked section
+            pVal.release();
             \endcode
-
-            After RCU unlocking the \p %force_dispose member function can be called manually,
-            see \ref force_dispose for explanation.
         */
         template <typename Q>
-        value_type * get( Q const& key )
+        raw_ptr get( Q const& key )
         {
             assert( gc::is_locked());
 
+            position pos;
             value_type * pFound;
-            return do_find_with( key, key_comparator(), [&pFound](value_type& found, Q const& ) { pFound = &found; } )
-                ? pFound : nullptr;
+            if ( do_find_with( key, key_comparator(), [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos ))
+                return raw_ptr( pFound, raw_ptr_disposer( pos ));
+            return raw_ptr( raw_ptr_disposer( pos ));
         }
 
         /// Finds \p key and return the item found
@@ -1932,15 +1950,19 @@ retry:
             \p pred must imply the same element order as the comparator used for building the set.
         */
         template <typename Q, typename Less>
-        value_type * get_with( Q const& key, Less pred )
+        raw_ptr get_with( Q const& key, Less pred )
         {
             CDS_UNUSED( pred );
             assert( gc::is_locked());
 
-            value_type * pFound;
-            return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(),
-                [&pFound](value_type& found, Q const& ) { pFound = &found; } )
-                ? pFound : nullptr;
+            value_type * pFound = nullptr;
+            position pos;
+            if ( do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(),
+                [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos ))
+            {
+                return raw_ptr( pFound, raw_ptr_disposer( pos ));
+            }
+            return raw_ptr( raw_ptr_disposer( pos ));
         }
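A hedged sketch of \p get_with(); \p less_by_key and the \p nKey field are illustrative assumptions, and the \p raw_ptr handling mirrors the \p get() example above:

\code
    // Hedged sketch: lookup with a user-supplied 'Less' predicate.
    struct less_by_key {
        bool operator()( foo const& lhs, int key ) const { return lhs.nKey < key; }  // assumes a foo::nKey field
        bool operator()( int key, foo const& rhs ) const { return key < rhs.nKey; }
    };

    skip_list::raw_ptr rp;
    {
        skip_list::rcu_lock lock;
        rp = my_set.get_with( 5, less_by_key());
        if ( rp ) {
            // use *rp only while the RCU lock is held
        }
    }
    rp.release();   // as with get(), release the raw_ptr after the RCU-locked section
\endcode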
 
         /// Returns item count in the set
@@ -1991,27 +2013,6 @@ retry:
         {
             return m_Stat;
         }
-
-        /// Clears internal list of ready-to-remove items passing it to RCU reclamation cycle
-        /** @anchor cds_intrusive_SkipListSet_rcu_force_dispose
-            Skip list has complex multi-step algorithm for removing an item. In fact, when you
-            remove the item it is just marked as removed that is enough for the success of your operation.
-            Actual removing can take place in the future, in another call or even in another thread.
-            Inside RCU lock the removed item cannot be passed to RCU reclamation cycle
-            since it can lead to deadlock. To solve this problem, the current skip list implementation
-            has internal list of items which is ready to remove but is not yet passed to RCU reclamation.
-            Usually, this list will be passed to RCU reclamation in the next suitable call of skip list member function.
-            In some cases we want to pass it to RCU reclamation immediately after RCU unlocking.
-            This function provides such opportunity: it checks whether the RCU is not locked and if it is true
-            the function passes the internal ready-to-remove list to RCU reclamation cycle.
-
-            The RCU \p synchronize can be called.
-        */
-        void force_dispose()
-        {
-            if ( !gc::is_locked() )
-                dispose_deferred();
-        }
     };
 
 }} // namespace cds::intrusive