Added free-list implementations
author    khizmax <khizmax@gmail.com>
Wed, 20 Jan 2016 13:43:47 +0000 (16:43 +0300)
committer    khizmax <khizmax@gmail.com>
Wed, 20 Jan 2016 13:43:47 +0000 (16:43 +0300)
cds/intrusive/free_list.h [new file with mode: 0644]
cds/intrusive/free_list_tagged.h [new file with mode: 0644]
projects/Win/vc14/cds.vcxproj
projects/Win/vc14/cds.vcxproj.filters

diff --git a/cds/intrusive/free_list.h b/cds/intrusive/free_list.h
new file mode 100644
index 0000000..de92e06
--- /dev/null
+++ b/cds/intrusive/free_list.h
@@ -0,0 +1,254 @@
+/*
+    This file is a part of libcds - Concurrent Data Structures library
+
+    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+    Source code repo: http://github.com/khizmax/libcds/
+    Download: http://sourceforge.net/projects/libcds/files/
+    
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice, this
+      list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.     
+*/
+
+#ifndef CDSLIB_INTRUSIVE_FREE_LIST_H
+#define CDSLIB_INTRUSIVE_FREE_LIST_H
+
+#include <cds/algo/atomic.h>
+
+namespace cds { namespace intrusive {
+
+    /// Lock-free free list
+    /** @ingroup cds_intrusive_helper
+
+        A free list is a helper class intended for reusing objects instead of freeing them completely;
+        this avoids the overhead of \p malloc() and also avoids its worst-case behaviour of taking an operating-system lock.
+        Thus, the free list can be considered a specialized allocator for objects of some type.
+
+        The algorithm is taken from <a href="http://moodycamel.com/blog/2014/solving-the-aba-problem-for-lock-free-free-lists">this article</a>.
+        It does not require any SMR schema like Hazard Pointer to prevent the ABA problem.
+
+        There is a \ref TaggedFreeList "tagged pointer" variant of the free list for processors that support double-width CAS.
+
+        \b How to use
+        \code
+        #include <cds/intrusive/free_list.h>
+
+        // Your struct should be derived from FreeList::node
+        struct Foo: public cds::intrusive::FreeList::node
+        {
+            // Foo fields
+        };
+
+        // Simplified Foo allocator
+        class FooAllocator
+        {
+        public:
+            // free-list clear() must be explicitly called before destroying the free-list object
+            ~FooAllocator()
+            {
+                m_FreeList.clear( []( freelist_node * p ) { delete static_cast<Foo *>( p ); });
+            }
+
+            Foo * alloc()
+            {
+                freelist_node * p = m_FreeList.get();
+                if ( p )
+                    return static_cast<Foo *>( p );
+                return new Foo;
+            }
+
+            void dealloc( Foo * p )
+            {
+                m_FreeList.put( static_cast<freelist_node *>( p ));
+            }
+
+        private:
+            typedef cds::intrusive::FreeList::node freelist_node;
+            cds::intrusive::FreeList m_FreeList;
+        };
+        \endcode
+    */
+    class FreeList
+    {
+    public:
+        /// Free list node
+        struct node {
+            //@cond
+            atomics::atomic<uint32_t>   m_freeListRefs;
+            atomics::atomic<node *>     m_freeListNext;
+
+            node()
+                : m_freeListRefs( 0 )
+                , m_freeListNext( nullptr )
+            {}
+            //@endcond
+        };
+
+    public:
+        /// Creates empty free list
+        FreeList()
+            : m_Head( nullptr )
+        {}
+
+        /// Destroys the free list. Free-list must be empty.
+        /**
+            @warning dtor does not free elements of the list.
+            To free elements you should manually call \p clear() with an appropriate disposer.
+        */
+        ~FreeList()
+        {
+            assert( empty() );
+        }
+
+        /// Puts \p pNode to the free list
+        void put( node * pNode )
+        {
+            // We know that the should-be-on-freelist bit is 0 at this point, so it's safe to
+            // set it using a fetch_add
+            if ( pNode->m_freeListRefs.fetch_add( c_ShouldBeOnFreeList, atomics::memory_order_release ) == 0 ) {
+                // Oh look! We were the last ones referencing this node, and we know
+                // we want to add it to the free list, so let's do it!
+                add_knowing_refcount_is_zero( pNode );
+            }
+        }
+
+        /// Gets a node from the free list. If the list is empty, returns \p nullptr
+        node * get()
+        {
+            auto head = m_Head.load( atomics::memory_order_acquire );
+            while ( head != nullptr ) {
+                auto prevHead = head;
+                auto refs = head->m_freeListRefs.load( atomics::memory_order_relaxed );
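+                // Increment the refcount only if it is non-zero: a zero refcount means
+                // the node may already be taken off the list and reused, so its next
+                // pointer cannot be trusted. If the increment CAS fails, re-read the head and retry.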
+                if ( (refs & c_RefsMask) == 0 || !head->m_freeListRefs.compare_exchange_strong( refs, refs + 1,
+                    atomics::memory_order_acquire, atomics::memory_order_relaxed )) 
+                {
+                    head = m_Head.load( atomics::memory_order_acquire );
+                    continue;
+                }
+
+                // Good, reference count has been incremented (it wasn't at zero), which means
+                // we can read the next and not worry about it changing between now and the time
+                // we do the CAS
+                node * next = head->m_freeListNext.load( atomics::memory_order_relaxed );
+                if ( m_Head.compare_exchange_strong( head, next, atomics::memory_order_acquire, atomics::memory_order_relaxed )) {
+                    // Yay, got the node. This means it was on the list, which means
+                    // shouldBeOnFreeList must be false no matter the refcount (because
+                    // nobody else knows it's been taken off yet, it can't have been put back on).
+                    assert( (head->m_freeListRefs.load( atomics::memory_order_relaxed ) & c_ShouldBeOnFreeList) == 0 );
+
+                    // Decrease refcount twice, once for our ref, and once for the list's ref
+                    head->m_freeListRefs.fetch_sub( 2, atomics::memory_order_relaxed );
+
+                    return head;
+                }
+
+                // OK, the head must have changed on us, but we still need to decrease the refcount we
+                // increased
+                refs = prevHead->m_freeListRefs.fetch_sub( 1, atomics::memory_order_acq_rel );
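+                // If we held the last plain reference and the should-be-on-freelist flag
+                // is set, a concurrent put() has deferred the re-insertion to us; do it now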
+                if ( refs == c_ShouldBeOnFreeList + 1 )
+                    add_knowing_refcount_is_zero( prevHead );
+            }
+
+            return nullptr;
+        }
+
+        /// Checks whether the free list is empty
+        bool empty() const
+        {
+            return m_Head.load( atomics::memory_order_relaxed ) == nullptr;
+        }
+
+        /// Clears the free list (not atomic)
+        /**
+            The \p disp disposer is called for each element to free memory.
+            The \p Disposer interface:
+            \code
+            struct disposer
+            {
+                void operator()( FreeList::node * node );
+            };
+            \endcode
+
+            This method must be explicitly called before the free list destructor.
+        */
+        template <typename Disposer>
+        void clear( Disposer disp )
+        {
+            node * head = m_Head.load( atomics::memory_order_relaxed );
+            m_Head.store( nullptr, atomics::memory_order_relaxed );
+            while ( head ) {
+                node * next = head->m_freeListNext.load( atomics::memory_order_relaxed );
+                disp( head );
+                head = next;
+            }
+        }
+
+    private:
+        //@cond
+        void add_knowing_refcount_is_zero( node * pNode )
+        {
+            // Since the refcount is zero, and nobody can increase it once it's zero (except us, and we
+            // run only one copy of this method per node at a time, i.e. the single thread case), then we
+            // know we can safely change the next pointer of the node; however, once the refcount is back
+            // above zero, then other threads could increase it (happens under heavy contention, when the
+            // refcount goes to zero in between a load and a refcount increment of a node in get(), then
+            // back up to something non-zero, then the refcount increment is done by the other thread) --
+            // so, if the CAS to add the node to the actual list fails, decrease the refcount and leave
+            // the add operation to the next thread who puts the refcount back at zero (which could be us,
+            // hence the loop).
+            node * head = m_Head.load( atomics::memory_order_relaxed );
+            while ( true ) {
+                pNode->m_freeListNext.store( head, atomics::memory_order_relaxed );
+                pNode->m_freeListRefs.store( 1, atomics::memory_order_release );
+                if ( !m_Head.compare_exchange_strong( head, pNode, atomics::memory_order_release, atomics::memory_order_relaxed )) {
+                    // Hmm, the add failed, but we can only try again when the refcount goes back to zero
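+                    // Swap our +1 reference for the should-be-on-freelist flag; if the
+                    // previous value was exactly 1, the refcount just hit zero again with
+                    // the flag set, so it is our job to retry the insertion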
+                    if ( pNode->m_freeListRefs.fetch_add( c_ShouldBeOnFreeList - 1, atomics::memory_order_release ) == 1 )
+                        continue;
+                }
+                return;
+            }
+        }
+        //@endcond
+
+    private:
+        //@cond
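+        // Layout of node::m_freeListRefs: the high bit is the "should be on free list"
+        // flag, the low 31 bits are the reference count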
+        static CDS_CONSTEXPR uint32_t const c_RefsMask = 0x7FFFFFFF;
+        static CDS_CONSTEXPR uint32_t const c_ShouldBeOnFreeList = 0x80000000;
+
+        // Implemented like a stack, but where node order doesn't matter (nodes are
+        // inserted out of order under contention)
+        atomics::atomic<node *>  m_Head;
+        //@endcond
+    };
+
+}} // namespace cds::intrusive
+
+#endif // CDSLIB_INTRUSIVE_FREE_LIST_H
diff --git a/cds/intrusive/free_list_tagged.h b/cds/intrusive/free_list_tagged.h
new file mode 100644
index 0000000..d1fbbd5
--- /dev/null
+++ b/cds/intrusive/free_list_tagged.h
@@ -0,0 +1,199 @@
+/*
+    This file is a part of libcds - Concurrent Data Structures library
+
+    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+    Source code repo: http://github.com/khizmax/libcds/
+    Download: http://sourceforge.net/projects/libcds/files/
+    
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice, this
+      list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.     
+*/
+
+#ifndef CDSLIB_INTRUSIVE_FREE_LIST_TAGGED_H
+#define CDSLIB_INTRUSIVE_FREE_LIST_TAGGED_H
+
+#include <cds/algo/atomic.h>
+
+namespace cds { namespace intrusive {
+
+    /// Lock-free free list based on tagged pointers (requires double-width CAS)
+    /** @ingroup cds_intrusive_helper
+        This variant of \p FreeList is intended for processor architectures that support double-width CAS.
+        It uses the <a href="https://en.wikipedia.org/wiki/Tagged_pointer">tagged pointer</a> technique to solve the ABA problem.
+
+        \b How to use
+        \code
+        #include <cds/intrusive/free_list_tagged.h>
+
+        // Your struct should be derived from TaggedFreeList::node
+        struct Foo: public cds::intrusive::TaggedFreeList::node
+        {
+            // Foo fields
+        };
+
+        // Simplified Foo allocator
+        class FooAllocator
+        {
+        public:
+            // free-list clear() must be explicitly called before destroying the free-list object
+            ~FooAllocator()
+            {
+                m_FreeList.clear( []( freelist_node * p ) { delete static_cast<Foo *>( p ); });
+            }
+
+            Foo * alloc()
+            {
+                freelist_node * p = m_FreeList.get();
+                if ( p )
+                    return static_cast<Foo *>( p );
+                return new Foo;
+            }
+
+            void dealloc( Foo * p )
+            {
+                m_FreeList.put( static_cast<freelist_node *>( p ));
+            }
+
+        private:
+            typedef cds::intrusive::TaggedFreeList::node freelist_node;
+            cds::intrusive::TaggedFreeList m_FreeList;
+        };
+        \endcode
+    */
+    class TaggedFreeList
+    {
+    public:
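+        /// Free list node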
+        struct node {
+            //@cond
+            atomics::atomic<node *> m_freeListNext;
+
+            node()
+                : m_freeListNext( nullptr )
+            {}
+            //@endcond
+        };
+
+    public:
+        /// Creates empty free-list
+        TaggedFreeList()
+        {
+            // Your platform must support double-width CAS
+            assert( m_Head.is_lock_free());
+        }
+
+        /// Destroys the free list. Free-list must be empty.
+        /**
+            @warning dtor does not free elements of the list.
+            To free elements you should manually call \p clear() with an appropriate disposer.
+        */
+        ~TaggedFreeList()
+        {
+            assert( empty() );
+        }
+
+        /// Puts \p pNode to the free list
+        void put( node * pNode )
+        {
+            tagged_ptr currentHead = m_Head.load( atomics::memory_order_relaxed );
+            tagged_ptr newHead = { pNode };
+            do {
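+                // Bump the tag on every attempt so that a stale head value
+                // (same pointer, older tag) can never make the CAS succeed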
+                newHead.tag = currentHead.tag + 1;
+                pNode->m_freeListNext.store( currentHead.ptr, atomics::memory_order_relaxed );
+            } while ( !m_Head.compare_exchange_weak( currentHead, newHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
+        }
+
+        /// Gets a node from the free list. If the list is empty, returns \p nullptr
+        node * get()
+        {
+            tagged_ptr currentHead = m_Head.load( atomics::memory_order_acquire );
+            tagged_ptr newHead;
+            while ( currentHead.ptr != nullptr ) {
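+                // Loading next may race with concurrent reuse of the node, but a stale
+                // value is harmless: the tag will have changed and the CAS below will
+                // fail, forcing a retry with fresh values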
+                newHead.ptr = currentHead.ptr->m_freeListNext.load( atomics::memory_order_relaxed );
+                newHead.tag = currentHead.tag + 1;
+                if ( m_Head.compare_exchange_weak( currentHead, newHead, atomics::memory_order_release, atomics::memory_order_acquire ) )
+                    break;
+            }
+            return currentHead.ptr;
+        }
+
+        /// Checks whether the free list is empty
+        bool empty() const
+        {
+            return m_Head.load( atomics::memory_order_relaxed ).ptr == nullptr;
+        }
+
+        /// Clears the free list (not atomic)
+        /**
+            The \p disp disposer is called for each element to free memory.
+            The \p Disposer interface:
+            \code
+            struct disposer
+            {
+                void operator()( TaggedFreeList::node * node );
+            };
+            \endcode
+
+            This method must be explicitly called before the free list destructor.
+        */
+        template <typename Disposer>
+        void clear( Disposer disp )
+        {
+            node * head = m_Head.load( atomics::memory_order_relaxed ).ptr;
+            m_Head.store( { nullptr }, atomics::memory_order_relaxed );
+            while ( head ) {
+                node * next = head->m_freeListNext.load( atomics::memory_order_relaxed );
+                disp( head );
+                head = next;
+            }
+        }
+
+    private:
+        //@cond
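+        // Head pointer paired with a version tag; both are updated together
+        // with a double-width CAS. The tag is incremented on every update, so
+        // the ABA problem cannot occur even if the same node address reappears.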
+        struct tagged_ptr
+        {
+            node *    ptr;
+            uintptr_t tag;
+
+            tagged_ptr()
+                : ptr( nullptr )
+                , tag( 0 )
+            {}
+        };
+
+        static_assert(sizeof( tagged_ptr ) == sizeof(void *) * 2, "sizeof( tagged_ptr ) violation" );
+
+        atomics::atomic<tagged_ptr> m_Head;
+        //@endcond    
+    };
+
+}} // namespace cds::intrusive
+
+#endif // CDSLIB_INTRUSIVE_FREE_LIST_TAGGED_H
\ No newline at end of file
diff --git a/projects/Win/vc14/cds.vcxproj b/projects/Win/vc14/cds.vcxproj
index 46fff241c0b597c4d47a92d723bba053544d4b19..ea10e6d0113968c8c236905feb433cbe838f3b41 100644
--- a/projects/Win/vc14/cds.vcxproj
+++ b/projects/Win/vc14/cds.vcxproj
     <ClInclude Include="..\..\..\cds\intrusive\ellen_bintree_dhp.h" />\r
     <ClInclude Include="..\..\..\cds\intrusive\ellen_bintree_hp.h" />\r
     <ClInclude Include="..\..\..\cds\intrusive\ellen_bintree_rcu.h" />\r
+    <ClInclude Include="..\..\..\cds\intrusive\free_list.h" />\r
+    <ClInclude Include="..\..\..\cds\intrusive\free_list_tagged.h" />\r
     <ClInclude Include="..\..\..\cds\intrusive\impl\ellen_bintree.h" />\r
     <ClInclude Include="..\..\..\cds\intrusive\impl\lazy_list.h" />\r
     <ClInclude Include="..\..\..\cds\intrusive\impl\michael_list.h" />\r
diff --git a/projects/Win/vc14/cds.vcxproj.filters b/projects/Win/vc14/cds.vcxproj.filters
index f1b37296fa30d6c827ac2648c0582372517db8fb..3e4963d997aaf7cdbe34b26b0a4b7f45afcbeb58 100644
--- a/projects/Win/vc14/cds.vcxproj.filters
+++ b/projects/Win/vc14/cds.vcxproj.filters
     <ClInclude Include="..\..\..\cds\container\feldman_hashmap_rcu.h">\r
       <Filter>Header Files\cds\container</Filter>\r
     </ClInclude>\r
+    <ClInclude Include="..\..\..\cds\intrusive\free_list.h">\r
+      <Filter>Header Files\cds\intrusive</Filter>\r
+    </ClInclude>\r
+    <ClInclude Include="..\..\..\cds\intrusive\free_list_tagged.h">\r
+      <Filter>Header Files\cds\intrusive</Filter>\r
+    </ClInclude>\r
   </ItemGroup>\r
 </Project>
\ No newline at end of file