X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=cds%2Fmemory%2Fvyukov_queue_pool.h;h=432a7f02d9aeaeeabb68547c65788dd9e5943ff2;hb=ec53bf39af3914a93bd1f53fa8657d5f87583d01;hp=663f1b8d65b9ed6cadba66dac172cd35bf80399e;hpb=f4d56635f899073e5dd2b83c208aabf793031861;p=libcds.git diff --git a/cds/memory/vyukov_queue_pool.h b/cds/memory/vyukov_queue_pool.h index 663f1b8d..432a7f02 100644 --- a/cds/memory/vyukov_queue_pool.h +++ b/cds/memory/vyukov_queue_pool.h @@ -1,10 +1,39 @@ -//$$CDS-header$$ +/* + This file is a part of libcds - Concurrent Data Structures library -#ifndef __CDS_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H -#define __CDS_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H +#define CDSLIB_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H #include #include +#include namespace cds { namespace memory { @@ -16,12 +45,13 @@ namespace cds { namespace memory { /// Allocator type typedef CDS_DEFAULT_ALLOCATOR allocator; }; - /// Free-list based on bounded lock-free queue cds::intrusive::VyukovMPMCCycleQueue + + /// Free-list based on bounded lock-free queue \p cds::intrusive::VyukovMPMCCycleQueue /** @ingroup cds_memory_pool Template parameters: - - \p T - the type of object maintaining by free-list - - \p Traits - traits for cds::intrusive::VyukovMPMCCycleQueue class plus - cds::opt::allocator option, defaul is \p vyukov_queue_pool_traits + - \p T - the type of object maintaining by free-list. \p T must be default constructible. + - \p Traits - traits for \p cds::intrusive::VyukovMPMCCycleQueue class plus + \p cds::opt::allocator option, defaul is \p vyukov_queue_pool_traits \b Internals @@ -47,7 +77,7 @@ namespace cds { namespace memory { // Pool of Foo object of size 1024. 
struct pool_traits: public cds::memory::vyukov_queue_pool_traits { - typedef cds::opt::v::static_buffer< Foo, 1024 > buffer; + typedef cds::opt::v::uninitialized_static_buffer< Foo, 1024 > buffer; }; typedef cds::memory::vyukov_queue_pool< Foo, pool_traits > pool_type; static pool_type thePool; @@ -90,9 +120,13 @@ namespace cds { namespace memory { typedef T value_type ; ///< Value type typedef Traits traits; ///< Traits type typedef typename traits::allocator::template rebind::other allocator_type ; ///< allocator type + typedef typename traits::back_off back_off; ///< back-off strategy protected: //@cond + typedef cds::details::Allocator< value_type, allocator_type > cxx_allocator; + typedef typename cxx_allocator::allocator_type std_allocator; + queue_type m_Queue; value_type * m_pFirst; value_type * m_pLast; @@ -102,11 +136,12 @@ namespace cds { namespace memory { //@cond void preallocate_pool() { - m_pFirst = allocator_type().allocate( m_Queue.capacity() ); + m_pFirst = std_allocator().allocate( m_Queue.capacity()); m_pLast = m_pFirst + m_Queue.capacity(); - for ( value_type * p = m_pFirst; p < m_pLast; ++p ) + for ( value_type * p = m_pFirst; p < m_pLast; ++p ) { CDS_VERIFY( m_Queue.push( *p )) ; // must be true + } } bool from_pool( value_type * p ) const @@ -120,7 +155,7 @@ namespace cds { namespace memory { /** \p nCapacity argument is the queue capacity. It should be passed if the queue is based on dynamically-allocated buffer. - See cds::intrusive::VyukovMPMCCycleQueue for explanation. + See \p cds::intrusive::VyukovMPMCCycleQueue for explanation. */ vyukov_queue_pool( size_t nCapacity = 0 ) : m_Queue( nCapacity ) @@ -132,13 +167,13 @@ namespace cds { namespace memory { ~vyukov_queue_pool() { m_Queue.clear(); - allocator_type().deallocate( m_pFirst, m_Queue.capacity() ); + std_allocator().deallocate( m_pFirst, m_Queue.capacity()); } /// Allocates an object from pool /** The pool supports allocation only single object (\p n = 1). - If \p n > 1 the behaviour is undefined. + If \p n > 1 the behavior is undefined. If the queue is not empty, the popped value is returned. Otherwise, a new value allocated. @@ -146,20 +181,21 @@ namespace cds { namespace memory { value_type * allocate( size_t n ) { assert( n == 1 ); + CDS_UNUSED(n); value_type * p = m_Queue.pop(); if ( p ) { - assert( from_pool(p) ); - return p; + assert( from_pool(p)); + return new( p ) value_type; } - - return allocator_type().allocate( n ); + // The pool is empty - allocate new from the heap + return cxx_allocator().New(); } /// Deallocated the object \p p /** The pool supports allocation only single object (\p n = 1). - If \p n > 1 the behaviour is undefined. + If \p n > 1 the behavior is undefined. If \p p is from preallocated pool, it pushes into the queue. Otherwise, \p p is deallocated by allocator provided. 
@@ -167,23 +203,30 @@ namespace cds { namespace memory { void deallocate( value_type * p, size_t n ) { assert( n == 1 ); + CDS_UNUSED(n); if ( p ) { - if ( from_pool( p ) ) - m_Queue.push( *p ); + if ( from_pool(p)) { + p->~value_type(); + // The queue can notify about false fullness state + // so we push in loop + back_off bkoff; + while ( !m_Queue.push( *p )) + bkoff(); + } else - allocator_type().deallocate( p, n ); + cxx_allocator().Delete( p ); } } }; - /// Lazy free-list based on bounded lock-free queue cds::intrusive::VyukovMPMCCycleQueue + /// Lazy free-list based on bounded lock-free queue \p cds::intrusive::VyukovMPMCCycleQueue /** @ingroup cds_memory_pool Template parameters: - - \p T - the type of object maintaining by free-list - - \p Traits - traits for cds::intrusive::VyukovMPMCCycleQueue class plus - cds::opt::allocator option, defaul is \p vyukov_queue_pool_traits + - \p T - the type of object maintaining by free-list. \p T must be default constructible + - \p Traits - traits for \p cds::intrusive::VyukovMPMCCycleQueue class plus + \p cds::opt::allocator option, default is \p vyukov_queue_pool_traits \b Internals @@ -251,6 +294,9 @@ namespace cds { namespace memory { protected: //@cond + typedef cds::details::Allocator< value_type, allocator_type > cxx_allocator; + typedef typename cxx_allocator::allocator_type std_allocator; + queue_type m_Queue; //@endcond @@ -263,17 +309,15 @@ namespace cds { namespace memory { /// Deallocates all objects from the pool ~lazy_vyukov_queue_pool() { - allocator_type a; - while ( !m_Queue.empty() ) { - value_type * p = m_Queue.pop(); - a.deallocate( p, 1 ); - } + std_allocator a; + while ( !m_Queue.empty()) + a.deallocate( m_Queue.pop(), 1 ); } /// Allocates an object from pool /** The pool supports allocation only single object (\p n = 1). - If \p n > 1 the behaviour is undefined. + If \p n > 1 the behavior is undefined. If the queue is not empty, the popped value is returned. Otherwise, a new value allocated. @@ -281,15 +325,16 @@ namespace cds { namespace memory { value_type * allocate( size_t n ) { assert( n == 1 ); + CDS_UNUSED(n); value_type * p = m_Queue.pop(); if ( p ) - return p; + return new( p ) value_type; - return allocator_type().allocate( n ); + return cxx_allocator().New(); } - /// Deallocated the object \p p + /// Deallocates the object \p p /** The pool supports allocation only single object (\p n = 1). If \p n > 1 the behaviour is undefined. @@ -300,21 +345,24 @@ namespace cds { namespace memory { void deallocate( value_type * p, size_t n ) { assert( n == 1 ); + CDS_UNUSED(n); if ( p ) { + p->~value_type(); + // Here we ignore false fullness state of the queue if ( !m_Queue.push( *p )) - allocator_type().deallocate( p, n ); + std_allocator().deallocate( p, 1 ); } } }; - /// Bounded free-list based on bounded lock-free queue cds::intrusive::VyukovMPMCCycleQueue + /// Bounded free-list based on bounded lock-free queue \p cds::intrusive::VyukovMPMCCycleQueue /** @ingroup cds_memory_pool Template parameters: - - \p T - the type of object maintaining by free-list - - \p Traits - traits for cds::intrusive::VyukovMPMCCycleQueue class plus - cds::opt::allocator option, defaul is \p vyukov_queue_pool_traits + - \p T - the type of object maintaining by free-list. 
\p T must be default-constructible + - \p Traits - traits for \p cds::intrusive::VyukovMPMCCycleQueue class plus + \p cds::opt::allocator option, defaul is \p vyukov_queue_pool_traits \b Internals @@ -340,7 +388,7 @@ namespace cds { namespace memory { // Pool of Foo object of size 1024. struct pool_traits: public cds::memory::vyukov_queue_pool_traits { - typedef cds::opt::v::static_buffer< Foo, 1024 > buffer; + typedef cds::opt::v::uninitialized_static_buffer< Foo, 1024 > buffer; }; typedef cds::memory::bounded_vyukov_queue_pool< Foo, pool_traits > pool_type; static pool_type thePool; @@ -376,16 +424,25 @@ namespace cds { namespace memory { template class bounded_vyukov_queue_pool { + //@cond + struct internal_traits : public Traits { + typedef cds::atomicity::item_counter item_counter; + }; + //@endcond public: - typedef cds::intrusive::VyukovMPMCCycleQueue< T, Traits > queue_type ; ///< Queue type + typedef cds::intrusive::VyukovMPMCCycleQueue< T, internal_traits > queue_type ; ///< Queue type public: typedef T value_type; ///< Value type typedef Traits traits; ///< Pool traits typedef typename traits::allocator::template rebind::other allocator_type ; ///< allocator type + typedef typename traits::back_off back_off; ///< back-off strategy protected: //@cond + typedef cds::details::Allocator< value_type, allocator_type > cxx_allocator; + typedef typename cxx_allocator::allocator_type std_allocator; + queue_type m_Queue; value_type * m_pFirst; value_type * m_pLast; @@ -395,8 +452,9 @@ namespace cds { namespace memory { //@cond void preallocate_pool() { - m_pFirst = allocator_type().allocate( m_Queue.capacity() ); - m_pLast = m_pFirst + m_Queue.capacity(); + size_t const nCount = m_Queue.capacity(); + m_pFirst = std_allocator().allocate( nCount ); + m_pLast = m_pFirst + nCount; for ( value_type * p = m_pFirst; p < m_pLast; ++p ) CDS_VERIFY( m_Queue.push( *p )) ; // must be true @@ -413,7 +471,7 @@ namespace cds { namespace memory { /** \p nCapacity argument is the queue capacity. It should be passed if the queue is based on dynamically-allocated buffer. - See cds::intrusive::VyukovMPMCCycleQueue for explanation. + See \p cds::intrusive::VyukovMPMCCycleQueue for explanation. */ bounded_vyukov_queue_pool( size_t nCapacity = 0 ) : m_Queue( nCapacity ) @@ -425,7 +483,7 @@ namespace cds { namespace memory { ~bounded_vyukov_queue_pool() { m_Queue.clear(); - allocator_type().deallocate( m_pFirst, m_Queue.capacity() ); + std_allocator().deallocate( m_pFirst, m_Queue.capacity()); } /// Allocates an object from pool @@ -442,20 +500,31 @@ namespace cds { namespace memory { CDS_UNUSED( n ); value_type * p = m_Queue.pop(); - if ( p ) { - assert( from_pool(p) ); - return p; + + if ( !p ) { + back_off bkoff; + while ( m_Queue.size()) { + p = m_Queue.pop(); + if ( p ) + goto ok; + bkoff(); + } + + // The pool is empty + CDS_THROW_EXCEPTION( std::bad_alloc()); } - throw std::bad_alloc(); + ok: + assert( from_pool(p)); + return p; } - /// Deallocated the object \p p + /// Deallocates the object \p p /** The pool supports allocation only single object (\p n = 1). If \p n > 1 the behaviour is undefined. - \p should be from preallocated pool. + \p p should be from preallocated pool. 
*/
         void deallocate( value_type * p, size_t n )
         {
@@ -464,7 +533,11 @@ namespace cds { namespace memory {
 
             if ( p ) {
                 assert( from_pool( p ));
-                m_Queue.push( *p );
+                back_off bkoff;
+                // The queue can report a spurious "full" state,
+                // so we push in a loop until the push succeeds
+                while ( !m_Queue.push(*p))
+                    bkoff();
             }
         }
     };
@@ -473,4 +546,4 @@ namespace cds { namespace memory {
 
 }} // namespace cds::memory
 
-#endif // #ifndef __CDS_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H
+#endif // #ifndef CDSLIB_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H
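
A minimal usage sketch of the plain vyukov_queue_pool shown in this diff, following the pool_traits example from its class documentation. Foo, pool_traits, thePool and pool_usage_example() are illustrative names assumed for the sketch; only the cds types and the allocate()/deallocate() signatures come from the header itself.

#include <cds/memory/vyukov_queue_pool.h>
#include <cds/opt/buffer.h>     // cds::opt::v::uninitialized_static_buffer

// Hypothetical payload type; the pool requires it to be default constructible
struct Foo {
    int    nData  = 0;
    double fValue = 0.0;
};

// Pool of Foo objects of size 1024, as in the documentation example above
struct pool_traits: public cds::memory::vyukov_queue_pool_traits {
    typedef cds::opt::v::uninitialized_static_buffer< Foo, 1024 > buffer;
};

typedef cds::memory::vyukov_queue_pool< Foo, pool_traits > pool_type;
static pool_type thePool;

void pool_usage_example()
{
    // allocate() supports single-object allocation only (n must be 1).
    // It pops a free slot from the lock-free queue and default-constructs
    // a Foo in it; if the preallocated buffer is exhausted, the object is
    // allocated from the heap via the configured allocator.
    Foo * p = thePool.allocate( 1 );
    p->nData = 42;

    // deallocate() destroys the object and returns the slot to the queue;
    // objects that did not come from the preallocated buffer are released
    // through the allocator instead.
    thePool.deallocate( p, 1 );
}

lazy_vyukov_queue_pool is used the same way but allocates lazily, with no preallocation. bounded_vyukov_queue_pool also follows the same interface, except that allocate() throws std::bad_alloc once the preallocated buffer is exhausted instead of falling back to the heap, and deallocate() expects the pointer to originate from the preallocated buffer.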