diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/memory/vyukov_queue_pool.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/memory/vyukov_queue_pool.h
new file mode 100644
index 0000000..432a7f0
--- /dev/null
+++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/memory/vyukov_queue_pool.h
@@ -0,0 +1,549 @@
+/*
+    This file is a part of libcds - Concurrent Data Structures library
+
+    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
+
+    Source code repo: http://github.com/khizmax/libcds/
+    Download: http://sourceforge.net/projects/libcds/files/
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice, this
+      list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSLIB_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H
+#define CDSLIB_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H
+
+#include <cds/details/allocator.h>                  // cds::details::Allocator
+#include <cds/intrusive/vyukov_mpmc_cycle_queue.h>  // cds::intrusive::VyukovMPMCCycleQueue
+#include <cds/details/throw_exception.h>            // CDS_THROW_EXCEPTION
+
+namespace cds { namespace memory {
+
+    /// \p vyukov_queue_pool traits
+    /** @ingroup cds_memory_pool
+    */
+    struct vyukov_queue_pool_traits : public cds::intrusive::vyukov_queue::traits
+    {
+        /// Allocator type
+        typedef CDS_DEFAULT_ALLOCATOR allocator;
+    };
+
+    /// Free-list based on the bounded lock-free queue \p cds::intrusive::VyukovMPMCCycleQueue
+    /** @ingroup cds_memory_pool
+        Template parameters:
+        - \p T - the type of object maintained by the free-list. \p T must be default constructible.
+        - \p Traits - traits for the \p cds::intrusive::VyukovMPMCCycleQueue class plus
+            the \p cds::opt::allocator option, default is \p vyukov_queue_pool_traits
+
+        \b Internals
+
+        This free-list is very simple.
+        At construction time, the free-list allocates an array of N items
+        and stores them in the queue, where N is the queue capacity.
+        When allocating, the free-list tries to pop an object from the
+        internal queue, i.e. from the preallocated pool. If successful, the popped object is returned.
+        Otherwise a new one is allocated.
+        When deallocating, the free-list checks whether
+        the object is from the preallocated pool. If so, the object is pushed into the queue; otherwise
+        it is deallocated by the provided allocator.
+        The pool can manage more than \p N items, but only \p N items are kept in the free-list.
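+
+        In sketch form, both paths look roughly like this (a simplified
+        illustration of the member functions defined below, not additional API):
+        \code
+        value_type * allocate_sketch()
+        {
+            value_type * p = m_Queue.pop();  // try the preallocated pool first
+            if ( p )
+                return new( p ) value_type;  // reconstruct the object in place
+            return cxx_allocator().New();    // pool exhausted - fall back to the heap
+        }
+
+        void deallocate_sketch( value_type * p )
+        {
+            if ( from_pool( p )) {            // address inside the preallocated array?
+                p->~value_type();
+                back_off bkoff;
+                while ( !m_Queue.push( *p ))  // "full" can be a false state - retry
+                    bkoff();
+            }
+            else
+                cxx_allocator().Delete( p );  // heap-allocated - return to the allocator
+        }
+        \endcode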
+
+        \b Usage
+
+        \p %vyukov_queue_pool should be used together with \ref pool_allocator.
+        You should declare a static object of type \p %vyukov_queue_pool, provide
+        an accessor to that object and use \p pool_allocator as an allocator:
+        \code
+        #include <cds/memory/vyukov_queue_pool.h>
+        #include <cds/memory/pool_allocator.h>
+
+        // Pool of Foo objects of size 1024.
+        struct pool_traits: public cds::memory::vyukov_queue_pool_traits
+        {
+            typedef cds::opt::v::uninitialized_static_buffer< Foo, 1024 > buffer;
+        };
+        typedef cds::memory::vyukov_queue_pool< Foo, pool_traits > pool_type;
+        static pool_type thePool;
+
+        struct pool_accessor {
+            typedef typename pool_type::value_type value_type;
+
+            pool_type& operator()() const
+            {
+                return thePool;
+            }
+        };
+
+        // Declare pool allocator
+        typedef cds::memory::pool_allocator< Foo, pool_accessor > pool_allocator;
+
+        // Use pool_allocator
+        // Allocate an object
+        Foo * p = pool_allocator().allocate( 1 );
+
+        // construct object
+        new(p) Foo;
+
+        //...
+
+        // Destruct object
+        p->~Foo();
+
+        // Deallocate object
+        pool_allocator().deallocate( p , 1 );
+        \endcode
+    */
+    template <typename T, typename Traits = vyukov_queue_pool_traits>
+    class vyukov_queue_pool
+    {
+    public:
+        typedef cds::intrusive::VyukovMPMCCycleQueue< T, Traits > queue_type ; ///< Queue type
+
+    public:
+        typedef T value_type ; ///< Value type
+        typedef Traits traits; ///< Traits type
+        typedef typename traits::allocator::template rebind<value_type>::other allocator_type ; ///< allocator type
+        typedef typename traits::back_off back_off; ///< back-off strategy
+
+    protected:
+        //@cond
+        typedef cds::details::Allocator< value_type, allocator_type > cxx_allocator;
+        typedef typename cxx_allocator::allocator_type std_allocator;
+
+        queue_type      m_Queue;
+        value_type *    m_pFirst;
+        value_type *    m_pLast;
+        //@endcond
+
+    protected:
+        //@cond
+        void preallocate_pool()
+        {
+            m_pFirst = std_allocator().allocate( m_Queue.capacity());
+            m_pLast = m_pFirst + m_Queue.capacity();
+
+            for ( value_type * p = m_pFirst; p < m_pLast; ++p ) {
+                CDS_VERIFY( m_Queue.push( *p )) ; // must be true
+            }
+        }
+
+        bool from_pool( value_type * p ) const
+        {
+            return m_pFirst <= p && p < m_pLast;
+        }
+        //@endcond
+
+    public:
+        /// Preallocates the pool of objects
+        /**
+            The \p nCapacity argument is the queue capacity. It should be passed
+            if the queue is based on a dynamically-allocated buffer.
+            See \p cds::intrusive::VyukovMPMCCycleQueue for explanation.
+        */
+        vyukov_queue_pool( size_t nCapacity = 0 )
+            : m_Queue( nCapacity )
+        {
+            preallocate_pool();
+        }
+
+        /// Deallocates the pool.
+        ~vyukov_queue_pool()
+        {
+            m_Queue.clear();
+            std_allocator().deallocate( m_pFirst, m_Queue.capacity());
+        }
+
+        /// Allocates an object from the pool
+        /**
+            The pool supports allocation of a single object only (\p n = 1).
+            If \p n > 1 the behavior is undefined.
+
+            If the queue is not empty, the popped value is returned.
+            Otherwise, a new value is allocated.
+        */
+        value_type * allocate( size_t n )
+        {
+            assert( n == 1 );
+            CDS_UNUSED(n);
+
+            value_type * p = m_Queue.pop();
+            if ( p ) {
+                assert( from_pool(p));
+                return new( p ) value_type;
+            }
+            // The pool is empty - allocate a new object from the heap
+            return cxx_allocator().New();
+        }
+
+        /// Deallocates the object \p p
+        /**
+            The pool supports allocation of a single object only (\p n = 1).
+            If \p n > 1 the behavior is undefined.
+
+            If \p p is from the preallocated pool, it is pushed into the queue.
+            Otherwise, \p p is deallocated by the provided allocator.
+        */
+        void deallocate( value_type * p, size_t n )
+        {
+            assert( n == 1 );
+            CDS_UNUSED(n);
+
+            if ( p ) {
+                if ( from_pool(p)) {
+                    p->~value_type();
+                    // The queue can report a false "full" state,
+                    // so we push in a loop
+                    back_off bkoff;
+                    while ( !m_Queue.push( *p ))
+                        bkoff();
+                }
+                else
+                    cxx_allocator().Delete( p );
+            }
+        }
+    };
+
+
+    /// Lazy free-list based on the bounded lock-free queue \p cds::intrusive::VyukovMPMCCycleQueue
+    /** @ingroup cds_memory_pool
+        Template parameters:
+        - \p T - the type of object maintained by the free-list. \p T must be default constructible
+        - \p Traits - traits for the \p cds::intrusive::VyukovMPMCCycleQueue class plus
+            the \p cds::opt::allocator option, default is \p vyukov_queue_pool_traits
+
+        \b Internals
+
+        This free-list is very simple.
+        At construction time the pool is empty.
+        When allocating, the free-list tries to pop an object from the
+        internal queue. If successful, the popped object is returned.
+        Otherwise a new one is allocated.
+        When deallocating, the free-list tries to push the object into the pool.
+        If the internal queue is full, the object is deallocated by the provided allocator.
+        The pool can manage more than \p N items, but only \p N items are kept in the free-list.
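+
+        In sketch form, deallocation differs from \p %vyukov_queue_pool in that a
+        failed push is not retried (a simplified illustration of the member
+        function defined below, not additional API):
+        \code
+        void deallocate_sketch( value_type * p )
+        {
+            p->~value_type();
+            if ( !m_Queue.push( *p ))                // queue full (possibly a false state)
+                std_allocator().deallocate( p, 1 );  // just give the memory back
+        }
+        \endcode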
+
+        \b Usage
+
+        \p %lazy_vyukov_queue_pool should be used together with \ref pool_allocator.
+        You should declare a static object of type \p %lazy_vyukov_queue_pool, provide
+        an accessor functor to this object and use \p pool_allocator as an allocator:
+        \code
+        #include <cds/memory/vyukov_queue_pool.h>
+        #include <cds/memory/pool_allocator.h>
+
+        // Pool of Foo objects of size 1024.
+        typedef cds::memory::lazy_vyukov_queue_pool< Foo > pool_type;
+        static pool_type thePool( 1024 );
+
+        struct pool_accessor {
+            typedef typename pool_type::value_type value_type;
+
+            pool_type& operator()() const
+            {
+                return thePool;
+            }
+        };
+
+        // Declare pool allocator
+        typedef cds::memory::pool_allocator< Foo, pool_accessor > pool_allocator;
+
+        // Use pool_allocator
+        // Allocate an object
+        Foo * p = pool_allocator().allocate( 1 );
+
+        // construct object
+        new(p) Foo;
+
+        //...
+
+        // Destruct object
+        p->~Foo();
+
+        // Deallocate object
+        pool_allocator().deallocate( p , 1 );
+        \endcode
+
+    */
+    template <typename T, typename Traits = vyukov_queue_pool_traits>
+    class lazy_vyukov_queue_pool
+    {
+    public:
+        typedef cds::intrusive::VyukovMPMCCycleQueue< T, Traits > queue_type ; ///< Queue type
+
+    public:
+        typedef T value_type ; ///< Value type
+        typedef Traits traits; ///< Pool traits
+        typedef typename traits::allocator::template rebind<value_type>::other allocator_type ; ///< allocator type
+
+    protected:
+        //@cond
+        typedef cds::details::Allocator< value_type, allocator_type > cxx_allocator;
+        typedef typename cxx_allocator::allocator_type std_allocator;
+
+        queue_type  m_Queue;
+        //@endcond
+
+    public:
+        /// Constructs an empty pool
+        lazy_vyukov_queue_pool( size_t nCapacity = 0 )
+            : m_Queue( nCapacity )
+        {}
+
+        /// Deallocates all objects from the pool
+        ~lazy_vyukov_queue_pool()
+        {
+            std_allocator a;
+            while ( !m_Queue.empty())
+                a.deallocate( m_Queue.pop(), 1 );
+        }
+
+        /// Allocates an object from the pool
+        /**
+            The pool supports allocation of a single object only (\p n = 1).
+            If \p n > 1 the behavior is undefined.
+
+            If the queue is not empty, the popped value is returned.
+            Otherwise, a new value is allocated.
+        */
+        value_type * allocate( size_t n )
+        {
+            assert( n == 1 );
+            CDS_UNUSED(n);
+
+            value_type * p = m_Queue.pop();
+            if ( p )
+                return new( p ) value_type;
+
+            return cxx_allocator().New();
+        }
+
+        /// Deallocates the object \p p
+        /**
+            The pool supports allocation of a single object only (\p n = 1).
+            If \p n > 1 the behavior is undefined.
+
+            If the queue is not full, \p p is pushed into the queue.
+            Otherwise, \p p is deallocated by the provided allocator.
+        */
+        void deallocate( value_type * p, size_t n )
+        {
+            assert( n == 1 );
+            CDS_UNUSED(n);
+
+            if ( p ) {
+                p->~value_type();
+                // Here we ignore a false "full" state of the queue
+                if ( !m_Queue.push( *p ))
+                    std_allocator().deallocate( p, 1 );
+            }
+        }
+
+    };
+
+    /// Bounded free-list based on the bounded lock-free queue \p cds::intrusive::VyukovMPMCCycleQueue
+    /** @ingroup cds_memory_pool
+        Template parameters:
+        - \p T - the type of object maintained by the free-list. \p T must be default-constructible
+        - \p Traits - traits for the \p cds::intrusive::VyukovMPMCCycleQueue class plus
+            the \p cds::opt::allocator option, default is \p vyukov_queue_pool_traits
+
+        \b Internals
+
+        At construction time, the free-list allocates an array of N items
+        and stores them in the queue, where N is the queue capacity.
+        When allocating, the free-list tries to pop an object from the
+        internal queue, i.e. from the preallocated pool. If successful, the popped object is returned.
+        Otherwise a \p std::bad_alloc exception is raised.
+        So, the pool can contain up to \p N items.
+        When deallocating, the object is pushed into the queue.
+        In debug mode the \p deallocate() member function asserts
+        that the pointer is from the preallocated pool.
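+
+        In sketch form, the allocation path retries while the queue still reports
+        live items, then throws (a simplified illustration of the member function
+        defined below, not additional API):
+        \code
+        value_type * allocate_sketch()
+        {
+            value_type * p = m_Queue.pop();
+            back_off bkoff;
+            while ( !p && m_Queue.size()) {  // items are in flight - retry with back-off
+                bkoff();
+                p = m_Queue.pop();
+            }
+            if ( !p )
+                CDS_THROW_EXCEPTION( std::bad_alloc());  // the pool is exhausted
+            return p;  // the object always comes from the preallocated pool
+        }
+        \endcode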
+
+        \b Usage
+
+        \p %bounded_vyukov_queue_pool should be used together with \ref pool_allocator.
+        You should declare a static object of type \p %bounded_vyukov_queue_pool, provide
+        an accessor functor to this object and use \p pool_allocator as an allocator:
+        \code
+        #include <cds/memory/vyukov_queue_pool.h>
+        #include <cds/memory/pool_allocator.h>
+
+        // Pool of Foo objects of size 1024.
+        struct pool_traits: public cds::memory::vyukov_queue_pool_traits
+        {
+            typedef cds::opt::v::uninitialized_static_buffer< Foo, 1024 > buffer;
+        };
+        typedef cds::memory::bounded_vyukov_queue_pool< Foo, pool_traits > pool_type;
+        static pool_type thePool;
+
+        struct pool_accessor {
+            typedef typename pool_type::value_type value_type;
+
+            pool_type& operator()() const
+            {
+                return thePool;
+            }
+        };
+
+        // Declare pool allocator
+        typedef cds::memory::pool_allocator< Foo, pool_accessor > pool_allocator;
+
+        // Use pool_allocator
+        // Allocate an object
+        Foo * p = pool_allocator().allocate( 1 );
+
+        // construct object
+        new(p) Foo;
+
+        //...
+
+        // Destruct object
+        p->~Foo();
+
+        // Deallocate object
+        pool_allocator().deallocate( p , 1 );
+        \endcode
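+
+        Because the pool is bounded, \p allocate() may throw \p std::bad_alloc.
+        A caller that prefers a null result can wrap the call (a sketch using the
+        hypothetical \p Foo type from the example above):
+        \code
+        Foo * try_allocate()
+        {
+            try {
+                return pool_allocator().allocate( 1 );
+            }
+            catch ( std::bad_alloc const& ) {
+                return nullptr;  // the pool is exhausted
+            }
+        }
+        \endcode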
+    */
+    template <typename T, typename Traits = vyukov_queue_pool_traits>
+    class bounded_vyukov_queue_pool
+    {
+        //@cond
+        struct internal_traits : public Traits {
+            typedef cds::atomicity::item_counter item_counter;
+        };
+        //@endcond
+    public:
+        typedef cds::intrusive::VyukovMPMCCycleQueue< T, internal_traits > queue_type ; ///< Queue type
+
+    public:
+        typedef T value_type;  ///< Value type
+        typedef Traits traits; ///< Pool traits
+        typedef typename traits::allocator::template rebind<value_type>::other allocator_type ; ///< allocator type
+        typedef typename traits::back_off back_off; ///< back-off strategy
+
+    protected:
+        //@cond
+        typedef cds::details::Allocator< value_type, allocator_type > cxx_allocator;
+        typedef typename cxx_allocator::allocator_type std_allocator;
+
+        queue_type      m_Queue;
+        value_type *    m_pFirst;
+        value_type *    m_pLast;
+        //@endcond
+
+    protected:
+        //@cond
+        void preallocate_pool()
+        {
+            size_t const nCount = m_Queue.capacity();
+            m_pFirst = std_allocator().allocate( nCount );
+            m_pLast = m_pFirst + nCount;
+
+            for ( value_type * p = m_pFirst; p < m_pLast; ++p )
+                CDS_VERIFY( m_Queue.push( *p )) ; // must be true
+
+        bool from_pool( value_type * p ) const
+        {
+            return m_pFirst <= p && p < m_pLast;
+        }
+        //@endcond
+
+    public:
+        /// Preallocates the pool of objects
+        /**
+            The \p nCapacity argument is the queue capacity. It should be passed
+            if the queue is based on a dynamically-allocated buffer.
+            See \p cds::intrusive::VyukovMPMCCycleQueue for explanation.
+        */
+        bounded_vyukov_queue_pool( size_t nCapacity = 0 )
+            : m_Queue( nCapacity )
+        {
+            preallocate_pool();
+        }
+
+        /// Deallocates the pool.
+        ~bounded_vyukov_queue_pool()
+        {
+            m_Queue.clear();
+            std_allocator().deallocate( m_pFirst, m_Queue.capacity());
+        }
+
+        /// Allocates an object from the pool
+        /**
+            The pool supports allocation of a single object only (\p n = 1).
+            If \p n > 1 the behavior is undefined.
+
+            If the queue is not empty, the popped value is returned.
+            Otherwise, a \p std::bad_alloc exception is raised.
+        */
+        value_type * allocate( size_t n )
+        {
+            assert( n == 1 );
+            CDS_UNUSED( n );
+
+            value_type * p = m_Queue.pop();
+
+            if ( !p ) {
+                back_off bkoff;
+                while ( m_Queue.size()) {
+                    p = m_Queue.pop();
+                    if ( p )
+                        goto ok;
+                    bkoff();
+                }
+
+                // The pool is empty
+                CDS_THROW_EXCEPTION( std::bad_alloc());
+            }
+
+        ok:
+            assert( from_pool(p));
+            return p;
+        }
+
+        /// Deallocates the object \p p
+        /**
+            The pool supports allocation of a single object only (\p n = 1).
+            If \p n > 1 the behavior is undefined.
+
+            \p p should be from the preallocated pool.
+        */
+        void deallocate( value_type * p, size_t n )
+        {
+            assert( n == 1 );
+            CDS_UNUSED( n );
+
+            if ( p ) {
+                assert( from_pool( p ));
+                back_off bkoff;
+                // The queue can report it is full, but that can be a false "full" state.
+                // So, we push in a loop
+                while ( !m_Queue.push(*p))
+                    bkoff();
+            }
+        }
+    };
+
+
+}} // namespace cds::memory
+
+
+#endif // #ifndef CDSLIB_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H