From 8d95ff045007558e24b7d0ec286f0b906d897bbb Mon Sep 17 00:00:00 2001 From: khizmax Date: Sun, 14 May 2017 23:23:59 +0300 Subject: [PATCH] Fixed several bugs in WeakRingBuffer Added single-threaded test for single-consumer VyukovQueue Added single-threaded test for WeakRingBuffer --- cds/container/weak_ringbuffer.h | 80 +++++++- test/include/cds_test/fixture.h | 2 +- test/unit/queue/test_bounded_queue.h | 112 ++++++----- test/unit/queue/vyukov_mpmc_queue.cpp | 51 ++++- test/unit/queue/weak_ringbuffer.cpp | 276 ++++++++++++++++---------- 5 files changed, 356 insertions(+), 165 deletions(-) diff --git a/cds/container/weak_ringbuffer.h b/cds/container/weak_ringbuffer.h index 5b8a36f7..d328ba55 100644 --- a/cds/container/weak_ringbuffer.h +++ b/cds/container/weak_ringbuffer.h @@ -96,7 +96,7 @@ namespace cds { namespace container { - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) or \p opt::v::sequential_consistent (sequentially consistent memory model). Example: declare \p %WeakRingBuffer with static internal buffer of size 1024: + Example: declare \p %WeakRingBuffer with static internal buffer for 1024 objects: \code typedef cds::container::WeakRingBuffer< Foo, typename cds::container::weak_ringbuffer::make_traits< @@ -129,7 +129,10 @@ namespace cds { namespace container { There is a specialization \ref cds_nonintrusive_WeakRingBuffer_void "WeakRingBuffer" that is not a queue but a "memory pool" between producer and consumer threads. - \p WeakRingBuffer supports data of different size. + \p WeakRingBuffer supports variable-sized data. + + @warning: \p %WeakRingBuffer is designed for 64-bit architectures. + On a 32-bit platform an integer overflow of the internal counters is possible. */ template class WeakRingBuffer: public cds::bounded_container @@ -571,6 +574,62 @@ namespace cds { namespace container { /// Single-producer single-consumer ring buffer for untyped variable-sized data /** @ingroup cds_nonintrusive_queue @anchor cds_nonintrusive_WeakRingBuffer_void + + This SPSC ring-buffer is intended for data of variable size. The producer + allocates a buffer from the ring, fills it with data and pushes it back to the ring. + The consumer thread reads data from the front end and then pops it: + \code + // allocates a 1M ring buffer + WeakRingBuffer theRing( 1024 * 1024 ); + + void producer_thread() + { + // Get data of size N bytes + size_t size; + void* data; + + while ( true ) { + // Get external data + std::tie( data, size ) = get_data(); + + if ( data == nullptr ) + break; + + // Allocates a buffer from the ring + void* buf = theRing.back( size ); + if ( !buf ) { + std::cout << "The ring is full" << std::endl; + break; + } + + memcpy( buf, data, size ); + + // Push data into the ring + theRing.push_back(); + } + } + + void consumer_thread() + { + while ( true ) { + auto buf = theRing.front(); + + if ( buf.first == nullptr ) { + std::cout << "The ring is empty" << std::endl; + break; + } + + // Process data + process_data( buf.first, buf.second ); + + // Free buffer + theRing.pop_front(); + } + } + \endcode + + @warning: \p %WeakRingBuffer is designed for 64-bit architectures. + On a 32-bit platform an integer overflow of the internal counters is possible.
*/ #ifdef CDS_DOXYGEN_INVOKED template @@ -608,6 +667,8 @@ namespace cds { namespace container { /// [producer] Reserve \p size bytes void* back( size_t size ) { + assert( size > 0 ); + // Any data is rounded to 8-byte boundary size_t real_size = calc_real_size( size ); @@ -650,6 +711,7 @@ namespace cds { namespace container { } } + back_.store( back, memory_model::memory_order_release ); reserved = buffer_.buffer(); } @@ -710,6 +772,7 @@ namespace cds { namespace container { size = *reinterpret_cast( buf ); assert( !is_tail( size ) ); + assert( buf == buffer_.buffer() ); } #ifdef _DEBUG @@ -720,7 +783,7 @@ namespace cds { namespace container { } #endif - return std::make_pair( reinterpret_cast( buf + sizeof( size_t ) ), size ); + return std::make_pair( reinterpret_cast( buf + sizeof( size_t )), size ); } /// [consumer] Pops top data @@ -741,9 +804,7 @@ namespace cds { namespace container { assert( ( reinterpret_cast( buf ) & ( sizeof( uintptr_t ) - 1 ) ) == 0 ); size_t size = *reinterpret_cast( buf ); - assert( !is_tail( size ) ); - - size_t real_size = calc_real_size( size ); + size_t real_size = calc_real_size( untail( size )); #ifdef _DEBUG if ( cback_ - front < real_size ) { @@ -789,6 +850,7 @@ namespace cds { namespace container { } private: + //@cond static size_t calc_real_size( size_t size ) { size_t real_size = (( size + sizeof( uintptr_t ) - 1 ) & ~( sizeof( uintptr_t ) - 1 )) + sizeof( size_t ); @@ -809,6 +871,12 @@ namespace cds { namespace container { return size | ( size_t( 1 ) << ( sizeof( size_t ) * 8 - 1 )); } + static size_t untail( size_t size ) + { + return size & (( size_t( 1 ) << ( sizeof( size_t ) * 8 - 1 ) ) - 1); + } + //@endcond + private: //@cond atomics::atomic front_; diff --git a/test/include/cds_test/fixture.h b/test/include/cds_test/fixture.h index e09d429b..f488f941 100644 --- a/test/include/cds_test/fixture.h +++ b/test/include/cds_test/fixture.h @@ -39,7 +39,7 @@ namespace cds_test { class fixture : public ::testing::Test { - protected: + public: template static void shuffle( RandomIt first, RandomIt last ) { diff --git a/test/unit/queue/test_bounded_queue.h b/test/unit/queue/test_bounded_queue.h index a1657b1b..9186771d 100644 --- a/test/unit/queue/test_bounded_queue.h +++ b/test/unit/queue/test_bounded_queue.h @@ -50,69 +50,75 @@ namespace cds_test { ASSERT_CONTAINER_SIZE( q, 0 ); // enqueue/dequeue - for ( size_t i = 0; i < nSize; ++i ) { - it = static_cast(i); - ASSERT_TRUE( q.enqueue( it )); - ASSERT_CONTAINER_SIZE( q, i + 1 ); - } - ASSERT_FALSE( q.empty()); - ASSERT_CONTAINER_SIZE( q, nSize ); - ASSERT_FALSE( q.enqueue( static_cast( nSize ) * 2 ) ); + for ( unsigned pass = 0; pass < 3; ++pass ) { + for ( size_t i = 0; i < nSize; ++i ) { + it = static_cast( i ); + ASSERT_TRUE( q.enqueue( it ) ); + ASSERT_CONTAINER_SIZE( q, i + 1 ); + } + ASSERT_FALSE( q.empty() ); + ASSERT_CONTAINER_SIZE( q, nSize ); + ASSERT_FALSE( q.enqueue( static_cast( nSize ) * 2 ) ); - for ( size_t i = 0; i < nSize; ++i ) { - it = -1; - ASSERT_TRUE( q.dequeue( it )); - ASSERT_EQ( it, static_cast( i )); - ASSERT_CONTAINER_SIZE( q, nSize - i - 1 ); + for ( size_t i = 0; i < nSize; ++i ) { + it = -1; + ASSERT_TRUE( q.dequeue( it ) ); + ASSERT_EQ( it, static_cast( i ) ); + ASSERT_CONTAINER_SIZE( q, nSize - i - 1 ); + } + ASSERT_TRUE( q.empty() ); + ASSERT_CONTAINER_SIZE( q, 0 ); } - ASSERT_TRUE( q.empty()); - ASSERT_CONTAINER_SIZE( q, 0 ); // push/pop - for ( size_t i = 0; i < nSize; ++i ) { - it = static_cast(i); - ASSERT_TRUE( q.push( it )); - ASSERT_CONTAINER_SIZE( q, i + 1 ); 
- } - ASSERT_FALSE( q.empty()); - ASSERT_CONTAINER_SIZE( q, nSize ); + for ( unsigned pass = 0; pass < 3; ++pass ) { + for ( size_t i = 0; i < nSize; ++i ) { + it = static_cast( i ); + ASSERT_TRUE( q.push( it ) ); + ASSERT_CONTAINER_SIZE( q, i + 1 ); + } + ASSERT_FALSE( q.empty() ); + ASSERT_CONTAINER_SIZE( q, nSize ); - for ( size_t i = 0; i < nSize; ++i ) { - it = -1; - ASSERT_TRUE( q.pop( it )); - ASSERT_EQ( it, static_cast( i )); - ASSERT_CONTAINER_SIZE( q, nSize - i - 1 ); + for ( size_t i = 0; i < nSize; ++i ) { + it = -1; + ASSERT_TRUE( q.pop( it ) ); + ASSERT_EQ( it, static_cast( i ) ); + ASSERT_CONTAINER_SIZE( q, nSize - i - 1 ); + } + ASSERT_TRUE( q.empty() ); + ASSERT_CONTAINER_SIZE( q, 0 ); } - ASSERT_TRUE( q.empty()); - ASSERT_CONTAINER_SIZE( q, 0 ); // push/pop with lambda - for ( size_t i = 0; i < nSize; ++i ) { - it = static_cast(i); - ASSERT_NE( it, -1 ); - auto f = [&it]( value_type& dest ) { dest = it; it = -1; }; - if ( i & 1 ) - ASSERT_TRUE( q.enqueue_with( f )); - else - ASSERT_TRUE( q.push_with( f )); - ASSERT_EQ( it, -1 ); - ASSERT_CONTAINER_SIZE( q, i + 1 ); - } - ASSERT_FALSE( q.empty()); - ASSERT_CONTAINER_SIZE( q, nSize ); + for ( unsigned pass = 0; pass < 3; ++pass ) { + for ( size_t i = 0; i < nSize; ++i ) { + it = static_cast( i ); + ASSERT_NE( it, -1 ); + auto f = [&it]( value_type& dest ) { dest = it; it = -1; }; + if ( i & 1 ) + ASSERT_TRUE( q.enqueue_with( f ) ); + else + ASSERT_TRUE( q.push_with( f ) ); + ASSERT_EQ( it, -1 ); + ASSERT_CONTAINER_SIZE( q, i + 1 ); + } + ASSERT_FALSE( q.empty() ); + ASSERT_CONTAINER_SIZE( q, nSize ); - for ( size_t i = 0; i < nSize; ++i ) { - it = -1; - auto f = [&it]( value_type& src ) { it = src; src = -1; }; - if ( i & 1 ) - ASSERT_TRUE( q.pop_with( f )); - else - ASSERT_TRUE( q.dequeue_with( f )); - ASSERT_EQ( it, static_cast( i )); - ASSERT_CONTAINER_SIZE( q, nSize - i - 1 ); + for ( size_t i = 0; i < nSize; ++i ) { + it = -1; + auto f = [&it]( value_type& src ) { it = src; src = -1; }; + if ( i & 1 ) + ASSERT_TRUE( q.pop_with( f ) ); + else + ASSERT_TRUE( q.dequeue_with( f ) ); + ASSERT_EQ( it, static_cast( i ) ); + ASSERT_CONTAINER_SIZE( q, nSize - i - 1 ); + } + ASSERT_TRUE( q.empty() ); + ASSERT_CONTAINER_SIZE( q, 0u ); } - ASSERT_TRUE( q.empty()); - ASSERT_CONTAINER_SIZE( q, 0u ); for ( size_t i = 0; i < nSize; ++i ) { ASSERT_TRUE( q.push( static_cast(i))); diff --git a/test/unit/queue/vyukov_mpmc_queue.cpp b/test/unit/queue/vyukov_mpmc_queue.cpp index 421e36f0..fb145024 100644 --- a/test/unit/queue/vyukov_mpmc_queue.cpp +++ b/test/unit/queue/vyukov_mpmc_queue.cpp @@ -36,7 +36,43 @@ namespace { namespace cc = cds::container; class VyukovMPMCCycleQueue: public cds_test::bounded_queue - {}; + { + public: + template + void test_single_consumer( Queue& q ) + { + typedef typename Queue::value_type value_type; + + const size_t nSize = q.capacity(); + + ASSERT_TRUE( q.empty() ); + ASSERT_CONTAINER_SIZE( q, 0 ); + + // enqueue/dequeue + for ( unsigned pass = 0; pass < 3; ++pass ) { + for ( size_t i = 0; i < nSize; ++i ) { + ASSERT_TRUE( q.enqueue( static_cast( i ) ) ); + ASSERT_CONTAINER_SIZE( q, i + 1 ); + } + ASSERT_FALSE( q.empty() ); + ASSERT_CONTAINER_SIZE( q, nSize ); + ASSERT_FALSE( q.enqueue( static_cast( nSize ) * 2 ) ); + + for ( size_t i = 0; i < nSize; ++i ) { + value_type* fr = q.front(); + ASSERT_TRUE( fr != nullptr ); + ASSERT_EQ( *fr, static_cast( i ) ); + ASSERT_TRUE( q.pop_front() ); + ASSERT_CONTAINER_SIZE( q, nSize - i - 1 ); + } + ASSERT_TRUE( q.empty() ); + ASSERT_CONTAINER_SIZE( q, 0 ); + + 
ASSERT_TRUE( q.front() == nullptr ); + ASSERT_FALSE( q.pop_front() ); + } + } + }; TEST_F( VyukovMPMCCycleQueue, defaulted ) { @@ -130,5 +166,18 @@ namespace { test_string( q ); } + TEST_F( VyukovMPMCCycleQueue, single_consumer ) + { + struct traits: public cds::container::vyukov_queue::traits + { + enum: bool { single_consumer = true }; + }; + typedef cds::container::VyukovMPMCCycleQueue< int, traits > test_queue; + + test_queue q( 128 ); + test( q ); + test_single_consumer( q ); + } + } // namespace diff --git a/test/unit/queue/weak_ringbuffer.cpp b/test/unit/queue/weak_ringbuffer.cpp index 64d97b6c..7718dad0 100644 --- a/test/unit/queue/weak_ringbuffer.cpp +++ b/test/unit/queue/weak_ringbuffer.cpp @@ -31,6 +31,7 @@ #include "test_bounded_queue.h" #include +#include namespace { namespace cc = cds::container; @@ -50,50 +51,52 @@ namespace { { value_type el[nArrSize]; - // batch push - for ( size_t i = 0; i < nSize; i += nArrSize ) { - for ( size_t k = 0; k < nArrSize; ++k ) - el[k] = static_cast( i + k ); + for ( unsigned pass = 0; pass < 3; ++pass ) { + // batch push + for ( size_t i = 0; i < nSize; i += nArrSize ) { + for ( size_t k = 0; k < nArrSize; ++k ) + el[k] = static_cast( i + k ); + + if ( i + nArrSize <= nSize ) { + ASSERT_TRUE( q.push( el, nArrSize ) ); + } + else { + ASSERT_FALSE( q.push( el, nArrSize ) ); + } + } - if ( i + nArrSize <= nSize ) { - ASSERT_TRUE( q.push( el, nArrSize ) ); + ASSERT_TRUE( !q.empty() ); + if ( nSize % nArrSize != 0 ) { + ASSERT_FALSE( q.full() ); + ASSERT_CONTAINER_SIZE( q, nArrCount * nArrSize ); + for ( size_t i = nArrCount * nArrSize; i < nSize; ++i ) { + ASSERT_TRUE( q.enqueue( static_cast( i ) ) ); + } } - else { - ASSERT_FALSE( q.push( el, nArrSize ) ); + ASSERT_TRUE( q.full() ); + ASSERT_CONTAINER_SIZE( q, nSize ); + + // batch pop + value_type expected = 0; + while ( q.pop( el, nArrSize ) ) { + for ( size_t i = 0; i < nArrSize; ++i ) { + ASSERT_EQ( el[i], expected ); + ++expected; + } } - } - ASSERT_TRUE( !q.empty() ); - if ( nSize % nArrSize != 0 ) { - ASSERT_FALSE( q.full() ); - ASSERT_CONTAINER_SIZE( q, nArrCount * nArrSize ); - for ( size_t i = nArrCount * nArrSize; i < nSize; ++i ) { - ASSERT_TRUE( q.enqueue( static_cast( i ) ) ); + if ( nSize % nArrSize == 0 ) { + ASSERT_TRUE( q.empty() ); } - } - ASSERT_TRUE( q.full() ); - ASSERT_CONTAINER_SIZE( q, nSize ); - - // batch pop - value_type expected = 0; - while ( q.pop( el, nArrSize ) ) { - for ( size_t i = 0; i < nArrSize; ++i ) { - ASSERT_EQ( el[i], expected ); - ++expected; + else { + ASSERT_FALSE( q.empty() ); + ASSERT_CONTAINER_SIZE( q, nSize % nArrSize ); + q.clear(); } - } - - if ( nSize % nArrSize == 0 ) { ASSERT_TRUE( q.empty() ); + ASSERT_FALSE( q.full() ); + ASSERT_CONTAINER_SIZE( q, 0u ); } - else { - ASSERT_FALSE( q.empty() ); - ASSERT_CONTAINER_SIZE( q, nSize % nArrSize ); - q.clear(); - } - ASSERT_TRUE( q.empty() ); - ASSERT_FALSE( q.full() ); - ASSERT_CONTAINER_SIZE( q, 0u ); } { @@ -101,92 +104,137 @@ namespace { size_t el[nArrSize]; auto func_push = []( value_type& dest, size_t src ) { dest = static_cast( src * 10 ); }; - for ( size_t i = 0; i < nSize; i += nArrSize ) { - for ( size_t k = 0; k < nArrSize; ++k ) - el[k] = i + k; - if ( i + nArrSize <= nSize ) { - ASSERT_TRUE( q.push( el, nArrSize, func_push ) ); + for ( unsigned pass = 0; pass < 3; ++pass ) { + for ( size_t i = 0; i < nSize; i += nArrSize ) { + for ( size_t k = 0; k < nArrSize; ++k ) + el[k] = i + k; + + if ( i + nArrSize <= nSize ) { + ASSERT_TRUE( q.push( el, nArrSize, func_push ) ); + } + else { + 
ASSERT_FALSE( q.push( el, nArrSize, func_push ) ); + } } - else { - ASSERT_FALSE( q.push( el, nArrSize, func_push ) ); - } - } - ASSERT_TRUE( !q.empty() ); - if ( nSize % nArrSize != 0 ) { - ASSERT_FALSE( q.full() ); - ASSERT_CONTAINER_SIZE( q, nArrCount * nArrSize ); - for ( size_t i = nArrCount * nArrSize; i < nSize; ++i ) { - ASSERT_TRUE( q.push( &i, 1, func_push ) ); + ASSERT_TRUE( !q.empty() ); + if ( nSize % nArrSize != 0 ) { + ASSERT_FALSE( q.full() ); + ASSERT_CONTAINER_SIZE( q, nArrCount * nArrSize ); + for ( size_t i = nArrCount * nArrSize; i < nSize; ++i ) { + ASSERT_TRUE( q.push( &i, 1, func_push ) ); + } } - } - ASSERT_TRUE( q.full() ); - ASSERT_CONTAINER_SIZE( q, nSize ); - - // batch pop with functor - auto func_pop = []( size_t& dest, value_type src ) { dest = static_cast( src / 10 ); }; - size_t expected = 0; - while ( q.pop( el, nArrSize, func_pop ) ) { - for ( size_t i = 0; i < nArrSize; ++i ) { - ASSERT_EQ( el[i], expected ); - ++expected; + ASSERT_TRUE( q.full() ); + ASSERT_CONTAINER_SIZE( q, nSize ); + + // batch pop with functor + auto func_pop = []( size_t& dest, value_type src ) { dest = static_cast( src / 10 ); }; + size_t expected = 0; + while ( q.pop( el, nArrSize, func_pop ) ) { + for ( size_t i = 0; i < nArrSize; ++i ) { + ASSERT_EQ( el[i], expected ); + ++expected; + } } - } - if ( nSize % nArrSize == 0 ) { - ASSERT_TRUE( q.empty() ); - } - else { - ASSERT_FALSE( q.empty() ); - ASSERT_CONTAINER_SIZE( q, nSize % nArrSize ); - size_t v; - while ( q.pop( &v, 1, func_pop ) ) { - ASSERT_EQ( v, expected ); - ++expected; + if ( nSize % nArrSize == 0 ) { + ASSERT_TRUE( q.empty() ); } + else { + ASSERT_FALSE( q.empty() ); + ASSERT_CONTAINER_SIZE( q, nSize % nArrSize ); + size_t v; + while ( q.pop( &v, 1, func_pop ) ) { + ASSERT_EQ( v, expected ); + ++expected; + } + } + ASSERT_TRUE( q.empty() ); + ASSERT_FALSE( q.full() ); + ASSERT_CONTAINER_SIZE( q, 0u ); } - ASSERT_TRUE( q.empty() ); - ASSERT_FALSE( q.full() ); - ASSERT_CONTAINER_SIZE( q, 0u ); // front/pop_front - for ( size_t i = 0; i < nSize; i += nArrSize ) { - for ( size_t k = 0; k < nArrSize; ++k ) - el[k] = i + k; + for ( unsigned pass = 0; pass < 3; ++pass ) { + for ( size_t i = 0; i < nSize; i += nArrSize ) { + for ( size_t k = 0; k < nArrSize; ++k ) + el[k] = i + k; + + if ( i + nArrSize <= nSize ) { + ASSERT_TRUE( q.push( el, nArrSize, func_push ) ); + } + else { + ASSERT_FALSE( q.push( el, nArrSize, func_push ) ); + } + } - if ( i + nArrSize <= nSize ) { - ASSERT_TRUE( q.push( el, nArrSize, func_push ) ); + ASSERT_TRUE( !q.empty() ); + if ( nSize % nArrSize != 0 ) { + ASSERT_FALSE( q.full() ); + ASSERT_CONTAINER_SIZE( q, nArrCount * nArrSize ); + for ( size_t i = nArrCount * nArrSize; i < nSize; ++i ) { + ASSERT_TRUE( q.push( &i, 1, func_push ) ); + } } - else { - ASSERT_FALSE( q.push( el, nArrSize, func_push ) ); + ASSERT_TRUE( q.full() ); + ASSERT_CONTAINER_SIZE( q, nSize ); + + value_type cur = 0; + while ( !q.empty() ) { + value_type* front = q.front(); + ASSERT_TRUE( front != nullptr ); + ASSERT_EQ( cur, *front ); + ASSERT_TRUE( q.pop_front() ); + cur += 10; } - } - ASSERT_TRUE( !q.empty() ); - if ( nSize % nArrSize != 0 ) { - ASSERT_FALSE( q.full() ); - ASSERT_CONTAINER_SIZE( q, nArrCount * nArrSize ); - for ( size_t i = nArrCount * nArrSize; i < nSize; ++i ) { - ASSERT_TRUE( q.push( &i, 1, func_push ) ); - } - } - ASSERT_TRUE( q.full() ); - ASSERT_CONTAINER_SIZE( q, nSize ); - - value_type cur = 0; - while ( !q.empty() ) { - value_type* front = q.front(); - ASSERT_TRUE( front != nullptr ); - 
ASSERT_EQ( cur, *front ); - ASSERT_TRUE( q.pop_front() ); - cur += 10; + ASSERT_TRUE( q.empty() ); + ASSERT_TRUE( q.front() == nullptr ); + ASSERT_FALSE( q.pop_front() ); } + } + } + + template + void test_varsize_buffer( Queue& q ) + { + size_t const capacity = q.capacity(); + + ASSERT_TRUE( q.empty() ); + ASSERT_EQ( q.size(), 0u ); + ASSERT_TRUE( q.front().first == nullptr ); + ASSERT_FALSE( q.pop_front() ); + + size_t total_push = 0; + uint8_t chfill = 0; + while ( total_push < capacity * 4 ) { + unsigned buf_size = cds_test::fixture::rand( static_cast( capacity / 4 )) + 1; + total_push += buf_size; + + void* buf = q.back( buf_size ); + ASSERT_TRUE( buf != nullptr ); + + memset( buf, chfill, buf_size ); + q.push_back(); + + ASSERT_GE( q.size(), buf_size ); + + auto pair = q.front(); + ASSERT_TRUE( pair.first != nullptr ); + ASSERT_EQ( pair.second, buf_size ); + for ( size_t i = 0; i < pair.second; ++i ) + ASSERT_EQ( *reinterpret_cast( pair.first ), chfill ); - ASSERT_TRUE( q.empty() ); - ASSERT_TRUE( q.front() == nullptr ); + ASSERT_TRUE( q.pop_front() ); ASSERT_FALSE( q.pop_front() ); } + + ASSERT_TRUE( q.empty() ); + ASSERT_EQ( q.size(), 0u ); + ASSERT_TRUE( q.front().first == nullptr ); + ASSERT_FALSE( q.pop_front() ); } }; @@ -252,4 +300,24 @@ namespace { test_array( q ); } + TEST_F( WeakRingBuffer, var_sized ) + { + typedef cds::container::WeakRingBuffer< void > test_queue; + + test_queue q( 1024 * 64 ); + test_varsize_buffer( q ); + } + + TEST_F( WeakRingBuffer, var_sized_static ) + { + struct traits: public cds::container::weak_ringbuffer::traits + { + typedef cds::opt::v::uninitialized_static_buffer buffer; + }; + typedef cds::container::WeakRingBuffer< void, traits > test_queue; + + test_queue q; + test_varsize_buffer( q ); + } + } // namespace \ No newline at end of file -- 2.34.1
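
The core of the pop_front() fix above is the size encoding, which is easier to see in isolation: every block stored in the untyped ring is preceded by a size_t word whose most significant bit marks a "tail" block (unusable space at the end of the buffer), and the old code computed the block's real size from that raw, possibly flagged value. Below is a minimal, self-contained sketch of that encoding, assuming a 64-bit size_t; it mirrors calc_real_size() and the newly added untail() from the diff, while make_tail() and the bool return type of is_tail() are illustrative choices, not the class's exact private interface.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Payload size rounded up to a uintptr_t boundary, plus the size_t
// header word that stores the (possibly tail-marked) size itself.
static size_t calc_real_size( size_t size )
{
    return (( size + sizeof( uintptr_t ) - 1 ) & ~( sizeof( uintptr_t ) - 1 )) + sizeof( size_t );
}

// Hypothetical name for the helper that sets the tail flag (the diff
// shows only its body): the most significant bit marks a tail block.
static size_t make_tail( size_t size )
{
    return size | ( size_t( 1 ) << ( sizeof( size_t ) * 8 - 1 ));
}

static bool is_tail( size_t size )
{
    return ( size & ( size_t( 1 ) << ( sizeof( size_t ) * 8 - 1 ))) != 0;
}

// untail() - the helper added by this patch: strips the tail flag so
// pop_front() can compute the real block size even for tail blocks.
static size_t untail( size_t size )
{
    return size & (( size_t( 1 ) << ( sizeof( size_t ) * 8 - 1 )) - 1 );
}

int main()
{
    size_t stored = make_tail( 13 );     // a 13-byte payload marked as a tail block
    assert( is_tail( stored ));
    assert( untail( stored ) == 13 );

    // 13 bytes round up to 16, plus the 8-byte header word: 24 on a 64-bit build.
    // Feeding the raw flagged value instead of untail( stored ) would yield a
    // huge bogus size - the bug the patch fixes in pop_front().
    std::printf( "real size = %zu\n", calc_real_size( untail( stored )));
    return 0;
}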
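
The new single_consumer test also doubles as a usage note for VyukovMPMCCycleQueue: when the traits enable single_consumer, the consumer thread may peek the head element with front() and remove it with pop_front(). A minimal sketch of that mode follows; the header path cds/container/vyukov_mpmc_cycle_queue.h and the cds::Initialize()/cds::Terminate() bracketing are assumptions about the surrounding project, not part of this patch.

#include <cds/init.h>
#include <cds/container/vyukov_mpmc_cycle_queue.h>
#include <cassert>

// Traits enabling single-consumer mode, as in the new unit test.
struct sc_traits: public cds::container::vyukov_queue::traits
{
    enum: bool { single_consumer = true };
};

int main()
{
    cds::Initialize();
    {
        // Bounded queue with capacity 128 (an arbitrary value for the sketch).
        cds::container::VyukovMPMCCycleQueue< int, sc_traits > q( 128 );

        assert( q.enqueue( 42 ));

        int* head = q.front();            // peek without removing - single-consumer mode only
        assert( head != nullptr && *head == 42 );

        assert( q.pop_front());           // removes the element front() pointed to
        assert( q.front() == nullptr );   // empty queue: front() yields nullptr
        assert( !q.pop_front());
    }
    cds::Terminate();
    return 0;
}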
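
Likewise, the reworked test_array() exercises the batch interface of the typed WeakRingBuffer: push( arr, count ) transfers count items as one operation and fails when free space is insufficient, while pop( arr, count ) succeeds only when at least count items are stored. A short usage sketch under the same initialization assumptions; the capacity and values are arbitrary.

#include <cds/init.h>
#include <cds/container/weak_ringbuffer.h>
#include <cassert>

int main()
{
    cds::Initialize();
    {
        // SPSC ring buffer of int with a dynamically allocated buffer.
        cds::container::WeakRingBuffer< int > ring( 128 );

        int chunk[4] = { 0, 1, 2, 3 };
        assert( ring.push( chunk, 4 ));   // batch push of 4 elements

        int out[4] = { -1, -1, -1, -1 };
        assert( ring.pop( out, 4 ));      // batch pop of 4 elements, FIFO order preserved
        assert( out[0] == 0 && out[3] == 3 );

        assert( !ring.pop( out, 4 ));     // fewer than 4 elements left - batch pop fails
    }
    cds::Terminate();
    return 0;
}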