2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include "intrusive_stack_type.h"
// Stress-test fixture for intrusive stack push/pop: producer threads push
// pre-allocated intrusive nodes while consumer threads pop them concurrently;
// analyze() then verifies that every pushed node was popped exactly once.
// NOTE(review): this view of the file is a sampled excerpt — lines are
// missing between those shown (the leading number on each line is the
// original file line number).
35 class intrusive_stack_push_pop : public cds_test::stress_fixture
// Test configuration; presumably loaded from test properties in
// SetUpTestCase() — TODO confirm against the full file.
38 static size_t s_nPushThreadCount;      // number of producer threads
39 static size_t s_nPopThreadCount;       // number of consumer threads
40 static size_t s_nStackSize;            // total number of nodes to push (rounded down in do_test)
41 static size_t s_nEliminationSize;      // elimination back-off buffer size (for elimination stacks)
42 static bool s_bFCIterative;            // flat-combining option — semantics defined by the FC stack under test
43 static unsigned int s_nFCCombinePassCount;  // flat-combining: combine passes per operation
44 static unsigned int s_nFCCompactFactor;     // flat-combining: publication-list compaction factor
// Count of producers still running; consumers poll this to decide when the
// test is over (see Consumer's loop condition).
46 static atomics::atomic<size_t> s_nWorkingProducers;
// Size of the per-thread push/pop tally arrays (values are stored modulo this).
48 static constexpr const size_t c_nValArraySize = 1024;
// Sentinel written into each node's nConsumer field before the run; after the
// run, any node still holding it was never popped (checked in do_test).
49 static constexpr const size_t c_nBadConsumer = 0xbadc0ffe;
// Intrusive node type pushed onto the stack under test. Base supplies the
// container hook (e.g. Treiber stack hook) — defaults to `empty`.
60 template <typename Base = empty >
61 struct value_type : public Base
// Value index assigned by the producer: i % c_nValArraySize. Atomic because
// it is written by a producer and read by whichever consumer pops the node.
// NOTE(review): a nConsumer field is also referenced elsewhere (do_test) but
// its declaration falls in a gap of this excerpt — confirm in the full file.
63 atomics::atomic<size_t> nNo;
68 value_type( size_t n ) : nNo( n ) {}
// Producer thread: pushes its assigned slice [m_pStart, m_pEnd) of the
// pre-allocated node array onto the shared stack, tallying how many nodes
// of each value index it pushed.
72 template <class Stack>
73 class Producer : public cds_test::thread
75 typedef cds_test::thread base_class;
// m_arrPush[v] counts nodes pushed whose nNo == v; analyze() sums these
// across producers and subtracts the consumers' pop tallies.
79 size_t m_arrPush[c_nValArraySize];
81 // Interval in m_arrValue
82 typename Stack::value_type * m_pStart;
83 typename Stack::value_type * m_pEnd;
86 Producer( cds_test::thread_pool& pool, Stack& s )
87 : base_class( pool, producer_thread )
// Copy-ctor used by clone(); shares the same stack reference.
94 Producer( Producer& src )
96 , m_Stack( src.m_Stack )
102 virtual thread * clone()
104 return new Producer( *this );
// Thread body (excerpt — some lines missing): zero the tally, then push
// every node in the assigned interval.
110 memset( m_arrPush, 0, sizeof( m_arrPush ));
113 for ( typename Stack::value_type * p = m_pStart; p < m_pEnd; ++p, ++i ) {
// Tag the node with its value index before publishing it via push();
// release ordering so the consumer's acquire load of nNo sees the store.
116 p->nNo.store( no = i % c_nValArraySize, atomics::memory_order_release );
117 if ( m_Stack.push( *p ))
// Signal that this producer has finished; consumers exit once this count
// reaches zero AND the stack is empty.
123 s_nWorkingProducers.fetch_sub( 1, atomics::memory_order_release );
// Consumer thread: pops nodes from the shared stack until all producers have
// finished and the stack is empty, tallying popped value indices.
127 template <class Stack>
128 class Consumer : public cds_test::thread
130 typedef cds_test::thread base_class;
// m_arrPop[v] counts popped nodes whose nNo == v; mirrors Producer::m_arrPush.
135 size_t m_arrPop[c_nValArraySize];
138 Consumer( cds_test::thread_pool& pool, Stack& s )
139 : base_class( pool, consumer_thread )
// Copy-ctor used by clone(); shares the same stack reference.
146 Consumer( Consumer& src )
148 , m_Stack( src.m_Stack )
154 virtual thread * clone()
156 return new Consumer( *this );
// Thread body (excerpt — some lines missing): zero the tally, then drain.
164 memset( m_arrPop, 0, sizeof( m_arrPop ));
// Keep popping while any producer is still running OR the stack is non-empty.
// The acquire load pairs with the producers' release fetch_sub.
166 while ( !(s_nWorkingProducers.load( atomics::memory_order_acquire ) == 0 && m_Stack.empty())) {
167 typename Stack::value_type * p = m_Stack.pop();
// Acquire pairs with the producer's release store of nNo in its push loop.
171 size_t no = p->nNo.load( atomics::memory_order_acquire );
// Guard against a corrupted index before using it to index m_arrPop;
// out-of-range values presumably bump a "dirty pop" counter — the branch
// body falls in a gap of this excerpt, confirm in the full file.
172 if ( no < sizeof( m_arrPop ) / sizeof( m_arrPop[0] ))
// RAII owner of the contiguous node array the producers push from; the
// stack under test never owns the nodes (it is intrusive).
183 template <typename T>
186 std::unique_ptr< T[] > m_pArr;
189 value_array( size_t nSize )
190 : m_pArr( new T[nSize] )
// Non-owning access to the first element; callers index into the array.
193 T * get() const { return m_pArr.get(); }
// Per-suite setup; presumably reads the s_* configuration values from test
// properties — the definition is outside this excerpt, confirm there.
197 static void SetUpTestCase();
198 //static void TearDownTestCase();
// Post-run verification: aggregates per-thread tallies and checks that
// every pushed value index was popped exactly the same number of times,
// then emits the counters as test properties.
201 template <class Stack>
202 void analyze( Stack& /*stack*/ )
204 cds_test::thread_pool& pool = get_pool();
206 size_t nPushError = 0;   // failed push() calls across all producers
207 size_t nPopEmpty = 0;    // pop() calls that found the stack empty
208 size_t nPopCount = 0;    // successful pops across all consumers
// arrVal[v] accumulates pushes of value v minus pops of value v; every
// entry must end at zero.
209 size_t arrVal[c_nValArraySize];
210 memset( arrVal, 0, sizeof( arrVal ));
211 size_t nDirtyPop = 0;    // pops that observed an out-of-range nNo
// Walk every thread in the pool and fold its tallies into the totals.
213 for ( size_t threadNo = 0; threadNo < pool.size(); ++threadNo ) {
214 cds_test::thread& thread = pool.get( threadNo );
215 if ( thread.type() == producer_thread ) {
216 Producer<Stack>& producer = static_cast<Producer<Stack>&>(thread);
217 nPushError += producer.m_nPushError;
218 for ( size_t i = 0; i < sizeof( arrVal ) / sizeof( arrVal[0] ); ++i )
219 arrVal[i] += producer.m_arrPush[i];
// Anything that is not a producer must be a consumer.
222 ASSERT_TRUE( thread.type() == consumer_thread );
223 Consumer<Stack>& consumer = static_cast<Consumer<Stack>&>(thread);
224 nPopEmpty += consumer.m_nPopEmpty;
225 nPopCount += consumer.m_nPopCount;
226 nDirtyPop += consumer.m_nDirtyPop;
227 for ( size_t i = 0; i < sizeof( arrVal ) / sizeof( arrVal[0] ); ++i )
228 arrVal[i] -= consumer.m_arrPop[i];
// Global invariants: all nodes popped, no dirty pops, no push failures.
232 EXPECT_EQ( nPopCount, s_nStackSize );
233 EXPECT_EQ( nDirtyPop, 0u );
234 EXPECT_EQ( nPushError, 0u );
// Per-value balance: pushes and pops of each value index must cancel out.
236 for ( size_t i = 0; i < sizeof( arrVal ) / sizeof( arrVal[0] ); ++i ) {
237 EXPECT_EQ( arrVal[i], 0u ) << "i=" << i;
// Record the counters in the test's property output.
240 propout() << std::make_pair( "push_count", s_nStackSize )
241 << std::make_pair( "push_error", nPushError )
242 << std::make_pair( "pop_count", nPopCount )
243 << std::make_pair( "pop_empty", nPopEmpty )
244 << std::make_pair( "dirty_pop", nDirtyPop )
// Drives one stress run: partitions the node array among producers, marks
// every node with the bad-consumer sentinel, runs the thread pool, then
// verifies every node was consumed and delegates counter checks to analyze().
249 template <typename Stack>
250 void do_test( Stack& stack, value_array<typename Stack::value_type>& arrValue )
252 cds_test::thread_pool& pool = get_pool();
254 s_nWorkingProducers.store( s_nPushThreadCount, atomics::memory_order_release );
// Nodes per producer; integer division may drop a remainder — s_nStackSize
// is rounded down to nPushCount * s_nPushThreadCount after the run.
255 size_t const nPushCount = s_nStackSize / s_nPushThreadCount;
257 typename Stack::value_type * pValStart = arrValue.get();
258 typename Stack::value_type * pValEnd = pValStart + s_nStackSize;
260 pool.add( new Producer<Stack>( pool, stack ), s_nPushThreadCount );
// Pre-mark every node so an un-popped node is detectable after the run.
262 for ( typename Stack::value_type * it = pValStart; it != pValEnd; ++it )
263 it->nConsumer = c_nBadConsumer;
// Hand each producer a disjoint [m_pStart, m_pEnd) slice of the array.
// At this point the pool contains only producers, so iterating pool.size()
// here touches producers only.
265 typename Stack::value_type * pStart = pValStart;
266 for ( size_t thread_no = 0; thread_no < pool.size(); ++thread_no ) {
267 static_cast<Producer<Stack>&>(pool.get( thread_no )).m_pStart = pStart;
268 pStart += nPushCount;
269 static_cast<Producer<Stack>&>(pool.get( thread_no )).m_pEnd = pStart;
// Consumers are added only after the producer slices are assigned.
272 pool.add( new Consumer<Stack>( pool, stack ), s_nPopThreadCount );
274 propout() << std::make_pair( "producer_thread_count", s_nPushThreadCount )
275 << std::make_pair( "consumer_thread_count", s_nPopThreadCount )
276 << std::make_pair( "push_count", nPushCount * s_nPushThreadCount )
// Run all threads to completion and record the elapsed wall time.
279 std::chrono::milliseconds duration = pool.run();
281 propout() << std::make_pair( "duration", duration );
// Round the logical stack size down to what was actually pushed.
283 s_nStackSize = nPushCount * s_nPushThreadCount;
// Every node in the used range must have been claimed by some consumer,
// i.e. its nConsumer sentinel must have been overwritten.
286 typename Stack::value_type * pEnd = pValStart + s_nStackSize;
287 size_t const nBadConsumer = c_nBadConsumer;
288 for ( typename Stack::value_type * it = pValStart; it != pEnd; ++it )
289 EXPECT_NE( it->nConsumer, nBadConsumer );
// Dump container-specific statistics (if the stack collects any).
294 propout() << stack.statistics();
297 } // namespace cds_test