virtual ~thread()
{}
- void join()
- {
- m_impl.join();
- }
-
protected:
virtual thread * clone() = 0;
virtual void test() = 0;
friend class thread_pool;
thread_pool& m_pool;
- int m_type;
- size_t m_id;
- std::thread m_impl;
+ int const m_type;
+ size_t const m_id;
};
// Pool of test threads
class thread_pool
{
+ // One-shot counting barrier: reset() arms it with the number of
+ // participants; each participant then calls wait(), which blocks until
+ // all of them have arrived. wait() returns true only in the last
+ // arriving thread (the one that released the others).
+ class barrier
+ {
+ public:
+ barrier()
+ : m_count( 0 )
+ {}
+
+ // Arm the barrier for 'count' participants.
+ // NOTE(review): assumes no thread is currently blocked in wait() — confirm at call sites.
+ void reset( size_t count )
+ {
+ std::unique_lock< std::mutex > lock( m_mtx );
+ m_count = count;
+ }
+
+ // Block until every participant has called wait().
+ // Returns true in the last thread to arrive, false in all others.
+ bool wait()
+ {
+ std::unique_lock< std::mutex > lock( m_mtx );
+ if ( --m_count == 0 ) {
+ m_cv.notify_all();
+ return true;
+ }
+
+ // Loop guards against spurious wakeups.
+ while ( m_count != 0 )
+ m_cv.wait( lock );
+
+ return false;
+ }
+
+ private:
+ size_t m_count;           // remaining participants yet to arrive
+ std::mutex m_mtx;
+ std::condition_variable m_cv;
+ };
+
+ // Boolean gate: threads block in wait() until ready() opens the gate;
+ // reset() closes it again so the pool can be reused for another run.
+ class initial_gate
+ {
+ public:
+ initial_gate()
+ : m_ready( false )
+ {}
+
+ // Block until ready() has been called (loop guards against spurious wakeups).
+ void wait()
+ {
+ std::unique_lock< std::mutex > lock( m_mtx );
+ while ( !m_ready )
+ m_cv.wait( lock );
+ }
+
+ // Open the gate and wake all waiters.
+ void ready()
+ {
+ std::unique_lock< std::mutex > lock( m_mtx );
+ m_ready = true;
+ m_cv.notify_all();
+ }
+
+ // Close the gate again; subsequent wait() calls will block.
+ void reset()
+ {
+ std::unique_lock< std::mutex > lock( m_mtx );
+ m_ready = false;
+ }
+
+ private:
+ std::mutex m_mtx;
+ std::condition_variable m_cv;
+ bool m_ready;   // true once the pool has finished initialization
+ };
+
public:
explicit thread_pool( ::testing::Test& fixture )
: m_fixture( fixture )
- , m_bRunning( false )
- , m_bStopped( false )
- , m_doneCount( 0 )
, m_bTimeElapsed( false )
- , m_readyCount( 0 )
{}
~thread_pool()
void add( thread * what )
{
- m_threads.push_back( what );
+ m_workers.push_back( what );
}
void add( thread * what, size_t count )
std::chrono::milliseconds run( std::chrono::seconds duration )
{
- m_bStopped = false;
- m_doneCount = 0;
+ m_startBarrier.reset( m_workers.size() + 1 );
+ m_stopBarrier.reset( m_workers.size() + 1 );
- while ( m_readyCount.load() != m_threads.size())
- std::this_thread::yield();
+ // Create threads
+ std::vector< std::thread > threads;
+ threads.reserve( m_workers.size() );
+ for ( auto w : m_workers )
+ threads.emplace_back( &thread::run, w );
+
+ // The pool is initialized
+ m_startPoint.ready();
m_bTimeElapsed.store( false, std::memory_order_release );
auto native_duration = std::chrono::duration_cast<std::chrono::steady_clock::duration>(duration);
+
+ // The pool is ready to start all workers
+ m_startBarrier.wait();
+
auto time_start = std::chrono::steady_clock::now();
auto const expected_end = time_start + native_duration;
- {
- scoped_lock l( m_cvMutex );
- m_bRunning = true;
- m_cvStart.notify_all();
- }
-
if ( duration != std::chrono::seconds::zero()) {
for ( ;; ) {
std::this_thread::sleep_for( native_duration );
}
m_bTimeElapsed.store( true, std::memory_order_release );
- {
- scoped_lock l( m_cvMutex );
- while ( m_doneCount != m_threads.size())
- m_cvDone.wait( l );
- m_bStopped = true;
- }
- auto time_end = std::chrono::steady_clock::now();
+ // Wait until all workers are done
+ m_stopBarrier.wait();
- m_cvStop.notify_all();
+ auto time_end = std::chrono::steady_clock::now();
- for ( auto t : m_threads )
- t->join();
+ for ( auto& t : threads )
+ t.join();
return m_testDuration = std::chrono::duration_cast<std::chrono::milliseconds>(time_end - time_start);
}
- size_t size() const { return m_threads.size(); }
- thread& get( size_t idx ) const { return *m_threads.at( idx ); }
+ size_t size() const { return m_workers.size(); }
+ thread& get( size_t idx ) const { return *m_workers.at( idx ); }
template <typename Fixture>
Fixture& fixture()
void clear()
{
- for ( auto t : m_threads )
+ for ( auto t : m_workers )
delete t;
- m_threads.clear();
- m_bRunning = false;
- m_bStopped = false;
- m_doneCount = 0;
- m_readyCount = 0;
+ m_workers.clear();
+ m_startPoint.reset();
+ }
+
+ // Reset the pool to its initial state; currently equivalent to clear().
+ void reset()
+ {
+ clear();
}
protected: // thread interface
size_t get_next_id()
{
- return m_threads.size();
+ return m_workers.size();
}
- void ready_to_start( thread& /*who*/ )
+ void ready_to_start( thread& /*who*/ )
{
// Called from test thread
- // Wait for all thread created
- scoped_lock l( m_cvMutex );
- m_readyCount.fetch_add( 1 );
- while ( !m_bRunning )
- m_cvStart.wait( l );
+ // Wait until the pool is ready
+ m_startPoint.wait();
+
+ // Wait until all threads are ready
+ m_startBarrier.wait();
}
- void thread_done( thread& /*who*/ )
+ void thread_done( thread& /*who*/ )
{
// Called from test thread
-
- {
- scoped_lock l( m_cvMutex );
- ++m_doneCount;
-
- // Tell pool that the thread is done
- m_cvDone.notify_all();
-
- // Wait for all thread done
- while ( !m_bStopped )
- m_cvStop.wait( l );
- }
+ m_stopBarrier.wait();
}
private:
friend class thread;
::testing::Test& m_fixture;
- std::vector<thread *> m_threads;
+ std::vector<thread *> m_workers;
- typedef std::unique_lock<std::mutex> scoped_lock;
- std::mutex m_cvMutex;
- std::condition_variable m_cvStart;
- std::condition_variable m_cvStop;
- std::condition_variable m_cvDone;
+ initial_gate m_startPoint;
+ barrier m_startBarrier;
+ barrier m_stopBarrier;
- volatile bool m_bRunning;
- volatile bool m_bStopped;
- volatile size_t m_doneCount;
std::atomic<bool> m_bTimeElapsed;
- std::atomic<size_t> m_readyCount;
-
std::chrono::milliseconds m_testDuration;
};
: m_pool( master )
, m_type( type )
, m_id( master.get_next_id())
- , m_impl( &thread::run, this )
{}
inline thread::thread( thread const& sample )
: m_pool( sample.m_pool )
, m_type( sample.m_type )
, m_id( m_pool.get_next_id())
- , m_impl( &thread::run, this )
{}
inline void thread::run()
FeldmanMapArrayBits=4\r
\r
[map_delodd]\r
-MapSize=50000\r
+MapSize=10000\r
InsThreadCount=3\r
DelThreadCount=2\r
ExtractThreadCount=2\r
MaxLoadFactor=4\r
+PassCount=30\r
\r
#Cuckoo map properties\r
CuckooInitialSize=256\r
FeldmanMapArrayBits=4
[map_delodd]
-MapSize=300000
+MapSize=10000
InsThreadCount=2
DelThreadCount=2
ExtractThreadCount=2
MaxLoadFactor=4
+PassCount=40
#Cuckoo map properties
CuckooInitialSize=1024
FeldmanMapArrayBits=4\r
\r
[map_delodd]\r
-MapSize=500000\r
+MapSize=10000\r
InsThreadCount=4\r
DelThreadCount=3\r
ExtractThreadCount=3\r
MaxLoadFactor=4\r
+PassCount=50\r
\r
#Cuckoo map properties\r
CuckooInitialSize=1024\r
\r
\r
[map_delodd]\r
-MapSize=1000000\r
+MapSize=10000\r
InsThreadCount=4\r
DelThreadCount=3\r
ExtractThreadCount=3\r
MaxLoadFactor=4\r
+PassCount=100\r
\r
#Cuckoo map properties\r
CuckooInitialSize=1024\r
namespace map {
- size_t Map_DelOdd::s_nMapSize = 1000000;
+ size_t Map_DelOdd::s_nMapSize = 10000;
size_t Map_DelOdd::s_nInsThreadCount = 4;
size_t Map_DelOdd::s_nDelThreadCount = 4;
size_t Map_DelOdd::s_nExtractThreadCount = 4;
size_t Map_DelOdd::s_nMaxLoadFactor = 8;
+ size_t Map_DelOdd::s_nInsertPassCount = 100;
size_t Map_DelOdd::s_nCuckooInitialSize = 1024;
size_t Map_DelOdd::s_nCuckooProbesetSize = 16;
size_t Map_DelOdd::s_nFeldmanMap_HeadBits = 10;
size_t Map_DelOdd::s_nFeldmanMap_ArrayBits = 4;
-
-
+
size_t Map_DelOdd::s_nLoadFactor = 1;
- std::vector<size_t> Map_DelOdd::m_arrInsert;
- std::vector<size_t> Map_DelOdd::m_arrRemove;
+ std::vector<size_t> Map_DelOdd::m_arrElements;
void Map_DelOdd::SetUpTestCase()
{
if ( s_nMaxLoadFactor == 0 )
s_nMaxLoadFactor = 1;
+ s_nInsertPassCount = cfg.get_size_t( "PassCount", s_nInsertPassCount );
+ if ( s_nInsertPassCount == 0 )
+ s_nInsertPassCount = 100;
+
s_nCuckooInitialSize = cfg.get_size_t( "CuckooInitialSize", s_nCuckooInitialSize );
if ( s_nCuckooInitialSize < 256 )
s_nCuckooInitialSize = 256;
if ( s_nFeldmanMap_ArrayBits == 0 )
s_nFeldmanMap_ArrayBits = 2;
-
- m_arrInsert.resize( s_nMapSize );
- m_arrRemove.resize( s_nMapSize );
- for ( size_t i = 0; i < s_nMapSize; ++i ) {
- m_arrInsert[i] = i;
- m_arrRemove[i] = i;
- }
- shuffle( m_arrInsert.begin(), m_arrInsert.end());
- shuffle( m_arrRemove.begin(), m_arrRemove.end());
+ m_arrElements.resize( s_nMapSize );
+ for ( size_t i = 0; i < s_nMapSize; ++i )
+ m_arrElements[i] = i;;
+ shuffle( m_arrElements.begin(), m_arrElements.end() );
}
void Map_DelOdd::TearDownTestCase()
{
- m_arrInsert.clear();
- m_arrRemove.clear();
+ m_arrElements.clear();
}
std::vector<size_t> Map_DelOdd_LF::get_load_factors()
};
static_assert(sizeof( key_thread ) % 8 == 0, "Key size mismatch!!!");
- }
+ } // namespace
template <>
struct cmp<key_thread> {
static size_t s_nExtractThreadCount; // extract thread count
static size_t s_nMapSize; // max map size
static size_t s_nMaxLoadFactor; // maximum load factor
+ static size_t s_nInsertPassCount;
static size_t s_nCuckooInitialSize; // initial size for CuckooMap
static size_t s_nCuckooProbesetSize; // CuckooMap probeset size (only for list-based probeset)
static size_t s_nLoadFactor; // current load factor
- static std::vector<size_t> m_arrInsert;
- static std::vector<size_t> m_arrRemove;
+ static std::vector<size_t> m_arrElements;
static void SetUpTestCase();
static void TearDownTestCase();
+ // Fill 'arr' with the elements of the shared m_arrElements that satisfy
+ // 'pred', then shuffle the result into random order. Each worker thread
+ // builds its own private copy so iteration order differs per thread.
+ template <typename Pred>
+ static void prepare_array( std::vector<size_t>& arr, Pred pred )
+ {
+ arr.reserve( m_arrElements.size() );
+ for ( auto el : m_arrElements ) {
+ if ( pred( el ) )
+ arr.push_back( el );
+ }
+ // NOTE(review): resize(arr.size()) is a no-op — shrink_to_fit() was probably intended
+ arr.resize( arr.size() );
+ shuffle( arr.begin(), arr.end() );
+ }
+
protected:
typedef key_thread key_type;
typedef size_t value_type;
typedef cds_test::thread base_class;
Map& m_Map;
- struct ensure_func
+ struct update_func
{
template <typename Q>
void operator()( bool /*bNew*/, Q const& ) const
void operator()( Q&, Q*) const
{}
};
+
+ // Build this inserter's private element array (all keys, shuffled) and
+ // pre-populate the map with them, counting initial-insert successes and
+ // failures separately from the test-phase counters.
+ void init_data()
+ {
+ prepare_array( m_arr, []( size_t ) -> bool { return true; } );
+ for ( size_t i = 0; i < m_arr.size(); ++i ) {
+ if ( m_Map.insert( key_type( m_arr[i], id() ) ) )
+ ++m_nInsertInitSuccess;
+ else
+ ++m_nInsertInitFailed;
+ }
+ }
+
public:
size_t m_nInsertSuccess = 0;
size_t m_nInsertFailed = 0;
+ size_t m_nInsertInitSuccess = 0;
+ size_t m_nInsertInitFailed = 0;
+
+ std::vector<size_t> m_arr;
public:
Inserter( cds_test::thread_pool& pool, Map& map )
: base_class( pool, inserter_thread )
, m_Map( map )
- {}
+ {
+ init_data();
+ }
Inserter( Inserter& src )
: base_class( src )
, m_Map( src.m_Map )
- {}
+ {
+ init_data();
+ }
virtual thread * clone()
{
Map& rMap = m_Map;
Map_DelOdd& fixture = pool().template fixture<Map_DelOdd>();
- std::vector<size_t>& arrData = fixture.m_arrInsert;
- for ( size_t i = 0; i < arrData.size(); ++i ) {
- if ( rMap.insert( key_type( arrData[i], id())))
- ++m_nInsertSuccess;
- else
- ++m_nInsertFailed;
- }
-
- ensure_func f;
- for ( size_t i = arrData.size() - 1; i > 0; --i ) {
- if ( arrData[i] & 1 ) {
- rMap.update( key_type( arrData[i], id()), f );
+ update_func f;
+
+ for ( size_t nPass = 0; nPass < s_nInsertPassCount; ++nPass ) {
+ if ( nPass & 1 ) {
+ // insert pass
+ for ( auto el : m_arr ) {
+ if ( el & 1 ) {
+ if ( rMap.insert( key_type( el, id() )))
+ ++m_nInsertSuccess;
+ else
+ ++m_nInsertFailed;
+ }
+ }
+ }
+ else {
+ // update pass
+ for ( auto el : m_arr ) {
+ if ( el & 1 ) {
+ bool success;
+ bool inserted;
+ std::tie( success, inserted ) = rMap.update( key_type( el, id() ), f );
+ if ( success && inserted )
+ ++m_nInsertSuccess;
+ else
+ ++m_nInsertFailed;
+ }
+ }
}
}
- fixture.m_nInsThreadCount.fetch_sub( 1, atomics::memory_order_acquire );
+ fixture.m_nInsThreadCount.fetch_sub( 1, atomics::memory_order_release );
+ m_arr.resize( 0 );
}
};
typedef cds_test::thread base_class;
Map& m_Map;
+ // Collect the odd keys of the shared array into this deleter's private,
+ // shuffled m_arr.
+ void init_data()
+ {
+ prepare_array( m_arr, []( size_t el ) ->bool { return ( el & 1 ) != 0; } );
+ }
+
public:
size_t m_nDeleteSuccess = 0;
size_t m_nDeleteFailed = 0;
+ std::vector<size_t> m_arr;
+
public:
Deleter( cds_test::thread_pool& pool, Map& map )
: base_class( pool, deleter_thread )
, m_Map( map )
- {}
+ {
+ init_data();
+ }
+
Deleter( Deleter& src )
: base_class( src )
, m_Map( src.m_Map )
- {}
+ {
+ init_data();
+ }
virtual thread * clone()
{
return new Deleter( *this );
}
- template <typename MapType, bool>
- struct eraser {
- static bool erase(MapType& map, size_t key, size_t /*insThread*/)
- {
- return map.erase_with(key, key_less());
- }
- };
-
- template <typename MapType>
- struct eraser<MapType, true>
- {
- static bool erase(MapType& map, size_t key, size_t insThread)
- {
- return map.erase(key_type(key, insThread));
- }
- };
-
virtual void test()
{
Map& rMap = m_Map;
Map_DelOdd& fixture = pool().template fixture<Map_DelOdd>();
size_t const nInsThreadCount = s_nInsThreadCount;
- for ( size_t pass = 0; pass < 2; pass++ ) {
- std::vector<size_t>& arrData = fixture.m_arrRemove;
+ do {
if ( id() & 1 ) {
- for ( size_t k = 0; k < nInsThreadCount; ++k ) {
- for ( size_t i = 0; i < arrData.size(); ++i ) {
- if ( arrData[i] & 1 ) {
- if ( Map::c_bEraseExactKey ) {
- for (size_t key = 0; key < nInsThreadCount; ++key) {
- if ( eraser<Map, Map::c_bEraseExactKey>::erase( rMap, arrData[i], key ))
- ++m_nDeleteSuccess;
- else
- ++m_nDeleteFailed;
- }
- }
- else {
- if ( eraser<Map, Map::c_bEraseExactKey>::erase(rMap, arrData[i], 0))
- ++m_nDeleteSuccess;
- else
- ++m_nDeleteFailed;
- }
- }
+ for ( auto el: m_arr ) {
+ for ( size_t k = 0; k < nInsThreadCount; ++k ) {
+ if ( rMap.erase( key_type( el, k )))
+ ++m_nDeleteSuccess;
+ else
+ ++m_nDeleteFailed;
}
- if ( fixture.m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
- break;
}
}
else {
for ( size_t k = 0; k < nInsThreadCount; ++k ) {
- for ( size_t i = arrData.size() - 1; i > 0; --i ) {
- if ( arrData[i] & 1 ) {
- if ( Map::c_bEraseExactKey ) {
- for (size_t key = 0; key < nInsThreadCount; ++key) {
- if (eraser<Map, Map::c_bEraseExactKey>::erase(rMap, arrData[i], key))
- ++m_nDeleteSuccess;
- else
- ++m_nDeleteFailed;
- }
- }
- else {
- if (eraser<Map, Map::c_bEraseExactKey>::erase(rMap, arrData[i], 0))
- ++m_nDeleteSuccess;
- else
- ++m_nDeleteFailed;
- }
- }
+ for ( auto el: m_arr ) {
+ if ( rMap.erase( key_type( el, k ) ) )
+ ++m_nDeleteSuccess;
+ else
+ ++m_nDeleteFailed;
}
- if ( fixture.m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
- break;
}
}
- }
+ } while ( fixture.m_nInsThreadCount.load( atomics::memory_order_acquire ) != 0 );
+
+ m_arr.resize( 0 );
}
};
typedef cds_test::thread base_class;
Map& m_Map;
+ // Collect the odd keys of the shared array into this extractor's private,
+ // shuffled m_arr.
+ void init_data()
+ {
+ prepare_array( m_arr, []( size_t el ) ->bool { return ( el & 1 ) != 0; } );
+ }
+
public:
size_t m_nDeleteSuccess = 0;
size_t m_nDeleteFailed = 0;
+ std::vector<size_t> m_arr;
+
public:
Extractor( cds_test::thread_pool& pool, Map& map )
: base_class( pool, extractor_thread )
, m_Map( map )
- {}
+ {
+ init_data();
+ }
Extractor( Extractor& src )
: base_class( src )
, m_Map( src.m_Map )
- {}
+ {
+ init_data();
+ }
virtual thread * clone()
{
return new Extractor( *this );
}
- template <typename MapType, bool>
- struct extractor {
- static typename Map::guarded_ptr extract(MapType& map, size_t key, size_t /*insThread*/)
- {
- return map.extract_with(key, key_less());
- }
- };
-
- template <typename MapType>
- struct extractor<MapType, true>
- {
- static typename Map::guarded_ptr extract(MapType& map, size_t key, size_t insThread)
- {
- return map.extract(key_type(key, insThread));
- }
- };
-
virtual void test()
{
Map& rMap = m_Map;
Map_DelOdd& fixture = pool().template fixture<Map_DelOdd>();
size_t const nInsThreadCount = s_nInsThreadCount;
- for ( size_t pass = 0; pass < 2; ++pass ) {
- std::vector<size_t>& arrData = fixture.m_arrRemove;
+ do {
if ( id() & 1 ) {
- for ( size_t k = 0; k < nInsThreadCount; ++k ) {
- for ( size_t i = 0; i < arrData.size(); ++i ) {
- if ( arrData[i] & 1 ) {
- gp = extractor< Map, Map::c_bEraseExactKey >::extract( rMap, arrData[i], k );
- if ( gp )
- ++m_nDeleteSuccess;
- else
- ++m_nDeleteFailed;
- gp.release();
- }
+ for ( auto el : m_arr ) {
+ for ( size_t k = 0; k < nInsThreadCount; ++k ) {
+ gp = rMap.extract( key_type( el, k ));
+ if ( gp )
+ ++m_nDeleteSuccess;
+ else
+ ++m_nDeleteFailed;
+ gp.release();
}
- if ( fixture.m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
- break;
}
}
else {
for ( size_t k = 0; k < nInsThreadCount; ++k ) {
- for ( size_t i = arrData.size() - 1; i > 0; --i ) {
- if ( arrData[i] & 1 ) {
- gp = extractor< Map, Map::c_bEraseExactKey >::extract( rMap, arrData[i], k);
- if ( gp )
- ++m_nDeleteSuccess;
- else
- ++m_nDeleteFailed;
- gp.release();
- }
+ for ( auto el: m_arr ) {
+ gp = rMap.extract( key_type( el, k ) );
+ if ( gp )
+ ++m_nDeleteSuccess;
+ else
+ ++m_nDeleteFailed;
+ gp.release();
}
- if ( fixture.m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
- break;
}
}
- }
+ } while ( fixture.m_nInsThreadCount.load( atomics::memory_order_acquire ) != 0 );
+
+ m_arr.resize( 0 );
}
};
typedef cds_test::thread base_class;
Map& m_Map;
+ // Collect the odd keys of the shared array into this extractor's private,
+ // shuffled m_arr.
+ void init_data()
+ {
+ prepare_array( m_arr, []( size_t el ) -> bool { return ( el & 1 ) != 0; } );
+ }
+
public:
size_t m_nDeleteSuccess = 0;
size_t m_nDeleteFailed = 0;
+ std::vector<size_t> m_arr;
+
public:
Extractor( cds_test::thread_pool& pool, Map& map )
: base_class( pool, extractor_thread )
, m_Map( map )
- {}
+ {
+ init_data();
+ }
Extractor( Extractor& src )
: base_class( src )
, m_Map( src.m_Map )
- {}
+ {
+ init_data();
+ }
virtual thread * clone()
{
return new Extractor( *this );
}
- template <typename MapType, bool>
- struct extractor {
- static typename Map::exempt_ptr extract( MapType& map, size_t key, size_t /*insThread*/ )
- {
- return map.extract_with( key, key_less());
- }
- };
-
- template <typename MapType>
- struct extractor<MapType, true>
- {
- static typename Map::exempt_ptr extract(MapType& map, size_t key, size_t insThread)
- {
- return map.extract( key_type(key, insThread));
- }
- };
-
virtual void test()
{
Map& rMap = m_Map;
typename Map::exempt_ptr xp;
size_t const nInsThreadCount = s_nInsThreadCount;
- std::vector<size_t>& arrData = fixture.m_arrRemove;
- if ( id() & 1 ) {
- for ( size_t k = 0; k < nInsThreadCount; ++k ) {
- for ( size_t i = 0; i < arrData.size(); ++i ) {
- if ( arrData[i] & 1 ) {
+ do {
+ if ( id() & 1 ) {
+ for ( size_t k = 0; k < nInsThreadCount; ++k ) {
+ for ( auto el: m_arr ) {
if ( Map::c_bExtractLockExternal ) {
- {
- typename Map::rcu_lock l;
- xp = extractor<Map, Map::c_bEraseExactKey>::extract( rMap, arrData[i], k );
- if ( xp )
- ++m_nDeleteSuccess;
- else
- ++m_nDeleteFailed;
- }
+ typename Map::rcu_lock l;
+ xp = rMap.extract( key_type( el, k ) );
+ if ( xp )
+ ++m_nDeleteSuccess;
+ else
+ ++m_nDeleteFailed;
}
else {
- xp = extractor<Map, Map::c_bEraseExactKey>::extract( rMap, arrData[i], k);
+ xp = rMap.extract( key_type( el, k ) );
if ( xp )
++m_nDeleteSuccess;
else
xp.release();
}
}
- if ( fixture.m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
- break;
}
- }
- else {
- for ( size_t k = 0; k < nInsThreadCount; ++k ) {
- for ( size_t i = arrData.size() - 1; i > 0; --i ) {
- if ( arrData[i] & 1 ) {
+ else {
+ for ( auto el : m_arr ) {
+ for ( size_t k = 0; k < nInsThreadCount; ++k ) {
if ( Map::c_bExtractLockExternal ) {
- {
- typename Map::rcu_lock l;
- xp = extractor<Map, Map::c_bEraseExactKey>::extract(rMap, arrData[i], k);
- if ( xp )
- ++m_nDeleteSuccess;
- else
- ++m_nDeleteFailed;
- }
+ typename Map::rcu_lock l;
+ xp = rMap.extract( key_type( el, k ) );
+ if ( xp )
+ ++m_nDeleteSuccess;
+ else
+ ++m_nDeleteFailed;
}
else {
- xp = extractor<Map, Map::c_bEraseExactKey>::extract(rMap, arrData[i], k);
+ xp = rMap.extract( key_type( el, k ) );
if ( xp )
++m_nDeleteSuccess;
else
xp.release();
}
}
- if ( fixture.m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
- break;
}
- }
+ } while ( fixture.m_nInsThreadCount.load( atomics::memory_order_acquire ) != 0 );
+
+ m_arr.resize( 0 );
}
};
propout() << std::make_pair( "insert_thread_count", s_nInsThreadCount )
<< std::make_pair( "delete_thread_count", s_nDelThreadCount )
- << std::make_pair( "map_size", s_nMapSize );
+ << std::make_pair( "map_size", s_nMapSize )
+ << std::make_pair( "pass_count", s_nInsertPassCount );
std::chrono::milliseconds duration = pool.run();
propout() << std::make_pair( "duration", duration );
+ size_t nInsertInitFailed = 0;
+ size_t nInsertInitSuccess = 0;
size_t nInsertSuccess = 0;
size_t nInsertFailed = 0;
size_t nDeleteSuccess = 0;
insert_thread& inserter = static_cast<insert_thread&>(thr);
nInsertSuccess += inserter.m_nInsertSuccess;
nInsertFailed += inserter.m_nInsertFailed;
+ nInsertInitSuccess += inserter.m_nInsertInitSuccess;
+ nInsertInitFailed += inserter.m_nInsertInitFailed;
}
else {
assert( thr.type() == deleter_thread );
}
}
- EXPECT_EQ( nInsertSuccess, s_nMapSize * s_nInsThreadCount );
- EXPECT_EQ( nInsertFailed, 0u );
+ size_t const nInitialOddKeys = ( s_nMapSize * s_nInsThreadCount ) / 2;
+
+ EXPECT_EQ( nInsertInitFailed, 0u );
+ EXPECT_EQ( nInsertInitSuccess, s_nMapSize * s_nInsThreadCount );
+ EXPECT_GE( nInsertSuccess + nInitialOddKeys, nDeleteSuccess );
+ EXPECT_LE( nInsertSuccess, nDeleteSuccess );
propout()
+ << std::make_pair( "insert_init_success", nInsertInitSuccess )
+ << std::make_pair( "insert_init_failed", nInsertInitFailed )
<< std::make_pair( "insert_success", nInsertSuccess )
<< std::make_pair( "insert_failed", nInsertFailed )
<< std::make_pair( "delete_success", nDeleteSuccess )
propout() << std::make_pair( "insert_thread_count", s_nInsThreadCount )
<< std::make_pair( "delete_thread_count", s_nDelThreadCount )
<< std::make_pair( "extract_thread_count", s_nExtractThreadCount )
- << std::make_pair( "map_size", s_nMapSize );
+ << std::make_pair( "map_size", s_nMapSize )
+ << std::make_pair( "pass_count", s_nInsertPassCount );
std::chrono::milliseconds duration = pool.run();
propout() << std::make_pair( "duration", duration );
+ size_t nInsertInitFailed = 0;
+ size_t nInsertInitSuccess = 0;
size_t nInsertSuccess = 0;
size_t nInsertFailed = 0;
size_t nDeleteSuccess = 0;
insert_thread& inserter = static_cast<insert_thread&>(thr);
nInsertSuccess += inserter.m_nInsertSuccess;
nInsertFailed += inserter.m_nInsertFailed;
+ nInsertInitSuccess += inserter.m_nInsertInitSuccess;
+ nInsertInitFailed += inserter.m_nInsertInitFailed;
}
break;
case deleter_thread:
}
}
- EXPECT_EQ( nInsertSuccess, s_nMapSize * s_nInsThreadCount );
- EXPECT_EQ( nInsertFailed, 0u );
+ size_t const nInitialOddKeys = ( s_nMapSize * s_nInsThreadCount ) / 2;
+
+ EXPECT_EQ( nInsertInitFailed, 0u );
+ EXPECT_EQ( nInsertInitSuccess, s_nMapSize * s_nInsThreadCount );
+ EXPECT_GE( nInsertSuccess + nInitialOddKeys, nDeleteSuccess + nExtractSuccess );
+ EXPECT_LE( nInsertSuccess, nDeleteSuccess + nExtractSuccess );
propout()
+ << std::make_pair( "insert_init_success", nInsertInitSuccess )
+ << std::make_pair( "insert_init_failed", nInsertInitFailed )
<< std::make_pair( "insert_success", nInsertSuccess )
<< std::make_pair( "insert_failed", nInsertFailed )
<< std::make_pair( "delete_success", nDeleteSuccess )
{
static_assert( Map::c_bExtractSupported, "Map class must support extract() method" );
+ size_t nMapSize = s_nMapSize;
+ s_nMapSize *= s_nInsThreadCount;
+
Map testMap( *this );
+
+ s_nMapSize = nMapSize;
do_test_extract( testMap );
}
template <class Map>
void run_test()
{
+ size_t nMapSize = s_nMapSize;
+ s_nMapSize *= s_nInsThreadCount;
+
Map testMap( *this );
+
+ s_nMapSize = nMapSize;
do_test( testMap );
}
+
+ template <class Map>
+ void run_feldman();
};
class Map_DelOdd_LF: public Map_DelOdd
namespace map {
- namespace {
- class Map_DelOdd2: public map::Map_DelOdd {
- public:
- template <typename Map>
- void run()
- {
- typedef typename Map::traits original_traits;
- struct traits: public original_traits {
- enum { hash_size = sizeof( uint32_t ) + sizeof( uint16_t ) };
- };
- typedef typename Map::template rebind_traits< traits >::result map_type;
-
- run_test_extract<map_type>();
- }
+ template <class Map>
+ void Map_DelOdd::run_feldman()
+ {
+ typedef typename Map::traits original_traits;
+ struct traits: public original_traits {
+ enum { hash_size = sizeof( uint32_t ) + sizeof( uint16_t ) };
};
+ typedef typename Map::template rebind_traits< traits >::result map_type;
- CDSSTRESS_FeldmanHashMap_fixed( Map_DelOdd2, run, key_thread, size_t )
+ run_test_extract<map_type>();
}
+ CDSSTRESS_FeldmanHashMap_fixed( Map_DelOdd, run_feldman, key_thread, size_t )
+
} // namespace map
size_t Set_DelOdd::s_nDelThreadCount = 4;
size_t Set_DelOdd::s_nExtractThreadCount = 4;
size_t Set_DelOdd::s_nMaxLoadFactor = 8;
+ size_t Set_DelOdd::s_nInsertPassCount = 100;
size_t Set_DelOdd::s_nCuckooInitialSize = 1024;
size_t Set_DelOdd::s_nCuckooProbesetSize = 16;
if ( s_nMaxLoadFactor == 0 )
s_nMaxLoadFactor = 1;
+ s_nInsertPassCount = cfg.get_size_t( "PassCount", s_nInsertPassCount );
+ if ( s_nInsertPassCount == 0 )
+ s_nInsertPassCount = 100;
+
s_nCuckooInitialSize = cfg.get_size_t( "CuckooInitialSize", s_nCuckooInitialSize );
if ( s_nCuckooInitialSize < 256 )
s_nCuckooInitialSize = 256;
static size_t s_nDelThreadCount; // delete thread count
static size_t s_nExtractThreadCount; // extract thread count
static size_t s_nMaxLoadFactor; // maximum load factor
+ static size_t s_nInsertPassCount;
static size_t s_nCuckooInitialSize; // initial size for CuckooSet
static size_t s_nCuckooProbesetSize; // CuckooSet probeset size (only for list-based probeset)
static void SetUpTestCase();
static void TearDownTestCase();
+ // Fill 'arr' with the elements of the shared m_arrData that satisfy
+ // 'pred', then shuffle the result into random order. Each worker thread
+ // builds its own private copy so iteration order differs per thread.
+ template <typename Pred>
+ static void prepare_array( std::vector<size_t>& arr, Pred pred )
+ {
+ arr.reserve( m_arrData.size() );
+ for ( auto el : m_arrData ) {
+ if ( pred( el ) )
+ arr.push_back( el );
+ }
+ // NOTE(review): resize(arr.size()) is a no-op — shrink_to_fit() was probably intended
+ arr.resize( arr.size() );
+ shuffle( arr.begin(), arr.end() );
+ }
+
protected:
typedef key_thread key_type;
typedef size_t value_type;
void operator()(key_value_pair& /*cur*/, key_value_pair * /*prev*/) const
{}
};
+
+ // Build this inserter's private element array (all keys, shuffled) and
+ // pre-populate the set with them, counting initial-insert successes and
+ // failures separately from the test-phase counters.
+ void init_data()
+ {
+ prepare_array( m_arr, []( size_t ) -> bool { return true; } );
+ for ( size_t i = 0; i < m_arr.size(); ++i ) {
+ if ( m_Set.insert( key_type( m_arr[i], id() ) ) )
+ ++m_nInsertInitSuccess;
+ else
+ ++m_nInsertInitFailed;
+ }
+ }
+
public:
size_t m_nInsertSuccess = 0;
size_t m_nInsertFailed = 0;
+ size_t m_nInsertInitSuccess = 0;
+ size_t m_nInsertInitFailed = 0;
+
+ std::vector<size_t> m_arr;
public:
Inserter( cds_test::thread_pool& pool, Set& set )
: base_class( pool, inserter_thread )
, m_Set( set )
- {}
+ {
+ init_data();
+ }
Inserter( Inserter& src )
: base_class( src )
, m_Set( src.m_Set )
- {}
+ {
+ init_data();
+ }
virtual thread * clone()
{
Set& rSet = m_Set;
Set_DelOdd& fixture = pool().template fixture<Set_DelOdd>();
- std::vector<size_t>& arrData = fixture.m_arrData;
- for ( size_t i = 0; i < arrData.size(); ++i ) {
- if ( rSet.insert( key_type( arrData[i], id())))
- ++m_nInsertSuccess;
- else
- ++m_nInsertFailed;
- }
-
- update_functor f;
- for ( size_t i = arrData.size() - 1; i > 0; --i ) {
- if ( arrData[i] & 1 )
- rSet.update( key_type( arrData[i], id()), f, true );
+ for ( size_t nPass = 0; nPass < s_nInsertPassCount; ++nPass ) {
+ if ( nPass & 1 ) {
+ // insert pass
+ for ( auto el : m_arr ) {
+ if ( el & 1 ) {
+ if ( rSet.insert( key_type( el, id() ) ) )
+ ++m_nInsertSuccess;
+ else
+ ++m_nInsertFailed;
+ }
+ }
+ }
+ else {
+ // update pass
+ for ( auto el : m_arr ) {
+ if ( el & 1 ) {
+ bool success;
+ bool inserted;
+ std::tie( success, inserted ) = rSet.update( key_type( el, id() ), update_functor() );
+ if ( success && inserted )
+ ++m_nInsertSuccess;
+ else
+ ++m_nInsertFailed;
+ }
+ }
+ }
}
fixture.m_nInsThreadCount.fetch_sub( 1, atomics::memory_order_release );
+ m_arr.resize( 0 );
}
};
typedef cds_test::thread base_class;
Set& m_Set;
+ // Collect the odd keys of the shared array into this deleter's private,
+ // shuffled m_arr.
+ void init_data()
+ {
+ prepare_array( m_arr, []( size_t el ) ->bool { return ( el & 1 ) != 0; } );
+ }
+
public:
size_t m_nDeleteSuccess = 0;
size_t m_nDeleteFailed = 0;
+ std::vector<size_t> m_arr;
+
public:
Deleter( cds_test::thread_pool& pool, Set& set )
: base_class( pool, deleter_thread )
, m_Set( set )
- {}
+ {
+ init_data();
+ }
Deleter( Deleter& src )
: base_class( src )
, m_Set( src.m_Set )
- {}
+ {
+ init_data();
+ }
virtual thread * clone()
{
size_t const nInsThreadCount = s_nInsThreadCount;
Set_DelOdd& fixture = pool().template fixture<Set_DelOdd>();
- std::vector<size_t>& arrData = fixture.m_arrData;
- if ( id() & 1 ) {
- for (size_t i = 0; i < arrData.size(); ++i) {
- if ( arrData[i] & 1 ) {
+ do {
+ if ( id() & 1 ) {
+ for ( auto el : m_arr ) {
for ( size_t k = 0; k < nInsThreadCount; ++k ) {
- if ( eraser<Set, Set::c_bEraseExactKey>::erase( rSet, arrData[i], k ))
+ if ( rSet.erase( key_type( el, k ) ) )
++m_nDeleteSuccess;
else
++m_nDeleteFailed;
}
}
- if ( fixture.m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
- break;
}
- }
- else {
- for ( size_t i = arrData.size() - 1; i > 0; --i ) {
- if ( arrData[i] & 1 ) {
- for ( size_t k = 0; k < nInsThreadCount; ++k ) {
- if (eraser<Set, Set::c_bEraseExactKey>::erase(rSet, arrData[i], k))
+ else {
+ for ( size_t k = 0; k < nInsThreadCount; ++k ) {
+ for ( auto el : m_arr ) {
+ if ( rSet.erase( key_type( el, k ) ) )
++m_nDeleteSuccess;
else
++m_nDeleteFailed;
}
}
- if ( fixture.m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
- break;
}
- }
+ } while ( fixture.m_nInsThreadCount.load( atomics::memory_order_acquire ) != 0 );
+
+ m_arr.resize( 0 );
}
};
typedef cds_test::thread base_class;
Set& m_Set;
+ std::vector<size_t> m_arr;
+
+ // Collect the odd keys of the shared array into this extractor's private,
+ // shuffled m_arr.
+ void init_data()
+ {
+ prepare_array( m_arr, []( size_t el ) ->bool { return ( el & 1 ) != 0; } );
+ }
+
public:
size_t m_nExtractSuccess = 0;
size_t m_nExtractFailed = 0;
Extractor( cds_test::thread_pool& pool, Set& set )
: base_class( pool, extractor_thread )
, m_Set( set )
- {}
+ {
+ init_data();
+ }
Extractor( Extractor& src )
: base_class( src )
, m_Set( src.m_Set )
- {}
+ {
+ init_data();
+ }
virtual thread * clone()
{
return new Extractor( *this );
}
- template <typename SetType, bool>
- struct extractor {
- static typename SetType::guarded_ptr extract(SetType& s, size_t key, size_t /*thread*/)
- {
- return s.extract_with( key, key_less());
- }
- };
-
- template <typename SetType>
- struct extractor<SetType, true> {
- static typename SetType::guarded_ptr extract(SetType& s, size_t key, size_t thread)
- {
- return s.extract( key_type(key, thread));
- }
- };
-
virtual void test()
{
Set& rSet = m_Set;
-
typename Set::guarded_ptr gp;
Set_DelOdd& fixture = pool().template fixture<Set_DelOdd>();
- std::vector<size_t>& arrData = fixture.m_arrData;
size_t const nInsThreadCount = s_nInsThreadCount;
- if ( id() & 1 ) {
- for ( size_t i = 0; i < arrData.size(); ++i ) {
- if ( arrData[i] & 1 ) {
+ do {
+ if ( id() & 1 ) {
+ for ( auto el : m_arr ) {
for ( size_t k = 0; k < nInsThreadCount; ++k ) {
- gp = extractor<Set, Set::c_bEraseExactKey>::extract( rSet, arrData[i], k );
+ gp = rSet.extract( key_type( el, k ) );
if ( gp )
++m_nExtractSuccess;
else
gp.release();
}
}
- if ( fixture.m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
- break;
}
- }
- else {
- for ( size_t i = arrData.size() - 1; i > 0; --i ) {
- if ( arrData[i] & 1 ) {
- for ( size_t k = 0; k < nInsThreadCount; ++k ) {
- gp = extractor<Set, Set::c_bEraseExactKey>::extract( rSet, arrData[i], k);
+ else {
+ for ( size_t k = 0; k < nInsThreadCount; ++k ) {
+ for ( auto el : m_arr ) {
+ gp = rSet.extract( key_type( el, k ) );
if ( gp )
++m_nExtractSuccess;
else
gp.release();
}
}
- if ( fixture.m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
- break;
}
- }
+ } while ( fixture.m_nInsThreadCount.load( atomics::memory_order_acquire ) != 0 );
+
+ m_arr.resize( 0 );
}
};
{
typedef cds_test::thread base_class;
Set& m_Set;
+ std::vector<size_t> m_arr;
+
+ // Collect the odd keys of the shared array into this extractor's private,
+ // shuffled m_arr.
+ void init_data()
+ {
+ prepare_array( m_arr, []( size_t el ) -> bool { return ( el & 1 ) != 0; } );
+ }
public:
size_t m_nExtractSuccess = 0;
Extractor( cds_test::thread_pool& pool, Set& set )
: base_class( pool, extractor_thread )
, m_Set( set )
- {}
+ {
+ init_data();
+ }
Extractor( Extractor& src )
: base_class( src )
, m_Set( src.m_Set )
- {}
+ {
+ init_data();
+ }
virtual thread * clone()
{
return new Extractor( *this );
}
- template <typename SetType, bool>
- struct extractor {
- static typename SetType::exempt_ptr extract(SetType& s, size_t key, size_t /*thread*/)
- {
- return s.extract_with(key, key_less());
- }
- };
-
- template <typename SetType>
- struct extractor<SetType, true> {
- static typename SetType::exempt_ptr extract(SetType& s, size_t key, size_t thread)
- {
- return s.extract(key_type(key, thread));
- }
- };
-
virtual void test()
{
Set& rSet = m_Set;
-
typename Set::exempt_ptr xp;
Set_DelOdd& fixture = pool().template fixture<Set_DelOdd>();
- std::vector<size_t>& arrData = fixture.m_arrData;
size_t const nInsThreadCount = fixture.s_nInsThreadCount;
- if ( id() & 1 ) {
- for ( size_t i = 0; i < arrData.size(); ++i ) {
- if ( arrData[i] & 1 ) {
- for ( size_t k = 0; k < nInsThreadCount; ++k ) {
+ do {
+ if ( id() & 1 ) {
+ for ( size_t k = 0; k < nInsThreadCount; ++k ) {
+ for ( auto el : m_arr ) {
if ( Set::c_bExtractLockExternal ) {
typename Set::rcu_lock l;
- xp = extractor<Set, Set::c_bEraseExactKey>::extract( rSet, arrData[i], k);
+ xp = rSet.extract( key_type( el, k ) );
if ( xp )
++m_nExtractSuccess;
else
++m_nExtractFailed;
}
else {
- xp = extractor<Set, Set::c_bEraseExactKey>::extract(rSet, arrData[i], k);
+ xp = rSet.extract( key_type( el, k ) );
if ( xp )
++m_nExtractSuccess;
else
xp.release();
}
}
- if ( fixture.m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
- break;
}
- }
- else {
- for ( size_t i = arrData.size() - 1; i > 0; --i ) {
- if ( arrData[i] & 1 ) {
+ else {
+ for ( auto el : m_arr ) {
for ( size_t k = 0; k < nInsThreadCount; ++k ) {
if ( Set::c_bExtractLockExternal ) {
typename Set::rcu_lock l;
- xp = extractor<Set, Set::c_bEraseExactKey>::extract(rSet, arrData[i], k);
+ xp = rSet.extract( key_type( el, k ) );
if ( xp )
++m_nExtractSuccess;
else
++m_nExtractFailed;
}
else {
- xp = extractor<Set, Set::c_bEraseExactKey>::extract(rSet, arrData[i], k);
+ xp = rSet.extract( key_type( el, k ) );
if ( xp )
++m_nExtractSuccess;
else
xp.release();
}
}
- if ( fixture.m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
- break;
}
- }
+ } while ( fixture.m_nInsThreadCount.load( atomics::memory_order_acquire ) != 0 );
+
+ m_arr.resize( 0 );
}
};
propout() << std::make_pair( "insert_thread_count", s_nInsThreadCount )
<< std::make_pair( "delete_thread_count", s_nDelThreadCount )
- << std::make_pair( "set_size", s_nSetSize );
+ << std::make_pair( "set_size", s_nSetSize )
+ << std::make_pair( "pass_count", s_nInsertPassCount );
std::chrono::milliseconds duration = pool.run();
propout() << std::make_pair( "duration", duration );
+ size_t nInsertInitFailed = 0;
+ size_t nInsertInitSuccess = 0;
size_t nInsertSuccess = 0;
size_t nInsertFailed = 0;
size_t nDeleteSuccess = 0;
insert_thread& inserter = static_cast<insert_thread&>(thr);
nInsertSuccess += inserter.m_nInsertSuccess;
nInsertFailed += inserter.m_nInsertFailed;
+ nInsertInitSuccess += inserter.m_nInsertInitSuccess;
+ nInsertInitFailed += inserter.m_nInsertInitFailed;
}
else {
assert( thr.type() == deleter_thread );
}
}
- EXPECT_EQ( nInsertSuccess, s_nSetSize * s_nInsThreadCount );
- EXPECT_EQ( nInsertFailed, 0u );
+ size_t const nInitialOddKeys = ( s_nSetSize * s_nInsThreadCount ) / 2;
+
+ EXPECT_EQ( nInsertInitFailed, 0u );
+ EXPECT_EQ( nInsertInitSuccess, s_nSetSize * s_nInsThreadCount );
+ EXPECT_GE( nInsertSuccess + nInitialOddKeys, nDeleteSuccess );
+ EXPECT_LE( nInsertSuccess, nDeleteSuccess );
propout()
+ << std::make_pair( "insert_init_success", nInsertInitSuccess )
+ << std::make_pair( "insert_init_failed", nInsertInitFailed )
<< std::make_pair( "insert_success", nInsertSuccess )
<< std::make_pair( "insert_failed", nInsertFailed )
<< std::make_pair( "delete_success", nDeleteSuccess )
propout() << std::make_pair( "insert_thread_count", s_nInsThreadCount )
<< std::make_pair( "delete_thread_count", s_nDelThreadCount )
<< std::make_pair( "extract_thread_count", s_nExtractThreadCount )
- << std::make_pair( "set_size", s_nSetSize );
+ << std::make_pair( "set_size", s_nSetSize )
+ << std::make_pair( "pass_count", s_nInsertPassCount );
std::chrono::milliseconds duration = pool.run();
propout() << std::make_pair( "duration", duration );
+ size_t nInsertInitFailed = 0;
+ size_t nInsertInitSuccess = 0;
size_t nInsertSuccess = 0;
size_t nInsertFailed = 0;
size_t nDeleteSuccess = 0;
insert_thread& inserter = static_cast<insert_thread&>( thr );
nInsertSuccess += inserter.m_nInsertSuccess;
nInsertFailed += inserter.m_nInsertFailed;
+ nInsertInitSuccess += inserter.m_nInsertInitSuccess;
+ nInsertInitFailed += inserter.m_nInsertInitFailed;
}
break;
case deleter_thread:
}
}
- EXPECT_EQ( nInsertSuccess, s_nSetSize * s_nInsThreadCount );
- EXPECT_EQ( nInsertFailed, 0u );
+ size_t const nInitialOddKeys = ( s_nSetSize * s_nInsThreadCount ) / 2;
+
+ EXPECT_EQ( nInsertInitFailed, 0u );
+ EXPECT_EQ( nInsertInitSuccess, s_nSetSize * s_nInsThreadCount );
+ EXPECT_GE( nInsertSuccess + nInitialOddKeys, nDeleteSuccess + nExtractSuccess );
+ EXPECT_LE( nInsertSuccess, nDeleteSuccess + nExtractSuccess );
propout()
+ << std::make_pair( "insert_init_success", nInsertInitSuccess )
+ << std::make_pair( "insert_init_failed", nInsertInitFailed )
<< std::make_pair( "insert_success", nInsertSuccess )
<< std::make_pair( "insert_failed", nInsertFailed )
<< std::make_pair( "delete_success", nDeleteSuccess )
do_test_extract_with( testSet );
analyze( testSet );
}
+
+ template <class Map>
+ void run_feldman();
};
class Set_DelOdd_LF: public Set_DelOdd
namespace set {
- namespace {
- class Set_DelOdd2: public set::Set_DelOdd
- {
- public:
- template <typename Set>
- void run()
- {
- typedef typename Set::traits original_traits;
- struct traits: public original_traits
- {
- enum { hash_size = sizeof(uint32_t) + sizeof(uint16_t) };
- };
-
- typedef typename Set::template rebind_traits< traits >::result set_type;
- run_test_extract< set_type >();
- }
+ template <class Set>
+ void Set_DelOdd::run_feldman()
+ {
+ typedef typename Set::traits original_traits;
+ struct traits: public original_traits {
+ enum { hash_size = sizeof( uint32_t ) + sizeof( uint16_t ) };
};
+ typedef typename Set::template rebind_traits< traits >::result set_type;
- CDSSTRESS_FeldmanHashSet_fixed( Set_DelOdd2, run, key_thread, size_t )
+ run_test_extract<set_type>();
}
+ CDSSTRESS_FeldmanHashSet_fixed( Set_DelOdd, run_feldman, key_thread, size_t )
+
} // namespace set
# verosity=n Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).
# history_size=[0..7], default 2
-race:cds_test::thread::run
-
# DHP
#race:cds::gc::details::retired_ptr::free