project(cds)
-set(PROJECT_VERSION 2.3.0)
+set(PROJECT_VERSION 2.4.0)
# Options
option(WITH_TESTS "Build unit tests" OFF)
Choosing the best value for the timeout is a platform- and application-specific task.
The default timeout value is provided by the \p Traits class that should define the
\p timeout data member. There are two predefined \p Traits implementations:
- - \p delay_const_traits - defines \p timeout as a constant (enum).
+ - \p delay_const_traits - defines \p timeout as a constant (enum).
To change the timeout you should recompile your application.
- - \p delay_runtime_traits - specifies timeout as static data member that can be changed
+ - \p delay_runtime_traits - specifies the timeout as a static data member that can be changed
at runtime to tune the back-off strategy.
You may use the \p Traits template parameter to separate back-off implementations.
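For example, a constant-timeout \p Traits may look like this (a minimal sketch;
\p my_delay_traits is illustrative, and only the required \p timeout member is shown):
\code
struct my_delay_traits {
    enum { timeout = 10 };  // constant timeout, as in \p delay_const_traits
};
\endcode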
/// 32bit
uint32_t operator()( uint32_t x ) const
{
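// swap odd/even bits, then bit pairs, nibbles, bytes, and finally the 16-bit halves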
- x = ( ( ( x & 0xaaaaaaaa ) >> 1 ) | ( ( x & 0x55555555 ) << 1 ) );
- x = ( ( ( x & 0xcccccccc ) >> 2 ) | ( ( x & 0x33333333 ) << 2 ) );
- x = ( ( ( x & 0xf0f0f0f0 ) >> 4 ) | ( ( x & 0x0f0f0f0f ) << 4 ) );
- x = ( ( ( x & 0xff00ff00 ) >> 8 ) | ( ( x & 0x00ff00ff ) << 8 ) );
- return( ( x >> 16 ) | ( x << 16 ) );
+ x = ( ( ( x & 0xaaaaaaaa ) >> 1 ) | ( ( x & 0x55555555 ) << 1 ));
+ x = ( ( ( x & 0xcccccccc ) >> 2 ) | ( ( x & 0x33333333 ) << 2 ));
+ x = ( ( ( x & 0xf0f0f0f0 ) >> 4 ) | ( ( x & 0x0f0f0f0f ) << 4 ));
+ x = ( ( ( x & 0xff00ff00 ) >> 8 ) | ( ( x & 0x00ff00ff ) << 8 ));
+ return( ( x >> 16 ) | ( x << 16 ));
}
/// 64bit
uint64_t operator()( uint64_t x ) const
{
- return ( static_cast<uint64_t>( operator()( static_cast<uint32_t>( x ) ) ) << 32 ) // low 32bit
- | ( static_cast<uint64_t>( operator()( static_cast<uint32_t>( x >> 32 ) ) ) ); // high 32bit
+ return ( static_cast<uint64_t>( operator()( static_cast<uint32_t>( x )) ) << 32 ) // low 32bit
+ | ( static_cast<uint64_t>( operator()( static_cast<uint32_t>( x >> 32 )) )); // high 32bit
}
};
return ( static_cast<uint32_t>( table[x & 0xff] ) << 24 ) |
( static_cast<uint32_t>( table[( x >> 8 ) & 0xff] ) << 16 ) |
( static_cast<uint32_t>( table[( x >> 16 ) & 0xff] ) << 8 ) |
- ( static_cast<uint32_t>( table[( x >> 24 ) & 0xff] ) );
+ ( static_cast<uint32_t>( table[( x >> 24 ) & 0xff] ));
}
/// 64bit
uint64_t operator()( uint64_t x ) const
{
- return ( static_cast<uint64_t>( operator()( static_cast<uint32_t>( x ) ) ) << 32 ) |
- static_cast<uint64_t>( operator()( static_cast<uint32_t>( x >> 32 ) ) );
+ return ( static_cast<uint64_t>( operator()( static_cast<uint32_t>( x )) ) << 32 ) |
+ static_cast<uint64_t>( operator()( static_cast<uint32_t>( x >> 32 )) );
}
};
//@cond
static uint8_t muldiv32_byte( uint8_t b )
{
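// reverse the bits of a byte using the multiply-and-mask trick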
- return static_cast<uint8_t>( ( ( b * 0x0802LU & 0x22110LU ) | ( b * 0x8020LU & 0x88440LU ) ) * 0x10101LU >> 16 );
+ return static_cast<uint8_t>( ( ( b * 0x0802LU & 0x22110LU ) | ( b * 0x8020LU & 0x88440LU )) * 0x10101LU >> 16 );
}
static uint8_t muldiv64_byte( uint8_t b )
// for 32bit architecture
static uint32_t muldiv32( uint32_t x )
{
- return static_cast<uint32_t>( muldiv32_byte( static_cast<uint8_t>( x >> 24 ) ) )
- | ( static_cast<uint32_t>( muldiv32_byte( static_cast<uint8_t>( x >> 16 ) ) ) << 8 )
- | ( static_cast<uint32_t>( muldiv32_byte( static_cast<uint8_t>( x >> 8 ) ) ) << 16 )
- | ( static_cast<uint32_t>( muldiv32_byte( static_cast<uint8_t>( x ) ) ) << 24 );
+ return static_cast<uint32_t>( muldiv32_byte( static_cast<uint8_t>( x >> 24 )) )
+ | ( static_cast<uint32_t>( muldiv32_byte( static_cast<uint8_t>( x >> 16 )) ) << 8 )
+ | ( static_cast<uint32_t>( muldiv32_byte( static_cast<uint8_t>( x >> 8 )) ) << 16 )
+ | ( static_cast<uint32_t>( muldiv32_byte( static_cast<uint8_t>( x )) ) << 24 );
}
static uint64_t muldiv32( uint64_t x )
{
- return static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x >> 56 ) ) )
- | ( static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x >> 48 ) ) ) << 8 )
- | ( static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x >> 40 ) ) ) << 16 )
- | ( static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x >> 32 ) ) ) << 24 )
- | ( static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x >> 24 ) ) ) << 32 )
- | ( static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x >> 16 ) ) ) << 40 )
- | ( static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x >> 8 ) ) ) << 48 )
- | ( static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x ) ) ) << 56 );
+ return static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x >> 56 )) )
+ | ( static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x >> 48 )) ) << 8 )
+ | ( static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x >> 40 )) ) << 16 )
+ | ( static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x >> 32 )) ) << 24 )
+ | ( static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x >> 24 )) ) << 32 )
+ | ( static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x >> 16 )) ) << 40 )
+ | ( static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x >> 8 )) ) << 48 )
+ | ( static_cast<uint64_t>( muldiv32_byte( static_cast<uint8_t>( x )) ) << 56 );
}
/// for 64bit architecture
static uint32_t muldiv64( uint32_t x )
{
- return static_cast<uint32_t>( muldiv64_byte( static_cast<uint8_t>( x >> 24 ) ) )
- | ( static_cast<uint32_t>( muldiv64_byte( static_cast<uint8_t>( x >> 16 ) ) ) << 8 )
- | ( static_cast<uint32_t>( muldiv64_byte( static_cast<uint8_t>( x >> 8 ) ) ) << 16 )
- | ( static_cast<uint32_t>( muldiv64_byte( static_cast<uint8_t>( x ) ) ) << 24 );
+ return static_cast<uint32_t>( muldiv64_byte( static_cast<uint8_t>( x >> 24 )) )
+ | ( static_cast<uint32_t>( muldiv64_byte( static_cast<uint8_t>( x >> 16 )) ) << 8 )
+ | ( static_cast<uint32_t>( muldiv64_byte( static_cast<uint8_t>( x >> 8 )) ) << 16 )
+ | ( static_cast<uint32_t>( muldiv64_byte( static_cast<uint8_t>( x )) ) << 24 );
}
static uint64_t muldiv64( uint64_t x )
{
- return static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x >> 56 ) ) )
- | ( static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x >> 48 ) ) ) << 8 )
- | ( static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x >> 40 ) ) ) << 16 )
- | ( static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x >> 32 ) ) ) << 24 )
- | ( static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x >> 24 ) ) ) << 32 )
- | ( static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x >> 16 ) ) ) << 40 )
- | ( static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x >> 8 ) ) ) << 48 )
- | ( static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x ) ) ) << 56 );
+ return static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x >> 56 )) )
+ | ( static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x >> 48 )) ) << 8 )
+ | ( static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x >> 40 )) ) << 16 )
+ | ( static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x >> 32 )) ) << 24 )
+ | ( static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x >> 24 )) ) << 32 )
+ | ( static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x >> 16 )) ) << 40 )
+ | ( static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x >> 8 )) ) << 48 )
+ | ( static_cast<uint64_t>( muldiv64_byte( static_cast<uint8_t>( x )) ) << 56 );
}
//@endcond
return true;
}
- bool ret = m_condvar.wait_for( lock, std::chrono::milliseconds( c_nWaitMilliseconds ) ) == std::cv_status::no_timeout;
+ bool ret = m_condvar.wait_for( lock, std::chrono::milliseconds( c_nWaitMilliseconds )) == std::cv_status::no_timeout;
m_wakeup = false;
return ret;
}
return true;
}
- bool ret = rec.m_condvar.wait_for( lock, std::chrono::milliseconds( c_nWaitMilliseconds ) ) == std::cv_status::no_timeout;
+ bool ret = rec.m_condvar.wait_for( lock, std::chrono::milliseconds( c_nWaitMilliseconds )) == std::cv_status::no_timeout;
m_wakeup = false;
return ret;
}
return true;
}
- bool ret = rec.m_condvar.wait_for( lock, std::chrono::milliseconds( c_nWaitMilliseconds ) ) == std::cv_status::no_timeout;
+ bool ret = rec.m_condvar.wait_for( lock, std::chrono::milliseconds( c_nWaitMilliseconds )) == std::cv_status::no_timeout;
rec.m_wakeup = false;
return ret;
}
public:
/// Initializes the splitter with reference to \p h and zero start bit offset
explicit split_bitstring( bitstring const& h )
- : cur_( reinterpret_cast<uint8_t const*>( &h ) )
+ : cur_( reinterpret_cast<uint8_t const*>( &h ))
, offset_( 0 )
, first_( cur_ )
, last_( cur_ + c_bitstring_size )
split_bitstring( bitstring const& h, size_t nBitOffset )
: cur_( reinterpret_cast<uint8_t const*>( &h ) + nBitOffset / c_nBitPerByte )
, offset_( nBitOffset % c_nBitPerByte )
- , first_( reinterpret_cast<uint8_t const*>( &h ) )
+ , first_( reinterpret_cast<uint8_t const*>( &h ))
, last_( first_ + c_bitstring_size )
{}
*/
uint_type cut( unsigned count )
{
- assert( !eos() );
+ assert( !eos());
uint_type result = 0;
# if defined( CDS_ARCH_LITTLE_ENDIAN )
public:
/// Initializes the splitter with reference to \p h and zero start bit offset
explicit byte_splitter( bitstring const& h )
- : cur_( reinterpret_cast<uint8_t const*>( &h ) )
+ : cur_( reinterpret_cast<uint8_t const*>( &h ))
, first_( cur_ )
, last_( cur_ + c_bitstring_size )
{}
/// Initializes the splitter with reference to \p h and start bit offset \p nBitOffset
byte_splitter( bitstring const& h, size_t nBitOffset )
: cur_( reinterpret_cast<uint8_t const*>( &h ) + nBitOffset / c_nBitPerByte )
- , first_( reinterpret_cast<uint8_t const*>( &h ) )
+ , first_( reinterpret_cast<uint8_t const*>( &h ))
, last_( first_ + c_bitstring_size )
{
assert( is_correct( static_cast<unsigned>( nBitOffset )));
*/
uint_type cut( unsigned count )
{
- assert( !eos() );
- assert( is_correct( count ) );
+ assert( !eos());
+ assert( is_correct( count ));
uint_type result = 0;
*/
int_type cut( unsigned count )
{
- assert( !eos() );
+ assert( !eos());
assert( is_correct( count ));
int_type result = ( number_ >> shift_ ) & (( 1 << count ) - 1 );
*/
int_type safe_cut( unsigned count )
{
- if ( eos() )
+ if ( eos())
return 0;
- unsigned rest = static_cast<unsigned>( rest_count() );
+ unsigned rest = static_cast<unsigned>( rest_count());
if ( rest < count )
count = rest;
return count ? cut( count ) : 0;
// Sanitizer attributes
// Example: CDS_DISABLE_SANITIZE( "function" )
#ifdef CDS_ADDRESS_SANITIZER_ENABLED
-# define CDS_SUPPRESS_SANITIZE( ... ) __attribute__(( no_sanitize( __VA_ARGS__ ) ))
+# define CDS_SUPPRESS_SANITIZE( ... ) __attribute__(( no_sanitize( __VA_ARGS__ )))
#else
# define CDS_SUPPRESS_SANITIZE( ... )
#endif
// double-width CAS support
// note: gcc-4.8 does not support double-word atomics
// gcc-4.9: a lot of crashes when use DCAS
-// gcc-7: 128-bit atomic is not lock-free, see https://gcc.gnu.org/ml/gcc/2017-01/msg00167.html
+// gcc-7: 128-bit atomic is not lock-free, see https://gcc.gnu.org/ml/gcc/2017-01/msg00167.html
// You can manually suppress wide-atomic support by defining in compiler command line:
// for 64bit platform: -DCDS_DISABLE_128BIT_ATOMIC
// for 32bit platform: -DCDS_DISABLE_64BIT_ATOMIC
return pMem;
}
else
- pMem = reinterpret_cast<unsigned char *>( node_allocator_type().allocate( 1 ) );
+ pMem = reinterpret_cast<unsigned char *>( node_allocator_type().allocate( 1 ));
return pMem;
}
if ( !pChild ) {
// Found min/max
- if ( pNode->is_valued( memory_model::memory_order_acquire ) ) {
+ if ( pNode->is_valued( memory_model::memory_order_acquire )) {
int result = try_remove_node( pParent, pNode, nVersion, func, disp );
if ( result == update_flags::result_removed )
int hLRL = height_null( child( pLRight, left_child, memory_model::memory_order_relaxed ), memory_model::memory_order_acquire );
int balance = hLL - hLRL;
- if ( balance >= -1 && balance <= 1 && !( ( hLL == 0 || hLRL == 0 ) && !pLeft->is_valued( memory_model::memory_order_relaxed ) ) ) {
+ if ( balance >= -1 && balance <= 1 && !( ( hLL == 0 || hLRL == 0 ) && !pLeft->is_valued( memory_model::memory_order_relaxed )) ) {
// nParent.child.left won't be damaged after a double rotation
return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
}
// rotate right
return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
}
-
+
return pNode; // retry
}
node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
int hRLR = height_null( pRLRight, memory_model::memory_order_acquire );
int balance = hRR - hRLR;
- if ( balance >= -1 && balance <= 1 && !( ( hRR == 0 || hRLR == 0 ) && !pRight->is_valued( memory_model::memory_order_relaxed ) ) )
+ if ( balance >= -1 && balance <= 1 && !( ( hRR == 0 || hRLR == 0 ) && !pRight->is_valued( memory_model::memory_order_relaxed )) )
return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
}
erase_at( Iterator const& iter )
#endif
{
- assert( iter != end() );
+ assert( iter != end());
assert( iter.bucket() != nullptr );
if ( iter.bucket()->erase_at( iter.underlying_iterator())) {
erase_at( Iterator const& iter )
#endif
{
- assert( iter != end() );
+ assert( iter != end());
assert( iter.bucket() != nullptr );
if ( iter.bucket()->erase_at( iter.underlying_iterator())) {
A ring buffer is a bounded queue. Additionally, \p %WeakRingBuffer supports batch operations -
you can push/pop an array of elements.
- There are a specialization \ref cds_nonintrusive_WeakRingBuffer_void "WeakRingBuffer<void, Traits>"
- that is not a queue but a "memory pool" between producer and consumer threads.
+ There is a specialization \ref cds_nonintrusive_WeakRingBuffer_void "WeakRingBuffer<void, Traits>"
+ that is not a queue but a "memory pool" between producer and consumer threads.
\p WeakRingBuffer<void> supports variable-sized data.
@warning \p %WeakRingBuffer is designed for 64-bit architectures.
\code
cds::container::WeakRingBuffer<std::string> ringbuf;
char const* arr[10];
- ringbuf.push( arr, 10,
+ ringbuf.push( arr, 10,
[]( std::string& element, char const* src ) {
new( &element ) std::string( src );
});
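// A matching batch pop (a sketch; by symmetry with push(), the functor is
// assumed to receive the destination first and the ring element second,
// and to be responsible for destroying the moved-from ring element):
std::string out[10];
ringbuf.pop( out, 10,
    []( std::string& dest, std::string& element ) {
        dest = std::move( element );
        element.~basic_string();
    });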
template <typename Q, typename CopyFunc>
bool push( Q* arr, size_t count, CopyFunc copy )
{
- assert( count < capacity() );
+ assert( count < capacity());
counter_type back = back_.load( memory_model::memory_order_relaxed );
- assert( static_cast<size_t>( back - pfront_ ) <= capacity() );
+ assert( static_cast<size_t>( back - pfront_ ) <= capacity());
if ( static_cast<size_t>( pfront_ + capacity() - back ) < count ) {
pfront_ = front_.load( memory_model::memory_order_acquire );
{
counter_type back = back_.load( memory_model::memory_order_relaxed );
- assert( static_cast<size_t>( back - pfront_ ) <= capacity() );
+ assert( static_cast<size_t>( back - pfront_ ) <= capacity());
if ( pfront_ + capacity() - back < 1 ) {
pfront_ = front_.load( memory_model::memory_order_acquire );
{
counter_type back = back_.load( memory_model::memory_order_relaxed );
- assert( static_cast<size_t>( back - pfront_ ) <= capacity() );
+ assert( static_cast<size_t>( back - pfront_ ) <= capacity());
if ( pfront_ + capacity() - back < 1 ) {
pfront_ = front_.load( memory_model::memory_order_acquire );
template <typename Q, typename CopyFunc>
bool pop( Q* arr, size_t count, CopyFunc copy )
{
- assert( count < capacity() );
+ assert( count < capacity());
counter_type front = front_.load( memory_model::memory_order_relaxed );
- assert( static_cast<size_t>( cback_ - front ) < capacity() );
+ assert( static_cast<size_t>( cback_ - front ) < capacity());
if ( static_cast<size_t>( cback_ - front ) < count ) {
cback_ = back_.load( memory_model::memory_order_acquire );
bool dequeue_with( Func f )
{
counter_type front = front_.load( memory_model::memory_order_relaxed );
- assert( static_cast<size_t>( cback_ - front ) < capacity() );
+ assert( static_cast<size_t>( cback_ - front ) < capacity());
if ( cback_ - front < 1 ) {
cback_ = back_.load( memory_model::memory_order_acquire );
value_type* front()
{
counter_type front = front_.load( memory_model::memory_order_relaxed );
- assert( static_cast<size_t>( cback_ - front ) < capacity() );
+ assert( static_cast<size_t>( cback_ - front ) < capacity());
if ( cback_ - front < 1 ) {
cback_ = back_.load( memory_model::memory_order_acquire );
bool pop_front()
{
counter_type front = front_.load( memory_model::memory_order_relaxed );
- assert( static_cast<size_t>( cback_ - front ) <= capacity() );
+ assert( static_cast<size_t>( cback_ - front ) <= capacity());
if ( cback_ - front < 1 ) {
cback_ = back_.load( memory_model::memory_order_acquire );
void clear()
{
value_type v;
- while ( pop( v ) );
+ while ( pop( v ));
}
/// Checks if the ring-buffer is empty
/// [producer] Reserve \p size bytes
/**
- The function returns a pointer to reserved buffer of \p size bytes.
+ The function returns a pointer to a reserved buffer of \p size bytes.
If there is not enough space in the ring buffer, the function returns \p nullptr.
After a successful \p %back() you should fill the provided buffer and call \p push_back():
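A minimal sketch of the reserve/commit protocol (\p produce() and \p msg are illustrative):
\code
cds::container::WeakRingBuffer<void> ringbuf;

void produce( void const* msg, size_t size )
{
    void* buf = ringbuf.back( size );   // reserve size bytes
    if ( buf ) {
        std::memcpy( buf, msg, size );  // fill the reserved buffer
        ringbuf.push_back();            // commit: make the bytes visible to the consumer
    }
    // buf == nullptr means there is not enough space in the ring buffer
}
\endcode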
size_t real_size = calc_real_size( size );
// check if we can reserve read_size bytes
- assert( real_size < capacity() );
+ assert( real_size < capacity());
counter_type back = back_.load( memory_model::memory_order_relaxed );
- assert( static_cast<size_t>( back - pfront_ ) <= capacity() );
+ assert( static_cast<size_t>( back - pfront_ ) <= capacity());
if ( static_cast<size_t>( pfront_ + capacity() - back ) < real_size ) {
pfront_ = front_.load( memory_model::memory_order_acquire );
size_t tail_size = capacity() - static_cast<size_t>( buffer_.mod( back ));
if ( tail_size < real_size ) {
// make unused tail
- assert( tail_size >= sizeof( size_t ) );
- assert( !is_tail( tail_size ) );
+ assert( tail_size >= sizeof( size_t ));
+ assert( !is_tail( tail_size ));
*reinterpret_cast<size_t*>( reserved ) = make_tail( tail_size - sizeof(size_t));
back += tail_size;
// reserve and store size
*reinterpret_cast<size_t*>( reserved ) = size;
- return reinterpret_cast<void*>( reserved + sizeof( size_t ) );
+ return reinterpret_cast<void*>( reserved + sizeof( size_t ));
}
/// [producer] Push reserved bytes into ring
counter_type back = back_.load( memory_model::memory_order_relaxed );
uint8_t* reserved = buffer_.buffer() + buffer_.mod( back );
- size_t real_size = calc_real_size( *reinterpret_cast<size_t*>( reserved ) );
- assert( real_size < capacity() );
+ size_t real_size = calc_real_size( *reinterpret_cast<size_t*>( reserved ));
+ assert( real_size < capacity());
back_.store( back + real_size, memory_model::memory_order_release );
}
std::pair<void*, size_t> front()
{
counter_type front = front_.load( memory_model::memory_order_relaxed );
- assert( static_cast<size_t>( cback_ - front ) < capacity() );
+ assert( static_cast<size_t>( cback_ - front ) < capacity());
if ( cback_ - front < sizeof( size_t )) {
cback_ = back_.load( memory_model::memory_order_acquire );
- if ( cback_ - front < sizeof( size_t ) )
+ if ( cback_ - front < sizeof( size_t ))
return std::make_pair( nullptr, 0u );
}
uint8_t * buf = buffer_.buffer() + buffer_.mod( front );
// check alignment
- assert( ( reinterpret_cast<uintptr_t>( buf ) & ( sizeof( uintptr_t ) - 1 ) ) == 0 );
+ assert( ( reinterpret_cast<uintptr_t>( buf ) & ( sizeof( uintptr_t ) - 1 )) == 0 );
size_t size = *reinterpret_cast<size_t*>( buf );
- if ( is_tail( size ) ) {
+ if ( is_tail( size )) {
// unused tail, skip
- CDS_VERIFY( pop_front() );
+ CDS_VERIFY( pop_front());
front = front_.load( memory_model::memory_order_relaxed );
buf = buffer_.buffer() + buffer_.mod( front );
size = *reinterpret_cast<size_t*>( buf );
- assert( !is_tail( size ) );
- assert( buf == buffer_.buffer() );
+ assert( !is_tail( size ));
+ assert( buf == buffer_.buffer());
}
#ifdef _DEBUG
bool pop_front()
{
counter_type front = front_.load( memory_model::memory_order_relaxed );
- assert( static_cast<size_t>( cback_ - front ) <= capacity() );
+ assert( static_cast<size_t>( cback_ - front ) <= capacity());
- if ( cback_ - front < sizeof(size_t) ) {
+ if ( cback_ - front < sizeof(size_t)) {
cback_ = back_.load( memory_model::memory_order_acquire );
- if ( cback_ - front < sizeof( size_t ) )
+ if ( cback_ - front < sizeof( size_t ))
return false;
}
uint8_t * buf = buffer_.buffer() + buffer_.mod( front );
// check alignment
- assert( ( reinterpret_cast<uintptr_t>( buf ) & ( sizeof( uintptr_t ) - 1 ) ) == 0 );
+ assert( ( reinterpret_cast<uintptr_t>( buf ) & ( sizeof( uintptr_t ) - 1 )) == 0 );
size_t size = *reinterpret_cast<size_t*>( buf );
size_t real_size = calc_real_size( untail( size ));
/// [consumer] Clears the ring buffer
void clear()
{
- for ( auto el = front(); el.first; el = front() )
+ for ( auto el = front(); el.first; el = front())
pop_front();
}
size_t real_size = (( size + sizeof( uintptr_t ) - 1 ) & ~( sizeof( uintptr_t ) - 1 )) + sizeof( size_t );
assert( real_size > size );
- assert( real_size - size >= sizeof( size_t ) );
+ assert( real_size - size >= sizeof( size_t ));
return real_size;
}
static size_t untail( size_t size )
{
- return size & (( size_t( 1 ) << ( sizeof( size_t ) * 8 - 1 ) ) - 1);
+ return size & (( size_t( 1 ) << ( sizeof( size_t ) * 8 - 1 )) - 1);
}
//@endcond
- \p boost.thread (thread-local storage support), boost.system
- \p google-test
+ Some parts of libcds may depend on the DCAS (double-width compare-and-swap) atomic primitive if
+ the target architecture supports it. For x86, the cmake build script enables the -mcx16 compiler flag that
+ switches DCAS support on. You may manually disable DCAS support with the following command line flags
+ in GCC/clang (for the MS VC++ compiler DCAS is not supported):
+ - \p -DCDS_DISABLE_128BIT_ATOMIC - for 64bit build
+ - \p -DCDS_DISABLE_64BIT_ATOMIC - for 32bit build
+
+ @warning All your projects AND libcds MUST be compiled with the same flags - either with DCAS support or without it.
+
\par Windows build
Prerequisites: for building the <b>cds</b> library and test suite you need:
throws the \p exception.
If exceptions are disabled, \p %throw_exception() prints an exception message to
- standard output and call \p abort().
+ standard output and calls \p abort().
You can supply your own \p %cds::throw_exception() function;
for that you should specify \p -DCDS_USER_DEFINED_THROW_EXCEPTION
- in compiler command line.
+ in the compiler command line.
@note \p %throw_exception() never returns. If the user-defined \p %throw_exception() returns,
the behavior is undefined.
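For example (a sketch of a user-defined handler; \p my_log is illustrative):
\code
// compiled with -DCDS_USER_DEFINED_THROW_EXCEPTION in the command line
namespace cds {
    template <typename E>
    CDS_NORETURN void throw_exception( E&& exception, char const* file, int line )
    {
        my_log( "file %s, line %d: %s\n", file, line, exception.what());
        abort();  // throw_exception() must never return
    }
} // namespace cds
\endcode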
template <typename E>
CDS_NORETURN static inline void throw_exception( E&& exception, char const* file, int line )
{
- printf( "file %s, line %d: %s\n", file, line, exception.what() );
+ printf( "file %s, line %d: %s\n", file, line, exception.what());
abort();
}
#endif
template <typename T>
T* get_as() const noexcept
{
- return reinterpret_cast<T*>( get() );
+ return reinterpret_cast<T*>( get());
}
template <typename T>
guard* operator[]( size_t idx ) const noexcept
{
- assert( idx < capacity() );
+ assert( idx < capacity());
return arr_[idx];
}
void clear( size_t idx ) noexcept
{
- assert( idx < capacity() );
+ assert( idx < capacity());
assert( arr_[idx] != nullptr );
arr_[idx]->clear();
guard* release( size_t idx ) noexcept
{
- assert( idx < capacity() );
+ assert( idx < capacity());
guard* g = arr_[idx];
arr_[idx] = nullptr;
void reset( size_t idx, guard* g ) noexcept
{
- assert( idx < capacity() );
+ assert( idx < capacity());
assert( arr_[idx] == nullptr );
arr_[idx] = g;
//@cond
/// Per-thread hazard pointer storage
- class thread_hp_storage
+ class thread_hp_storage
{
friend class smr;
public:
{
assert( current_block_ != nullptr );
assert( current_block_->first() <= current_cell_ );
- assert( current_cell_ < current_block_->last() );
+ assert( current_cell_ < current_block_->last());
//assert( &p != current_cell_ );
*current_cell_ = p;
CDS_HPSTAT( ++retire_call_count_ );
- if ( ++current_cell_ == current_block_->last() ) {
+ if ( ++current_cell_ == current_block_->last()) {
// goto next block if exists
if ( current_block_->next_ ) {
current_block_ = current_block_->next_;
}
bool repush( retired_ptr* p ) CDS_NOEXCEPT
- {
+ {
bool ret = push( *p );
CDS_HPSTAT( --retire_call_count_ );
assert( ret );
{
assert( list_head_ != nullptr );
assert( current_block_ == list_tail_ );
- assert( current_cell_ == current_block_->last() );
+ assert( current_cell_ == current_block_->last());
retired_block* block = retired_allocator::instance().alloc();
assert( block->next_ == nullptr );
- list_tail_ = list_tail_->next_ = block;
+ current_block_ = list_tail_ = list_tail_->next_ = block;
current_cell_ = block->first();
++block_count_;
CDS_HPSTAT( ++extend_call_count_ );
free_count =
scan_count =
help_scan_count =
- thread_rec_count =
- hp_block_count =
- retired_block_count =
- hp_extend_count =
+ thread_rec_count =
+ hp_block_count =
+ retired_block_count =
+ hp_extend_count =
retired_extend_count = 0;
}
};
assert( instance_ != nullptr );
# else
if ( !instance_ )
- CDS_THROW_EXCEPTION( not_initialized() );
+ CDS_THROW_EXCEPTION( not_initialized());
# endif
return *instance_;
}
%DHP is an adaptive variant of classic \p cds::gc::HP, see @ref cds_garbage_collectors_comparison "Compare HP implementation"
+ @note Internally, %DHP depends on a free-list implementation. There are two:
+ the DCAS-based free-list \p cds::intrusive::TaggedFreeList and the more complicated
+ CAS-based free-list \p cds::intrusive::FreeList. For the x86 architecture and GCC/clang,
+ libcds selects the appropriate free-list based on the \p -mcx16 compiler flag. You may
+ manually disable DCAS support by specifying \p -DCDS_DISABLE_128BIT_ATOMIC for a 64bit build
+ or \p -DCDS_DISABLE_64BIT_ATOMIC for a 32bit build in the compiler command line.
+ All your projects and libcds MUST be compiled with the same flags -
+ either with DCAS support or without it.
+ For the MS VC++ compiler DCAS is not supported.
+
See the \ref cds_how_to_use "How to use" section for details on how to apply SMR.
*/
class DHP
public:
/// Default ctor allocates a guard (hazard pointer) from thread-private storage
Guard() CDS_NOEXCEPT
- : guard_( dhp::smr::tls()->hazards_.alloc() )
+ : guard_( dhp::smr::tls()->hazards_.alloc())
{}
/// Initializes an unlinked guard, i.e. the guard contains no hazard pointer. Used for move semantics support
template <typename T>
T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
{
- assert( nIndex < capacity() );
+ assert( nIndex < capacity());
T pRet;
do {
template <typename T, class Func>
T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
{
- assert( nIndex < capacity() );
+ assert( nIndex < capacity());
T pRet;
do {
template <typename T>
T * assign( size_t nIndex, T * p )
{
- assert( nIndex < capacity() );
+ assert( nIndex < capacity());
guards_.set( nIndex, p );
dhp::smr::tls()->sync();
template <typename T>
T * get( size_t nIndex ) const
{
- assert( nIndex < capacity() );
+ assert( nIndex < capacity());
return guards_[nIndex]->template get_as<T>();
}
/// Get native guarded pointer stored
guarded_pointer get_native( size_t nIndex ) const
{
- assert( nIndex < capacity() );
+ assert( nIndex < capacity());
return guards_[nIndex]->get();
}
value_type * operator ->() const CDS_NOEXCEPT
{
assert( !empty());
- return value_cast()( guard_->get_as<guarded_type>() );
+ return value_cast()( guard_->get_as<guarded_type>());
}
/// Returns a reference to guarded value
value_type& operator *() CDS_NOEXCEPT
{
assert( !empty());
- return *value_cast()( guard_->get_as<guarded_type>() );
+ return *value_cast()( guard_->get_as<guarded_type>());
}
/// Returns const reference to guarded value
static void retire( T * p, void (* func)(void *))
{
dhp::thread_data* rec = dhp::smr::tls();
- if ( !rec->retired_.push( dhp::retired_ptr( p, func ) ) )
+ if ( !rec->retired_.push( dhp::retired_ptr( p, func )) )
dhp::smr::instance().scan( rec );
}
*/
static void scan()
{
- dhp::smr::instance().scan( dhp::smr::tls() );
+ dhp::smr::instance().scan( dhp::smr::tls());
}
/// Synonym for \p scan()
# ifdef CDS_DISABLE_SMR_EXCEPTION
assert( !full());
# else
- if ( full() )
+ if ( full())
CDS_THROW_EXCEPTION( not_enought_hazard_ptr());
# endif
guard* g = free_head_;
void free( guard* g ) CDS_NOEXCEPT
{
- assert( g >= array_ && g < array_ + capacity() );
+ assert( g >= array_ && g < array_ + capacity());
if ( g ) {
g->clear();
guard& operator[]( size_t idx )
{
- assert( idx < capacity() );
+ assert( idx < capacity());
return array_[idx];
}
/// \p smr::scan() strategy
enum scan_type {
- classic, ///< classic scan as described in Michael's works (see smr::classic_scan() )
- inplace ///< inplace scan without allocation (see smr::inplace_scan() )
+ classic, ///< classic scan as described in Michael's works (see smr::classic_scan())
+ inplace ///< inplace scan without allocation (see smr::inplace_scan())
};
//@cond
# ifdef CDS_DISABLE_SMR_EXCEPTION
assert( false ); // not enough hazard ptr
# else
- CDS_THROW_EXCEPTION( not_enought_hazard_ptr() );
+ CDS_THROW_EXCEPTION( not_enought_hazard_ptr());
# endif
}
}
@warning Can throw \p too_many_hazard_ptr_exception if internal hazard pointer objects are exhausted.
*/
Guard()
- : guard_( hp::smr::tls()->hazards_.alloc() )
+ : guard_( hp::smr::tls()->hazards_.alloc())
{}
/// Initializes an unlinked guard, i.e. the guard contains no hazard pointer. Used for move semantics support
template <typename T>
T * assign( size_t nIndex, T * p )
{
- assert( nIndex < capacity() );
+ assert( nIndex < capacity());
guards_.set( nIndex, p );
hp::smr::tls()->sync();
template <typename T>
T * get( size_t nIndex ) const
{
- assert( nIndex < capacity() );
+ assert( nIndex < capacity());
return guards_[nIndex]->template get_as<T>();
}
bool expand_slot( traverse_data& pos, node_ptr current)
{
- assert( !pos.splitter.eos() );
+ assert( !pos.splitter.eos());
return expand_slot( pos.pArr, pos.nSlot, current, pos.splitter.bit_offset());
}
return false;
}
- typename hash_splitter::uint_type idx = hash_splitter( hash_accessor()(*current.ptr()), nOffset ).cut(
+ typename hash_splitter::uint_type idx = hash_splitter( hash_accessor()(*current.ptr()), nOffset ).cut(
static_cast<unsigned>( m_Metrics.array_node_size_log ));
pArr->nodes[idx].store(current, memory_model::memory_order_release);
\p c_nUpperBound must be no more than 32.
- <tt>random_generator()</tt> - the constructor of the generator object initializes the generator instance (its internal state).
- <tt>unsigned int operator()()</tt> - the main generating function. Returns a random level from the range <tt>[0 .. c_nUpperBound - 1]</tt>
-
+
Stateful generators are supported.
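For example, a conforming user-provided generator may look like this (a sketch;
the xorshift update is illustrative, and real generators such as \p skip_list::turbo32
produce a geometric distribution of levels):
\code
struct my_level_generator {
    static unsigned const c_nUpperBound = 24;  // must be no more than 32
    unsigned seed_ = 0x9E3779B9;               // stateful generators are supported
    unsigned int operator()()
    {
        seed_ ^= seed_ << 13;
        seed_ ^= seed_ >> 17;
        seed_ ^= seed_ << 5;
        return seed_ % c_nUpperBound;
    }
};
\endcode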
- \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
or \p opt::v::sequential_consistent (sequentially consistent memory model).
- \p skip_list::random_level_generator - random level generator. Can be \p skip_list::xor_shift,
- \p skip_list::turbo32 (the default) or user-provided one.
+ \p skip_list::turbo32 (the default) or user-provided one.
See \p skip_list::random_level_generator option description for explanation.
- \p opt::allocator - although the skip-list is an intrusive container,
an allocator should be provided to maintain variable randomly-calculated height of the node
if ( m_nAuxNodeAllocated.load( memory_model::memory_order_relaxed ) < capacity()) {
// alloc next free node from m_auxNode
size_t const idx = m_nAuxNodeAllocated.fetch_add( 1, memory_model::memory_order_relaxed );
- if ( idx < capacity() ) {
- CDS_TSAN_ANNOTATE_NEW_MEMORY( &m_auxNode[idx], sizeof( aux_node_type ) );
+ if ( idx < capacity()) {
+ CDS_TSAN_ANNOTATE_NEW_MEMORY( &m_auxNode[idx], sizeof( aux_node_type ));
return new( &m_auxNode[idx] ) aux_node_type();
}
}
if ( aux_segment->aux_node_count.load( memory_model::memory_order_acquire ) < m_metrics.nSegmentSize ) {
size_t idx = aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
if ( idx < m_metrics.nSegmentSize ) {
- CDS_TSAN_ANNOTATE_NEW_MEMORY( aux_segment->segment() + idx, sizeof( aux_node_type ) );
+ CDS_TSAN_ANNOTATE_NEW_MEMORY( aux_segment->segment() + idx, sizeof( aux_node_type ));
return new( aux_segment->segment() + idx ) aux_node_type();
}
}
new_aux_segment->next_segment = aux_segment;
new_aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
- if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_release, atomics::memory_order_acquire ) ) {
- CDS_TSAN_ANNOTATE_NEW_MEMORY( new_aux_segment->segment(), sizeof( aux_node_type ) );
- return new( new_aux_segment->segment() ) aux_node_type();
+ if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_release, atomics::memory_order_acquire )) {
+ CDS_TSAN_ANNOTATE_NEW_MEMORY( new_aux_segment->segment(), sizeof( aux_node_type ));
+ return new( new_aux_segment->segment()) aux_node_type();
}
free_aux_segment( new_aux_segment );
aux_node_segment* allocate_aux_segment()
{
char* p = raw_allocator().allocate( sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
- CDS_TSAN_ANNOTATE_NEW_MEMORY( p, sizeof( aux_node_segment ) );
+ CDS_TSAN_ANNOTATE_NEW_MEMORY( p, sizeof( aux_node_segment ));
return new(p) aux_node_segment();
}
}
static void free_update_desc_void( void* pDesc )
{
- free_update_desc( reinterpret_cast<update_desc*>( pDesc ) );
+ free_update_desc( reinterpret_cast<update_desc*>( pDesc ));
}
class retired_list
return false;
}
- if ( !pos.splitter.eos() ) {
+ if ( !pos.splitter.eos()) {
// the slot must be expanded
base_class::expand_slot( pos, slot );
}
}
if ( bInsert ) {
- if ( !pos.splitter.eos() ) {
+ if ( !pos.splitter.eos()) {
// the slot must be expanded
base_class::expand_slot( pos, slot );
}
*/
bool erase_at( iterator const& iter )
{
- assert( iter != end() );
+ assert( iter != end());
- marked_data_ptr val( iter.data() );
- if ( iter.m_pNode->data.compare_exchange_strong( val, marked_data_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
+ marked_data_ptr val( iter.data());
+ if ( iter.m_pNode->data.compare_exchange_strong( val, marked_data_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
--m_ItemCounter;
- retire_data( val.ptr() );
+ retire_data( val.ptr());
m_Stat.onEraseSuccess();
return true;
}
erase_at( Iterator const& iter )
#endif
{
- assert( iter != end() );
+ assert( iter != end());
assert( iter.bucket() != nullptr );
if ( iter.bucket()->erase_at( iter.underlying_iterator())) {
erase_at( Iterator const& iter )
#endif
{
- assert( iter != end() );
+ assert( iter != end());
if ( m_List.erase_at( iter.underlying_iterator())) {
--m_ItemCounter;
p->m_nHash = nHash;
CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
# ifdef CDS_DEBUG
- cds_assert( !p->m_busy.load( atomics::memory_order_acquire ) );
+ cds_assert( !p->m_busy.load( atomics::memory_order_acquire ));
p->m_busy.store( true, atomics::memory_order_release );
# endif
}
void free_aux_node( aux_node_type * p )
{
# ifdef CDS_DEBUG
- cds_assert( p->m_busy.load( atomics::memory_order_acquire ) );
+ cds_assert( p->m_busy.load( atomics::memory_order_acquire ));
p->m_busy.store( false, atomics::memory_order_release );
# endif
scoped_resize_lock al( m_MutexPolicy );
if ( al.success()) {
- if ( nOldCapacity != bucket_count( atomics::memory_order_acquire ) ) {
+ if ( nOldCapacity != bucket_count( atomics::memory_order_acquire )) {
// someone resized already
return;
}
, m_nBucketMask( c_nMinimalCapacity - 1 )
, m_MutexPolicy( c_nMinimalCapacity )
{
- alloc_bucket_table( bucket_count() );
+ alloc_bucket_table( bucket_count());
}
/// Ctor with initial capacity specified
)
: m_Buckets( nullptr )
, m_nBucketMask( calc_init_capacity(nCapacity) - 1 )
- , m_MutexPolicy( bucket_count() )
+ , m_MutexPolicy( bucket_count())
{
- alloc_bucket_table( bucket_count() );
+ alloc_bucket_table( bucket_count());
}
/// Ctor with resizing policy (copy semantics)
)
: m_Buckets( nullptr )
, m_nBucketMask( ( nCapacity ? calc_init_capacity(nCapacity) : c_nMinimalCapacity ) - 1 )
- , m_MutexPolicy( bucket_count() )
+ , m_MutexPolicy( bucket_count())
, m_ResizingPolicy( resizingPolicy )
{
- alloc_bucket_table( bucket_count() );
+ alloc_bucket_table( bucket_count());
}
/// Ctor with resizing policy (move semantics)
)
: m_Buckets( nullptr )
, m_nBucketMask( ( nCapacity ? calc_init_capacity(nCapacity) : c_nMinimalCapacity ) - 1 )
- , m_MutexPolicy( bucket_count() )
+ , m_MutexPolicy( bucket_count())
, m_ResizingPolicy( std::forward<resizing_policy>( resizingPolicy ))
{
- alloc_bucket_table( bucket_count() );
+ alloc_bucket_table( bucket_count());
}
/// Destructor destroys internal data
~StripedSet()
{
- free_bucket_table( m_Buckets, bucket_count() );
+ free_bucket_table( m_Buckets, bucket_count());
}
public:
}
// The pool is empty
- CDS_THROW_EXCEPTION( std::bad_alloc() );
+ CDS_THROW_EXCEPTION( std::bad_alloc());
}
ok:
assert( cds::beans::is_power2( nAlign ));
pointer p = reinterpret_cast<T *>( cds::OS::aligned_malloc( sizeof(T) * nCount, nAlign ));
if ( !p )
- CDS_THROW_EXCEPTION( std::bad_alloc() );
+ CDS_THROW_EXCEPTION( std::bad_alloc());
assert( cds::details::is_aligned( p, nAlign ));
return p;
}
/// Unlock the spin-lock
void unlock() CDS_NOEXCEPT
{
- assert( is_taken( OS::get_current_thread_id() ));
+ assert( is_taken( OS::get_current_thread_id()));
integral_type n = m_spin.load( atomics::memory_order_relaxed );
if ( n > 1 )
/// Change the owner of locked spin-lock. May be called by thread that owns spin-lock
void change_owner( OS::ThreadId newOwnerId ) CDS_NOEXCEPT
{
- assert( is_taken( OS::get_current_thread_id() ));
+ assert( is_taken( OS::get_current_thread_id()));
assert( newOwnerId != OS::c_NullThreadId );
m_OwnerId = newOwnerId;
static void init()
{
pthread_error_code nErr;
- if ( ( nErr = pthread_key_create( &m_key, key_destructor ) ) != 0 )
+ if ( ( nErr = pthread_key_create( &m_key, key_destructor )) != 0 )
CDS_THROW_EXCEPTION( pthread_exception( nErr, "pthread_key_create" ));
}
static void fini()
{
pthread_error_code nErr;
- if ( ( nErr = pthread_key_delete( m_key ) ) != 0 )
+ if ( ( nErr = pthread_key_delete( m_key )) != 0 )
CDS_THROW_EXCEPTION( pthread_exception( nErr, "pthread_key_delete" ));
}
{
pthread_error_code nErr;
ThreadData * pData = new ThreadData;
- if ( ( nErr = pthread_setspecific( m_key, pData ) ) != 0 )
+ if ( ( nErr = pthread_setspecific( m_key, pData )) != 0 )
CDS_THROW_EXCEPTION( pthread_exception( nErr, "pthread_setspecific" ));
}
static void free()
assert( pData );
if ( pData ) {
- if ( pData->fini() )
+ if ( pData->fini())
_threadData( do_detachThread );
}
else
static void init()
{
if ( m_key == TLS_OUT_OF_INDEXES ) {
- if ( ( m_key = ::TlsAlloc() ) == TLS_OUT_OF_INDEXES )
+ if ( ( m_key = ::TlsAlloc()) == TLS_OUT_OF_INDEXES )
CDS_THROW_EXCEPTION( api_exception( ::GetLastError(), "TlsAlloc" ));
}
}
{
api_error_code nErr;
void * pData = ::TlsGetValue( m_key );
- if ( pData == nullptr && ( nErr = ::GetLastError() ) != ERROR_SUCCESS )
+ if ( pData == nullptr && ( nErr = ::GetLastError()) != ERROR_SUCCESS )
CDS_THROW_EXCEPTION( api_exception( nErr, "TlsGetValue" ));
return reinterpret_cast<ThreadData *>( pData );
}
assert( (tmp & rcu_class::c_nNestMask) > 0 );
#if CDS_COMPILER == CDS_COMPILER_CLANG && CDS_COMPILER_VERSION < 30800
- // CLang 3.6-3.7: some tests of intrusive::FeldmanHashSet based on general-purpose RCU
+ // CLang 3.6-3.7: some tests of intrusive::FeldmanHashSet based on general-purpose RCU
// are failed even in single-threaded mode (unit tests) without magic compiler barrier below
CDS_COMPILER_RW_BARRIER;
#endif
bool synchronize( epoch_retired_ptr& ep )
{
uint64_t nEpoch;
- atomics::atomic_thread_fence( atomics::memory_order_acquire );
{
std::unique_lock<lock_type> sl( m_Lock );
if ( ep.m_p && m_Buffer.push( ep ))
flip_and_wait();
}
clear_buffer( nEpoch );
- atomics::atomic_thread_fence( atomics::memory_order_release );
return true;
}
//@endcond
bool synchronize( epoch_retired_ptr& ep )
{
uint64_t nEpoch;
- atomics::atomic_thread_fence( atomics::memory_order_acquire );
{
std::unique_lock<lock_type> sl( m_Lock );
if ( ep.m_p && m_Buffer.push( ep ) && m_Buffer.size() < capacity())
2.4.0
+ - issue #81: bug in gc::DHP when extending thread's retired array,
+ thanks to gtroxler (https://github.com/gtroxler)
2.3.0 31.07.2017
General release
# This could be handy for archiving the generated documentation or
# if some version control system is used.
-PROJECT_NUMBER = 2.3.0
+PROJECT_NUMBER = 2.4.0
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
# base path where the generated documentation will be put.
- *nix: [use CMake](build/cmake/readme.md)\r
- Windows: use MS Visual C++ 2015 project\r
\r
+Some parts of libcds may depend on the DCAS (double-width compare-and-swap) atomic primitive if\r
+the target architecture supports it. For x86, the cmake build script enables the `-mcx16` compiler flag that\r
+switches DCAS support on. You may manually disable DCAS support with the following command line flags\r
+in GCC/clang (for the MS VC++ compiler DCAS is not supported):\r
+ - `-DCDS_DISABLE_128BIT_ATOMIC` - for 64bit build\r
+ - `-DCDS_DISABLE_64BIT_ATOMIC` - for 32bit build\r
+\r
+**All your projects AND libcds MUST be compiled with the same flags - either with DCAS support or without it.**\r
+\r
+\r
**Pull request requirements**\r
- A pull request to the *master* branch will be unconditionally rejected\r
- The *integration* branch is intended for pull requests. Usually, the *integration* branch is the same as *master*\r
else {
// allocate new block
gb = new( s_alloc_memory( sizeof( guard_block ) + sizeof( guard ) * defaults::c_extended_guard_block_size )) guard_block;
- new ( gb->first() ) guard[defaults::c_extended_guard_block_size];
+ new ( gb->first()) guard[defaults::c_extended_guard_block_size];
CDS_HPSTAT( block_allocated_.fetch_add( 1, atomics::memory_order_relaxed ));
}
CDS_EXPORT_API retired_allocator::~retired_allocator()
{
- while ( retired_block* rb = static_cast<retired_block*>( free_list_.get() ) ) {
+ while ( retired_block* rb = static_cast<retired_block*>( free_list_.get()) ) {
rb->~retired_block();
s_free_memory( rb );
}
// allocate new block
rb = new( s_alloc_memory( sizeof( retired_block ) + sizeof( retired_ptr ) * retired_block::c_capacity )) retired_block;
new ( rb->first()) retired_ptr[retired_block::c_capacity];
- CDS_HPSTAT( block_allocated_.fetch_add( 1, atomics::memory_order_relaxed ) );
+ CDS_HPSTAT( block_allocated_.fetch_add( 1, atomics::memory_order_relaxed ));
}
rb->next_ = nullptr;
CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::get_current_thread_id(); )
- CDS_HPSTAT( statistics( s_postmortem_stat ) );
+ CDS_HPSTAT( statistics( s_postmortem_stat ));
thread_record* pHead = thread_list_.load( atomics::memory_order_relaxed );
thread_list_.store( nullptr, atomics::memory_order_release );
thread_data* rec = tls_;
if ( rec ) {
tls_ = nullptr;
- instance().free_thread_data( static_cast<thread_record*>( rec ) );
+ instance().free_thread_data( static_cast<thread_record*>( rec ));
}
}
char* mem = reinterpret_cast<char*>( s_alloc_memory( sizeof( thread_record ) + guard_array_size ));
return new( mem ) thread_record(
- reinterpret_cast<guard*>( mem + sizeof( thread_record ) ), initial_hazard_count_
+ reinterpret_cast<guard*>( mem + sizeof( thread_record )), initial_hazard_count_
);
}
const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
// First try to reuse a free (non-active) DHP record
- for ( hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_acquire ) ) {
+ for ( hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_acquire )) {
cds::OS::ThreadId thId = nullThreadId;
- if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_relaxed, atomics::memory_order_relaxed ) )
+ if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_relaxed, atomics::memory_order_relaxed ))
continue;
hprec->m_bFree.store( false, atomics::memory_order_release );
break;
}
-
+
if ( !hprec ) {
// No HP records available for reuse
// Allocate and push a new HP record
thread_record* pOldHead = thread_list_.load( atomics::memory_order_acquire );
do {
hprec->m_pNextNode.store( pOldHead, atomics::memory_order_release );
- } while ( !thread_list_.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_acquire ) );
+ } while ( !thread_list_.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_acquire ));
}
hprec->hazards_.init();
scan( pRec );
help_scan( pRec );
- if ( pRec->retired_.empty() ) {
+ if ( pRec->retired_.empty()) {
pRec->retired_.fini();
pRec->m_bFree.store( true, std::memory_order_release );
}
if ( pNode->m_idOwner.load( std::memory_order_relaxed ) != cds::OS::c_NullThreadId ) {
copy_hazards( plist, pNode->hazards_.array_, pNode->hazards_.initial_capacity_ );
- for ( guard_block* block = pNode->hazards_.extended_list_.load( atomics::memory_order_acquire );
+ for ( guard_block* block = pNode->hazards_.extended_list_.load( atomics::memory_order_acquire );
block;
- block = block->next_block_.load( atomics::memory_order_acquire ) )
+ block = block->next_block_.load( atomics::memory_order_acquire ))
{
copy_hazards( plist, block->first(), defaults::c_extended_guard_block_size );
}
last_plist_size_.compare_exchange_weak( plist_size, plist.size(), std::memory_order_relaxed, std::memory_order_relaxed );
// Sort plist to simplify search in
- std::sort( plist.begin(), plist.end() );
+ std::sort( plist.begin(), plist.end());
// Stage 2: Search plist
size_t free_count = 0;
CDS_HPSTAT( pRec->free_call_count_ += free_count );
// If the count of freed elements is too small, increase retired array
- if ( free_count < retired_count / 4 && last_block == pRec->retired_.list_tail_ && last_block_cell == last_block->last() )
+ if ( free_count < retired_count / 4 && last_block == pRec->retired_.list_tail_ && last_block_cell == last_block->last())
pRec->retired_.extend();
}
CDS_EXPORT_API void smr::help_scan( thread_data* pThis )
{
- assert( static_cast<thread_record*>( pThis )->m_idOwner.load( atomics::memory_order_relaxed ) == cds::OS::get_current_thread_id() );
+ assert( static_cast<thread_record*>( pThis )->m_idOwner.load( atomics::memory_order_relaxed ) == cds::OS::get_current_thread_id());
CDS_HPSTAT( ++pThis->help_scan_call_count_ );
const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
- for ( thread_record* hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed ) )
+ for ( thread_record* hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed ))
{
if ( hprec == static_cast<thread_record*>( pThis ))
continue;
// If m_bFree == true then hprec->retired_ is empty - we don't need to see it
- if ( hprec->m_bFree.load( atomics::memory_order_acquire ) ) {
+ if ( hprec->m_bFree.load( atomics::memory_order_acquire )) {
CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
- assert( hprec->retired_.empty() );
+ assert( hprec->retired_.empty());
CDS_TSAN_ANNOTATE_IGNORE_READS_END;
continue;
}
{
cds::OS::ThreadId curOwner = hprec->m_idOwner.load( atomics::memory_order_relaxed );
if ( curOwner == nullThreadId ) {
- if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
+ if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed ))
continue;
}
else
for ( retired_block* block = src.list_head_; block; block = block->next_ ) {
retired_ptr* last = block == src.current_block_ ? src.current_cell_ : block->last();
for ( retired_ptr* p = block->first(); p != last; ++p ) {
- if ( !dest.push( *p ) )
+ if ( !dest.push( *p ))
scan( pThis );
}
{
st.clear();
# ifdef CDS_ENABLE_HPSTAT
- for ( thread_record* hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed ) )
+ for ( thread_record* hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed ))
{
CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
++st.thread_rec_count;
static T* allocate( size_t nCount )
{
- return reinterpret_cast<T*>( s_alloc_memory( sizeof( value_type ) * nCount ) );
+ return reinterpret_cast<T*>( s_alloc_memory( sizeof( value_type ) * nCount ));
}
static void deallocate( T* p, size_t /*nCount*/ )
{
- s_free_memory( reinterpret_cast<void*>( p ) );
+ s_free_memory( reinterpret_cast<void*>( p ));
}
};
CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::get_current_thread_id();)
- CDS_HPSTAT( statistics( s_postmortem_stat ) );
+ CDS_HPSTAT( statistics( s_postmortem_stat ));
thread_record* pHead = thread_list_.load( atomics::memory_order_relaxed );
thread_list_.store( nullptr, atomics::memory_order_release );
thread_record* pOldHead = thread_list_.load( atomics::memory_order_relaxed );
do {
hprec->m_pNextNode.store( pOldHead, atomics::memory_order_release );
- } while ( !thread_list_.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_acquire ) );
+ } while ( !thread_list_.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_acquire ));
return hprec;
}
}
// Sort plist to simplify search in
- std::sort( plist.begin(), plist.end() );
+ std::sort( plist.begin(), plist.end());
// Stage 2: Search plist
retired_array& retired = pRec->retired_;
auto itEnd = plist.end();
retired_ptr* insert_pos = first_retired;
for ( retired_ptr* it = first_retired; it != last_retired; ++it ) {
- if ( std::binary_search( itBegin, itEnd, first_retired->m_p ) ) {
+ if ( std::binary_search( itBegin, itEnd, first_retired->m_p )) {
if ( insert_pos != it )
*insert_pos = *it;
++insert_pos;
CDS_EXPORT_API void smr::help_scan( thread_data* pThis )
{
- assert( static_cast<thread_record*>( pThis )->m_idOwner.load( atomics::memory_order_relaxed ) == cds::OS::get_current_thread_id() );
+ assert( static_cast<thread_record*>( pThis )->m_idOwner.load( atomics::memory_order_relaxed ) == cds::OS::get_current_thread_id());
CDS_HPSTAT( ++pThis->help_scan_count_ );
{
cds::OS::ThreadId curOwner = hprec->m_idOwner.load( atomics::memory_order_relaxed );
if ( curOwner == nullThreadId ) {
- if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
+ if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed ))
continue;
}
else
// If it has ones then we move them to pThis that is private for current thread.
retired_array& src = hprec->retired_;
retired_array& dest = pThis->retired_;
- assert( !dest.full() );
+ assert( !dest.full());
retired_ptr* src_first = src.first();
retired_ptr* src_last = src.last();
{
st.clear();
# ifdef CDS_ENABLE_HPSTAT
- for ( thread_record* hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed ) )
+ for ( thread_record* hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed ))
{
CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
++st.thread_rec_count;
CDS_EXPORT_API void ThreadData::init()
{
if ( m_nAttachCount++ == 0 ) {
- if ( cds::gc::HP::isUsed() )
+ if ( cds::gc::HP::isUsed())
cds::gc::hp::smr::attach_thread();
- if ( cds::gc::DHP::isUsed() )
+ if ( cds::gc::DHP::isUsed())
cds::gc::dhp::smr::attach_thread();
- if ( cds::urcu::details::singleton<cds::urcu::general_instant_tag>::isUsed() )
+ if ( cds::urcu::details::singleton<cds::urcu::general_instant_tag>::isUsed())
m_pGPIRCU = cds::urcu::details::singleton<cds::urcu::general_instant_tag>::attach_thread();
- if ( cds::urcu::details::singleton<cds::urcu::general_buffered_tag>::isUsed() )
+ if ( cds::urcu::details::singleton<cds::urcu::general_buffered_tag>::isUsed())
m_pGPBRCU = cds::urcu::details::singleton<cds::urcu::general_buffered_tag>::attach_thread();
- if ( cds::urcu::details::singleton<cds::urcu::general_threaded_tag>::isUsed() )
+ if ( cds::urcu::details::singleton<cds::urcu::general_threaded_tag>::isUsed())
m_pGPTRCU = cds::urcu::details::singleton<cds::urcu::general_threaded_tag>::attach_thread();
#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
- if ( cds::urcu::details::singleton<cds::urcu::signal_buffered_tag>::isUsed() )
+ if ( cds::urcu::details::singleton<cds::urcu::signal_buffered_tag>::isUsed())
m_pSHBRCU = cds::urcu::details::singleton<cds::urcu::signal_buffered_tag>::attach_thread();
#endif
}
CDS_EXPORT_API bool ThreadData::fini()
{
if ( --m_nAttachCount == 0 ) {
- if ( cds::gc::DHP::isUsed() )
+ if ( cds::gc::DHP::isUsed())
cds::gc::dhp::smr::detach_thread();
- if ( cds::gc::HP::isUsed() )
+ if ( cds::gc::HP::isUsed())
cds::gc::hp::smr::detach_thread();
- if ( cds::urcu::details::singleton<cds::urcu::general_instant_tag>::isUsed() ) {
+ if ( cds::urcu::details::singleton<cds::urcu::general_instant_tag>::isUsed()) {
cds::urcu::details::singleton<cds::urcu::general_instant_tag>::detach_thread( m_pGPIRCU );
m_pGPIRCU = nullptr;
}
- if ( cds::urcu::details::singleton<cds::urcu::general_buffered_tag>::isUsed() ) {
+ if ( cds::urcu::details::singleton<cds::urcu::general_buffered_tag>::isUsed()) {
cds::urcu::details::singleton<cds::urcu::general_buffered_tag>::detach_thread( m_pGPBRCU );
m_pGPBRCU = nullptr;
}
- if ( cds::urcu::details::singleton<cds::urcu::general_threaded_tag>::isUsed() ) {
+ if ( cds::urcu::details::singleton<cds::urcu::general_threaded_tag>::isUsed()) {
cds::urcu::details::singleton<cds::urcu::general_threaded_tag>::detach_thread( m_pGPTRCU );
m_pGPTRCU = nullptr;
}
#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
- if ( cds::urcu::details::singleton<cds::urcu::signal_buffered_tag>::isUsed() ) {
+ if ( cds::urcu::details::singleton<cds::urcu::signal_buffered_tag>::isUsed()) {
cds::urcu::details::singleton<cds::urcu::signal_buffered_tag>::detach_thread( m_pSHBRCU );
m_pSHBRCU = nullptr;
}
#define bswap_64(value) (((uint64_t)bswap_32((uint32_t)((value) & 0xffffffff)) << 32) | (uint64_t)bswap_32((uint32_t)((value) >> 32)))
#endif
-#endif // CDSTEST_BYTESWAP_H
\ No newline at end of file
+#endif // CDSTEST_BYTESWAP_H
template <typename C, void (C::*) (size_t)> class selector
{};
- template <typename C> static select_small test( selector<C, &C::set_array>* ) ;
+ template <typename C> static select_small test( selector<C, &C::set_array>* );
template <typename C> static select_big test(...);
public:
- static constexpr bool value = sizeof(test<T>(0)) == sizeof(char) ;
+ static constexpr bool value = sizeof(test<T>(0)) == sizeof(char);
};
template<int DefaultSize = 10>
pop_buff[i] = static_cast<int>( std::sqrt( std::abs( static_cast<double>( pop_buff[i] ) * rand())));
}
- void set_array(size_t new_size)
+ void set_array(size_t new_size)
{
set_array_size(new_size);
}
cds_test::config const& general_cfg = cds_test::stress_fixture::get_config( "General" );\r
\r
// Init SMR\r
- cds::gc::HP hzpGC( \r
+ cds::gc::HP hzpGC(\r
general_cfg.get_size_t( "hazard_pointer_count", 16 ),\r
general_cfg.get_size_t( "hp_max_thread_count", 0 ),\r
general_cfg.get_size_t( "hp_retired_ptr_count", 0 ),\r
{
return std::to_string( p.param );
}
- INSTANTIATE_TEST_CASE_P( a, Map_Del3_LF, ::testing::ValuesIn( Map_Del3_LF::get_load_factors() ), get_test_parameter_name );
+ INSTANTIATE_TEST_CASE_P( a, Map_Del3_LF, ::testing::ValuesIn( Map_Del3_LF::get_load_factors()), get_test_parameter_name );
#else
- INSTANTIATE_TEST_CASE_P( a, Map_Del3_LF, ::testing::ValuesIn( Map_Del3_LF::get_load_factors() ) );
+ INSTANTIATE_TEST_CASE_P( a, Map_Del3_LF, ::testing::ValuesIn( Map_Del3_LF::get_load_factors()) );
#endif
} // namespace map
{
return std::to_string( p.param );
}
- INSTANTIATE_TEST_CASE_P( a, Map_DelOdd_LF, ::testing::ValuesIn( Map_DelOdd_LF::get_load_factors() ), get_test_parameter_name );
+ INSTANTIATE_TEST_CASE_P( a, Map_DelOdd_LF, ::testing::ValuesIn( Map_DelOdd_LF::get_load_factors()), get_test_parameter_name );
#else
- INSTANTIATE_TEST_CASE_P( a, Map_DelOdd_LF, ::testing::ValuesIn( Map_DelOdd_LF::get_load_factors() ) );
+ INSTANTIATE_TEST_CASE_P( a, Map_DelOdd_LF, ::testing::ValuesIn( Map_DelOdd_LF::get_load_factors()) );
#endif
} // namespace map
{
return std::to_string( p.param );
}
- INSTANTIATE_TEST_CASE_P( a, Map_find_string_LF, ::testing::ValuesIn( Map_find_string::get_load_factors() ), get_test_parameter_name );
+ INSTANTIATE_TEST_CASE_P( a, Map_find_string_LF, ::testing::ValuesIn( Map_find_string::get_load_factors()), get_test_parameter_name );
#else
- INSTANTIATE_TEST_CASE_P( a, Map_find_string_LF, ::testing::ValuesIn( Map_find_string::get_load_factors() ) );
+ INSTANTIATE_TEST_CASE_P( a, Map_find_string_LF, ::testing::ValuesIn( Map_find_string::get_load_factors()) );
#endif
{
return std::to_string( p.param );
}
- INSTANTIATE_TEST_CASE_P( a, Map_InsDel_func_LF, ::testing::ValuesIn( Map_InsDel_func_LF::get_load_factors() ), get_test_parameter_name );
+ INSTANTIATE_TEST_CASE_P( a, Map_InsDel_func_LF, ::testing::ValuesIn( Map_InsDel_func_LF::get_load_factors()), get_test_parameter_name );
#else
- INSTANTIATE_TEST_CASE_P( a, Map_InsDel_func_LF, ::testing::ValuesIn( Map_InsDel_func_LF::get_load_factors() ) );
+ INSTANTIATE_TEST_CASE_P( a, Map_InsDel_func_LF, ::testing::ValuesIn( Map_InsDel_func_LF::get_load_factors()) );
#endif
} // namespace map
{
return std::to_string( p.param );
}
- INSTANTIATE_TEST_CASE_P( a, Map_InsDel_item_int_LF, ::testing::ValuesIn( Map_InsDel_item_int_LF::get_load_factors() ), get_test_parameter_name );
+ INSTANTIATE_TEST_CASE_P( a, Map_InsDel_item_int_LF, ::testing::ValuesIn( Map_InsDel_item_int_LF::get_load_factors()), get_test_parameter_name );
#else
- INSTANTIATE_TEST_CASE_P( a, Map_InsDel_item_int_LF, ::testing::ValuesIn( Map_InsDel_item_int_LF::get_load_factors() ) );
+ INSTANTIATE_TEST_CASE_P( a, Map_InsDel_item_int_LF, ::testing::ValuesIn( Map_InsDel_item_int_LF::get_load_factors()) );
#endif
} // namespace map
{
return std::to_string( p.param );
}
- INSTANTIATE_TEST_CASE_P( a, Map_InsDel_string_LF, ::testing::ValuesIn( Map_InsDel_string::get_load_factors() ), get_test_parameter_name );
+ INSTANTIATE_TEST_CASE_P( a, Map_InsDel_string_LF, ::testing::ValuesIn( Map_InsDel_string::get_load_factors()), get_test_parameter_name );
#else
- INSTANTIATE_TEST_CASE_P( a, Map_InsDel_string_LF, ::testing::ValuesIn( Map_InsDel_string::get_load_factors() ) );
+ INSTANTIATE_TEST_CASE_P( a, Map_InsDel_string_LF, ::testing::ValuesIn( Map_InsDel_string::get_load_factors()) );
#endif
} // namespace map
{
return std::to_string( p.param );
}
- INSTANTIATE_TEST_CASE_P( a, Map_InsDelFind_LF, ::testing::ValuesIn( Map_InsDelFind_LF::get_load_factors() ), get_test_parameter_name );
+ INSTANTIATE_TEST_CASE_P( a, Map_InsDelFind_LF, ::testing::ValuesIn( Map_InsDelFind_LF::get_load_factors()), get_test_parameter_name );
#else
- INSTANTIATE_TEST_CASE_P( a, Map_InsDelFind_LF, ::testing::ValuesIn( Map_InsDelFind_LF::get_load_factors() ) );
+ INSTANTIATE_TEST_CASE_P( a, Map_InsDelFind_LF, ::testing::ValuesIn( Map_InsDelFind_LF::get_load_factors()) );
#endif
} // namespace map
{
return std::to_string( p.param );
}
- INSTANTIATE_TEST_CASE_P( a, Map_Iter_Del3_LF, ::testing::ValuesIn( Map_Iter_Del3_LF::get_load_factors() ), get_test_parameter_name );
+ INSTANTIATE_TEST_CASE_P( a, Map_Iter_Del3_LF, ::testing::ValuesIn( Map_Iter_Del3_LF::get_load_factors()), get_test_parameter_name );
#else
- INSTANTIATE_TEST_CASE_P( a, Map_Iter_Del3_LF, ::testing::ValuesIn( Map_Iter_Del3_LF::get_load_factors() ) );
+ INSTANTIATE_TEST_CASE_P( a, Map_Iter_Del3_LF, ::testing::ValuesIn( Map_Iter_Del3_LF::get_load_factors()) );
#endif
} // namespace map
auto itEnd = rMap.template get_end<Iterator>();
for ( auto it = rMap.template get_begin<Iterator>(); it != itEnd; ++it ) {
if ( it->first.nKey & 3 ) {
- if ( rMap.erase_at( it ) )
+ if ( rMap.erase_at( it ))
++m_nDeleteSuccess;
else
++m_nDeleteFailed;
m_arr.push_back( i );
for ( key_type i = keyMax - 10; i <= keyMax; ++i )
m_arr.push_back( i );
- shuffle( m_arr.begin(), m_arr.end() );
+ shuffle( m_arr.begin(), m_arr.end());
}
public:
for ( size_t nPass = 0; nPass < s_nPassCount; ++nPass ) {
for ( key_type key : m_arr ) {
- if ( m_Map.insert( key, key ) ) {
+ if ( m_Map.insert( key, key )) {
if ( key == keyMin )
++m_nInsertMinSuccess;
else if ( key == keyMax )
arr.resize( s_nMapSize );
for ( int i = 0; i < static_cast<int>( s_nMapSize ); ++i )
        arr[i] = i;
- shuffle( arr.begin(), arr.end() );
+ shuffle( arr.begin(), arr.end());
for ( key_type key : arr )
testMap.insert( key, key );
INSTANTIATE_TEST_CASE_P( SQ,
intrusive_segmented_queue_push_pop,
- ::testing::ValuesIn( intrusive_segmented_queue_push_pop::get_test_parameters() ), get_test_parameter_name );
+ ::testing::ValuesIn( intrusive_segmented_queue_push_pop::get_test_parameters()), get_test_parameter_name );
#else
INSTANTIATE_TEST_CASE_P( SQ,
intrusive_segmented_queue_push_pop,
- ::testing::ValuesIn( intrusive_segmented_queue_push_pop::get_test_parameters() ) );
+ ::testing::ValuesIn( intrusive_segmented_queue_push_pop::get_test_parameters()) );
#endif
}
INSTANTIATE_TEST_CASE_P( SQ,
segmented_queue_pop,
- ::testing::ValuesIn( segmented_queue_pop::get_test_parameters() ), get_test_parameter_name );
+ ::testing::ValuesIn( segmented_queue_pop::get_test_parameters()), get_test_parameter_name );
#else
INSTANTIATE_TEST_CASE_P( SQ,
segmented_queue_pop,
- ::testing::ValuesIn( segmented_queue_pop::get_test_parameters() ) );
+ ::testing::ValuesIn( segmented_queue_pop::get_test_parameters()) );
#endif
}
INSTANTIATE_TEST_CASE_P( SQ,
segmented_queue_push,
- ::testing::ValuesIn( segmented_queue_push::get_test_parameters() ), get_test_parameter_name );
+ ::testing::ValuesIn( segmented_queue_push::get_test_parameters()), get_test_parameter_name );
#else
INSTANTIATE_TEST_CASE_P( SQ,
segmented_queue_push,
- ::testing::ValuesIn( segmented_queue_push::get_test_parameters() ) );
+ ::testing::ValuesIn( segmented_queue_push::get_test_parameters()) );
#endif
}
INSTANTIATE_TEST_CASE_P( SQ,
segmented_queue_push_pop,
- ::testing::ValuesIn( segmented_queue_push_pop::get_test_parameters() ), get_test_parameter_name );
+ ::testing::ValuesIn( segmented_queue_push_pop::get_test_parameters()), get_test_parameter_name );
#else
INSTANTIATE_TEST_CASE_P( SQ,
segmented_queue_push_pop,
- ::testing::ValuesIn( segmented_queue_push_pop::get_test_parameters() ) );
+ ::testing::ValuesIn( segmented_queue_push_pop::get_test_parameters()) );
#endif
} // namespace
#else
INSTANTIATE_TEST_CASE_P( SQ,
segmented_queue_random,
- ::testing::ValuesIn( segmented_queue_random::get_test_parameters() ));
+ ::testing::ValuesIn( segmented_queue_random::get_test_parameters()));
#endif
} // namespace
}
}
- if ( !m_Queue.pop_front() )
+ if ( !m_Queue.pop_front())
++m_nPopFrontFailed;
}
else {
++m_nPopEmpty;
if ( s_nProducerDone.load() != 0 ) {
- if ( m_Queue.empty() )
+ if ( m_Queue.empty())
break;
}
}
m_nPushFailed = 0;
for ( value_type v = 0; v < nPushCount; ++v ) {
- if ( !m_Queue.push( v ) ) {
+ if ( !m_Queue.push( v )) {
++m_nPushFailed;
--v;
}
}
bool push( const T& data )
- {
+ {
return enqueue( data );
}
}
bool pop( T& data )
- {
+ {
return dequeue( data );
}
{
return std::to_string( p.param );
}
- INSTANTIATE_TEST_CASE_P( a, Set_Del3_LF, ::testing::ValuesIn( Set_Del3_LF::get_load_factors() ), get_test_parameter_name );
+ INSTANTIATE_TEST_CASE_P( a, Set_Del3_LF, ::testing::ValuesIn( Set_Del3_LF::get_load_factors()), get_test_parameter_name );
#else
- INSTANTIATE_TEST_CASE_P( a, Set_Del3_LF, ::testing::ValuesIn( Set_Del3_LF::get_load_factors() ) );
+ INSTANTIATE_TEST_CASE_P( a, Set_Del3_LF, ::testing::ValuesIn( Set_Del3_LF::get_load_factors()) );
#endif
} // namespace set
{
return std::to_string( p.param );
}
- INSTANTIATE_TEST_CASE_P( a, Set_DelOdd_LF, ::testing::ValuesIn( Set_DelOdd_LF::get_load_factors() ), get_test_parameter_name );
+ INSTANTIATE_TEST_CASE_P( a, Set_DelOdd_LF, ::testing::ValuesIn( Set_DelOdd_LF::get_load_factors()), get_test_parameter_name );
#else
- INSTANTIATE_TEST_CASE_P( a, Set_DelOdd_LF, ::testing::ValuesIn( Set_DelOdd_LF::get_load_factors() ) );
+ INSTANTIATE_TEST_CASE_P( a, Set_DelOdd_LF, ::testing::ValuesIn( Set_DelOdd_LF::get_load_factors()) );
#endif
} // namespace set
{
return std::to_string( p.param );
}
- INSTANTIATE_TEST_CASE_P( a, Set_InsDelFind_LF, ::testing::ValuesIn( Set_InsDelFind_LF::get_load_factors() ), get_test_parameter_name );
+ INSTANTIATE_TEST_CASE_P( a, Set_InsDelFind_LF, ::testing::ValuesIn( Set_InsDelFind_LF::get_load_factors()), get_test_parameter_name );
#else
- INSTANTIATE_TEST_CASE_P( a, Set_InsDelFind_LF, ::testing::ValuesIn( Set_InsDelFind_LF::get_load_factors() ) );
+ INSTANTIATE_TEST_CASE_P( a, Set_InsDelFind_LF, ::testing::ValuesIn( Set_InsDelFind_LF::get_load_factors()) );
#endif
} // namespace set
{
return std::to_string( p.param );
}
- INSTANTIATE_TEST_CASE_P( a, Set_InsDel_func_LF, ::testing::ValuesIn( Set_InsDel_func_LF::get_load_factors() ), get_test_parameter_name );
+ INSTANTIATE_TEST_CASE_P( a, Set_InsDel_func_LF, ::testing::ValuesIn( Set_InsDel_func_LF::get_load_factors()), get_test_parameter_name );
#else
- INSTANTIATE_TEST_CASE_P( a, Set_InsDel_func_LF, ::testing::ValuesIn( Set_InsDel_func_LF::get_load_factors() ) );
+ INSTANTIATE_TEST_CASE_P( a, Set_InsDel_func_LF, ::testing::ValuesIn( Set_InsDel_func_LF::get_load_factors()) );
#endif
} // namespace set
{
return std::to_string( p.param );
}
- INSTANTIATE_TEST_CASE_P( a, Set_InsDel_string_LF, ::testing::ValuesIn( Set_InsDel_string_LF::get_load_factors() ), get_test_parameter_name );
+ INSTANTIATE_TEST_CASE_P( a, Set_InsDel_string_LF, ::testing::ValuesIn( Set_InsDel_string_LF::get_load_factors()), get_test_parameter_name );
#else
- INSTANTIATE_TEST_CASE_P( a, Set_InsDel_string_LF, ::testing::ValuesIn( Set_InsDel_string_LF::get_load_factors() ) );
+ INSTANTIATE_TEST_CASE_P( a, Set_InsDel_string_LF, ::testing::ValuesIn( Set_InsDel_string_LF::get_load_factors()) );
#endif
-
+
} // namespace set
{
return std::to_string( p.param );
}
- INSTANTIATE_TEST_CASE_P( a, Set_Iter_Del3_LF, ::testing::ValuesIn( Set_Iter_Del3_LF::get_load_factors() ), get_test_parameter_name );
+ INSTANTIATE_TEST_CASE_P( a, Set_Iter_Del3_LF, ::testing::ValuesIn( Set_Iter_Del3_LF::get_load_factors()), get_test_parameter_name );
#else
- INSTANTIATE_TEST_CASE_P( a, Set_Iter_Del3_LF, ::testing::ValuesIn( Set_Iter_Del3_LF::get_load_factors() ) );
+ INSTANTIATE_TEST_CASE_P( a, Set_Iter_Del3_LF, ::testing::ValuesIn( Set_Iter_Del3_LF::get_load_factors()) );
#endif
} // namespace set
{
return std::to_string( p.param );
}
- INSTANTIATE_TEST_CASE_P( a, Set_Iteration_LF, ::testing::ValuesIn( Set_Iteration_LF::get_load_factors() ), get_test_parameter_name );
+ INSTANTIATE_TEST_CASE_P( a, Set_Iteration_LF, ::testing::ValuesIn( Set_Iteration_LF::get_load_factors()), get_test_parameter_name );
#else
- INSTANTIATE_TEST_CASE_P( a, Set_Iteration_LF, ::testing::ValuesIn( Set_Iteration_LF::get_load_factors() ) );
+ INSTANTIATE_TEST_CASE_P( a, Set_Iteration_LF, ::testing::ValuesIn( Set_Iteration_LF::get_load_factors()) );
#endif
} // namespace set
{
typedef cc::FeldmanHashSet< GC, T, Traits > base_class;
-
+
template <typename GC2>
struct get_extracted_ptr
{
EXPECT_EQ( it->nKey, key );
EXPECT_EQ( ( *it ).nKey, key );
- EXPECT_TRUE( l.erase_at( it ) );
+ EXPECT_TRUE( l.erase_at( it ));
EXPECT_EQ( it->nKey, key );
EXPECT_EQ( ( *it ).nKey, key );
- EXPECT_FALSE( l.erase_at( it ) );
+ EXPECT_FALSE( l.erase_at( it ));
++key;
}
- EXPECT_TRUE( l.empty() );
+ EXPECT_TRUE( l.empty());
EXPECT_CONTAINER_SIZE( l, 0 );
List::gc::force_dispose();
// erase_at() test
for ( auto& i : data ) {
i.nDisposeCount = 0;
- ASSERT_TRUE( s.insert( i ) );
+ ASSERT_TRUE( s.insert( i ));
}
for ( auto it = s.begin(); it != s.end(); ++it ) {
EXPECT_TRUE( s.erase_at( it ));
EXPECT_FALSE( s.erase_at( it ));
}
- ASSERT_TRUE( s.empty() );
+ ASSERT_TRUE( s.empty());
ASSERT_CONTAINER_SIZE( s, 0 );
// Force retiring cycle
// erase_at()
for ( auto const& i : arrKeys )
- EXPECT_TRUE( m.insert( i ) );
- EXPECT_FALSE( m.empty() );
+ EXPECT_TRUE( m.insert( i ));
+ EXPECT_FALSE( m.empty());
EXPECT_CONTAINER_SIZE( m, kkSize );
for ( auto it = m.begin(); it != m.end(); ++it ) {
EXPECT_TRUE( m.erase_at( it ));
EXPECT_FALSE( m.erase_at( it ));
}
- EXPECT_TRUE( m.empty() );
+ EXPECT_TRUE( m.empty());
EXPECT_CONTAINER_SIZE( m, 0u );
}
};
{
Algo f;
for ( auto i : arr_ ) {
- EXPECT_EQ( cds::algo::bit_reversal::swar()( i ), f( i ) ) << "i=" << i;
+ EXPECT_EQ( cds::algo::bit_reversal::swar()( i ), f( i )) << "i=" << i;
}
}
};
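
The loop above cross-checks each bit-reversal implementation against the SWAR variant. A hedged usage sketch, assuming the swar and lookup functors from <cds/algo/bit_reversal.h> as used in these tests:

    #include <cds/algo/bit_reversal.h>
    #include <cassert>
    #include <cstdint>

    int main()
    {
        uint32_t const x = 0x12345678;
        uint32_t const r = cds::algo::bit_reversal::swar()( x );
        // Reversing twice restores the original value
        assert( cds::algo::bit_reversal::swar()( r ) == x );
        // Alternative implementations must agree with SWAR
        assert( cds::algo::bit_reversal::lookup()( x ) == r );
        return 0;
    }
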
#include <cds/algo/atomic.h>
#include "cxx11_convert_memory_order.h"
-#define EXPECT_ATOMIC_IS_LOCK_FREE( x ) EXPECT_TRUE( x.is_lock_free() )
+#define EXPECT_ATOMIC_IS_LOCK_FREE( x ) EXPECT_TRUE( x.is_lock_free())
namespace {
class cxx11_atomic_class: public ::testing::Test
size_t res;
// Trivial case
- ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( splitter.eos());
ASSERT_FALSE( !splitter );
res = splitter.cut( sizeof( src ) * 8 );
EXPECT_EQ( res, src );
- ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
EXPECT_EQ( splitter.safe_cut( sizeof( src ) * 8 ), 0u );
- ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
splitter.reset();
- ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( splitter.eos());
ASSERT_FALSE( !splitter );
res = splitter.cut( sizeof( src ) * 8 );
EXPECT_EQ( res, src );
- ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
EXPECT_EQ( splitter.safe_cut( sizeof( src ) * 8 ), 0u );
- ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
EXPECT_EQ( *splitter.source(), src );
size_t res;
// Trivial case
- ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( splitter.eos());
ASSERT_FALSE( !splitter );
res = splitter.cut( sizeof( src ) * 8 );
ASSERT_EQ( res, src );
- ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
EXPECT_EQ( splitter.safe_cut( sizeof( src ) * 8 ), 0u );
- ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
splitter.reset();
- ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( splitter.eos());
ASSERT_FALSE( !splitter );
res = splitter.cut( sizeof( src ) * 8 );
EXPECT_EQ( res, src );
- ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
EXPECT_EQ( splitter.safe_cut( sizeof( src ) * 8 ), 0u );
- ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
EXPECT_EQ( *splitter.source(), src );
ASSERT_FALSE( !splitter );
if ( i % 8 == 0 )
res = res << 8;
- res |= ( splitter.cut( 1 ) ) << ( i % 8 );
+ res |= ( splitter.cut( 1 )) << ( i % 8 );
}
ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
ASSERT_FALSE( !splitter );
if ( i % 8 == 0 )
res = res << 8;
- res |= ( splitter.cut( 1 ) ) << ( i % 8 );
+ res |= ( splitter.cut( 1 )) << ( i % 8 );
}
ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
split_bitstring splitter( src );
// Trivial case
- ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( splitter.eos());
ASSERT_FALSE( !splitter );
res = splitter.cut( int48_size * 8 );
- EXPECT_EQ( res, src.to64() );
- ASSERT_TRUE( splitter.eos() );
+ EXPECT_EQ( res, src.to64());
+ ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
EXPECT_EQ( splitter.safe_cut( int48_size * 8 ), 0u );
- ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
splitter.reset();
- ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( splitter.eos());
ASSERT_FALSE( !splitter );
res = splitter.cut( int48_size * 8 );
- EXPECT_EQ( res, src.to64() );
- ASSERT_TRUE( splitter.eos() );
+ EXPECT_EQ( res, src.to64());
+ ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
EXPECT_EQ( splitter.safe_cut( int48_size * 8 ), 0u );
- ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
}
#endif
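
These hunks all exercise the same cds::algo::split_bitstring contract: cut() consumes bits from the source until eos(), safe_cut() returns 0 past the end, and reset() rewinds. A hedged usage sketch, assuming a little-endian platform (bit order is endian-dependent, as the _le/_be test pairs show):

    #include <cds/algo/split_bitstring.h>
    #include <cassert>
    #include <cstddef>

    int main()
    {
        size_t const src = 0xFDB97531;
        cds::algo::split_bitstring<size_t> splitter( src );

        size_t restored = 0;
        size_t shift = 0;
        while ( !splitter.eos()) {
            restored |= splitter.cut( 8 ) << shift;  // one byte per cut
            shift += 8;
        }
        assert( restored == src );                   // little-endian bit order
        assert( splitter.safe_cut( 8 ) == 0u );      // past eos(): returns 0

        splitter.reset();                            // rewind to bit 0
        assert( splitter.rest_count() == sizeof( src ) * 8 );
        return 0;
    }
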
typedef cds::algo::split_bitstring< int48, int48_size, size_t > split_bitstring;
split_bitstring splitter( src );
- EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.source()->to64(), src.to64());
EXPECT_EQ( splitter.rest_count(), int48_size * 8 );
EXPECT_EQ( splitter.bit_offset(), 0u );
// Cut each hex digit
splitter.reset();
for ( size_t i = 0; i < int48_size * 2; ++i ) {
- ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( splitter.eos());
ASSERT_FALSE( !splitter );
ASSERT_EQ( splitter.cut( 4 ), i );
}
- ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( splitter.eos());
ASSERT_FALSE( splitter );
EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
- EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.source()->to64(), src.to64());
EXPECT_EQ( splitter.rest_count(), 0u );
EXPECT_EQ( splitter.bit_offset(), int48_size * 8 );
// by one bit
{
splitter.reset();
- EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.source()->to64(), src.to64());
EXPECT_EQ( splitter.rest_count(), int48_size * 8 );
EXPECT_EQ( splitter.bit_offset(), 0u );
res = 0;
for ( size_t i = 0; i < int48_size * 8; ++i ) {
- ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( splitter.eos());
ASSERT_FALSE( !splitter );
#if CDS_BUILD_BITS == 64
res |= splitter.cut( 1 ) << i;
res |= static_cast<decltype(res)>( splitter.cut( 1 )) << i;
#endif
}
- ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
- EXPECT_EQ( res, src.to64() );
+ EXPECT_EQ( res, src.to64());
EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
- EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.source()->to64(), src.to64());
EXPECT_EQ( splitter.rest_count(), 0u );
EXPECT_EQ( splitter.bit_offset(), int48_size * 8 );
}
{
for ( size_t k = 0; k < 100; ++k ) {
splitter.reset();
- EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.source()->to64(), src.to64());
EXPECT_EQ( splitter.rest_count(), int48_size * 8 );
EXPECT_EQ( splitter.bit_offset(), 0u );
res = 0;
size_t shift = 0;
while ( splitter ) {
- ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( splitter.eos());
ASSERT_FALSE( !splitter );
int bits = std::rand() % 16;
#if CDS_BUILD_BITS == 64
#endif
shift += bits;
}
- ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
- EXPECT_EQ( res, src.to64() );
+ EXPECT_EQ( res, src.to64());
EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
- EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.source()->to64(), src.to64());
EXPECT_EQ( splitter.rest_count(), 0u );
EXPECT_EQ( splitter.bit_offset(), int48_size * 8 );
}
split_bitstring splitter( src );
// Trivial case
- ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( splitter.eos());
ASSERT_FALSE( !splitter );
res = splitter.cut( int48_size * 8 );
- ASSERT_EQ( res, src.to64() );
- ASSERT_TRUE( splitter.eos() );
+ ASSERT_EQ( res, src.to64());
+ ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
EXPECT_EQ( splitter.safe_cut( int48_size * 8 ), 0u );
- ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
splitter.reset();
- ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( splitter.eos());
ASSERT_FALSE( !splitter );
res = splitter.cut( int48_size * 8 );
- EXPECT_EQ( res, src.to64() );
- ASSERT_TRUE( splitter.eos() );
+ EXPECT_EQ( res, src.to64());
+ ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
EXPECT_EQ( splitter.safe_cut( int48_size * 8 ), 0u );
- ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
}
#endif
typedef cds::algo::split_bitstring< int48, int48_size, size_t > split_bitstring;
split_bitstring splitter( src );
- EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.source()->to64(), src.to64());
EXPECT_EQ( splitter.rest_count(), int48_size * 8 );
EXPECT_EQ( splitter.bit_offset(), 0u );
// Cut each hex digit
splitter.reset();
for ( size_t i = 0; i < int48_size * 2; ++i ) {
- ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( splitter.eos());
ASSERT_FALSE( !splitter );
if ( i % 2 == 0 ) {
EXPECT_EQ( splitter.cut( 4 ), 0x0A - i );
EXPECT_EQ( splitter.cut( 4 ), 0x0B - i + 1 );
}
}
- ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
- EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.source()->to64(), src.to64());
EXPECT_EQ( splitter.rest_count(), 0u );
EXPECT_EQ( splitter.bit_offset(), int48_size * 8 );
// by one bit
{
splitter.reset();
- EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.source()->to64(), src.to64());
EXPECT_EQ( splitter.rest_count(), int48_size * 8 );
EXPECT_EQ( splitter.bit_offset(), 0u );
res = 0;
for ( size_t i = 0; i < int48_size * 8; ++i ) {
- ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( splitter.eos());
ASSERT_FALSE( !splitter );
#if CDS_BUILD_BITS == 64
if ( i % 8 == 0 )
res = res << 8;
- res |= ( splitter.cut( 1 ) ) << ( i % 8 );
+ res |= ( splitter.cut( 1 )) << ( i % 8 );
#else
- res = ( res << 1 ) | static_cast<decltype(res)>( splitter.cut( 1 ) );
+ res = ( res << 1 ) | static_cast<decltype(res)>( splitter.cut( 1 ));
#endif
}
- ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
- EXPECT_EQ( res, src.to64() );
+ EXPECT_EQ( res, src.to64());
EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
- EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.source()->to64(), src.to64());
EXPECT_EQ( splitter.rest_count(), 0u );
EXPECT_EQ( splitter.bit_offset(), int48_size * 8 );
}
{
for ( size_t k = 0; k < 100; ++k ) {
splitter.reset();
- EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.source()->to64(), src.to64());
EXPECT_EQ( splitter.rest_count(), int48_size * 8 );
EXPECT_EQ( splitter.bit_offset(), 0u );
res = 0;
while ( splitter ) {
- ASSERT_FALSE( splitter.eos() );
+ ASSERT_FALSE( splitter.eos());
ASSERT_FALSE( !splitter );
unsigned bits = std::rand() % 16;
size_t shift = splitter.rest_count();
res = ( res << shift ) | static_cast<decltype(res)>( splitter.safe_cut( bits ));
#endif
}
- ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( splitter.eos());
ASSERT_TRUE( !splitter );
- EXPECT_EQ( res, src.to64() );
+ EXPECT_EQ( res, src.to64());
EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
- EXPECT_EQ( splitter.source()->to64(), src.to64() );
+ EXPECT_EQ( splitter.source()->to64(), src.to64());
EXPECT_EQ( splitter.rest_count(), 0u );
EXPECT_EQ( splitter.bit_offset(), int48_size * 8 );
}
typedef cds::algo::byte_splitter< size_t > splitter_type;
splitter_type splitter( src );
- ASSERT_TRUE( !splitter.eos() );
+ ASSERT_TRUE( !splitter.eos());
EXPECT_EQ( *splitter.source(), src );
EXPECT_EQ( splitter.rest_count(), sizeof( src ) * 8 );
EXPECT_EQ( splitter.bit_offset(), 0u );
- EXPECT_TRUE( splitter.is_correct( 8 ) );
- EXPECT_FALSE( splitter.is_correct( 4 ) );
+ EXPECT_TRUE( splitter.is_correct( 8 ));
+ EXPECT_FALSE( splitter.is_correct( 4 ));
unsigned expected = 0x10;
for ( unsigned i = 0; i < splitter_type::c_bitstring_size; ++i ) {
expected += 0x22;
}
- ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( splitter.eos());
EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
EXPECT_EQ( *splitter.source(), src );
EXPECT_EQ( splitter.rest_count(), 0u );
typedef cds::algo::byte_splitter< size_t > splitter_type;
splitter_type splitter( src );
- ASSERT_TRUE( !splitter.eos() );
+ ASSERT_TRUE( !splitter.eos());
EXPECT_EQ( *splitter.source(), src );
EXPECT_EQ( splitter.rest_count(), sizeof( src ) * 8 );
EXPECT_EQ( splitter.bit_offset(), 0u );
- EXPECT_TRUE( splitter.is_correct( 8 ) );
- EXPECT_FALSE( splitter.is_correct( 4 ) );
+ EXPECT_TRUE( splitter.is_correct( 8 ));
+ EXPECT_FALSE( splitter.is_correct( 4 ));
unsigned expected = 0xFE;
for ( unsigned i = 0; i < splitter_type::c_bitstring_size; ++i ) {
expected -= 0x22;
}
- ASSERT_TRUE( splitter.eos() );
+ ASSERT_TRUE( splitter.eos());
EXPECT_EQ( splitter.safe_cut( 8 ), 0u );
EXPECT_EQ( *splitter.source(), src );
EXPECT_EQ( splitter.rest_count(), 0u );
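
cds::algo::byte_splitter is the byte-aligned specialization exercised here: is_correct() accepts only cuts that are a multiple of 8 bits. A hedged sketch of its use (the same header as split_bitstring is assumed):

    #include <cds/algo/split_bitstring.h>
    #include <cassert>
    #include <cstddef>

    int main()
    {
        size_t const src = 0x12345678;
        cds::algo::byte_splitter<size_t> splitter( src );

        assert( splitter.is_correct( 8 ));    // byte-aligned cut: allowed
        assert( !splitter.is_correct( 4 ));   // sub-byte cut: rejected

        unsigned bytes = 0;
        while ( !splitter.eos()) {
            splitter.cut( 8 );                // consume one byte
            ++bytes;
        }
        assert( bytes == sizeof( src ));
        assert( splitter.safe_cut( 8 ) == 0u );
        return 0;
    }
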
TEST_F( Split_bitstrig, cut_int48 )
{
- if ( is_big_endian() )
+ if ( is_big_endian())
cut_int48_be();
else
cut_int48_le();
TEST_F( Split_bitstrig, cut_byte )
{
- if ( is_big_endian() )
+ if ( is_big_endian())
cut_byte_be();
else
cut_byte_le();
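
Both dispatchers above select the endian-specific test body at run time. A minimal probe equivalent to such an is_big_endian() helper (the library's actual helper may differ):

    #include <cstdint>

    static inline bool is_big_endian()
    {
        uint16_t const probe = 0x0102;
        // On a big-endian machine the most significant byte comes first
        return *reinterpret_cast<uint8_t const*>( &probe ) == 0x01;
    }
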
const int nSize = 100;
- ASSERT_TRUE( q.empty() );
+ ASSERT_TRUE( q.empty());
ASSERT_EQ( q.size(), 0u );
// enqueue/dequeue
ASSERT_TRUE( q.enqueue( value_type( i )));
ASSERT_EQ( q.size(), static_cast<size_t>( i + 1 ));
}
- ASSERT_FALSE( q.empty() );
+ ASSERT_FALSE( q.empty());
ASSERT_EQ( q.size(), static_cast<size_t>( nSize ));
for ( int i = 0; i < nSize; ++i ) {
it.value = -1;
- ASSERT_TRUE( q.dequeue( it ) );
+ ASSERT_TRUE( q.dequeue( it ));
ASSERT_EQ( it.value, i );
ASSERT_EQ( q.size(), static_cast<size_t>( nSize - i - 1 ));
}
- ASSERT_TRUE( q.empty() );
+ ASSERT_TRUE( q.empty());
ASSERT_EQ( q.size(), 0u );
// push/pop
ASSERT_TRUE( q.push( value_type( i )));
ASSERT_EQ( q.size(), static_cast<size_t>( i + 1 ));
}
- ASSERT_FALSE( q.empty() );
+ ASSERT_FALSE( q.empty());
ASSERT_EQ( q.size(), static_cast<size_t>( nSize ));
for ( int i = 0; i < nSize; ++i ) {
it.value = -1;
- ASSERT_TRUE( q.pop( it ) );
+ ASSERT_TRUE( q.pop( it ));
ASSERT_EQ( it.value, i );
ASSERT_EQ( q.size(), static_cast<size_t>( nSize - i - 1 ));
}
- ASSERT_TRUE( q.empty() );
+ ASSERT_TRUE( q.empty());
ASSERT_EQ( q.size(), 0u );
// clear
for ( int i = 0; i < nSize; ++i ) {
ASSERT_TRUE( q.push( value_type( i )));
}
- ASSERT_FALSE( q.empty() );
+ ASSERT_FALSE( q.empty());
ASSERT_EQ( q.size(), static_cast<size_t>( nSize ));
q.clear();
- ASSERT_TRUE( q.empty() );
+ ASSERT_TRUE( q.empty());
ASSERT_EQ( q.size(), 0u );
// pop from empty queue
it = value_type( nSize * 2 );
- ASSERT_FALSE( q.pop( it ) );
+ ASSERT_FALSE( q.pop( it ));
ASSERT_EQ( it.value, nSize * 2 );
- ASSERT_TRUE( q.empty() );
+ ASSERT_TRUE( q.empty());
ASSERT_EQ( q.size(), 0u );
- ASSERT_FALSE( q.dequeue( it ) );
+ ASSERT_FALSE( q.dequeue( it ));
ASSERT_EQ( it.value, nSize * 2 );
- ASSERT_TRUE( q.empty() );
+ ASSERT_TRUE( q.empty());
ASSERT_EQ( q.size(), 0u );
}
for ( unsigned pass = 0; pass < 3; ++pass ) {
for ( size_t i = 0; i < nSize; ++i ) {
it = static_cast<value_type>( i );
- ASSERT_TRUE( q.enqueue( it ) );
+ ASSERT_TRUE( q.enqueue( it ));
ASSERT_CONTAINER_SIZE( q, i + 1 );
}
- ASSERT_FALSE( q.empty() );
+ ASSERT_FALSE( q.empty());
ASSERT_CONTAINER_SIZE( q, nSize );
- ASSERT_FALSE( q.enqueue( static_cast<value_type>( nSize ) * 2 ) );
+ ASSERT_FALSE( q.enqueue( static_cast<value_type>( nSize ) * 2 ));
for ( size_t i = 0; i < nSize; ++i ) {
it = -1;
- ASSERT_TRUE( q.dequeue( it ) );
- ASSERT_EQ( it, static_cast<value_type>( i ) );
+ ASSERT_TRUE( q.dequeue( it ));
+ ASSERT_EQ( it, static_cast<value_type>( i ));
ASSERT_CONTAINER_SIZE( q, nSize - i - 1 );
}
- ASSERT_TRUE( q.empty() );
+ ASSERT_TRUE( q.empty());
ASSERT_CONTAINER_SIZE( q, 0 );
}
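
The bounded-queue tests above assert a simple contract: enqueue() fails once size() reaches capacity(), and dequeue() yields items in FIFO order. A single-threaded ring-buffer stand-in (not a libcds class) that satisfies the same contract:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    template <typename T>
    class ring_buffer {
        std::vector<T> m_buf;
        size_t m_head = 0, m_count = 0;
    public:
        explicit ring_buffer( size_t capacity ) : m_buf( capacity ) {}
        size_t capacity() const { return m_buf.size(); }
        size_t size() const     { return m_count; }
        bool empty() const      { return m_count == 0; }
        bool enqueue( T const& v )
        {
            if ( m_count == capacity())
                return false;                // full: reject, as asserted above
            m_buf[( m_head + m_count ) % capacity()] = v;
            ++m_count;
            return true;
        }
        bool dequeue( T& v )
        {
            if ( empty())
                return false;
            v = m_buf[m_head];
            m_head = ( m_head + 1 ) % capacity();
            --m_count;
            return true;
        }
    };

    int main()
    {
        ring_buffer<int> q( 4 );
        for ( int i = 0; i < 4; ++i )
            assert( q.enqueue( i ));
        assert( !q.enqueue( 42 ));           // bounded: push to a full queue fails
        int v;
        for ( int i = 0; i < 4; ++i ) {
            assert( q.dequeue( v ));
            assert( v == i );                // FIFO order preserved
        }
        assert( q.empty());
        return 0;
    }
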
for ( unsigned pass = 0; pass < 3; ++pass ) {
for ( size_t i = 0; i < nSize; ++i ) {
it = static_cast<value_type>( i );
- ASSERT_TRUE( q.push( it ) );
+ ASSERT_TRUE( q.push( it ));
ASSERT_CONTAINER_SIZE( q, i + 1 );
}
- ASSERT_FALSE( q.empty() );
+ ASSERT_FALSE( q.empty());
ASSERT_CONTAINER_SIZE( q, nSize );
for ( size_t i = 0; i < nSize; ++i ) {
it = -1;
- ASSERT_TRUE( q.pop( it ) );
- ASSERT_EQ( it, static_cast<value_type>( i ) );
+ ASSERT_TRUE( q.pop( it ));
+ ASSERT_EQ( it, static_cast<value_type>( i ));
ASSERT_CONTAINER_SIZE( q, nSize - i - 1 );
}
- ASSERT_TRUE( q.empty() );
+ ASSERT_TRUE( q.empty());
ASSERT_CONTAINER_SIZE( q, 0 );
}
ASSERT_NE( it, -1 );
auto f = [&it]( value_type& dest ) { dest = it; it = -1; };
if ( i & 1 )
- ASSERT_TRUE( q.enqueue_with( f ) );
+ ASSERT_TRUE( q.enqueue_with( f ));
else
- ASSERT_TRUE( q.push_with( f ) );
+ ASSERT_TRUE( q.push_with( f ));
ASSERT_EQ( it, -1 );
ASSERT_CONTAINER_SIZE( q, i + 1 );
}
- ASSERT_FALSE( q.empty() );
+ ASSERT_FALSE( q.empty());
ASSERT_CONTAINER_SIZE( q, nSize );
for ( size_t i = 0; i < nSize; ++i ) {
it = -1;
auto f = [&it]( value_type& src ) { it = src; src = -1; };
if ( i & 1 )
- ASSERT_TRUE( q.pop_with( f ) );
+ ASSERT_TRUE( q.pop_with( f ));
else
- ASSERT_TRUE( q.dequeue_with( f ) );
- ASSERT_EQ( it, static_cast<value_type>( i ) );
+ ASSERT_TRUE( q.dequeue_with( f ));
+ ASSERT_EQ( it, static_cast<value_type>( i ));
ASSERT_CONTAINER_SIZE( q, nSize - i - 1 );
}
- ASSERT_TRUE( q.empty() );
+ ASSERT_TRUE( q.empty());
ASSERT_CONTAINER_SIZE( q, 0u );
}
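
An illustrative stand-in (not libcds) for the push_with/pop_with protocol asserted above: the functor is applied to the queue's internal slot, so the caller can construct in place on push and move the value out on pop.

    #include <cassert>
    #include <queue>

    template <typename T>
    class functor_queue {
        std::queue<T> m_q;
    public:
        template <typename Func>
        bool push_with( Func f )
        {
            m_q.push( T());
            f( m_q.back());      // functor fills the freshly added slot
            return true;
        }
        template <typename Func>
        bool pop_with( Func f )
        {
            if ( m_q.empty())
                return false;
            f( m_q.front());     // functor reads/moves from the front slot
            m_q.pop();
            return true;
        }
    };

    int main()
    {
        functor_queue<int> q;
        int it = 42;
        q.push_with( [&it]( int& dest ) { dest = it; it = -1; } );
        assert( it == -1 );
        q.pop_with( [&it]( int& src ) { it = src; } );
        assert( it == 42 );
        return 0;
    }
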
const size_t nSize = q.capacity();
- ASSERT_TRUE( q.empty() );
+ ASSERT_TRUE( q.empty());
ASSERT_CONTAINER_SIZE( q, 0 );
// enqueue/dequeue
for ( unsigned pass = 0; pass < 3; ++pass ) {
for ( size_t i = 0; i < nSize; ++i ) {
- ASSERT_TRUE( q.enqueue( static_cast<value_type>( i ) ) );
+ ASSERT_TRUE( q.enqueue( static_cast<value_type>( i )) );
ASSERT_CONTAINER_SIZE( q, i + 1 );
}
- ASSERT_FALSE( q.empty() );
+ ASSERT_FALSE( q.empty());
ASSERT_CONTAINER_SIZE( q, nSize );
- ASSERT_FALSE( q.enqueue( static_cast<value_type>( nSize ) * 2 ) );
+ ASSERT_FALSE( q.enqueue( static_cast<value_type>( nSize ) * 2 ));
for ( size_t i = 0; i < nSize; ++i ) {
value_type* fr = q.front();
ASSERT_TRUE( fr != nullptr );
- ASSERT_EQ( *fr, static_cast<value_type>( i ) );
- ASSERT_TRUE( q.pop_front() );
+ ASSERT_EQ( *fr, static_cast<value_type>( i ));
+ ASSERT_TRUE( q.pop_front());
ASSERT_CONTAINER_SIZE( q, nSize - i - 1 );
}
- ASSERT_TRUE( q.empty() );
+ ASSERT_TRUE( q.empty());
ASSERT_CONTAINER_SIZE( q, 0 );
ASSERT_TRUE( q.front() == nullptr );
- ASSERT_FALSE( q.pop_front() );
+ ASSERT_FALSE( q.pop_front());
}
}
};
el[k] = static_cast<value_type>( i + k );
if ( i + nArrSize <= nSize ) {
- ASSERT_TRUE( q.push( el, nArrSize ) );
+ ASSERT_TRUE( q.push( el, nArrSize ));
}
else {
- ASSERT_FALSE( q.push( el, nArrSize ) );
+ ASSERT_FALSE( q.push( el, nArrSize ));
}
}
- ASSERT_TRUE( !q.empty() );
+ ASSERT_TRUE( !q.empty());
if ( nSize % nArrSize != 0 ) {
- ASSERT_FALSE( q.full() );
+ ASSERT_FALSE( q.full());
ASSERT_CONTAINER_SIZE( q, nArrCount * nArrSize );
for ( size_t i = nArrCount * nArrSize; i < nSize; ++i ) {
- ASSERT_TRUE( q.enqueue( static_cast<value_type>( i ) ) );
+ ASSERT_TRUE( q.enqueue( static_cast<value_type>( i )) );
}
}
- ASSERT_TRUE( q.full() );
+ ASSERT_TRUE( q.full());
ASSERT_CONTAINER_SIZE( q, nSize );
// batch pop
value_type expected = 0;
- while ( q.pop( el, nArrSize ) ) {
+ while ( q.pop( el, nArrSize )) {
for ( size_t i = 0; i < nArrSize; ++i ) {
ASSERT_EQ( el[i], expected );
++expected;
}
if ( nSize % nArrSize == 0 ) {
- ASSERT_TRUE( q.empty() );
+ ASSERT_TRUE( q.empty());
}
else {
- ASSERT_FALSE( q.empty() );
+ ASSERT_FALSE( q.empty());
ASSERT_CONTAINER_SIZE( q, nSize % nArrSize );
q.clear();
}
- ASSERT_TRUE( q.empty() );
- ASSERT_FALSE( q.full() );
+ ASSERT_TRUE( q.empty());
+ ASSERT_FALSE( q.full());
ASSERT_CONTAINER_SIZE( q, 0u );
}
}
el[k] = i + k;
if ( i + nArrSize <= nSize ) {
- ASSERT_TRUE( q.push( el, nArrSize, func_push ) );
+ ASSERT_TRUE( q.push( el, nArrSize, func_push ));
}
else {
- ASSERT_FALSE( q.push( el, nArrSize, func_push ) );
+ ASSERT_FALSE( q.push( el, nArrSize, func_push ));
}
}
- ASSERT_TRUE( !q.empty() );
+ ASSERT_TRUE( !q.empty());
if ( nSize % nArrSize != 0 ) {
- ASSERT_FALSE( q.full() );
+ ASSERT_FALSE( q.full());
ASSERT_CONTAINER_SIZE( q, nArrCount * nArrSize );
for ( size_t i = nArrCount * nArrSize; i < nSize; ++i ) {
- ASSERT_TRUE( q.push( &i, 1, func_push ) );
+ ASSERT_TRUE( q.push( &i, 1, func_push ));
}
}
- ASSERT_TRUE( q.full() );
+ ASSERT_TRUE( q.full());
ASSERT_CONTAINER_SIZE( q, nSize );
// batch pop with functor
auto func_pop = []( size_t& dest, value_type src ) { dest = static_cast<size_t>( src / 10 ); };
size_t expected = 0;
- while ( q.pop( el, nArrSize, func_pop ) ) {
+ while ( q.pop( el, nArrSize, func_pop )) {
for ( size_t i = 0; i < nArrSize; ++i ) {
ASSERT_EQ( el[i], expected );
++expected;
}
if ( nSize % nArrSize == 0 ) {
- ASSERT_TRUE( q.empty() );
+ ASSERT_TRUE( q.empty());
}
else {
- ASSERT_FALSE( q.empty() );
+ ASSERT_FALSE( q.empty());
ASSERT_CONTAINER_SIZE( q, nSize % nArrSize );
size_t v;
- while ( q.pop( &v, 1, func_pop ) ) {
+ while ( q.pop( &v, 1, func_pop )) {
ASSERT_EQ( v, expected );
++expected;
}
}
- ASSERT_TRUE( q.empty() );
- ASSERT_FALSE( q.full() );
+ ASSERT_TRUE( q.empty());
+ ASSERT_FALSE( q.full());
ASSERT_CONTAINER_SIZE( q, 0u );
}
el[k] = i + k;
if ( i + nArrSize <= nSize ) {
- ASSERT_TRUE( q.push( el, nArrSize, func_push ) );
+ ASSERT_TRUE( q.push( el, nArrSize, func_push ));
}
else {
- ASSERT_FALSE( q.push( el, nArrSize, func_push ) );
+ ASSERT_FALSE( q.push( el, nArrSize, func_push ));
}
}
- ASSERT_TRUE( !q.empty() );
+ ASSERT_TRUE( !q.empty());
if ( nSize % nArrSize != 0 ) {
- ASSERT_FALSE( q.full() );
+ ASSERT_FALSE( q.full());
ASSERT_CONTAINER_SIZE( q, nArrCount * nArrSize );
for ( size_t i = nArrCount * nArrSize; i < nSize; ++i ) {
- ASSERT_TRUE( q.push( &i, 1, func_push ) );
+ ASSERT_TRUE( q.push( &i, 1, func_push ));
}
}
- ASSERT_TRUE( q.full() );
+ ASSERT_TRUE( q.full());
ASSERT_CONTAINER_SIZE( q, nSize );
value_type cur = 0;
- while ( !q.empty() ) {
+ while ( !q.empty()) {
value_type* front = q.front();
ASSERT_TRUE( front != nullptr );
ASSERT_EQ( cur, *front );
- ASSERT_TRUE( q.pop_front() );
+ ASSERT_TRUE( q.pop_front());
cur += 10;
}
- ASSERT_TRUE( q.empty() );
+ ASSERT_TRUE( q.empty());
ASSERT_TRUE( q.front() == nullptr );
- ASSERT_FALSE( q.pop_front() );
+ ASSERT_FALSE( q.pop_front());
}
}
}
{
size_t const capacity = q.capacity();
- ASSERT_TRUE( q.empty() );
+ ASSERT_TRUE( q.empty());
ASSERT_EQ( q.size(), 0u );
ASSERT_TRUE( q.front().first == nullptr );
- ASSERT_FALSE( q.pop_front() );
+ ASSERT_FALSE( q.pop_front());
size_t total_push = 0;
uint8_t chfill = 0;
for ( size_t i = 0; i < pair.second; ++i )
ASSERT_EQ( *reinterpret_cast<uint8_t*>( pair.first ), chfill );
- ASSERT_TRUE( q.pop_front() );
- ASSERT_FALSE( q.pop_front() );
+ ASSERT_TRUE( q.pop_front());
+ ASSERT_FALSE( q.pop_front());
}
- ASSERT_TRUE( q.empty() );
+ ASSERT_TRUE( q.empty());
ASSERT_EQ( q.size(), 0u );
ASSERT_TRUE( q.front().first == nullptr );
- ASSERT_FALSE( q.pop_front() );
+ ASSERT_FALSE( q.pop_front());
}
};
test_varsize_buffer( q );
}
-} // namespace
\ No newline at end of file
+} // namespace
// erase_at()
for ( auto& i : data ) {
- EXPECT_TRUE( s.insert( i ) );
+ EXPECT_TRUE( s.insert( i ));
}
- EXPECT_FALSE( s.empty() );
+ EXPECT_FALSE( s.empty());
EXPECT_CONTAINER_SIZE( s, nSetSize );
for ( auto it = s.begin(); it != s.end(); ++it ) {
EXPECT_FALSE( s.erase_at( it ));
}
- EXPECT_TRUE( s.empty() );
+ EXPECT_TRUE( s.empty());
EXPECT_CONTAINER_SIZE( s, 0 );
}
// erase_at()
for ( auto& i : data ) {
- EXPECT_TRUE( s.insert( i ) );
+ EXPECT_TRUE( s.insert( i ));
}
- EXPECT_FALSE( s.empty() );
+ EXPECT_FALSE( s.empty());
EXPECT_CONTAINER_SIZE( s, nSetSize );
for ( auto it = s.begin(); it != s.end(); ++it ) {
EXPECT_FALSE( s.erase_at( it ));
}
- EXPECT_TRUE( s.empty() );
+ EXPECT_TRUE( s.empty());
EXPECT_CONTAINER_SIZE( s, 0 );
}
Amila Jayasekara\r
blinkenlichten (https://github.com/blinkenlichten)\r
Eugeny Kalishenko (https://github.com/eugenyk)\r
+gtroxler (https://github.com/gtroxler)\r
Jelle van den Hooff\r
Kyle Hegeman (https://github.com/khegeman)\r
Lily Tsai (https://github.com/tslilyai)\r
Mykola Dimura\r
Mike Krinkin (https://github.com/krinkinmu)\r
Nikolai Rapotkin\r
-rwf (https://github.com/rfw)\r
+rfw (https://github.com/rfw)\r
Tamas Lengyel\r
Todd Lipcon\r