// 32-bit bit ops
template <> struct BitOps<4> {
- typedef atomic32u_t TUInt;
+ typedef uint32_t TUInt;
static int MSB( TUInt x ) { return bitop::platform::msb32( x ); }
static int LSB( TUInt x ) { return bitop::platform::lsb32( x ); }
namespace bitop { namespace platform { namespace gcc { namespace amd64 {
// MSB - return index (1..32) of most significant bit in nArg. If nArg == 0 return 0
# define cds_bitop_msb32_DEFINED
- static inline int msb32( atomic32u_t nArg )
+ static inline int msb32( uint32_t nArg )
{
int nRet;
__asm__ __volatile__ (
}
# define cds_bitop_msb32nz_DEFINED
- static inline int msb32nz( atomic32u_t nArg )
+ static inline int msb32nz( uint32_t nArg )
{
assert( nArg != 0 );
int nRet;
// LSB - return index (0..31) of least significant bit in nArg. If nArg == 0 return -1U
# define cds_bitop_lsb32_DEFINED
- static inline int lsb32( atomic32u_t nArg )
+ static inline int lsb32( uint32_t nArg )
{
int nRet;
// LSB - return index (0..31) of least significant bit in nArg.
// Condition: nArg != 0
# define cds_bitop_lsb32nz_DEFINED
- static inline int lsb32nz( atomic32u_t nArg )
+ static inline int lsb32nz( uint32_t nArg )
{
assert( nArg != 0 );
int nRet;
// MSB - return index (1..32) of most significant bit in x. If x == 0 return 0
# define cds_bitop_msb32_DEFINED
- static inline int msb32( atomic32u_t nArg )
+ static inline int msb32( uint32_t nArg )
{
if ( !nArg )
return 0;
// MSB - return index (0..31) of most significant bit in nArg.
// !!! nArg != 0
# define cds_bitop_msb32nz_DEFINED
- static inline int msb32nz( atomic32u_t nArg )
+ static inline int msb32nz( uint32_t nArg )
{
assert( nArg != 0 );
long double d = nArg;
}
// MSB - return index (1..32) of most significant bit in nArg. If nArg == 0 return 0
- static inline int sparc_msb32( atomic32u_t nArg )
+ static inline int sparc_msb32( uint32_t nArg )
{
return sparc_msb64( (atomic64u_t) nArg );
}
namespace bitop { namespace platform { namespace gcc { namespace x86 {
// MSB - return index (1..32) of most significant bit in nArg. If nArg == 0 return 0
# define cds_bitop_msb32_DEFINED
- static inline int msb32( atomic32u_t nArg )
+ static inline int msb32( uint32_t nArg )
{
int nRet;
__asm__ __volatile__ (
}
# define cds_bitop_msb32nz_DEFINED
- static inline int msb32nz( atomic32u_t nArg )
+ static inline int msb32nz( uint32_t nArg )
{
assert( nArg != 0 );
int nRet;
// LSB - return index (0..31) of least significant bit in nArg. If nArg == 0 return -1U
# define cds_bitop_lsb32_DEFINED
- static inline int lsb32( atomic32u_t nArg )
+ static inline int lsb32( uint32_t nArg )
{
int nRet;
// LSB - return index (0..31) of least significant bit in nArg.
// Condition: nArg != 0
# define cds_bitop_lsb32nz_DEFINED
- static inline int lsb32nz( atomic32u_t nArg )
+ static inline int lsb32nz( uint32_t nArg )
{
assert( nArg != 0 );
int nRet;
// MSB - return index (1..32) of most significant bit in nArg. If nArg == 0 return 0
# define cds_bitop_msb32_DEFINED
- static inline int msb32( atomic32u_t nArg )
+ static inline int msb32( uint32_t nArg )
{
unsigned long nIndex;
if ( _BitScanReverse( &nIndex, nArg ))
}
# define cds_bitop_msb32nz_DEFINED
- static inline int msb32nz( atomic32u_t nArg )
+ static inline int msb32nz( uint32_t nArg )
{
assert( nArg != 0 );
unsigned long nIndex;
// LSB - return index (1..32) of least significant bit in nArg. If nArg == 0 return -1U
# define cds_bitop_lsb32_DEFINED
- static inline int lsb32( atomic32u_t nArg )
+ static inline int lsb32( uint32_t nArg )
{
unsigned long nIndex;
if ( _BitScanForward( &nIndex, nArg ))
}
# define cds_bitop_lsb32nz_DEFINED
- static inline int lsb32nz( atomic32u_t nArg )
+ static inline int lsb32nz( uint32_t nArg )
{
assert( nArg != 0 );
unsigned long nIndex;
}
# define cds_bitop_complement32_DEFINED
- static inline bool complement32( atomic32u_t * pArg, unsigned int nBit )
+ static inline bool complement32( uint32_t * pArg, unsigned int nBit )
{
return _bittestandcomplement( reinterpret_cast<long *>( pArg ), nBit ) != 0;
}
namespace bitop { namespace platform { namespace vc { namespace x86 {
// MSB - return index (1..32) of most significant bit in nArg. If nArg == 0 return 0
# define cds_bitop_msb32_DEFINED
- static inline int msb32( atomic32u_t nArg )
+ static inline int msb32( uint32_t nArg )
{
unsigned long nIndex;
if ( _BitScanReverse( &nIndex, nArg ))
}
# define cds_bitop_msb32nz_DEFINED
- static inline int msb32nz( atomic32u_t nArg )
+ static inline int msb32nz( uint32_t nArg )
{
assert( nArg != 0 );
unsigned long nIndex;
// LSB - return index (1..32) of least significant bit in nArg. If nArg == 0 return -1U
# define cds_bitop_lsb32_DEFINED
- static inline int lsb32( atomic32u_t nArg )
+ static inline int lsb32( uint32_t nArg )
{
unsigned long nIndex;
if ( _BitScanForward( &nIndex, nArg ))
}
# define cds_bitop_lsb32nz_DEFINED
- static inline int lsb32nz( atomic32u_t nArg )
+ static inline int lsb32nz( uint32_t nArg )
{
assert( nArg != 0 );
unsigned long nIndex;
// bswap - Reverses the byte order of a 32-bit word
# define cds_bitop_bswap32_DEFINED
- static inline atomic32u_t bswap32( atomic32u_t nArg )
+ static inline uint32_t bswap32( uint32_t nArg )
{
__asm {
mov eax, nArg;
}
# define cds_bitop_complement32_DEFINED
- static inline bool complement32( atomic32u_t * pArg, unsigned int nBit )
+ static inline bool complement32( uint32_t * pArg, unsigned int nBit )
{
return _bittestandcomplement( reinterpret_cast<long *>( pArg ), nBit ) != 0;
}
namespace bitop { namespace platform {
// Return true if x = 2 ** k, k >= 0
#ifndef cds_bitop_isPow2_32_DEFINED
- static inline bool isPow2_32( atomic32u_t x )
+ static inline bool isPow2_32( uint32_t x )
{
return (x & ( x - 1 )) == 0 && x;
}
// Return number (1..32) of most significant bit
// Return 0 if x == 0
// Source: Linux kernel
- static inline int msb32( atomic32u_t x )
+ static inline int msb32( uint32_t x )
{
int r = 32;
#endif
#ifndef cds_bitop_msb32nz_DEFINED
- static inline int msb32nz( atomic32u_t x )
+ static inline int msb32nz( uint32_t x )
{
return msb32( x ) - 1;
}
#ifndef cds_bitop_msb64_DEFINED
static inline int msb64( atomic64u_unaligned x )
{
- atomic32u_t h = (atomic32u_t) (x >> 32);
+ uint32_t h = (uint32_t) (x >> 32);
if ( h )
return msb32( h ) + 32;
- return msb32( (atomic32u_t) x );
+ return msb32( (uint32_t) x );
}
#endif
// Return number (1..32) of least significant bit
// Return 0 if x == 0
// Source: Linux kernel
- static inline int lsb32( atomic32u_t x )
+ static inline int lsb32( uint32_t x )
{
int r = 1;
#endif
#ifndef cds_bitop_lsb32nz_DEFINED
- static inline int lsb32nz( atomic32u_t x )
+ static inline int lsb32nz( uint32_t x )
{
return lsb32( x ) - 1;
}
if ( !x )
return 0;
if ( x & 0xffffffffu )
- return lsb32( (atomic32u_t) x );
- return lsb32( (atomic32u_t) (x >> 32) ) + 32;
+ return lsb32( (uint32_t) x );
+ return lsb32( (uint32_t) (x >> 32) ) + 32;
}
#endif
// Reverse bit order
//******************************************************
#ifndef cds_bitop_rbo32_DEFINED
- static inline atomic32u_t rbo32( atomic32u_t x )
+ static inline uint32_t rbo32( uint32_t x )
{
// swap odd and even bits
x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1);
static inline atomic64u_t rbo64( atomic64u_unaligned x )
{
// Low 32bit              High 32bit
- return ( ((atomic64u_t) rbo32( (atomic32u_t) x )) << 32 ) | ((atomic64u_t) rbo32( (atomic32u_t) (x >> 32) ));
+ return ( ((atomic64u_t) rbo32( (uint32_t) x )) << 32 ) | ((atomic64u_t) rbo32( (uint32_t) (x >> 32) ));
}
#endif
// Set bit count. Return count of non-zero bits in word
//******************************************************
#ifndef cds_bitop_sbc32_DEFINED
- static inline int sbc32( atomic32u_t x )
+ static inline int sbc32( uint32_t x )
{
# ifdef cds_beans_zbc32_DEFINED
return 32 - zbc32( x );
# ifdef cds_beans_zbc64_DEFINED
return 64 - zbc64( x );
# else
- return sbc32( (atomic32u_t) (x >> 32) ) + sbc32( (atomic32u_t) x );
+ return sbc32( (uint32_t) (x >> 32) ) + sbc32( (uint32_t) x );
# endif
}
#endif
// Zero bit count. Return count of zero bits in word
//******************************************************
#ifndef cds_bitop_zbc32_DEFINED
- static inline int zbc32( atomic32u_t x )
+ static inline int zbc32( uint32_t x )
{
return 32 - sbc32( x );
}
// Bit complement
#ifndef cds_bitop_complement32_DEFINED
- static inline bool complement32( atomic32u_t * pArg, unsigned int nBit )
+ static inline bool complement32( uint32_t * pArg, unsigned int nBit )
{
assert( pArg );
- atomic32u_t nVal = *pArg & (1 << nBit);
+ uint32_t nVal = *pArg & (1 << nBit);
*pArg ^= 1 << nBit;
return nVal != 0;
}
*/
static inline uint32_t RandXorShift32(uint32_t x)
{
- //static atomic32u_t xRandom = 2463534242UL ; //rand() | 0x0100 ; // must be nonzero
- //atomic32u_t x = xRandom;
+ //static uint32_t xRandom = 2463534242UL ; //rand() | 0x0100 ; // must be nonzero
+ //uint32_t x = xRandom;
if ( !x )
x = ((rand() + 1) << 16) + rand() + 1;
x ^= x << 13;
//@cond
// typedefs for back compatibility
namespace cds {
- /// Atomic pointer
- typedef void * pointer_t;
-
/// 64bit unaligned int
typedef int64_t atomic64_unaligned;
/// 64bit atomic unsigned int (aligned)
typedef atomic64u_aligned atomic64u_t;
-
- /// 32bit atomic int
- typedef int32_t atomic32_t;
-
- /// 32bit atomic unsigned int
- typedef uint32_t atomic32u_t;
-
- /// atomic int
- typedef atomic32_t atomic_t;
-
- /// atomic unsigned int
- typedef atomic32u_t unsigned_atomic_t;
-
- /// atomic int sized as pointer
- typedef intptr_t ptr_atomic_t;
-
- /// atomic unsigned int sized as pointer
- typedef uintptr_t uptr_atomic_t;
} // namespace cds
//@endcond
template <int ALIGN, typename T>
static inline bool is_aligned(T const * p)
{
- return (((uptr_atomic_t)p) & uptr_atomic_t(ALIGN - 1)) == 0;
+ return (((uintptr_t)p) & uintptr_t(ALIGN - 1)) == 0;
}
/// Checks if the pointer \p p has \p nAlign byte alignment
template <typename T>
static inline bool is_aligned(T const * p, size_t nAlign)
{
- return (((uptr_atomic_t)p) & uptr_atomic_t(nAlign - 1)) == 0;
+ return (((uintptr_t)p) & uintptr_t(nAlign - 1)) == 0;
}
}} // namespace cds::details
bool enqueue( value_type& data )
{
value_type * pNewNode = &data;
- assert( (reinterpret_cast<ptr_atomic_t>( pNewNode ) & 1) == 0 );
+ assert( (reinterpret_cast<uintptr_t>(pNewNode) & 1) == 0 );
back_off bkoff;
const index_type nModulo = modulo();
continue;
}
- pNull = reinterpret_cast<value_type *>((reinterpret_cast<ptr_atomic_t>(tt) & 1) ? free0 : free1 );
+ pNull = reinterpret_cast<value_type *>((reinterpret_cast<uintptr_t>(tt) & 1) ? free0 : free1);
if ( th != m_nHead.load(memory_model::memory_order_relaxed) )
continue;
}
};
- /// Recursive spin-lock based on atomic32u_t
- typedef ReentrantSpinT<uint32_t, backoff::LockDefault> ReentrantSpin32;
+ /// Recursive 32bit spin-lock
+ typedef ReentrantSpinT<uint32_t, backoff::LockDefault> ReentrantSpin32;
- /// Recursive spin-lock based on atomic64u_t type
- typedef ReentrantSpinT<uint64_t, backoff::LockDefault> ReentrantSpin64;
+ /// Recursive 64bit spin-lock
+ typedef ReentrantSpinT<uint64_t, backoff::LockDefault> ReentrantSpin64;
- /// Recursive spin-lock based on atomic32_t type
- typedef ReentrantSpin32 ReentrantSpin;
+ /// Default recursive spin-lock type
+ typedef ReentrantSpin32 ReentrantSpin;
} // namespace lock
union {
superblock_desc * pDesc ; // pointer to superblock descriptor
- atomic32u_t nSize ; // block size (allocated form OS)
+ uint32_t nSize ; // block size (allocated from OS)
};
- atomic32u_t nFlags;
+ uint32_t nFlags;
public:
- void set( superblock_desc * pdesc, atomic32u_t isAligned )
+ void set( superblock_desc * pdesc, uint32_t isAligned )
{
pDesc = pdesc;
nFlags = isAligned ? bitAligned : 0;
// allocated from OS
marked_desc_ptr pDesc;
public:
- void set( superblock_desc * pdesc, atomic32u_t isAligned )
+ void set( superblock_desc * pdesc, uint32_t isAligned )
{
pDesc = marked_desc_ptr( pdesc, isAligned );
}
size_t getOSAllocSize() const
{
assert( isOSAllocated() );
- return reinterpret_cast<uptr_atomic_t>( pDesc.ptr() ) >> 2;
+ return reinterpret_cast<uintptr_t>( pDesc.ptr() ) >> 2;
}
};
class active_tag {
//@cond
superblock_desc * pDesc;
- atomic32u_t nCredits;
+ uint32_t nCredits;
public:
static const unsigned int c_nMaxCredits = 0 - 1;
void ptr( superblock_desc * p )
{
- assert( (reinterpret_cast<uptr_atomic_t>(p) & c_nMaxCredits) == 0 );
+ assert( (reinterpret_cast<uintptr_t>(p) & c_nMaxCredits) == 0 );
pDesc = marked_desc_ptr( p, pDesc.bits());
}
void set( superblock_desc * pSB, unsigned int n )
{
- assert( (reinterpret_cast<uptr_atomic_t>(pSB) & c_nMaxCredits) == 0 );
+ assert( (reinterpret_cast<uintptr_t>(pSB) & c_nMaxCredits) == 0 );
pDesc = marked_desc_ptr( pSB, n );
}
, pSizeClass( nullptr )
, pPartial( nullptr )
{
- assert( (reinterpret_cast<uptr_atomic_t>(this) & (c_nAlignment - 1)) == 0 );
+ assert( (reinterpret_cast<uintptr_t>(this) & (c_nAlignment - 1)) == 0 );
}
//@endcond
// initialize processor heaps
pDesc->arrProcHeap =
reinterpret_cast<processor_heap *>(
- reinterpret_cast<uptr_atomic_t>(reinterpret_cast<byte *>(pDesc + 1) + sizeof(pDesc->pageHeaps[0]) * nPageHeapCount + c_nAlignment - 1)
- & ~(uptr_atomic_t(c_nAlignment) - 1)
+ reinterpret_cast<uintptr_t>(reinterpret_cast<byte *>(pDesc + 1) + sizeof(pDesc->pageHeaps[0]) * nPageHeapCount + c_nAlignment - 1)
+ & ~(uintptr_t(c_nAlignment) - 1)
);
processor_heap * pProcHeap = pDesc->arrProcHeap;
superblock_desc * pDesc = pProcHeap->pProcDesc->listSBDescFree.pop();
if ( pDesc == nullptr ) {
pDesc = new( m_AlignedHeap.alloc(sizeof(superblock_desc), c_nAlignment ) ) superblock_desc;
- assert( (uptr_atomic_t(pDesc) & (c_nAlignment - 1)) == 0 );
+ assert( (uintptr_t(pDesc) & (c_nAlignment - 1)) == 0 );
anchor = pDesc->anchor.load( atomics::memory_order_relaxed );
anchor.tag = 0;
{
if ( nAlignment <= c_nDefaultBlockAlignment ) {
void * p = alloc( nSize );
- assert( (reinterpret_cast<uptr_atomic_t>(p) & (nAlignment - 1)) == 0 );
+ assert( (reinterpret_cast<uintptr_t>(p) & (nAlignment - 1)) == 0 );
return p;
}
block_header * pBlock = int_alloc( nSize + nAlignment + sizeof(block_header) + bound_checker::trailer_size );
block_header * pRedirect;
- if ( (reinterpret_cast<uptr_atomic_t>( pBlock + 1) & (nAlignment - 1)) != 0 ) {
- pRedirect = reinterpret_cast<block_header *>( (reinterpret_cast<uptr_atomic_t>( pBlock ) & ~(nAlignment - 1)) + nAlignment ) - 1;
+ if ( (reinterpret_cast<uintptr_t>( pBlock + 1) & (nAlignment - 1)) != 0 ) {
+ pRedirect = reinterpret_cast<block_header *>( (reinterpret_cast<uintptr_t>( pBlock ) & ~(nAlignment - 1)) + nAlignment ) - 1;
assert( pRedirect != pBlock );
pRedirect->set( reinterpret_cast<superblock_desc *>(pBlock), 1 );
- assert( (reinterpret_cast<uptr_atomic_t>(pRedirect + 1) & (nAlignment - 1)) == 0 );
+ assert( (reinterpret_cast<uintptr_t>(pRedirect + 1) & (nAlignment - 1)) == 0 );
}
else
pRedirect = pBlock;
memcpy( pArea + nAllocSize, &trailer, sizeof(trailer) );
// the next assignment is correct because pBlock is at least sizeof(size_t)-byte aligned
- assert( (reinterpret_cast<uptr_atomic_t>(pEndBlock) & (sizeof(size_t) - 1)) == 0 );
+ assert( (reinterpret_cast<uintptr_t>(pEndBlock) & (sizeof(size_t) - 1)) == 0 );
*(reinterpret_cast<size_t *>( pEndBlock ) - 1) = nAllocSize;
}
protected:
void bitop32()
{
- cds::atomic32u_t n;
+ uint32_t n;
n = 0;
CPPUNIT_ASSERT_EX( cds::bitop::MSB(n) == 0, "n=" << n );
CPPUNIT_ASSERT_EX( cds::bitop::LSB(n) == 0, "n=" << n );
memset( ((char *)(*pCell)) + nSize * sizeof(value_type) - 16, 0, 16 );
}
- CPPUNIT_ASSERT( (reinterpret_cast<cds::uptr_atomic_t>(*pCell) & (ALLOC::alignment - 1)) == 0 );
+ CPPUNIT_ASSERT( (reinterpret_cast<uintptr_t>(*pCell) & (ALLOC::alignment - 1)) == 0 );
}
pCell = m_arr;
for ( size_t i = 0; i < s_nBlockCount; ++i, ++pCell ) {
size_t nItem = m_rndGen( size_t(1), s_nBlocksPerThread ) - 1;
m_Alloc.deallocate( reinterpret_cast<value_type *>(m_arr[nItem]), 1 );
m_arr[nItem] = reinterpret_cast<char *>(m_Alloc.allocate( m_rndGen( s_nMinBlockSize, s_nMaxBlockSize ), nullptr ));
- CPPUNIT_ASSERT( (reinterpret_cast<cds::uptr_atomic_t>(m_arr[nItem]) & (ALLOC::alignment - 1)) == 0 );
+ CPPUNIT_ASSERT( (reinterpret_cast<uintptr_t>(m_arr[nItem]) & (ALLOC::alignment - 1)) == 0 );
}
}
};
= new char *[ s_nBlocksPerThread ];
for ( size_t i = 0; i < s_nBlocksPerThread; ++i ) {
thData[i] = reinterpret_cast<char *>(alloc.allocate( rndGen( s_nMinBlockSize, s_nMaxBlockSize ), nullptr ));
- CPPUNIT_ASSERT( (reinterpret_cast<cds::uptr_atomic_t>(thData[i]) & (ALLOC::alignment - 1)) == 0 );
+ CPPUNIT_ASSERT( (reinterpret_cast<uintptr_t>(thData[i]) & (ALLOC::alignment - 1)) == 0 );
}
}
CPPUNIT_MSG("Initializatin done" );
memset( p, 0, 16 );
memset( ((char *)p) + m_nSize * sizeof(*p) - 16, 0, 16 );
}
- CPPUNIT_ASSERT( (reinterpret_cast<cds::uptr_atomic_t>(p) & (ALLOC::alignment - 1)) == 0 );
+ CPPUNIT_ASSERT( (reinterpret_cast<uintptr_t>(p) & (ALLOC::alignment - 1)) == 0 );
m_Alloc.deallocate( p, 1 );
}
}