// 64-bit bit ops
template <> struct BitOps<8> {
- typedef atomic64u_unaligned TUInt;
+ typedef uint64_t TUInt;
static int MSB( TUInt x ) { return bitop::platform::msb64( x ); }
static int LSB( TUInt x ) { return bitop::platform::lsb64( x ); }
};
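// Illustrative sketch (not part of the patch): how a size-based front end is
// assumed to route through this specialization; the wrapper name msb_of is
// hypothetical.
//
//    template <typename T>
//    static inline int msb_of( T x )
//    {
//        typedef BitOps<sizeof(T)> ops;
//        return ops::MSB( static_cast<typename ops::TUInt>( x ));
//    }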
# define cds_bitop_msb64_DEFINED
- static inline int msb64( atomic64u_unaligned nArg )
+ static inline int msb64( uint64_t nArg )
{
- atomic64u_unaligned nRet;
+ uint64_t nRet;
asm volatile (
"bsrq %[nArg], %[nRet] ;\n\t"
"jnz 1f ;\n\t"
}
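// Hedged portable equivalent of the bsrq path above: bsrq sets ZF when the
// source is zero and leaves the destination undefined, hence the jnz guard.
// With GCC builtins the same 1-based convention can be written as follows
// (a sketch, not part of the patch):
static inline int msb64_clz_sketch( uint64_t x )
{
    return x ? 64 - __builtin_clzll( x ) : 0;   // 1..64, or 0 when x == 0
}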
# define cds_bitop_msb64nz_DEFINED
- static inline int msb64nz( atomic64u_unaligned nArg )
+ static inline int msb64nz( uint64_t nArg )
{
assert( nArg != 0 );
- atomic64u_unaligned nRet;
+ uint64_t nRet;
__asm__ __volatile__ (
"bsrq %[nArg], %[nRet] ;"
: [nRet] "=a" (nRet)
// LSB - return index (1..64) of least significant bit in nArg. If nArg == 0 return 0
# define cds_bitop_lsb64_DEFINED
- static inline int lsb64( atomic64u_unaligned nArg )
+ static inline int lsb64( uint64_t nArg )
{
-
- atomic64u_unaligned nRet;
+ uint64_t nRet;
__asm__ __volatile__ (
"bsfq %[nArg], %[nRet] ;"
"jnz 1f ;"
// LSB - return index (0..63) of least significant bit in nArg.
// Condition: nArg != 0
# define cds_bitop_lsb64nz_DEFINED
- static inline int lsb64nz( atomic64u_unaligned nArg )
+ static inline int lsb64nz( uint64_t nArg )
{
assert( nArg != 0 );
- atomic64u_unaligned nRet;
+ uint64_t nRet;
__asm__ __volatile__ (
"bsfq %[nArg], %[nRet] ;"
: [nRet] "=a" (nRet)
{
if ( !nArg )
return 0;
- atomic64u_t x = nArg;
+ uint64_t x = nArg;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
+ x |= x >> 32;   // fold the high half as well; without it bits 32..63 never reach the popcount
- atomic64u_t nRes;
+ uint64_t nRes;
asm __volatile__( "popcnt %0=%1\n\t" : "=r" (nRes) : "r" (x) );
return (int) nRes;
}
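// Worked example of the smear-and-popcount trick above (illustrative; GCC
// builtin assumed): for x = 0b101000 the shifts set every bit below the MSB
// (x becomes 0b111111), so the population count equals the 1-based MSB index, 6.
static inline int msb64_smear_sketch( uint64_t x )
{
    if ( !x )
        return 0;
    x |= x >> 1;  x |= x >> 2;  x |= x >> 4;
    x |= x >> 8;  x |= x >> 16; x |= x >> 32;
    return __builtin_popcountll( x );   // count of 1-bits == 1-based MSB index
}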
// MSB - return index (0..63) of most significant bit in nArg.
// Condition: nArg != 0
# define cds_bitop_msb64nz_DEFINED
- static inline int msb64nz( atomic64u_t nArg )
+ static inline int msb64nz( uint64_t nArg )
{
assert( nArg != 0 );
long double d = nArg;
// Source: UltraSPARC Architecture 2007
//
// Test result: this variant and its variations are about 100 times slower than the generic implementation :-(
- static inline int sparc_msb64( atomic64u_t nArg )
+ static inline int sparc_msb64( uint64_t nArg )
{
- atomic64u_t result;
+ uint64_t result;
asm volatile (
"neg %[nArg], %[result] \n\t"
"xnor %[nArg], %[result], %%g5 \n\t"
// MSB - return index (1..32) of most significant bit in nArg. If nArg == 0 return 0
static inline int sparc_msb32( uint32_t nArg )
{
- return sparc_msb64( (atomic64u_t) nArg );
+ return sparc_msb64( (uint64_t) nArg );
}
}} // namespace gcc::Sparc
# define cds_bitop_msb64_DEFINED
- static inline int msb64( atomic64u_unaligned nArg )
+ static inline int msb64( uint64_t nArg )
{
unsigned long nIndex;
if ( _BitScanReverse64( &nIndex, nArg ))
}
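// Only the branch bodies are elided above; a hedged sketch of the usual
// completion, matching the 1-based convention (_BitScanReverse64 stores the
// 0-based bit position in nIndex and returns zero when nArg == 0):
//
//        return (int) nIndex + 1;   // found: convert 0-based position to 1..64
//    return 0;                      // nArg == 0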
# define cds_bitop_msb64nz_DEFINED
- static inline int msb64nz( atomic64u_unaligned nArg )
+ static inline int msb64nz( uint64_t nArg )
{
assert( nArg != 0 );
unsigned long nIndex;
}
# define cds_bitop_lsb64_DEFINED
- static inline int lsb64( atomic64u_unaligned nArg )
+ static inline int lsb64( uint64_t nArg )
{
unsigned long nIndex;
if ( _BitScanForward64( &nIndex, nArg ))
}
# define cds_bitop_lsb64nz_DEFINED
- static inline int lsb64nz( atomic64u_unaligned nArg )
+ static inline int lsb64nz( uint64_t nArg )
{
assert( nArg != 0 );
unsigned long nIndex;
}
# define cds_bitop_complement64_DEFINED
- static inline bool complement64( atomic64u_t * pArg, unsigned int nBit )
+ static inline bool complement64( uint64_t * pArg, unsigned int nBit )
{
return _bittestandcomplement64( reinterpret_cast<__int64 *>( pArg ), nBit ) != 0;
}
}
# define cds_bitop_complement64_DEFINED
- static inline bool complement64( atomic64u_t * pArg, unsigned int nBit )
+ static inline bool complement64( uint64_t * pArg, unsigned int nBit )
{
if ( nBit < 32 )
return _bittestandcomplement( reinterpret_cast<long *>( pArg ), nBit ) != 0;
#endif
#ifndef cds_bitop_isPow2_64_DEFINED
- static inline bool isPow2_64( atomic64_unaligned x )
+ static inline bool isPow2_64( uint64_t x )
{
return (x & ( x - 1 )) == 0 && x;
}
#endif
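// Usage sketch: x & (x - 1) clears the lowest set bit, so the expression is
// zero exactly for powers of two; the trailing "&& x" excludes x == 0.
static inline void isPow2_64_usage_sketch()
{
    assert( isPow2_64( 4096 ));    // 2^12
    assert( !isPow2_64( 4097 ));
    assert( !isPow2_64( 0 ));      // zero is not a power of two
}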
#ifndef cds_bitop_msb64_DEFINED
- static inline int msb64( atomic64u_unaligned x )
+ static inline int msb64( uint64_t x )
{
uint32_t h = (uint32_t) (x >> 32);
if ( h )
#endif
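// The branch bodies are elided above; a hedged sketch of the usual split,
// assuming msb32 is the 32-bit counterpart defined earlier in this header:
// when the high half is nonzero the answer lives there, offset by 32.
//
//    uint32_t h = (uint32_t) (x >> 32);
//    if ( h )
//        return msb32( h ) + 32;
//    return msb32( (uint32_t) x );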
#ifndef cds_bitop_msb64nz_DEFINED
- static inline int msb64nz( atomic64u_unaligned x )
+ static inline int msb64nz( uint64_t x )
{
return msb64( x ) - 1;
}
#endif
#ifndef cds_bitop_lsb64_DEFINED
- static inline int lsb64( atomic64u_unaligned x )
+ static inline int lsb64( uint64_t x )
{
if ( !x )
return 0;
#endif
#ifndef cds_bitop_lsb64nz_DEFINED
- static inline int lsb64nz( atomic64u_unaligned x )
+ static inline int lsb64nz( uint64_t x )
{
return lsb64( x ) - 1;
}
#endif
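// Convention check (illustrative sketch): the plain variants are 1-based and
// reserve 0 for a zero argument; the *nz variants are 0-based and require x != 0.
static inline void bitop64_convention_sketch()
{
    assert( msb64( 1 ) == 1 && msb64nz( 1 ) == 0 );
    assert( lsb64( 0x8000000000000000ULL ) == 64 );
    assert( lsb64nz( 0x8000000000000000ULL ) == 63 );
    assert( msb64( 0 ) == 0 && lsb64( 0 ) == 0 );
}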
#ifndef cds_bitop_rbo64_DEFINED
- static inline atomic64u_t rbo64( atomic64u_unaligned x )
+ static inline uint64_t rbo64( uint64_t x )
{
// Low 32bit High 32bit
- return ( ((atomic64u_t) rbo32( (uint32_t) x )) << 32 ) | ((atomic64u_t) rbo32( (uint32_t) (x >> 32) ));
+ return ( static_cast<uint64_t>(rbo32( (uint32_t) x )) << 32 ) | ( static_cast<uint64_t>( rbo32( (uint32_t) (x >> 32) )));
}
#endif
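// Sanity sketch: rbo64 reverses the full 64-bit order by bit-reversing each
// 32-bit half with rbo32 and swapping the halves, so reversal is an involution
// and bit 0 maps to bit 63.
static inline void rbo64_sketch()
{
    assert( rbo64( 1 ) == 0x8000000000000000ULL );
    uint64_t const x = 0x0123456789abcdefULL;
    assert( rbo64( rbo64( x )) == x );
}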
#endif
#ifndef cds_bitop_sbc64_DEFINED
- static inline int sbc64( atomic64u_unaligned x )
+ static inline int sbc64( uint64_t x )
{
# ifdef cds_bitop_zbc64_DEFINED
return 64 - zbc64( x );
#endif
#ifndef cds_bitop_zbc64_DEFINED
- static inline int zbc64( atomic64u_unaligned x )
+ static inline int zbc64( uint64_t x )
{
return 64 - sbc64( x );
}
#endif
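// sbc64 ("set bit count") and zbc64 ("zero bit count") are duals summing to 64.
// A hedged portable fallback using the GCC/Clang builtin (an assumption; the
// library's own fallback may differ):
static inline int sbc64_popcount_sketch( uint64_t x )
{
    return __builtin_popcountll( x );   // number of 1-bits in x
}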
#ifndef cds_bitop_complement64_DEFINED
- static inline bool complement64( atomic64u_t * pArg, unsigned int nBit )
+ static inline bool complement64( uint64_t * pArg, unsigned int nBit )
{
assert( pArg );
- atomic64u_t nVal = *pArg & (atomic64u_t(1) << nBit);
- *pArg ^= atomic64u_t(1) << nBit;
+ uint64_t nVal = *pArg & (uint64_t(1) << nBit);
+ *pArg ^= uint64_t(1) << nBit;
return nVal != 0;
}
#endif
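// Usage sketch: complement64 toggles bit nBit of *pArg and reports the bit's
// previous state, mirroring the _bittestandcomplement64 intrinsic used on MSVC.
static inline void complement64_usage_sketch()
{
    uint64_t v = 0;
    bool bPrev = complement64( &v, 5 );   // sets bit 5
    assert( !bPrev && v == 0x20 );
    bPrev = complement64( &v, 5 );        // clears bit 5 again
    assert( bPrev && v == 0 );
}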
static inline uint64_t RandXorShift64(uint64_t x)
{
- //static atomic64u_t xRandom = 88172645463325252LL;
- //atomic64u_t x = xRandom;
+ //static uint64_t xRandom = 88172645463325252LL;
+ //uint64_t x = xRandom;
if ( !x )
x = 88172645463325252LL;
x ^= x << 13;
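// The tail of this routine is elided above; for reference, the classic
// Marsaglia xorshift64 step it appears to implement uses the standard
// 13/7/17 shift triple (an assumption, stated here as a sketch):
//
//    x ^= x << 13;
//    x ^= x >> 7;
//    x ^= x << 17;
//    return x;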
# define CDS_CXX11_INLINE_NAMESPACE
#endif
-//@cond
-// typedefs for back compatibility
-namespace cds {
- /// 64bit unaligned int
- typedef int64_t atomic64_unaligned;
-
- /// 64bit unaligned unsigned int
- typedef uint64_t atomic64u_unaligned;
-
- /// 64bit aligned int
- typedef atomic64_unaligned CDS_TYPE_ALIGNMENT(8) atomic64_aligned;
-
- /// 64bit aligned unsigned int
- typedef atomic64u_unaligned CDS_TYPE_ALIGNMENT(8) atomic64u_aligned;
-
- /// 64bit atomic int (aligned)
- typedef atomic64_aligned atomic64_t;
-
- /// 64bit atomic unsigned int (aligned)
- typedef atomic64u_aligned atomic64u_t;
-} // namespace cds
-//@endcond
-
/*************************************************************************
Common things
**************************************************************************/
/// Finds \p key using \p pred predicate for searching
/**
- The function is an analog of \ref cds_intrusive_MichaelList_rcu_find_func "find(Q&, Func)"
+ The function is an analog of \p find(Q&, Func)
but \p pred is used for key comparing.
\p Less functor has the interface like \p std::less.
\p pred must imply the same element order as the comparator used for building the list.
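A hedged example of such a functor (the item type \p my_item and its key
field are illustrative, not from the library):
\code
struct my_item { int nKey; };
struct my_less {
    bool operator()( my_item const& i1, my_item const& i2 ) const
    { return i1.nKey < i2.nKey; }
    bool operator()( my_item const& i, int k ) const
    { return i.nKey < k; }
    bool operator()( int k, my_item const& i ) const
    { return k < i.nKey; }
};
\endcode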
size_t nPageDeallocCount ; ///< Count of pages (superblocks) deallocated
size_t nDescAllocCount ; ///< Count of superblock descriptors
size_t nDescFull ; ///< Count of full superblocks
- atomic64u_t nBytesAllocated ; ///< Count of allocated bytes (for heap managed memory blocks)
- atomic64u_t nBytesDeallocated ; ///< Count of deallocated bytes (for heap managed memory blocks)
+ uint64_t nBytesAllocated ; ///< Count of allocated bytes (for heap managed memory blocks)
+ uint64_t nBytesDeallocated ; ///< Count of deallocated bytes (for heap managed memory blocks)
size_t nSysAllocCount ; ///< Count of \p alloc and \p alloc_aligned function calls (for large memory blocks allocated directly from the OS)
size_t nSysFreeCount ; ///< Count of \p free and \p free_aligned function calls (for large memory blocks allocated directly from the OS)
- atomic64u_t nSysBytesAllocated ; ///< Count of allocated bytes (for large memory blocks that allocated directly from OS)
- atomic64_t nSysBytesDeallocated; ///< Count of deallocated bytes (for large memory blocks that allocated directly from OS)
+ uint64_t nSysBytesAllocated ; ///< Count of allocated bytes (for large memory blocks allocated directly from the OS)
+ int64_t nSysBytesDeallocated; ///< Count of deallocated bytes (for large memory blocks allocated directly from the OS)
// Internal contention indicators
/// CAS failure counter for updating active field of active block of \p alloc_from_active Heap internal function
class bound_checker
{
protected:
- typedef atomic64u_t trailer_type;
+ typedef uint64_t trailer_type;
static const trailer_type s_BoundCheckerTrailer = 0xbadcafeedeadc0feULL;
public:
}
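// Sketch of how such a trailer guard is typically used (illustrative member
// names, not the class's real API): the magic word is written just past the
// user area at allocation time and re-checked on free; a mismatch signals a
// buffer overrun.
//
//    void make_trailer( void * pEndOfUserBlock )
//    { *reinterpret_cast<trailer_type *>( pEndOfUserBlock ) = s_BoundCheckerTrailer; }
//
//    bool check_bounds( void * pEndOfUserBlock ) const
//    { return *reinterpret_cast<trailer_type *>( pEndOfUserBlock ) == s_BoundCheckerTrailer; }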
/// Returns current value of nBytesAllocated counter
- atomic64u_t allocatedBytes() const
+ uint64_t allocatedBytes() const
{
return nBytesAllocated.load(atomics::memory_order_relaxed);
}
/// Returns current value of nBytesDeallocated counter
- atomic64u_t deallocatedBytes() const
+ uint64_t deallocatedBytes() const
{
return nBytesDeallocated.load(atomics::memory_order_relaxed);
}
}
/// Returns current value of nBytesAllocated counter
- atomic64u_t allocatedBytes() const
+ uint64_t allocatedBytes() const
{
return 0;
}
/// Returns current value of nBytesDeallocated counter
- atomic64u_t deallocatedBytes() const
+ uint64_t deallocatedBytes() const
{
return 0;
}
To get the count of bytes allocated but not yet deallocated, call
\code allocatedBytes() - deallocatedBytes() \endcode
*/
- atomic64u_t allocatedBytes() const
+ uint64_t allocatedBytes() const
{
return nBytesAllocated.load(atomics::memory_order_relaxed);
}
See \ref allocatedBytes notes
*/
- atomic64u_t deallocatedBytes() const
+ uint64_t deallocatedBytes() const
{
return nBytesDeallocated.load(atomics::memory_order_relaxed);
}
{ return 0; }
size_t descFull() const
{ return 0; }
- atomic64u_t allocatedBytes() const
+ uint64_t allocatedBytes() const
{ return 0; }
- atomic64u_t deallocatedBytes() const
+ uint64_t deallocatedBytes() const
{ return 0; }
size_t activeDescCASFailureCount() const
{ return 0; }
2011.01.23 khizmax Created
*/
+#include <stdlib.h> // rand, srand
+
#include <cds/details/aligned_type.h>
#include <cds/user_setup/allocator.h>
#include <cds/user_setup/cache_line.h>
#include <cds/algo/atomic.h>
-#include <stdlib.h> // rand, srand
namespace cds {
void bitop64()
{
- cds::atomic64u_t n;
+ uint64_t n;
n = 0;
CPPUNIT_ASSERT_EX( cds::bitop::MSB(n) == 0, "n=" << n );
CPPUNIT_ASSERT_EX( cds::bitop::LSB(n) == 0, "n=" << n );