/*
This file is a part of libcds - Concurrent Data Structures library
- (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
Source code repo: http://github.com/khizmax/libcds/
Download: http://sourceforge.net/projects/libcds/files/
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <gtest/gtest.h>
+#include <cds_test/ext_gtest.h>
#include <cds/algo/atomic.h>
#ifndef CDS_USE_BOOST_ATOMIC
#include "cxx11_convert_memory_order.h"
+#define EXPECT_ATOMIC_IS_LOCK_FREE( x ) EXPECT_TRUE( atomics::atomic_is_lock_free( &x ))
+
namespace misc {
class cxx11_atomic_func: public ::testing::Test
{
atomics::memory_order mo_clear = convert_to_store_order(order);
- f.clear( convert_to_store_order(order) );
+ f.clear( convert_to_store_order(order));
for ( int i = 0; i < 5; ++i ) {
EXPECT_FALSE( atomics::atomic_flag_test_and_set_explicit( &f, order ));
- EXPECT_TRUE( atomics::atomic_flag_test_and_set_explicit( &f, order ) );
+ EXPECT_TRUE( atomics::atomic_flag_test_and_set_explicit( &f, order ));
atomics::atomic_flag_clear_explicit( &f, mo_clear );
atomics::atomic_flag_clear_explicit( &f, mo_clear );
}
for ( int i = 0; i < 5; ++i ) {
EXPECT_FALSE( atomics::atomic_flag_test_and_set( &f ));
- EXPECT_TRUE( atomics::atomic_flag_test_and_set( &f ) );
+ EXPECT_TRUE( atomics::atomic_flag_test_and_set( &f ));
atomics::atomic_flag_clear(&f);
atomics::atomic_flag_clear(&f);
}
{
typedef Integral integral_type;
- EXPECT_TRUE( atomics::atomic_is_lock_free( &a ) );
+ EXPECT_ATOMIC_IS_LOCK_FREE( a );
atomics::atomic_store( &a, (integral_type) 0 );
- EXPECT_EQ( a, 0 );
- EXPECT_EQ( atomics::atomic_load( &a ), 0 );
+ EXPECT_EQ( atomics::atomic_load( &a ), integral_type( 0 ));
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
- integral_type n = integral_type(42) << (nByte * 8);
- EXPECT_EQ( atomics::atomic_exchange( &a, n ), 0 );
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
+ EXPECT_EQ( atomics::atomic_exchange( &a, n ), (integral_type) 0 );
EXPECT_EQ( atomics::atomic_load( &a ), n );
EXPECT_EQ( atomics::atomic_exchange( &a, (integral_type) 0 ), n );
- EXPECT_EQ( atomics::atomic_load( &a ), 0 );
+ EXPECT_EQ( atomics::atomic_load( &a ), (integral_type) 0 );
}
integral_type prev = atomics::atomic_load( &a );
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
- integral_type n = integral_type(42) << (nByte * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
integral_type expected = prev;
EXPECT_TRUE( atomics::atomic_compare_exchange_weak( &a, &expected, n));
EXPECT_EQ( expected, prev );
EXPECT_NE( expected, n );
- EXPECT_FALSE( atomics::atomic_compare_exchange_weak( &a, &expected, n) );
+ EXPECT_FALSE( atomics::atomic_compare_exchange_weak( &a, &expected, n));
EXPECT_EQ( expected, n );
prev = n;
prev = atomics::atomic_load( &a );
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
- integral_type n = integral_type(42) << (nByte * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
integral_type expected = prev;
EXPECT_TRUE( atomics::atomic_compare_exchange_strong( &a, &expected, n));
for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
{
integral_type prev = atomics::atomic_load( &a );
- integral_type n = integral_type(42) << (nByte * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
EXPECT_EQ( atomics::atomic_fetch_add( &a, n ), prev );
}
for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
{
integral_type prev = atomics::atomic_load( &a );
- integral_type n = integral_type(42) << ((nByte - 1) * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
EXPECT_EQ( atomics::atomic_fetch_sub( &a, n ), prev );
}
- EXPECT_EQ( atomics::atomic_load( &a ), 0 );
+ EXPECT_EQ( atomics::atomic_load( &a ), (integral_type) 0 );
// fetch_or / fetc_xor / fetch_and
for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
{
integral_type prev = atomics::atomic_load( &a );
- integral_type mask = integral_type(1) << nBit;
+ integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
EXPECT_EQ( atomics::atomic_fetch_or( &a, mask ), prev );
prev = atomics::atomic_load( &a );
const atomics::memory_order oLoad = convert_to_load_order( order );
const atomics::memory_order oStore = convert_to_store_order( order );
- EXPECT_TRUE( atomics::atomic_is_lock_free( &a ) );
+ EXPECT_ATOMIC_IS_LOCK_FREE( a );
atomics::atomic_store_explicit( &a, (integral_type) 0, oStore );
- EXPECT_EQ( a, 0 );
- EXPECT_EQ( atomics::atomic_load_explicit( &a, oLoad ), 0 );
+ EXPECT_EQ( atomics::atomic_load_explicit( &a, oLoad ), (integral_type) 0 );
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
- integral_type n = integral_type(42) << (nByte * 8);
- EXPECT_EQ( atomics::atomic_exchange_explicit( &a, n, order ), 0 );
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
+ EXPECT_EQ( atomics::atomic_exchange_explicit( &a, n, order ), (integral_type) 0 );
EXPECT_EQ( atomics::atomic_load_explicit( &a, oLoad ), n );
EXPECT_EQ( atomics::atomic_exchange_explicit( &a, (integral_type) 0, order ), n );
- EXPECT_EQ( atomics::atomic_load_explicit( &a, oLoad ), 0 );
+ EXPECT_EQ( atomics::atomic_load_explicit( &a, oLoad ), (integral_type) 0 );
}
integral_type prev = atomics::atomic_load_explicit( &a, oLoad );
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
- integral_type n = integral_type(42) << (nByte * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
integral_type expected = prev;
EXPECT_TRUE( atomics::atomic_compare_exchange_weak_explicit( &a, &expected, n, order, atomics::memory_order_relaxed));
prev = atomics::atomic_load_explicit( &a, oLoad );
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
- integral_type n = integral_type(42) << (nByte * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
integral_type expected = prev;
EXPECT_TRUE( atomics::atomic_compare_exchange_strong_explicit( &a, &expected, n, order, atomics::memory_order_relaxed));
for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
{
integral_type prev = atomics::atomic_load_explicit( &a, oLoad );
- integral_type n = integral_type(42) << (nByte * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
EXPECT_EQ( atomics::atomic_fetch_add_explicit( &a, n, order), prev);
}
for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
{
integral_type prev = atomics::atomic_load_explicit( &a, oLoad );
- integral_type n = integral_type(42) << ((nByte - 1) * 8);
+ integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
EXPECT_EQ( atomics::atomic_fetch_sub_explicit( &a, n, order ), prev);
}
- EXPECT_EQ( atomics::atomic_load_explicit( &a, oLoad ), 0 );
+ EXPECT_EQ( atomics::atomic_load_explicit( &a, oLoad ), integral_type( 0 ));
// fetch_or / fetc_xor / fetch_and
for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
{
integral_type prev = atomics::atomic_load_explicit( &a, oLoad ) ;;
- integral_type mask = integral_type(1) << nBit;
+ integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
EXPECT_EQ( atomics::atomic_fetch_or_explicit( &a, mask, order ), prev );
prev = atomics::atomic_load_explicit( &a, oLoad );
EXPECT_EQ( atomics::atomic_fetch_and_explicit( &a, (integral_type) ~mask, order ), prev );
prev = atomics::atomic_load_explicit( &a, oLoad );
- EXPECT_EQ( ( prev & mask), 0);
+ EXPECT_EQ( ( prev & mask), integral_type( 0 ));
EXPECT_EQ( atomics::atomic_fetch_xor_explicit( &a, mask, order ), prev );
prev = atomics::atomic_load_explicit( &a, oLoad );
template <class AtomicBool>
void do_test_atomic_bool(AtomicBool& a)
{
- EXPECT_TRUE( atomics::atomic_is_lock_free( &a ) );
+ EXPECT_ATOMIC_IS_LOCK_FREE( a );
atomics::atomic_store( &a, false );
EXPECT_FALSE( a );
EXPECT_FALSE( atomics::atomic_load( &a ));
const atomics::memory_order oStore = convert_to_store_order( order );
const atomics::memory_order oExchange = convert_to_exchange_order( order );
- EXPECT_TRUE( atomics::atomic_is_lock_free( &a ) );
+ EXPECT_ATOMIC_IS_LOCK_FREE( a );
atomics::atomic_store_explicit( &a, false, oStore );
EXPECT_FALSE( a == false );
EXPECT_FALSE( atomics::atomic_load_explicit( &a, oLoad ));
EXPECT_EQ( *atomics::atomic_load_explicit( &a, oLoad ), 1 );
for ( integral_type i = 1; i < aSize; ++i ) {
- integral_type * p = atomics::atomic_load_explicit( &a, oLoad );
+ p = atomics::atomic_load_explicit( &a, oLoad );
EXPECT_EQ( *p, i );
EXPECT_EQ( atomics::atomic_fetch_add_explicit( &a, 1, order ), p );
EXPECT_EQ( *atomics::atomic_load_explicit( &a, oLoad ), i + 1 );
}
for ( integral_type i = aSize; i > 1; --i ) {
- integral_type * p = atomics::atomic_load_explicit( &a, oLoad );
+ p = atomics::atomic_load_explicit( &a, oLoad );
EXPECT_EQ( *p, i );
EXPECT_EQ( atomics::atomic_fetch_sub_explicit( &a, 1, order ), p );
EXPECT_EQ( *atomics::atomic_load_explicit( &a, oLoad ), i - 1 );
integral_type arr[8];
const integral_type aSize = sizeof(arr)/sizeof(arr[0]);
for ( integral_type i = 0; i < aSize; ++i ) {
- arr[size_t(i)] = i + 1;
+ arr[static_cast<size_t>(i)] = i + 1;
}
atomic_pointer a;
EXPECT_EQ( *atomics::atomic_load( &a ), 1 );
for ( integral_type i = 1; i < aSize; ++i ) {
- integral_type * p = atomics::atomic_load( &a );
+ p = atomics::atomic_load( &a );
EXPECT_EQ( *p, i );
EXPECT_EQ( atomics::atomic_fetch_add( &a, 1 ), p );
EXPECT_EQ( *atomics::atomic_load( &a ), i + 1 );
}
for ( integral_type i = aSize; i > 1; --i ) {
- integral_type * p = atomics::atomic_load( &a );
+ p = atomics::atomic_load( &a );
EXPECT_EQ( *p, i );
EXPECT_EQ( atomics::atomic_fetch_sub( &a, 1 ), p );
EXPECT_EQ( *atomics::atomic_load( &a ), i - 1 );
template <typename Atomic>
void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, atomics::memory_order order )
{
+ CDS_UNUSED( aSize );
+
atomics::memory_order oLoad = convert_to_load_order(order);
atomics::memory_order oStore = convert_to_store_order(order);
char * p;
EXPECT_EQ( reinterpret_cast<char *>(atomics::atomic_exchange_explicit( &a, (void *) arr, order )), arr + 3 );
EXPECT_EQ( reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )), arr );
EXPECT_EQ( *reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )), 1 );
-
- for ( char i = 1; i < aSize; ++i ) {
- EXPECT_EQ( *reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )), i );
- atomics::atomic_fetch_add_explicit( &a, 1, order );
- EXPECT_EQ( *reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )), i + 1 );
- }
-
- for ( char i = aSize; i > 1; --i ) {
- EXPECT_EQ( *reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )), i );
- atomics::atomic_fetch_sub_explicit( &a, 1, order );
- EXPECT_EQ( *reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )), i - 1 );
- }
}
template <bool Volatile>
char arr[8];
const char aSize = sizeof(arr)/sizeof(arr[0]);
for ( char i = 0; i < aSize; ++i ) {
- arr[unsigned(i)] = i + 1;
+ arr[static_cast<size_t>(i)] = i + 1;
}
atomic_pointer a;
EXPECT_EQ( *reinterpret_cast<char *>(atomics::atomic_load( &a )), 1 );
p = arr;
- EXPECT_TRUE( atomics::atomic_compare_exchange_weak( &a, (void **) &p, (void *)(arr + 5) ));
+ EXPECT_TRUE( atomics::atomic_compare_exchange_weak( &a, (void **) &p, (void *)(arr + 5)));
EXPECT_EQ( p, arr + 0 );
- EXPECT_FALSE( atomics::atomic_compare_exchange_weak( &a, (void **) &p, (void *)(arr + 3) ));
+ EXPECT_FALSE( atomics::atomic_compare_exchange_weak( &a, (void **) &p, (void *)(arr + 3)));
EXPECT_EQ( p, arr + 5 );
- EXPECT_TRUE( atomics::atomic_compare_exchange_strong( &a, (void **) &p, (void *)(arr + 3) ));
+ EXPECT_TRUE( atomics::atomic_compare_exchange_strong( &a, (void **) &p, (void *)(arr + 3)));
EXPECT_EQ( p, arr + 5 );
- EXPECT_FALSE( atomics::atomic_compare_exchange_strong( &a, (void **) &p, (void *)(arr + 5) ));
+ EXPECT_FALSE( atomics::atomic_compare_exchange_strong( &a, (void **) &p, (void *)(arr + 5)));
EXPECT_EQ( p, arr + 3 );
EXPECT_EQ( reinterpret_cast<char *>( atomics::atomic_exchange( &a, (void *) arr )), arr + 3 );
EXPECT_EQ( reinterpret_cast<char *>( atomics::atomic_load( &a )), arr );
EXPECT_EQ( *reinterpret_cast<char *>(atomics::atomic_load( &a )), 1 );
- for ( char i = 1; i < aSize; ++i ) {
- EXPECT_EQ( *reinterpret_cast<char *>(atomics::atomic_load( &a )), i );
- atomics::atomic_fetch_add( &a, 1 );
- EXPECT_EQ( *reinterpret_cast<char *>(atomics::atomic_load( &a )), i + 1 );
- }
-
- for ( char i = aSize; i > 1; --i ) {
- EXPECT_EQ( *reinterpret_cast<char *>(atomics::atomic_load( &a )), i );
- atomics::atomic_fetch_sub( &a, 1 );
- EXPECT_EQ( *reinterpret_cast<char *>(atomics::atomic_load( &a )), i - 1 );
- }
-
do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_relaxed );
do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acquire );
do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_release );