/*
    This file is a part of libcds - Concurrent Data Structures library
    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <gtest/gtest.h>

#include <cds/algo/atomic.h>
#include "cxx11_convert_memory_order.h"
36 class cxx11_atomic_class: public ::testing::Test
39 template <typename AtomicFlag>
40 void do_test_atomic_flag_mo( AtomicFlag& f, atomics::memory_order order )
42 atomics::memory_order mo_clear = convert_to_store_order(order);
43 for ( int i = 0; i < 5; ++i ) {
44 EXPECT_TRUE( !f.test_and_set( order ));
45 EXPECT_TRUE( f.test_and_set( order ));
50 template <typename AtomicFlag>
51 void do_test_atomic_flag( AtomicFlag& f)
55 for ( int i = 0; i < 5; ++i ) {
56 EXPECT_TRUE( !f.test_and_set());
57 EXPECT_TRUE( f.test_and_set());
61 do_test_atomic_flag_mo( f, atomics::memory_order_relaxed );
62 //do_test_atomic_flag_mo( f, atomics::memory_order_consume );
63 do_test_atomic_flag_mo( f, atomics::memory_order_acquire );
64 do_test_atomic_flag_mo( f, atomics::memory_order_release );
65 do_test_atomic_flag_mo( f, atomics::memory_order_acq_rel );
66 do_test_atomic_flag_mo( f, atomics::memory_order_seq_cst );
69 template <class Atomic, typename Integral>
70 void do_test_atomic_type(Atomic& a)
72 typedef Integral integral_type;
74 EXPECT_TRUE( a.is_lock_free());
75 a.store( (integral_type) 0 );
76 //EXPECT_EQ( a, static_cast<integral_type>( 0 ));
77 EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));
79 for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
80 integral_type n = integral_type(42) << (nByte * 8);
81 EXPECT_EQ( a.exchange( n ), static_cast<integral_type>( 0 ));
82 EXPECT_EQ( a.load(), n );
83 EXPECT_EQ( a.exchange( (integral_type) 0 ), n );
84 EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));
87 integral_type prev = a.load();
88 for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
89 integral_type n = integral_type(42) << (nByte * 8);
90 integral_type expected = prev;
92 EXPECT_TRUE( a.compare_exchange_weak( expected, n));
93 EXPECT_EQ( expected, prev );
94 EXPECT_FALSE( a.compare_exchange_weak( expected, n));
95 EXPECT_EQ( expected, n );
98 EXPECT_EQ( a.load(), n );
101 a = (integral_type) 0;
104 for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
105 integral_type n = integral_type(42) << (nByte * 8);
106 integral_type expected = prev;
108 EXPECT_TRUE( a.compare_exchange_strong( expected, n));
109 EXPECT_EQ( expected, prev );
110 EXPECT_FALSE( a.compare_exchange_strong( expected, n));
111 EXPECT_EQ( expected, n );
114 EXPECT_EQ( a.load(), n );
117 EXPECT_EQ( a.exchange( (integral_type) 0 ), prev );
120 template <class Atomic, typename Integral>
121 void do_test_atomic_integral(Atomic& a)
123 do_test_atomic_type< Atomic, Integral >(a);
125 typedef Integral integral_type;
128 a.store( (integral_type) 0 );
131 for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
133 integral_type prev = a.load();
134 integral_type n = integral_type(42) << (nByte * 8);
136 EXPECT_EQ( a.fetch_add(n), prev);
140 for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
142 integral_type prev = a.load();
143 integral_type n = integral_type(42) << ((nByte - 1) * 8);
145 EXPECT_EQ( a.fetch_sub(n), prev);
147 EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));
149 // fetch_or / fetc_xor / fetch_and
150 for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
152 integral_type prev = a.load() ;;
153 integral_type mask = integral_type(1) << nBit;
155 EXPECT_EQ( a.fetch_or( mask ), prev );
157 EXPECT_EQ( ( prev & mask), mask);
159 EXPECT_EQ( a.fetch_and( (integral_type) ~mask ), prev );
161 EXPECT_EQ( integral_type(prev & mask), integral_type(0));
163 EXPECT_EQ( a.fetch_xor( mask ), prev );
165 EXPECT_EQ( integral_type( prev & mask), mask);
167 EXPECT_EQ( a.load(), (integral_type) -1 );
171 a = (integral_type) 0;
174 for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
176 integral_type prev = a;
177 integral_type n = integral_type(42) << (nByte * 8);
179 EXPECT_EQ( (a += n), (prev + n));
183 for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
185 integral_type prev = a;
186 integral_type n = integral_type(42) << ((nByte - 1) * 8);
188 EXPECT_EQ( (a -= n), prev - n );
190 EXPECT_EQ( a.load(), (integral_type) 0 );
193 for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
195 integral_type prev = a;
196 integral_type mask = integral_type(1) << nBit;
198 EXPECT_EQ( (a |= mask ), (prev | mask ));
200 EXPECT_EQ( ( prev & mask), mask);
202 EXPECT_EQ( (a &= (integral_type) ~mask ), ( prev & (integral_type) ~mask ));
204 EXPECT_EQ( ( prev & mask), integral_type( 0 ));
206 EXPECT_EQ( (a ^= mask ), (prev ^ mask ));
208 EXPECT_EQ( ( prev & mask), mask);
210 EXPECT_EQ( a.load(), (integral_type) -1 );
213 template <class Atomic, typename Integral>
214 void do_test_atomic_type( Atomic& a, atomics::memory_order order )
216 typedef Integral integral_type;
218 const atomics::memory_order oLoad = convert_to_load_order( order );
219 const atomics::memory_order oStore = convert_to_store_order( order );
221 EXPECT_TRUE( a.is_lock_free());
222 a.store((integral_type) 0, oStore );
223 //EXPECT_EQ( a, integral_type( 0 ));
224 EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
226 for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
227 integral_type n = integral_type(42) << (nByte * 8);
228 EXPECT_EQ( a.exchange( n, order ), integral_type( 0 ));
229 EXPECT_EQ( a.load( oLoad ), n );
230 EXPECT_EQ( a.exchange( (integral_type) 0, order ), n );
231 EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
234 integral_type prev = a.load( oLoad );
235 for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
236 integral_type n = integral_type(42) << (nByte * 8);
237 integral_type expected = prev;
239 EXPECT_TRUE( a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
240 EXPECT_EQ( expected, prev );
241 EXPECT_FALSE( a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
242 EXPECT_EQ( expected, n );
245 EXPECT_EQ( a.load( oLoad ), n );
248 a.store( (integral_type) 0, oStore );
250 prev = a.load( oLoad );
251 for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
252 integral_type n = integral_type(42) << (nByte * 8);
253 integral_type expected = prev;
255 EXPECT_TRUE( a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
256 EXPECT_EQ( expected, prev );
257 EXPECT_FALSE( a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
258 EXPECT_EQ( expected, n );
261 EXPECT_EQ( a.load( oLoad ), n );
264 EXPECT_EQ( a.exchange( (integral_type) 0, order ), prev );
267 template <class Atomic, typename Integral>
268 void do_test_atomic_integral( Atomic& a, atomics::memory_order order )
270 do_test_atomic_type< Atomic, Integral >( a, order );
272 typedef Integral integral_type;
274 const atomics::memory_order oLoad = convert_to_load_order( order );
275 const atomics::memory_order oStore = convert_to_store_order( order );
278 a.store( (integral_type) 0, oStore );
281 for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
283 integral_type prev = a.load( oLoad );
284 integral_type n = integral_type(42) << (nByte * 8);
286 EXPECT_EQ( a.fetch_add( n, order), prev);
290 for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
292 integral_type prev = a.load( oLoad );
293 integral_type n = integral_type(42) << ((nByte - 1) * 8);
295 EXPECT_EQ( a.fetch_sub( n, order ), prev);
297 EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
299 // fetch_or / fetc_xor / fetch_and
300 for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
302 integral_type prev = a.load( oLoad ) ;;
303 integral_type mask = integral_type(1) << nBit;
305 EXPECT_EQ( a.fetch_or( mask, order ), prev );
306 prev = a.load( oLoad );
307 EXPECT_EQ( ( prev & mask), mask);
309 EXPECT_EQ( a.fetch_and( (integral_type) ~mask, order ), prev );
310 prev = a.load( oLoad );
311 EXPECT_EQ( ( prev & mask), integral_type( 0 ));
313 EXPECT_EQ( a.fetch_xor( mask, order ), prev );
314 prev = a.load( oLoad );
315 EXPECT_EQ( ( prev & mask), mask);
317 EXPECT_EQ( a.load( oLoad ), (integral_type) -1 );
322 template <typename Atomic, typename Integral>
323 void test_atomic_integral_(Atomic& a)
325 do_test_atomic_integral<Atomic, Integral >(a);
327 do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_relaxed );
328 do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acquire );
329 do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_release );
330 do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acq_rel );
331 do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_seq_cst );
334 template <typename Integral>
335 void test_atomic_integral()
337 typedef atomics::atomic<Integral> atomic_type;
340 for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
341 test_atomic_integral_<atomic_type, Integral>( a[i] );
344 template <typename Integral>
345 void test_atomic_integral_volatile()
347 typedef atomics::atomic<Integral> volatile atomic_type;
350 for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
351 test_atomic_integral_<atomic_type, Integral>( a[i] );
355 template <class AtomicBool>
356 void do_test_atomic_bool( AtomicBool& a )
358 EXPECT_TRUE( a.is_lock_free());
361 EXPECT_FALSE( a.load());
363 EXPECT_FALSE( a.exchange( true ));
364 EXPECT_TRUE( a.load());
365 EXPECT_TRUE( a.exchange( false ));
366 EXPECT_FALSE( a.load());
368 bool expected = false;
369 EXPECT_TRUE( a.compare_exchange_weak( expected, true));
370 EXPECT_FALSE( expected );
371 EXPECT_FALSE( a.compare_exchange_weak( expected, false));
372 EXPECT_TRUE( expected );
373 EXPECT_TRUE( a.load());
378 EXPECT_TRUE( a.compare_exchange_strong( expected, true));
379 EXPECT_FALSE( expected );
380 EXPECT_FALSE( a.compare_exchange_strong( expected, false));
381 EXPECT_TRUE( expected );
383 EXPECT_TRUE( a.load());
385 EXPECT_TRUE( a.exchange( false ));
388 template <class AtomicBool>
389 void do_test_atomic_bool( AtomicBool& a, atomics::memory_order order )
391 const atomics::memory_order oLoad = convert_to_load_order( order );
392 const atomics::memory_order oStore = convert_to_store_order( order );
393 const atomics::memory_order oExchange = convert_to_exchange_order( order );
395 EXPECT_TRUE( a.is_lock_free());
396 a.store( false, oStore );
398 EXPECT_FALSE( a.load( oLoad ));
400 EXPECT_FALSE( a.exchange( true, oExchange ));
401 EXPECT_TRUE( a.load( oLoad ));
402 EXPECT_TRUE( a.exchange( false, oExchange ));
403 EXPECT_FALSE( a.load( oLoad ));
405 bool expected = false;
406 EXPECT_TRUE( a.compare_exchange_weak( expected, true, order, atomics::memory_order_relaxed));
407 EXPECT_FALSE( expected );
408 EXPECT_FALSE( a.compare_exchange_weak( expected, false, order, atomics::memory_order_relaxed));
409 EXPECT_TRUE( expected );
410 EXPECT_TRUE( a.load( oLoad ));
413 a.store( false, oStore );
416 EXPECT_TRUE( a.compare_exchange_strong( expected, true, order, atomics::memory_order_relaxed));
417 EXPECT_FALSE( expected );
418 EXPECT_FALSE( a.compare_exchange_strong( expected, false, order, atomics::memory_order_relaxed));
419 EXPECT_TRUE( expected );
421 EXPECT_TRUE( a.load( oLoad ));
423 EXPECT_TRUE( a.exchange( false, oExchange ));
427 template <typename Atomic>
428 void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, atomics::memory_order order )
430 atomics::memory_order oLoad = convert_to_load_order(order);
431 atomics::memory_order oStore = convert_to_store_order(order);
434 a.store( (void *) arr, oStore );
435 EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), 1 );
438 EXPECT_TRUE( a.compare_exchange_weak( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
439 EXPECT_EQ( p, arr + 0 );
440 EXPECT_EQ( *reinterpret_cast<char *>(p), 1 );
441 EXPECT_FALSE( a.compare_exchange_weak( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
442 EXPECT_EQ( p, arr + 5 );
443 EXPECT_EQ( *reinterpret_cast<char *>(p), 6 );
445 EXPECT_TRUE( a.compare_exchange_strong( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
446 EXPECT_EQ( p, arr + 5 );
447 EXPECT_EQ( *reinterpret_cast<char *>(p), 6 );
448 EXPECT_FALSE( a.compare_exchange_strong( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
449 EXPECT_EQ( p, arr + 3 );
450 EXPECT_EQ( *reinterpret_cast<char *>(p), 4 );
452 EXPECT_EQ( reinterpret_cast<char *>(a.exchange( (void *) arr, order )), arr + 3 );
453 EXPECT_EQ( reinterpret_cast<char *>(a.load( oLoad )), arr );
454 EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), 1 );
456 for ( char i = 1; i < aSize; ++i ) {
457 EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), i );
458 a.fetch_add( 1, order );
459 EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), i + 1 );
462 for ( char i = aSize; i > 1; --i ) {
463 EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), i );
464 a.fetch_sub( 1, order );
465 EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), i - 1 );
469 template <bool Volatile>
470 void do_test_atomic_pointer_void()
472 typedef typename add_volatile<atomics::atomic< void *>, Volatile>::type atomic_pointer;
475 const char aSize = sizeof(arr)/sizeof(arr[0]);
476 for ( char i = 0; i < aSize; ++i ) {
477 arr[unsigned(i)] = i + 1;
483 a.store( (void *) arr );
484 EXPECT_EQ( *reinterpret_cast<char *>(a.load()), 1 );
487 EXPECT_TRUE( a.compare_exchange_weak( p, (void *)(arr + 5)));
488 EXPECT_EQ( p, arr + 0 );
489 EXPECT_FALSE( a.compare_exchange_weak( p, (void *)(arr + 3)));
490 EXPECT_EQ( p, arr + 5 );
492 EXPECT_TRUE( a.compare_exchange_strong( p, (void *)(arr + 3)));
493 EXPECT_EQ( p, arr + 5 );
494 EXPECT_FALSE( a.compare_exchange_strong( p, (void *)(arr + 5)));
495 EXPECT_EQ( p, arr + 3 );
497 EXPECT_EQ( reinterpret_cast<char *>( a.exchange( (void *) arr )), arr + 3 );
498 EXPECT_EQ( reinterpret_cast<char *>( a.load()), arr );
499 EXPECT_EQ( *reinterpret_cast<char *>( a.load()), 1 );
501 for ( char i = 1; i < aSize; ++i ) {
502 EXPECT_EQ( *reinterpret_cast<char *>(a.load()), i );
504 EXPECT_EQ( *reinterpret_cast<char *>(a.load()), i + 1 );
507 for ( char i = aSize; i > 1; --i ) {
508 EXPECT_EQ( *reinterpret_cast<char *>(a.load()), i );
510 EXPECT_EQ( *reinterpret_cast<char *>(a.load()), i - 1 );
513 do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_relaxed );
514 do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acquire );
515 do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_release );
516 do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acq_rel );
517 do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_seq_cst );
520 template <typename Atomic, typename Integral>
521 void test_atomic_pointer_for_( Atomic& a, Integral * arr, Integral aSize, atomics::memory_order order )
523 typedef Integral integral_type;
524 atomics::memory_order oLoad = convert_to_load_order(order);
525 atomics::memory_order oStore = convert_to_store_order(order);
528 a.store( arr, oStore );
529 EXPECT_EQ( *a.load( oLoad ), 1 );
532 EXPECT_TRUE( a.compare_exchange_weak( p, arr + 5, order, atomics::memory_order_relaxed ));
533 EXPECT_EQ( p, arr + 0 );
535 EXPECT_FALSE( a.compare_exchange_weak( p, arr + 3, order, atomics::memory_order_relaxed ));
536 EXPECT_EQ( p, arr + 5 );
539 EXPECT_TRUE( a.compare_exchange_strong( p, arr + 3, order, atomics::memory_order_relaxed ));
540 EXPECT_EQ( p, arr + 5 );
542 EXPECT_FALSE( a.compare_exchange_strong( p, arr + 5, order, atomics::memory_order_relaxed ));
543 EXPECT_EQ( p, arr + 3 );
546 EXPECT_EQ( a.exchange( arr, order ), arr + 3 );
547 EXPECT_EQ( a.load( oLoad ), arr );
548 EXPECT_EQ( *a.load( oLoad ), 1 );
550 for ( integral_type i = 1; i < aSize; ++i ) {
551 integral_type * p = a.load();
553 EXPECT_EQ( a.fetch_add( 1, order ), p );
554 EXPECT_EQ( *a.load( oLoad ), i + 1 );
557 for ( integral_type i = aSize; i > 1; --i ) {
558 integral_type * p = a.load();
560 EXPECT_EQ( a.fetch_sub( 1, order ), p );
561 EXPECT_EQ( *a.load( oLoad ), i - 1 );
565 template <typename Integral, bool Volatile>
566 void test_atomic_pointer_for()
568 typedef Integral integral_type;
569 typedef typename add_volatile<atomics::atomic< integral_type *>, Volatile>::type atomic_pointer;
571 integral_type arr[8];
572 const integral_type aSize = sizeof(arr)/sizeof(arr[0]);
573 for ( integral_type i = 0; i < aSize; ++i ) {
574 arr[size_t(i)] = i + 1;
581 EXPECT_EQ( *a.load(), 1 );
584 EXPECT_TRUE( a.compare_exchange_weak( p, arr + 5 ));
585 EXPECT_EQ( p, arr + 0 );
587 EXPECT_FALSE( a.compare_exchange_weak( p, arr + 3 ));
588 EXPECT_EQ( p, arr + 5 );
591 EXPECT_TRUE( a.compare_exchange_strong( p, arr + 3 ));
592 EXPECT_EQ( p, arr + 5 );
594 EXPECT_FALSE( a.compare_exchange_strong( p, arr + 5 ));
595 EXPECT_EQ( p, arr + 3 );
598 EXPECT_EQ( a.exchange( arr ), arr + 3 );
599 EXPECT_EQ( a.load(), arr );
600 EXPECT_EQ( *a.load(), 1 );
602 for ( integral_type i = 1; i < aSize; ++i ) {
603 integral_type * p = a.load();
605 integral_type * pa = a.fetch_add( 1 );
607 EXPECT_EQ( *a.load(), i + 1 );
610 for ( integral_type i = aSize; i > 1; --i ) {
611 integral_type * p = a.load();
613 EXPECT_EQ( a.fetch_sub( 1 ), p );
614 EXPECT_EQ( *a.load(), i - 1 );
617 test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_relaxed );
618 test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acquire );
619 test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_release );
620 test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acq_rel );
621 test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_seq_cst );
625 void test_atomic_flag()
627 // Array to test different alignment
629 atomics::atomic_flag flags[8];
630 for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
631 do_test_atomic_flag( flags[i] );
634 void test_atomic_flag_volatile()
636 // Array to test different alignment
638 atomics::atomic_flag volatile flags[8];
639 for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
640 do_test_atomic_flag( flags[i] );
643 template <typename AtomicBool>
644 void test_atomic_bool_()
646 // Array to test different alignment
649 for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
650 do_test_atomic_bool( a[i] );
652 do_test_atomic_bool( a[i], atomics::memory_order_relaxed );
653 //do_test_atomic_bool( a[i], atomics::memory_order_consume );
654 do_test_atomic_bool( a[i], atomics::memory_order_acquire );
655 do_test_atomic_bool( a[i], atomics::memory_order_release );
656 do_test_atomic_bool( a[i], atomics::memory_order_acq_rel );
657 do_test_atomic_bool( a[i], atomics::memory_order_seq_cst );
661 void test_atomic_bool()
663 test_atomic_bool_< atomics::atomic<bool> >();
665 void test_atomic_bool_volatile()
667 test_atomic_bool_< atomics::atomic<bool> volatile >();
671 TEST_F( cxx11_atomic_class, atomic_char )
673 test_atomic_integral<char>();
676 TEST_F( cxx11_atomic_class, atomic_signed_char )
678 test_atomic_integral<signed char>();
681 TEST_F( cxx11_atomic_class, atomic_unsigned_char )
683 test_atomic_integral<unsigned char>();
686 TEST_F( cxx11_atomic_class, atomic_short_int )
688 test_atomic_integral<short int>();
691 TEST_F( cxx11_atomic_class, atomic_signed_short_int )
693 test_atomic_integral<signed short int>();
696 TEST_F( cxx11_atomic_class, atomic_unsigned_short_int )
698 test_atomic_integral<unsigned short int>();
701 TEST_F( cxx11_atomic_class, atomic_int )
703 test_atomic_integral<int>();
706 TEST_F( cxx11_atomic_class, atomic_unsigned_int )
708 test_atomic_integral<unsigned int>();
711 TEST_F( cxx11_atomic_class, atomic_long )
713 test_atomic_integral<long>();
716 TEST_F( cxx11_atomic_class, atomic_unsigned_long )
718 test_atomic_integral<unsigned long>();
721 TEST_F( cxx11_atomic_class, atomic_long_long )
723 test_atomic_integral<long long>();
726 TEST_F( cxx11_atomic_class, atomic_unsigned_long_long )
728 test_atomic_integral<unsigned long long>();
731 TEST_F( cxx11_atomic_class, atomic_char_volatile )
733 test_atomic_integral_volatile<char>();
736 TEST_F( cxx11_atomic_class, atomic_signed_char_volatile )
738 test_atomic_integral_volatile<signed char>();
741 TEST_F( cxx11_atomic_class, atomic_unsigned_char_volatile )
743 test_atomic_integral_volatile<unsigned char>();
746 TEST_F( cxx11_atomic_class, atomic_short_int_volatile )
748 test_atomic_integral_volatile<short int>();
751 TEST_F( cxx11_atomic_class, atomic_signed_short_int_volatile )
753 test_atomic_integral_volatile<signed short int>();
756 TEST_F( cxx11_atomic_class, atomic_unsigned_short_int_volatile )
758 test_atomic_integral_volatile<unsigned short int>();
761 TEST_F( cxx11_atomic_class, atomic_int_volatile )
763 test_atomic_integral_volatile<int>();
766 TEST_F( cxx11_atomic_class, atomic_unsigned_int_volatile )
768 test_atomic_integral_volatile<unsigned int>();
771 TEST_F( cxx11_atomic_class, atomic_long_volatile )
773 test_atomic_integral_volatile<long>();
776 TEST_F( cxx11_atomic_class, atomic_unsigned_long_volatile )
778 test_atomic_integral_volatile<unsigned long>();
781 TEST_F( cxx11_atomic_class, atomic_long_long_volatile )
783 test_atomic_integral_volatile<long long>();
786 TEST_F( cxx11_atomic_class, atomic_unsigned_long_long_volatile )
788 test_atomic_integral_volatile<unsigned long long>();
#if !( CDS_COMPILER == CDS_COMPILER_CLANG && CDS_COMPILER_VERSION < 40000 )
// clang < 4.0 errors on atomic<void*> fetch_add/fetch_sub
TEST_F( cxx11_atomic_class, atomic_pointer_void )
{
    do_test_atomic_pointer_void<false>();
}

TEST_F( cxx11_atomic_class, atomic_pointer_void_volatile )
{
    do_test_atomic_pointer_void<true>();
}
#endif
804 TEST_F( cxx11_atomic_class, atomic_pointer_char )
806 test_atomic_pointer_for<char, false>();
809 TEST_F( cxx11_atomic_class, atomic_pointer_char_volatile )
811 test_atomic_pointer_for<char, true>();
814 TEST_F( cxx11_atomic_class, atomic_pointer_short )
816 test_atomic_pointer_for<short int, false>();
819 TEST_F( cxx11_atomic_class, atomic_pointer_short_volatile )
821 test_atomic_pointer_for<short int, true>();
824 TEST_F( cxx11_atomic_class, atomic_pointer_int )
826 test_atomic_pointer_for<int, false>();
829 TEST_F( cxx11_atomic_class, atomic_pointer_int_volatile )
831 test_atomic_pointer_for<int, true>();
834 TEST_F( cxx11_atomic_class, atomic_pointer_long )
836 test_atomic_pointer_for<long, false>();
839 TEST_F( cxx11_atomic_class, atomic_pointer_long_volatile )
841 test_atomic_pointer_for<long, true>();
844 TEST_F( cxx11_atomic_class, atomic_pointer_long_long )
846 test_atomic_pointer_for<long long, false>();
849 TEST_F( cxx11_atomic_class, atomic_pointer_long_long_volatile )
851 test_atomic_pointer_for<long long, true>();