2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include <gtest/gtest.h>
32 #include <cds/algo/atomic.h>
33 #include "cxx11_convert_memory_order.h"
// Asserts that the given atomic object reports a lock-free implementation.
35 #define EXPECT_ATOMIC_IS_LOCK_FREE( x ) EXPECT_TRUE( x.is_lock_free() )
// GoogleTest fixture for the C++11-style atomic API exposed by cds/algo/atomic.h.
38 class cxx11_atomic_class: public ::testing::Test
// Exercises atomic_flag test_and_set() with an explicit memory order.
// mo_clear is the store-compatible order derived from `order`;
// NOTE(review): the matching clear( mo_clear ) call is not visible in this
// view of the file — confirm against the full source.
41 template <typename AtomicFlag>
42 void do_test_atomic_flag_mo( AtomicFlag& f, atomics::memory_order order )
44 atomics::memory_order mo_clear = convert_to_store_order(order);
45 for ( int i = 0; i < 5; ++i ) {
// First call on a clear flag returns the previous value (false)...
46 EXPECT_TRUE( !f.test_and_set( order ));
// ...and a second call observes the flag already set.
47 EXPECT_TRUE( f.test_and_set( order ));
// Exercises atomic_flag with default (seq_cst) ordering, then re-runs the
// same checks for each explicit memory order via do_test_atomic_flag_mo().
52 template <typename AtomicFlag>
53 void do_test_atomic_flag( AtomicFlag& f)
57 for ( int i = 0; i < 5; ++i ) {
// test_and_set() returns the flag's previous value: false first, true after.
58 EXPECT_TRUE( !f.test_and_set());
59 EXPECT_TRUE( f.test_and_set());
63 do_test_atomic_flag_mo( f, atomics::memory_order_relaxed );
// memory_order_consume is intentionally skipped (commented out upstream).
64 //do_test_atomic_flag_mo( f, atomics::memory_order_consume );
65 do_test_atomic_flag_mo( f, atomics::memory_order_acquire );
66 do_test_atomic_flag_mo( f, atomics::memory_order_release );
67 do_test_atomic_flag_mo( f, atomics::memory_order_acq_rel );
68 do_test_atomic_flag_mo( f, atomics::memory_order_seq_cst );
// Core store/load/exchange/CAS checks for an atomic integral type using the
// default (seq_cst) memory ordering. The value 42 is shifted into each byte
// position of the integral type to exercise every byte lane.
71 template <class Atomic, typename Integral>
72 void do_test_atomic_type(Atomic& a)
74 typedef Integral integral_type;
76 EXPECT_ATOMIC_IS_LOCK_FREE( a );
77 a.store( (integral_type) 0 );
78 EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));
// exchange(): returns the previous value and stores the new one.
80 for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
81 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
82 EXPECT_EQ( a.exchange( n ), static_cast<integral_type>( 0 ));
83 EXPECT_EQ( a.load(), n );
84 EXPECT_EQ( a.exchange( (integral_type) 0 ), n );
85 EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));
// compare_exchange_weak(): success leaves `expected` unchanged; failure
// loads the current value into `expected`.
88 integral_type prev = a.load();
89 for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
90 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
91 integral_type expected = prev;
93 EXPECT_TRUE( a.compare_exchange_weak( expected, n));
94 EXPECT_EQ( expected, prev );
95 EXPECT_FALSE( a.compare_exchange_weak( expected, n));
96 EXPECT_EQ( expected, n );
99 EXPECT_EQ( a.load(), n );
// Reset via assignment operator, then repeat with compare_exchange_strong().
102 a = (integral_type) 0;
105 for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
106 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
107 integral_type expected = prev;
109 EXPECT_TRUE( a.compare_exchange_strong( expected, n));
110 EXPECT_EQ( expected, prev );
111 EXPECT_FALSE( a.compare_exchange_strong( expected, n));
112 EXPECT_EQ( expected, n );
115 EXPECT_EQ( a.load(), n );
118 EXPECT_EQ( a.exchange( (integral_type) 0 ), prev );
// Arithmetic/bitwise RMW checks (fetch_add/sub/or/and/xor and the compound
// assignment operators) for an atomic integral type with default ordering.
// Runs the basic type checks first via do_test_atomic_type().
121 template <class Atomic, typename Integral>
122 void do_test_atomic_integral(Atomic& a)
124 do_test_atomic_type< Atomic, Integral >(a);
126 typedef Integral integral_type;
129 a.store( (integral_type) 0 );
// fetch_add: returns the value held before the addition.
132 for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
134 integral_type prev = a.load();
135 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
137 EXPECT_EQ( a.fetch_add(n), prev);
// fetch_sub: walk the byte lanes back down to zero.
141 for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
143 integral_type prev = a.load();
144 integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
146 EXPECT_EQ( a.fetch_sub(n), prev);
148 EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));
150 // fetch_or / fetch_xor / fetch_and
151 for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
153 integral_type prev = a.load() ;;
154 integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
// fetch_or sets the bit; the reloaded value must have it set.
156 EXPECT_EQ( a.fetch_or( mask ), prev );
158 EXPECT_EQ( ( prev & mask), mask);
// fetch_and with ~mask clears the bit again.
160 EXPECT_EQ( a.fetch_and( (integral_type) ~mask ), prev );
162 EXPECT_EQ( integral_type(prev & mask), integral_type(0));
// fetch_xor toggles the (now clear) bit back on.
164 EXPECT_EQ( a.fetch_xor( mask ), prev );
166 EXPECT_EQ( integral_type( prev & mask), mask);
// After setting every bit, the value is all-ones.
168 EXPECT_EQ( a.load(), (integral_type) -1 );
// Repeat the same sequence through the compound operators (+=, -=, |=, &=, ^=),
// which return the updated value rather than the previous one.
172 a = (integral_type) 0;
175 for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
177 integral_type prev = a;
178 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
180 EXPECT_EQ( (a += n), (prev + n));
184 for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
186 integral_type prev = a;
187 integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
189 EXPECT_EQ( (a -= n), prev - n );
191 EXPECT_EQ( a.load(), (integral_type) 0 );
194 for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
196 integral_type prev = a;
197 integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
199 EXPECT_EQ( (a |= mask ), (prev | mask ));
201 EXPECT_EQ( ( prev & mask), mask);
203 EXPECT_EQ( (a &= (integral_type) ~mask ), ( prev & (integral_type) ~mask ));
205 EXPECT_EQ( ( prev & mask), integral_type( 0 ));
207 EXPECT_EQ( (a ^= mask ), (prev ^ mask ));
209 EXPECT_EQ( ( prev & mask), mask);
211 EXPECT_EQ( a.load(), (integral_type) -1 );
// Same store/load/exchange/CAS checks as the no-argument overload, but with
// an explicit memory order. Loads/stores use orders converted to be legal
// for the respective operation; CAS failure order is always relaxed.
214 template <class Atomic, typename Integral>
215 void do_test_atomic_type( Atomic& a, atomics::memory_order order )
217 typedef Integral integral_type;
219 const atomics::memory_order oLoad = convert_to_load_order( order );
220 const atomics::memory_order oStore = convert_to_store_order( order );
222 EXPECT_ATOMIC_IS_LOCK_FREE( a );
223 a.store((integral_type) 0, oStore );
224 EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
// exchange() with explicit order returns the previous value.
226 for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
227 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
228 EXPECT_EQ( a.exchange( n, order ), integral_type( 0 ));
229 EXPECT_EQ( a.load( oLoad ), n );
230 EXPECT_EQ( a.exchange( (integral_type) 0, order ), n );
231 EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
// compare_exchange_weak: success keeps `expected`; failure rewrites it.
234 integral_type prev = a.load( oLoad );
235 for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
236 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
237 integral_type expected = prev;
239 EXPECT_TRUE( a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
240 EXPECT_EQ( expected, prev );
241 EXPECT_FALSE( a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
242 EXPECT_EQ( expected, n );
245 EXPECT_EQ( a.load( oLoad ), n );
248 a.store( (integral_type) 0, oStore );
// Repeat with compare_exchange_strong.
250 prev = a.load( oLoad );
251 for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
252 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
253 integral_type expected = prev;
255 EXPECT_TRUE( a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
256 EXPECT_EQ( expected, prev );
257 EXPECT_FALSE( a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
258 EXPECT_EQ( expected, n );
261 EXPECT_EQ( a.load( oLoad ), n );
264 EXPECT_EQ( a.exchange( (integral_type) 0, order ), prev );
// RMW checks (fetch_add/sub/or/and/xor) with an explicit memory order.
// Delegates the basic type checks to the order-taking do_test_atomic_type().
267 template <class Atomic, typename Integral>
268 void do_test_atomic_integral( Atomic& a, atomics::memory_order order )
270 do_test_atomic_type< Atomic, Integral >( a, order );
272 typedef Integral integral_type;
274 const atomics::memory_order oLoad = convert_to_load_order( order );
275 const atomics::memory_order oStore = convert_to_store_order( order );
278 a.store( (integral_type) 0, oStore );
// fetch_add returns the pre-addition value.
281 for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
283 integral_type prev = a.load( oLoad );
284 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
286 EXPECT_EQ( a.fetch_add( n, order), prev);
// fetch_sub walks the value back down to zero.
290 for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
292 integral_type prev = a.load( oLoad );
293 integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
295 EXPECT_EQ( a.fetch_sub( n, order ), prev);
297 EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
299 // fetch_or / fetch_xor / fetch_and
300 for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
302 integral_type prev = a.load( oLoad ) ;;
303 integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
// Set the bit, verify it is observed set after reload.
305 EXPECT_EQ( a.fetch_or( mask, order ), prev );
306 prev = a.load( oLoad );
307 EXPECT_EQ( ( prev & mask), mask);
// Clear the bit with fetch_and( ~mask ).
309 EXPECT_EQ( a.fetch_and( (integral_type) ~mask, order ), prev );
310 prev = a.load( oLoad );
311 EXPECT_EQ( ( prev & mask), integral_type( 0 ));
// Toggle it back on with fetch_xor.
313 EXPECT_EQ( a.fetch_xor( mask, order ), prev );
314 prev = a.load( oLoad );
315 EXPECT_EQ( ( prev & mask), mask);
// Every bit has been set once: the final value is all-ones.
317 EXPECT_EQ( a.load( oLoad ), (integral_type) -1 );
// Runs the integral test suite once with default ordering and once per
// explicit memory order (consume is deliberately not exercised).
322 template <typename Atomic, typename Integral>
323 void test_atomic_integral_(Atomic& a)
325 do_test_atomic_integral<Atomic, Integral >(a);
327 do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_relaxed );
328 do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acquire );
329 do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_release );
330 do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acq_rel );
331 do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_seq_cst );
// Instantiates the integral suite over an array of atomics so that
// different alignments/addresses are exercised.
334 template <typename Integral>
335 void test_atomic_integral()
337 typedef atomics::atomic<Integral> atomic_type;
340 for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
341 test_atomic_integral_<atomic_type, Integral>( a[i] );
// Same as test_atomic_integral() but through a volatile-qualified atomic,
// to verify the volatile overloads of the atomic API.
344 template <typename Integral>
345 void test_atomic_integral_volatile()
347 typedef atomics::atomic<Integral> volatile atomic_type;
350 for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
351 test_atomic_integral_<atomic_type, Integral>( a[i] );
// Exercises atomic<bool>: load/exchange and weak/strong CAS with default
// (seq_cst) ordering.
355 template <class AtomicBool>
356 void do_test_atomic_bool( AtomicBool& a )
358 EXPECT_ATOMIC_IS_LOCK_FREE( a );
361 EXPECT_FALSE( a.load());
// exchange() returns the previous value.
363 EXPECT_FALSE( a.exchange( true ));
364 EXPECT_TRUE( a.load());
365 EXPECT_TRUE( a.exchange( false ));
366 EXPECT_FALSE( a.load());
// CAS success leaves `expected` unchanged; failure stores the current value.
368 bool expected = false;
369 EXPECT_TRUE( a.compare_exchange_weak( expected, true));
370 EXPECT_FALSE( expected );
371 EXPECT_FALSE( a.compare_exchange_weak( expected, false));
372 EXPECT_TRUE( expected );
373 EXPECT_TRUE( a.load());
// Repeat the CAS sequence with compare_exchange_strong.
378 EXPECT_TRUE( a.compare_exchange_strong( expected, true));
379 EXPECT_FALSE( expected );
380 EXPECT_FALSE( a.compare_exchange_strong( expected, false));
381 EXPECT_TRUE( expected );
383 EXPECT_TRUE( a.load());
385 EXPECT_TRUE( a.exchange( false ));
// atomic<bool> checks with an explicit memory order; load/store/exchange
// orders are converted to be legal for each operation, CAS failure order
// is always relaxed.
388 template <class AtomicBool>
389 void do_test_atomic_bool( AtomicBool& a, atomics::memory_order order )
391 const atomics::memory_order oLoad = convert_to_load_order( order );
392 const atomics::memory_order oStore = convert_to_store_order( order );
393 const atomics::memory_order oExchange = convert_to_exchange_order( order );
395 EXPECT_ATOMIC_IS_LOCK_FREE( a );
396 a.store( false, oStore );
398 EXPECT_FALSE( a.load( oLoad ));
400 EXPECT_FALSE( a.exchange( true, oExchange ));
401 EXPECT_TRUE( a.load( oLoad ));
402 EXPECT_TRUE( a.exchange( false, oExchange ));
403 EXPECT_FALSE( a.load( oLoad ));
// Weak CAS: success preserves `expected`, failure loads the current value.
405 bool expected = false;
406 EXPECT_TRUE( a.compare_exchange_weak( expected, true, order, atomics::memory_order_relaxed));
407 EXPECT_FALSE( expected );
408 EXPECT_FALSE( a.compare_exchange_weak( expected, false, order, atomics::memory_order_relaxed));
409 EXPECT_TRUE( expected );
410 EXPECT_TRUE( a.load( oLoad ));
413 a.store( false, oStore );
// Repeat with strong CAS.
416 EXPECT_TRUE( a.compare_exchange_strong( expected, true, order, atomics::memory_order_relaxed));
417 EXPECT_FALSE( expected );
418 EXPECT_FALSE( a.compare_exchange_strong( expected, false, order, atomics::memory_order_relaxed));
419 EXPECT_TRUE( expected );
421 EXPECT_TRUE( a.load( oLoad ));
423 EXPECT_TRUE( a.exchange( false, oExchange ));
// atomic<void*> checks with an explicit memory order: the pointer walks
// through elements of `arr` (arr[i] == i + 1) via CAS and exchange.
// NOTE(review): the declaration of `p` (a void* loaded from `a`) is elided
// in this view of the file — confirm against the full source.
427 template <typename Atomic>
428 void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, atomics::memory_order order )
430 atomics::memory_order oLoad = convert_to_load_order(order);
431 atomics::memory_order oStore = convert_to_store_order(order);
434 a.store( (void *) arr, oStore );
// arr[0] == 1, so dereferencing the stored pointer yields 1.
435 EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), 1 );
438 EXPECT_TRUE( a.compare_exchange_weak( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
439 EXPECT_EQ( p, arr + 0 );
440 EXPECT_EQ( *reinterpret_cast<char *>(p), 1 );
// Failed CAS updates p to the current value (arr + 5, whose content is 6).
441 EXPECT_FALSE( a.compare_exchange_weak( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
442 EXPECT_EQ( p, arr + 5 );
443 EXPECT_EQ( *reinterpret_cast<char *>(p), 6 );
445 EXPECT_TRUE( a.compare_exchange_strong( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
446 EXPECT_EQ( p, arr + 5 );
447 EXPECT_EQ( *reinterpret_cast<char *>(p), 6 );
448 EXPECT_FALSE( a.compare_exchange_strong( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
449 EXPECT_EQ( p, arr + 3 );
450 EXPECT_EQ( *reinterpret_cast<char *>(p), 4 );
// exchange back to arr: previous value was arr + 3.
452 EXPECT_EQ( reinterpret_cast<char *>(a.exchange( (void *) arr, order )), arr + 3 );
453 EXPECT_EQ( reinterpret_cast<char *>(a.load( oLoad )), arr );
454 EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), 1 );
// Driver for atomic<void*> tests; Volatile selects the volatile-qualified
// atomic via the add_volatile helper. Fills arr with arr[i] == i + 1, runs
// default-order checks, then each explicit memory order.
457 template <bool Volatile>
458 void do_test_atomic_pointer_void()
460 typedef typename add_volatile<atomics::atomic< void *>, Volatile>::type atomic_pointer;
463 const char aSize = sizeof(arr)/sizeof(arr[0]);
464 for ( char i = 0; i < aSize; ++i ) {
465 arr[static_cast<unsigned>( i )] = i + 1;
471 a.store( (void *) arr );
472 EXPECT_EQ( *reinterpret_cast<char *>(a.load()), 1 );
// Weak CAS: success keeps p; failure rewrites p with the current value.
475 EXPECT_TRUE( a.compare_exchange_weak( p, (void *)(arr + 5)));
476 EXPECT_EQ( p, arr + 0 );
477 EXPECT_FALSE( a.compare_exchange_weak( p, (void *)(arr + 3)));
478 EXPECT_EQ( p, arr + 5 );
480 EXPECT_TRUE( a.compare_exchange_strong( p, (void *)(arr + 3)));
481 EXPECT_EQ( p, arr + 5 );
482 EXPECT_FALSE( a.compare_exchange_strong( p, (void *)(arr + 5)));
483 EXPECT_EQ( p, arr + 3 );
485 EXPECT_EQ( reinterpret_cast<char *>( a.exchange( (void *) arr )), arr + 3 );
486 EXPECT_EQ( reinterpret_cast<char *>( a.load()), arr );
487 EXPECT_EQ( *reinterpret_cast<char *>( a.load()), 1 );
// Re-run the whole sequence under each explicit memory order.
489 do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_relaxed );
490 do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acquire );
491 do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_release );
492 do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acq_rel );
493 do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_seq_cst );
// atomic<Integral*> checks with an explicit memory order: CAS/exchange move
// the pointer through `arr` (arr[i] == i + 1), then fetch_add/fetch_sub
// step it element-by-element — pointer arithmetic scales by sizeof(Integral).
496 template <typename Atomic, typename Integral>
497 void test_atomic_pointer_for_( Atomic& a, Integral * arr, Integral aSize, atomics::memory_order order )
499 typedef Integral integral_type;
500 atomics::memory_order oLoad = convert_to_load_order(order);
501 atomics::memory_order oStore = convert_to_store_order(order);
504 a.store( arr, oStore );
505 EXPECT_EQ( *a.load( oLoad ), 1 );
508 EXPECT_TRUE( a.compare_exchange_weak( p, arr + 5, order, atomics::memory_order_relaxed ));
509 EXPECT_EQ( p, arr + 0 );
// Failed CAS updates p with the current pointer value.
511 EXPECT_FALSE( a.compare_exchange_weak( p, arr + 3, order, atomics::memory_order_relaxed ));
512 EXPECT_EQ( p, arr + 5 );
515 EXPECT_TRUE( a.compare_exchange_strong( p, arr + 3, order, atomics::memory_order_relaxed ));
516 EXPECT_EQ( p, arr + 5 );
518 EXPECT_FALSE( a.compare_exchange_strong( p, arr + 5, order, atomics::memory_order_relaxed ));
519 EXPECT_EQ( p, arr + 3 );
522 EXPECT_EQ( a.exchange( arr, order ), arr + 3 );
523 EXPECT_EQ( a.load( oLoad ), arr );
524 EXPECT_EQ( *a.load( oLoad ), 1 );
// fetch_add(1) returns the previous pointer; the new target holds i + 1.
526 for ( integral_type i = 1; i < aSize; ++i ) {
527 integral_type * p = a.load();
529 EXPECT_EQ( a.fetch_add( 1, order ), p );
530 EXPECT_EQ( *a.load( oLoad ), i + 1 );
// fetch_sub(1) walks the pointer back to the start of the array.
533 for ( integral_type i = aSize; i > 1; --i ) {
534 integral_type * p = a.load();
536 EXPECT_EQ( a.fetch_sub( 1, order ), p );
537 EXPECT_EQ( *a.load( oLoad ), i - 1 );
// Driver for atomic<Integral*> tests; Volatile selects the volatile-qualified
// atomic. Fills an 8-element array (arr[i] == i + 1), runs default-order
// checks, then each explicit memory order via test_atomic_pointer_for_().
541 template <typename Integral, bool Volatile>
542 void test_atomic_pointer_for()
544 typedef Integral integral_type;
545 typedef typename add_volatile<atomics::atomic< integral_type *>, Volatile>::type atomic_pointer;
547 integral_type arr[8];
548 const integral_type aSize = sizeof(arr)/sizeof(arr[0]);
549 for ( integral_type i = 0; i < aSize; ++i ) {
550 arr[static_cast<size_t>(i)] = i + 1;
557 EXPECT_EQ( *a.load(), 1 );
// Weak CAS: success keeps p, failure loads the current pointer into p.
560 EXPECT_TRUE( a.compare_exchange_weak( p, arr + 5 ));
561 EXPECT_EQ( p, arr + 0 );
563 EXPECT_FALSE( a.compare_exchange_weak( p, arr + 3 ));
564 EXPECT_EQ( p, arr + 5 );
567 EXPECT_TRUE( a.compare_exchange_strong( p, arr + 3 ));
568 EXPECT_EQ( p, arr + 5 );
570 EXPECT_FALSE( a.compare_exchange_strong( p, arr + 5 ));
571 EXPECT_EQ( p, arr + 3 );
574 EXPECT_EQ( a.exchange( arr ), arr + 3 );
575 EXPECT_EQ( a.load(), arr );
576 EXPECT_EQ( *a.load(), 1 );
// Step the pointer forward one element at a time with fetch_add(1).
578 for ( integral_type i = 1; i < aSize; ++i ) {
579 integral_type * p = a.load();
581 integral_type * pa = a.fetch_add( 1 );
583 EXPECT_EQ( *a.load(), i + 1 );
// ...and back again with fetch_sub(1).
586 for ( integral_type i = aSize; i > 1; --i ) {
587 integral_type * p = a.load();
589 EXPECT_EQ( a.fetch_sub( 1 ), p );
590 EXPECT_EQ( *a.load(), i - 1 );
// Repeat everything under each explicit memory order.
593 test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_relaxed );
594 test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acquire );
595 test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_release );
596 test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acq_rel );
597 test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_seq_cst );
// Runs the atomic_flag suite over an array so different alignments are hit.
601 void test_atomic_flag()
603 // Array to test different alignment
605 atomics::atomic_flag flags[8];
606 for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
607 do_test_atomic_flag( flags[i] );
// Same as test_atomic_flag() but through volatile-qualified atomic_flag,
// to verify the volatile overloads.
610 void test_atomic_flag_volatile()
612 // Array to test different alignment
614 atomics::atomic_flag volatile flags[8];
615 for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
616 do_test_atomic_flag( flags[i] );
// Runs the atomic<bool> suite over an array (alignment coverage), once with
// default ordering and once per explicit memory order (consume skipped).
619 template <typename AtomicBool>
620 void test_atomic_bool_()
622 // Array to test different alignment
625 for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
626 do_test_atomic_bool( a[i] );
628 do_test_atomic_bool( a[i], atomics::memory_order_relaxed );
629 //do_test_atomic_bool( a[i], atomics::memory_order_consume );
630 do_test_atomic_bool( a[i], atomics::memory_order_acquire );
631 do_test_atomic_bool( a[i], atomics::memory_order_release );
632 do_test_atomic_bool( a[i], atomics::memory_order_acq_rel );
633 do_test_atomic_bool( a[i], atomics::memory_order_seq_cst );
// Entry points instantiating the bool suite for plain and volatile atomics.
637 void test_atomic_bool()
639 test_atomic_bool_< atomics::atomic<bool> >();
641 void test_atomic_bool_volatile()
643 test_atomic_bool_< atomics::atomic<bool> volatile >();
// One TEST_F per built-in integral type, each delegating to the
// test_atomic_integral<T>() fixture helper.
647 TEST_F( cxx11_atomic_class, atomic_char )
649 test_atomic_integral<char>();
652 TEST_F( cxx11_atomic_class, atomic_signed_char )
654 test_atomic_integral<signed char>();
657 TEST_F( cxx11_atomic_class, atomic_unsigned_char )
659 test_atomic_integral<unsigned char>();
662 TEST_F( cxx11_atomic_class, atomic_short_int )
664 test_atomic_integral<short int>();
667 TEST_F( cxx11_atomic_class, atomic_signed_short_int )
669 test_atomic_integral<signed short int>();
672 TEST_F( cxx11_atomic_class, atomic_unsigned_short_int )
674 test_atomic_integral<unsigned short int>();
677 TEST_F( cxx11_atomic_class, atomic_int )
679 test_atomic_integral<int>();
682 TEST_F( cxx11_atomic_class, atomic_unsigned_int )
684 test_atomic_integral<unsigned int>();
687 TEST_F( cxx11_atomic_class, atomic_long )
689 test_atomic_integral<long>();
692 TEST_F( cxx11_atomic_class, atomic_unsigned_long )
694 test_atomic_integral<unsigned long>();
697 TEST_F( cxx11_atomic_class, atomic_long_long )
699 test_atomic_integral<long long>();
702 TEST_F( cxx11_atomic_class, atomic_unsigned_long_long )
704 test_atomic_integral<unsigned long long>();
// The same integral-type matrix, but through volatile-qualified atomics.
707 TEST_F( cxx11_atomic_class, atomic_char_volatile )
709 test_atomic_integral_volatile<char>();
712 TEST_F( cxx11_atomic_class, atomic_signed_char_volatile )
714 test_atomic_integral_volatile<signed char>();
717 TEST_F( cxx11_atomic_class, atomic_unsigned_char_volatile )
719 test_atomic_integral_volatile<unsigned char>();
722 TEST_F( cxx11_atomic_class, atomic_short_int_volatile )
724 test_atomic_integral_volatile<short int>();
727 TEST_F( cxx11_atomic_class, atomic_signed_short_int_volatile )
729 test_atomic_integral_volatile<signed short int>();
732 TEST_F( cxx11_atomic_class, atomic_unsigned_short_int_volatile )
734 test_atomic_integral_volatile<unsigned short int>();
737 TEST_F( cxx11_atomic_class, atomic_int_volatile )
739 test_atomic_integral_volatile<int>();
742 TEST_F( cxx11_atomic_class, atomic_unsigned_int_volatile )
744 test_atomic_integral_volatile<unsigned int>();
747 TEST_F( cxx11_atomic_class, atomic_long_volatile )
749 test_atomic_integral_volatile<long>();
752 TEST_F( cxx11_atomic_class, atomic_unsigned_long_volatile )
754 test_atomic_integral_volatile<unsigned long>();
757 TEST_F( cxx11_atomic_class, atomic_long_long_volatile )
759 test_atomic_integral_volatile<long long>();
762 TEST_F( cxx11_atomic_class, atomic_unsigned_long_long_volatile )
764 test_atomic_integral_volatile<unsigned long long>();
// Pointer-type tests: atomic<void*> plus atomic<T*> for several pointee
// types; the boolean template argument selects the volatile variant.
767 TEST_F( cxx11_atomic_class, atomic_pointer_void )
769 do_test_atomic_pointer_void<false>();
772 TEST_F( cxx11_atomic_class, atomic_pointer_void_volatile )
774 do_test_atomic_pointer_void<true>();
777 TEST_F( cxx11_atomic_class, atomic_pointer_char )
779 test_atomic_pointer_for<char, false>();
782 TEST_F( cxx11_atomic_class, atomic_pointer_char_volatile )
784 test_atomic_pointer_for<char, true>();
787 TEST_F( cxx11_atomic_class, atomic_pointer_short )
789 test_atomic_pointer_for<short int, false>();
792 TEST_F( cxx11_atomic_class, atomic_pointer_short_volatile )
794 test_atomic_pointer_for<short int, true>();
797 TEST_F( cxx11_atomic_class, atomic_pointer_int )
799 test_atomic_pointer_for<int, false>();
802 TEST_F( cxx11_atomic_class, atomic_pointer_int_volatile )
804 test_atomic_pointer_for<int, true>();
807 TEST_F( cxx11_atomic_class, atomic_pointer_long )
809 test_atomic_pointer_for<long, false>();
812 TEST_F( cxx11_atomic_class, atomic_pointer_long_volatile )
814 test_atomic_pointer_for<long, true>();
817 TEST_F( cxx11_atomic_class, atomic_pointer_long_long )
819 test_atomic_pointer_for<long long, false>();
822 TEST_F( cxx11_atomic_class, atomic_pointer_long_long_volatile )
824 test_atomic_pointer_for<long long, true>();