2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include <cds_test/ext_gtest.h>
32 #include <cds/algo/atomic.h>
33 #include "cxx11_convert_memory_order.h"
35 #define EXPECT_ATOMIC_IS_LOCK_FREE( x ) EXPECT_TRUE( x.is_lock_free())
38 class cxx11_atomic_class: public ::testing::Test
41 template <typename AtomicFlag>
42 void do_test_atomic_flag_mo( AtomicFlag& f, atomics::memory_order order )
44 atomics::memory_order mo_clear = convert_to_store_order(order);
45 for ( int i = 0; i < 5; ++i ) {
46 EXPECT_TRUE( !f.test_and_set( order ));
47 EXPECT_TRUE( f.test_and_set( order ));
52 template <typename AtomicFlag>
53 void do_test_atomic_flag( AtomicFlag& f)
57 for ( int i = 0; i < 5; ++i ) {
58 EXPECT_TRUE( !f.test_and_set());
59 EXPECT_TRUE( f.test_and_set());
63 do_test_atomic_flag_mo( f, atomics::memory_order_relaxed );
64 //do_test_atomic_flag_mo( f, atomics::memory_order_consume );
65 do_test_atomic_flag_mo( f, atomics::memory_order_acquire );
66 do_test_atomic_flag_mo( f, atomics::memory_order_release );
67 do_test_atomic_flag_mo( f, atomics::memory_order_acq_rel );
68 do_test_atomic_flag_mo( f, atomics::memory_order_seq_cst );
// Core load/store/exchange/CAS checks for an integral atomic using the
// default (seq_cst) memory order. The value pattern is 0x42 shifted into
// each byte in turn so every byte lane of the type is exercised.
template <class Atomic, typename Integral>
void do_test_atomic_type( Atomic& a )
{
    typedef Integral integral_type;

    EXPECT_ATOMIC_IS_LOCK_FREE( a );
    a.store( (integral_type) 0 );
    EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));

    // exchange: swap the pattern in and back out, byte lane by byte lane
    for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
        integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
        EXPECT_EQ( a.exchange( n ), static_cast<integral_type>( 0 ));
        EXPECT_EQ( a.load(), n );
        EXPECT_EQ( a.exchange( (integral_type) 0 ), n );
        EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));
    }

    // compare_exchange_weak: first CAS succeeds (expected == current),
    // second fails and writes the current value back into expected
    integral_type prev = a.load();
    for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
        integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
        integral_type expected = prev;

        EXPECT_TRUE( a.compare_exchange_weak( expected, n));
        EXPECT_EQ( expected, prev );
        EXPECT_FALSE( a.compare_exchange_weak( expected, n));
        EXPECT_EQ( expected, n );

        prev = n;
        EXPECT_EQ( a.load(), n );
    }

    a = (integral_type) 0;

    // compare_exchange_strong: same protocol as the weak form above
    prev = a.load();
    for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
        integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
        integral_type expected = prev;

        EXPECT_TRUE( a.compare_exchange_strong( expected, n));
        EXPECT_EQ( expected, prev );
        EXPECT_FALSE( a.compare_exchange_strong( expected, n));
        EXPECT_EQ( expected, n );

        prev = n;
        EXPECT_EQ( a.load(), n );
    }

    EXPECT_EQ( a.exchange( (integral_type) 0 ), prev );
}
121 template <class Atomic, typename Integral>
122 void do_test_atomic_integral(Atomic& a)
124 do_test_atomic_type< Atomic, Integral >(a);
126 typedef Integral integral_type;
129 a.store( (integral_type) 0 );
132 for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
134 integral_type prev = a.load();
135 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
137 EXPECT_EQ( a.fetch_add(n), prev);
141 for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
143 integral_type prev = a.load();
144 integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
146 EXPECT_EQ( a.fetch_sub(n), prev);
148 EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));
150 // fetch_or / fetc_xor / fetch_and
151 for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
153 integral_type prev = a.load() ;;
154 integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
156 EXPECT_EQ( a.fetch_or( mask ), prev );
158 EXPECT_EQ( ( prev & mask), mask);
160 EXPECT_EQ( a.fetch_and( (integral_type) ~mask ), prev );
162 EXPECT_EQ( integral_type(prev & mask), integral_type(0));
164 EXPECT_EQ( a.fetch_xor( mask ), prev );
166 EXPECT_EQ( integral_type( prev & mask), mask);
168 EXPECT_EQ( a.load(), (integral_type) -1 );
172 a = (integral_type) 0;
175 for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
177 integral_type prev = a;
178 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
180 EXPECT_EQ( (a += n), (prev + n));
184 for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
186 integral_type prev = a;
187 integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
189 EXPECT_EQ( (a -= n), prev - n );
191 EXPECT_EQ( a.load(), (integral_type) 0 );
194 for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
196 integral_type prev = a;
197 integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
199 EXPECT_EQ( (a |= mask ), (prev | mask ));
201 EXPECT_EQ( ( prev & mask), mask);
203 EXPECT_EQ( (a &= (integral_type) ~mask ), ( prev & (integral_type) ~mask ));
205 EXPECT_EQ( ( prev & mask), integral_type( 0 ));
207 EXPECT_EQ( (a ^= mask ), (prev ^ mask ));
209 EXPECT_EQ( ( prev & mask), mask);
211 EXPECT_EQ( a.load(), (integral_type) -1 );
214 template <class Atomic, typename Integral>
215 void do_test_atomic_type( Atomic& a, atomics::memory_order order )
217 typedef Integral integral_type;
219 const atomics::memory_order oLoad = convert_to_load_order( order );
220 const atomics::memory_order oStore = convert_to_store_order( order );
222 EXPECT_ATOMIC_IS_LOCK_FREE( a );
223 a.store((integral_type) 0, oStore );
224 EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
226 for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
227 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
228 EXPECT_EQ( a.exchange( n, order ), integral_type( 0 ));
229 EXPECT_EQ( a.load( oLoad ), n );
230 EXPECT_EQ( a.exchange( (integral_type) 0, order ), n );
231 EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
234 integral_type prev = a.load( oLoad );
235 for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
236 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
237 integral_type expected = prev;
239 EXPECT_TRUE( a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
240 EXPECT_EQ( expected, prev );
241 EXPECT_FALSE( a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
242 EXPECT_EQ( expected, n );
245 EXPECT_EQ( a.load( oLoad ), n );
248 a.store( (integral_type) 0, oStore );
250 prev = a.load( oLoad );
251 for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
252 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
253 integral_type expected = prev;
255 EXPECT_TRUE( a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
256 EXPECT_EQ( expected, prev );
257 EXPECT_FALSE( a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
258 EXPECT_EQ( expected, n );
261 EXPECT_EQ( a.load( oLoad ), n );
264 EXPECT_EQ( a.exchange( (integral_type) 0, order ), prev );
267 template <class Atomic, typename Integral>
268 void do_test_atomic_integral( Atomic& a, atomics::memory_order order )
270 do_test_atomic_type< Atomic, Integral >( a, order );
272 typedef Integral integral_type;
274 const atomics::memory_order oLoad = convert_to_load_order( order );
275 const atomics::memory_order oStore = convert_to_store_order( order );
278 a.store( (integral_type) 0, oStore );
281 for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
283 integral_type prev = a.load( oLoad );
284 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
286 EXPECT_EQ( a.fetch_add( n, order), prev);
290 for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
292 integral_type prev = a.load( oLoad );
293 integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
295 EXPECT_EQ( a.fetch_sub( n, order ), prev);
297 EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
299 // fetch_or / fetc_xor / fetch_and
300 for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
302 integral_type prev = a.load( oLoad ) ;;
303 integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
305 EXPECT_EQ( a.fetch_or( mask, order ), prev );
306 prev = a.load( oLoad );
307 EXPECT_EQ( ( prev & mask), mask);
309 EXPECT_EQ( a.fetch_and( (integral_type) ~mask, order ), prev );
310 prev = a.load( oLoad );
311 EXPECT_EQ( ( prev & mask), integral_type( 0 ));
313 EXPECT_EQ( a.fetch_xor( mask, order ), prev );
314 prev = a.load( oLoad );
315 EXPECT_EQ( ( prev & mask), mask);
317 EXPECT_EQ( a.load( oLoad ), (integral_type) -1 );
322 template <typename Atomic, typename Integral>
323 void test_atomic_integral_(Atomic& a)
325 do_test_atomic_integral<Atomic, Integral >(a);
327 do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_relaxed );
328 do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acquire );
329 do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_release );
330 do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acq_rel );
331 do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_seq_cst );
334 template <typename Integral>
335 void test_atomic_integral()
337 typedef atomics::atomic<Integral> atomic_type;
340 for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
341 test_atomic_integral_<atomic_type, Integral>( a[i] );
344 template <typename Integral>
345 void test_atomic_integral_volatile()
347 typedef atomics::atomic<Integral> volatile atomic_type;
350 for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
351 test_atomic_integral_<atomic_type, Integral>( a[i] );
355 template <class AtomicBool>
356 void do_test_atomic_bool( AtomicBool& a )
358 EXPECT_ATOMIC_IS_LOCK_FREE( a );
361 EXPECT_FALSE( a.load());
363 EXPECT_FALSE( a.exchange( true ));
364 EXPECT_TRUE( a.load());
365 EXPECT_TRUE( a.exchange( false ));
366 EXPECT_FALSE( a.load());
368 bool expected = false;
369 EXPECT_TRUE( a.compare_exchange_weak( expected, true));
370 EXPECT_FALSE( expected );
371 EXPECT_FALSE( a.compare_exchange_weak( expected, false));
372 EXPECT_TRUE( expected );
373 EXPECT_TRUE( a.load());
378 EXPECT_TRUE( a.compare_exchange_strong( expected, true));
379 EXPECT_FALSE( expected );
380 EXPECT_FALSE( a.compare_exchange_strong( expected, false));
381 EXPECT_TRUE( expected );
383 EXPECT_TRUE( a.load());
385 EXPECT_TRUE( a.exchange( false ));
388 template <class AtomicBool>
389 void do_test_atomic_bool( AtomicBool& a, atomics::memory_order order )
391 const atomics::memory_order oLoad = convert_to_load_order( order );
392 const atomics::memory_order oStore = convert_to_store_order( order );
393 const atomics::memory_order oExchange = convert_to_exchange_order( order );
395 EXPECT_ATOMIC_IS_LOCK_FREE( a );
396 a.store( false, oStore );
398 EXPECT_FALSE( a.load( oLoad ));
400 EXPECT_FALSE( a.exchange( true, oExchange ));
401 EXPECT_TRUE( a.load( oLoad ));
402 EXPECT_TRUE( a.exchange( false, oExchange ));
403 EXPECT_FALSE( a.load( oLoad ));
405 bool expected = false;
406 EXPECT_TRUE( a.compare_exchange_weak( expected, true, order, atomics::memory_order_relaxed));
407 EXPECT_FALSE( expected );
408 EXPECT_FALSE( a.compare_exchange_weak( expected, false, order, atomics::memory_order_relaxed));
409 EXPECT_TRUE( expected );
410 EXPECT_TRUE( a.load( oLoad ));
413 a.store( false, oStore );
416 EXPECT_TRUE( a.compare_exchange_strong( expected, true, order, atomics::memory_order_relaxed));
417 EXPECT_FALSE( expected );
418 EXPECT_FALSE( a.compare_exchange_strong( expected, false, order, atomics::memory_order_relaxed));
419 EXPECT_TRUE( expected );
421 EXPECT_TRUE( a.load( oLoad ));
423 EXPECT_TRUE( a.exchange( false, oExchange ));
427 template <typename Atomic>
428 void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, atomics::memory_order order )
432 atomics::memory_order oLoad = convert_to_load_order(order);
433 atomics::memory_order oStore = convert_to_store_order(order);
436 a.store( (void *) arr, oStore );
437 EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), 1 );
440 EXPECT_TRUE( a.compare_exchange_weak( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
441 EXPECT_EQ( p, arr + 0 );
442 EXPECT_EQ( *reinterpret_cast<char *>(p), 1 );
443 EXPECT_FALSE( a.compare_exchange_weak( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
444 EXPECT_EQ( p, arr + 5 );
445 EXPECT_EQ( *reinterpret_cast<char *>(p), 6 );
447 EXPECT_TRUE( a.compare_exchange_strong( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
448 EXPECT_EQ( p, arr + 5 );
449 EXPECT_EQ( *reinterpret_cast<char *>(p), 6 );
450 EXPECT_FALSE( a.compare_exchange_strong( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
451 EXPECT_EQ( p, arr + 3 );
452 EXPECT_EQ( *reinterpret_cast<char *>(p), 4 );
454 EXPECT_EQ( reinterpret_cast<char *>(a.exchange( (void *) arr, order )), arr + 3 );
455 EXPECT_EQ( reinterpret_cast<char *>(a.load( oLoad )), arr );
456 EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), 1 );
459 template <bool Volatile>
460 void do_test_atomic_pointer_void()
462 typedef typename add_volatile<atomics::atomic< void *>, Volatile>::type atomic_pointer;
465 const char aSize = sizeof(arr)/sizeof(arr[0]);
466 for ( char i = 0; i < aSize; ++i ) {
467 arr[static_cast<unsigned>( i )] = i + 1;
473 a.store( (void *) arr );
474 EXPECT_EQ( *reinterpret_cast<char *>(a.load()), 1 );
477 EXPECT_TRUE( a.compare_exchange_weak( p, (void *)(arr + 5)));
478 EXPECT_EQ( p, arr + 0 );
479 EXPECT_FALSE( a.compare_exchange_weak( p, (void *)(arr + 3)));
480 EXPECT_EQ( p, arr + 5 );
482 EXPECT_TRUE( a.compare_exchange_strong( p, (void *)(arr + 3)));
483 EXPECT_EQ( p, arr + 5 );
484 EXPECT_FALSE( a.compare_exchange_strong( p, (void *)(arr + 5)));
485 EXPECT_EQ( p, arr + 3 );
487 EXPECT_EQ( reinterpret_cast<char *>( a.exchange( (void *) arr )), arr + 3 );
488 EXPECT_EQ( reinterpret_cast<char *>( a.load()), arr );
489 EXPECT_EQ( *reinterpret_cast<char *>( a.load()), 1 );
491 do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_relaxed );
492 do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acquire );
493 do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_release );
494 do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acq_rel );
495 do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_seq_cst );
498 template <typename Atomic, typename Integral>
499 void test_atomic_pointer_for_( Atomic& a, Integral * arr, Integral aSize, atomics::memory_order order )
501 typedef Integral integral_type;
502 atomics::memory_order oLoad = convert_to_load_order(order);
503 atomics::memory_order oStore = convert_to_store_order(order);
506 a.store( arr, oStore );
507 EXPECT_EQ( *a.load( oLoad ), 1 );
510 EXPECT_TRUE( a.compare_exchange_weak( p, arr + 5, order, atomics::memory_order_relaxed ));
511 EXPECT_EQ( p, arr + 0 );
513 EXPECT_FALSE( a.compare_exchange_weak( p, arr + 3, order, atomics::memory_order_relaxed ));
514 EXPECT_EQ( p, arr + 5 );
517 EXPECT_TRUE( a.compare_exchange_strong( p, arr + 3, order, atomics::memory_order_relaxed ));
518 EXPECT_EQ( p, arr + 5 );
520 EXPECT_FALSE( a.compare_exchange_strong( p, arr + 5, order, atomics::memory_order_relaxed ));
521 EXPECT_EQ( p, arr + 3 );
524 EXPECT_EQ( a.exchange( arr, order ), arr + 3 );
525 EXPECT_EQ( a.load( oLoad ), arr );
526 EXPECT_EQ( *a.load( oLoad ), 1 );
528 for ( integral_type i = 1; i < aSize; ++i ) {
529 integral_type * p = a.load();
531 EXPECT_EQ( a.fetch_add( 1, order ), p );
532 EXPECT_EQ( *a.load( oLoad ), i + 1 );
535 for ( integral_type i = aSize; i > 1; --i ) {
536 integral_type * p = a.load();
538 EXPECT_EQ( a.fetch_sub( 1, order ), p );
539 EXPECT_EQ( *a.load( oLoad ), i - 1 );
543 template <typename Integral, bool Volatile>
544 void test_atomic_pointer_for()
546 typedef Integral integral_type;
547 typedef typename add_volatile<atomics::atomic< integral_type *>, Volatile>::type atomic_pointer;
549 integral_type arr[8];
550 const integral_type aSize = sizeof(arr)/sizeof(arr[0]);
551 for ( integral_type i = 0; i < aSize; ++i ) {
552 arr[static_cast<size_t>(i)] = i + 1;
559 EXPECT_EQ( *a.load(), 1 );
562 EXPECT_TRUE( a.compare_exchange_weak( p, arr + 5 ));
563 EXPECT_EQ( p, arr + 0 );
565 EXPECT_FALSE( a.compare_exchange_weak( p, arr + 3 ));
566 EXPECT_EQ( p, arr + 5 );
569 EXPECT_TRUE( a.compare_exchange_strong( p, arr + 3 ));
570 EXPECT_EQ( p, arr + 5 );
572 EXPECT_FALSE( a.compare_exchange_strong( p, arr + 5 ));
573 EXPECT_EQ( p, arr + 3 );
576 EXPECT_EQ( a.exchange( arr ), arr + 3 );
577 EXPECT_EQ( a.load(), arr );
578 EXPECT_EQ( *a.load(), 1 );
580 for ( integral_type i = 1; i < aSize; ++i ) {
581 integral_type * p = a.load();
583 integral_type * pa = a.fetch_add( 1 );
585 EXPECT_EQ( *a.load(), i + 1 );
588 for ( integral_type i = aSize; i > 1; --i ) {
589 integral_type * p = a.load();
591 EXPECT_EQ( a.fetch_sub( 1 ), p );
592 EXPECT_EQ( *a.load(), i - 1 );
595 test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_relaxed );
596 test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acquire );
597 test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_release );
598 test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acq_rel );
599 test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_seq_cst );
603 void test_atomic_flag()
605 // Array to test different alignment
607 atomics::atomic_flag flags[8];
608 for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
609 do_test_atomic_flag( flags[i] );
612 void test_atomic_flag_volatile()
614 // Array to test different alignment
616 atomics::atomic_flag volatile flags[8];
617 for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
618 do_test_atomic_flag( flags[i] );
621 template <typename AtomicBool>
622 void test_atomic_bool_()
624 // Array to test different alignment
627 for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
628 do_test_atomic_bool( a[i] );
630 do_test_atomic_bool( a[i], atomics::memory_order_relaxed );
631 //do_test_atomic_bool( a[i], atomics::memory_order_consume );
632 do_test_atomic_bool( a[i], atomics::memory_order_acquire );
633 do_test_atomic_bool( a[i], atomics::memory_order_release );
634 do_test_atomic_bool( a[i], atomics::memory_order_acq_rel );
635 do_test_atomic_bool( a[i], atomics::memory_order_seq_cst );
639 void test_atomic_bool()
641 test_atomic_bool_< atomics::atomic<bool> >();
643 void test_atomic_bool_volatile()
645 test_atomic_bool_< atomics::atomic<bool> volatile >();
649 TEST_F( cxx11_atomic_class, atomic_char )
651 test_atomic_integral<char>();
654 TEST_F( cxx11_atomic_class, atomic_signed_char )
656 test_atomic_integral<signed char>();
659 TEST_F( cxx11_atomic_class, atomic_unsigned_char )
661 test_atomic_integral<unsigned char>();
664 TEST_F( cxx11_atomic_class, atomic_short_int )
666 test_atomic_integral<short int>();
669 TEST_F( cxx11_atomic_class, atomic_signed_short_int )
671 test_atomic_integral<signed short int>();
674 TEST_F( cxx11_atomic_class, atomic_unsigned_short_int )
676 test_atomic_integral<unsigned short int>();
679 TEST_F( cxx11_atomic_class, atomic_int )
681 test_atomic_integral<int>();
684 TEST_F( cxx11_atomic_class, atomic_unsigned_int )
686 test_atomic_integral<unsigned int>();
689 TEST_F( cxx11_atomic_class, atomic_long )
691 test_atomic_integral<long>();
694 TEST_F( cxx11_atomic_class, atomic_unsigned_long )
696 test_atomic_integral<unsigned long>();
699 TEST_F( cxx11_atomic_class, atomic_long_long )
701 test_atomic_integral<long long>();
704 TEST_F( cxx11_atomic_class, atomic_unsigned_long_long )
706 test_atomic_integral<unsigned long long>();
709 TEST_F( cxx11_atomic_class, atomic_char_volatile )
711 test_atomic_integral_volatile<char>();
714 TEST_F( cxx11_atomic_class, atomic_signed_char_volatile )
716 test_atomic_integral_volatile<signed char>();
719 TEST_F( cxx11_atomic_class, atomic_unsigned_char_volatile )
721 test_atomic_integral_volatile<unsigned char>();
724 TEST_F( cxx11_atomic_class, atomic_short_int_volatile )
726 test_atomic_integral_volatile<short int>();
729 TEST_F( cxx11_atomic_class, atomic_signed_short_int_volatile )
731 test_atomic_integral_volatile<signed short int>();
734 TEST_F( cxx11_atomic_class, atomic_unsigned_short_int_volatile )
736 test_atomic_integral_volatile<unsigned short int>();
739 TEST_F( cxx11_atomic_class, atomic_int_volatile )
741 test_atomic_integral_volatile<int>();
744 TEST_F( cxx11_atomic_class, atomic_unsigned_int_volatile )
746 test_atomic_integral_volatile<unsigned int>();
749 TEST_F( cxx11_atomic_class, atomic_long_volatile )
751 test_atomic_integral_volatile<long>();
754 TEST_F( cxx11_atomic_class, atomic_unsigned_long_volatile )
756 test_atomic_integral_volatile<unsigned long>();
759 TEST_F( cxx11_atomic_class, atomic_long_long_volatile )
761 test_atomic_integral_volatile<long long>();
764 TEST_F( cxx11_atomic_class, atomic_unsigned_long_long_volatile )
766 test_atomic_integral_volatile<unsigned long long>();
769 TEST_F( cxx11_atomic_class, atomic_pointer_void )
771 do_test_atomic_pointer_void<false>();
774 TEST_F( cxx11_atomic_class, atomic_pointer_void_volatile )
776 do_test_atomic_pointer_void<true>();
779 TEST_F( cxx11_atomic_class, atomic_pointer_char )
781 test_atomic_pointer_for<char, false>();
784 TEST_F( cxx11_atomic_class, atomic_pointer_char_volatile )
786 test_atomic_pointer_for<char, true>();
789 TEST_F( cxx11_atomic_class, atomic_pointer_short )
791 test_atomic_pointer_for<short int, false>();
794 TEST_F( cxx11_atomic_class, atomic_pointer_short_volatile )
796 test_atomic_pointer_for<short int, true>();
799 TEST_F( cxx11_atomic_class, atomic_pointer_int )
801 test_atomic_pointer_for<int, false>();
804 TEST_F( cxx11_atomic_class, atomic_pointer_int_volatile )
806 test_atomic_pointer_for<int, true>();
809 TEST_F( cxx11_atomic_class, atomic_pointer_long )
811 test_atomic_pointer_for<long, false>();
814 TEST_F( cxx11_atomic_class, atomic_pointer_long_volatile )
816 test_atomic_pointer_for<long, true>();
819 TEST_F( cxx11_atomic_class, atomic_pointer_long_long )
821 test_atomic_pointer_for<long long, false>();
824 TEST_F( cxx11_atomic_class, atomic_pointer_long_long_volatile )
826 test_atomic_pointer_for<long long, true>();