// Removed prohibited atomic<void*>::fetch_xxx() tests
// [libcds.git] / test / unit / misc / cxx11_atomic_class.cpp
/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
30
#include <gtest/gtest.h>
#include <cds/algo/atomic.h>
#include "cxx11_convert_memory_order.h"

#define EXPECT_ATOMIC_IS_LOCK_FREE( x ) EXPECT_TRUE( x.is_lock_free() )
36
37 namespace {
38     class cxx11_atomic_class: public ::testing::Test
39     {
40     protected:
41         template <typename AtomicFlag>
42         void do_test_atomic_flag_mo( AtomicFlag& f, atomics::memory_order order )
43         {
44             atomics::memory_order mo_clear = convert_to_store_order(order);
45             for ( int i = 0; i < 5; ++i ) {
46                 EXPECT_TRUE( !f.test_and_set( order ));
47                 EXPECT_TRUE( f.test_and_set( order ));
48                 f.clear( mo_clear );
49             }
50         }
51
52         template <typename AtomicFlag>
53         void do_test_atomic_flag( AtomicFlag& f)
54         {
55             f.clear();
56
57             for ( int i = 0; i < 5; ++i ) {
58                 EXPECT_TRUE( !f.test_and_set());
59                 EXPECT_TRUE( f.test_and_set());
60                 f.clear();
61             }
62
63             do_test_atomic_flag_mo( f, atomics::memory_order_relaxed );
64             //do_test_atomic_flag_mo( f, atomics::memory_order_consume );
65             do_test_atomic_flag_mo( f, atomics::memory_order_acquire );
66             do_test_atomic_flag_mo( f, atomics::memory_order_release );
67             do_test_atomic_flag_mo( f, atomics::memory_order_acq_rel );
68             do_test_atomic_flag_mo( f, atomics::memory_order_seq_cst );
69         }
70
71         template <class Atomic, typename Integral>
72         void do_test_atomic_type(Atomic& a)
73         {
74             typedef Integral    integral_type;
75
76             EXPECT_ATOMIC_IS_LOCK_FREE( a );
77             a.store( (integral_type) 0 );
78             EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));
79
80             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
81                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
82                 EXPECT_EQ( a.exchange( n ), static_cast<integral_type>( 0 ));
83                 EXPECT_EQ( a.load(), n );
84                 EXPECT_EQ( a.exchange( (integral_type) 0 ), n );
85                 EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));
86             }
87
88             integral_type prev = a.load();
89             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
90                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
91                 integral_type expected = prev;
92
93                 EXPECT_TRUE( a.compare_exchange_weak( expected, n));
94                 EXPECT_EQ( expected, prev );
95                 EXPECT_FALSE( a.compare_exchange_weak( expected, n));
96                 EXPECT_EQ( expected, n );
97
98                 prev = n;
99                 EXPECT_EQ( a.load(), n );
100             }
101
102             a = (integral_type) 0;
103
104             prev = a;
105             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
106                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
107                 integral_type expected = prev;
108
109                 EXPECT_TRUE( a.compare_exchange_strong( expected, n));
110                 EXPECT_EQ( expected, prev );
111                 EXPECT_FALSE( a.compare_exchange_strong( expected, n));
112                 EXPECT_EQ( expected, n );
113
114                 prev = n;
115                 EXPECT_EQ( a.load(), n );
116             }
117
118             EXPECT_EQ( a.exchange( (integral_type) 0 ), prev );
119         }
120
121         template <class Atomic, typename Integral>
122         void do_test_atomic_integral(Atomic& a)
123         {
124             do_test_atomic_type< Atomic, Integral >(a);
125
126             typedef Integral    integral_type;
127
128             // fetch_xxx testing
129             a.store( (integral_type) 0 );
130
131             // fetch_add
132             for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
133             {
134                 integral_type prev = a.load();
135                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
136
137                 EXPECT_EQ( a.fetch_add(n), prev);
138             }
139
140             // fetch_sub
141             for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
142             {
143                 integral_type prev = a.load();
144                 integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
145
146                 EXPECT_EQ( a.fetch_sub(n), prev);
147             }
148             EXPECT_EQ( a.load(), static_cast<integral_type>( 0 ));
149
150             // fetch_or / fetc_xor / fetch_and
151             for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
152             {
153                 integral_type prev = a.load()  ;;
154                 integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
155
156                 EXPECT_EQ( a.fetch_or( mask ), prev );
157                 prev = a.load();
158                 EXPECT_EQ( ( prev & mask), mask);
159
160                 EXPECT_EQ( a.fetch_and( (integral_type) ~mask ), prev );
161                 prev = a.load();
162                 EXPECT_EQ( integral_type(prev & mask), integral_type(0));
163
164                 EXPECT_EQ( a.fetch_xor( mask ), prev );
165                 prev = a.load();
166                 EXPECT_EQ( integral_type( prev & mask), mask);
167             }
168             EXPECT_EQ( a.load(), (integral_type) -1 );
169
170
171             // op= testing
172             a = (integral_type) 0;
173
174             // +=
175             for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
176             {
177                 integral_type prev = a;
178                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
179
180                 EXPECT_EQ( (a += n), (prev + n));
181             }
182
183             // -=
184             for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
185             {
186                 integral_type prev = a;
187                 integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
188
189                 EXPECT_EQ( (a -= n),  prev - n );
190             }
191             EXPECT_EQ( a.load(), (integral_type) 0 );
192
193             // |= / ^= / &=
194             for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
195             {
196                 integral_type prev = a;
197                 integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
198
199                 EXPECT_EQ( (a |= mask ), (prev | mask ));
200                 prev = a;
201                 EXPECT_EQ( ( prev & mask), mask);
202
203                 EXPECT_EQ( (a &= (integral_type) ~mask ), ( prev & (integral_type) ~mask ));
204                 prev = a;
205                 EXPECT_EQ( ( prev & mask), integral_type( 0 ));
206
207                 EXPECT_EQ( (a ^= mask ), (prev ^ mask ));
208                 prev = a;
209                 EXPECT_EQ( ( prev & mask), mask);
210             }
211             EXPECT_EQ( a.load(), (integral_type) -1 );
212         }
213
214         template <class Atomic, typename Integral>
215         void do_test_atomic_type( Atomic& a, atomics::memory_order order )
216         {
217             typedef Integral    integral_type;
218
219             const atomics::memory_order oLoad = convert_to_load_order( order );
220             const atomics::memory_order oStore = convert_to_store_order( order );
221
222             EXPECT_ATOMIC_IS_LOCK_FREE( a );
223             a.store((integral_type) 0, oStore );
224             EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
225
226             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
227                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
228                 EXPECT_EQ( a.exchange( n, order ), integral_type( 0 ));
229                 EXPECT_EQ( a.load( oLoad ), n );
230                 EXPECT_EQ( a.exchange( (integral_type) 0, order ), n );
231                 EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
232             }
233
234             integral_type prev = a.load( oLoad );
235             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
236                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
237                 integral_type expected = prev;
238
239                 EXPECT_TRUE( a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
240                 EXPECT_EQ( expected, prev );
241                 EXPECT_FALSE( a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
242                 EXPECT_EQ( expected, n );
243
244                 prev = n;
245                 EXPECT_EQ( a.load( oLoad ), n );
246             }
247
248             a.store( (integral_type) 0, oStore );
249
250             prev = a.load( oLoad );
251             for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
252                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
253                 integral_type expected = prev;
254
255                 EXPECT_TRUE( a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
256                 EXPECT_EQ( expected, prev );
257                 EXPECT_FALSE( a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
258                 EXPECT_EQ( expected, n );
259
260                 prev = n;
261                 EXPECT_EQ( a.load( oLoad ), n );
262             }
263
264             EXPECT_EQ( a.exchange( (integral_type) 0, order ), prev );
265         }
266
267         template <class Atomic, typename Integral>
268         void do_test_atomic_integral( Atomic& a, atomics::memory_order order )
269         {
270             do_test_atomic_type< Atomic, Integral >( a, order );
271
272             typedef Integral    integral_type;
273
274             const atomics::memory_order oLoad = convert_to_load_order( order );
275             const atomics::memory_order oStore = convert_to_store_order( order );
276
277             // fetch_xxx testing
278             a.store( (integral_type) 0, oStore );
279
280             // fetch_add
281             for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
282             {
283                 integral_type prev = a.load( oLoad );
284                 integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
285
286                 EXPECT_EQ( a.fetch_add( n, order), prev);
287             }
288
289             // fetch_sub
290             for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
291             {
292                 integral_type prev = a.load( oLoad );
293                 integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));
294
295                 EXPECT_EQ( a.fetch_sub( n, order ), prev);
296             }
297             EXPECT_EQ( a.load( oLoad ), integral_type( 0 ));
298
299             // fetch_or / fetc_xor / fetch_and
300             for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
301             {
302                 integral_type prev = a.load( oLoad )  ;;
303                 integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );
304
305                 EXPECT_EQ( a.fetch_or( mask, order ), prev );
306                 prev = a.load( oLoad );
307                 EXPECT_EQ( ( prev & mask), mask);
308
309                 EXPECT_EQ( a.fetch_and( (integral_type) ~mask, order ), prev );
310                 prev = a.load( oLoad );
311                 EXPECT_EQ( ( prev & mask), integral_type( 0 ));
312
313                 EXPECT_EQ( a.fetch_xor( mask, order ), prev );
314                 prev = a.load( oLoad );
315                 EXPECT_EQ( ( prev & mask), mask);
316             }
317             EXPECT_EQ( a.load( oLoad ), (integral_type) -1 );
318         }
319
320
321
322         template <typename Atomic, typename Integral>
323         void test_atomic_integral_(Atomic& a)
324         {
325             do_test_atomic_integral<Atomic, Integral >(a);
326
327             do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_relaxed );
328             do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acquire );
329             do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_release );
330             do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acq_rel );
331             do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_seq_cst );
332         }
333
334         template <typename Integral>
335         void test_atomic_integral()
336         {
337             typedef atomics::atomic<Integral> atomic_type;
338
339             atomic_type a[8];
340             for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
341                 test_atomic_integral_<atomic_type, Integral>( a[i] );
342             }
343         }
344         template <typename Integral>
345         void test_atomic_integral_volatile()
346         {
347             typedef atomics::atomic<Integral> volatile atomic_type;
348
349             atomic_type a[8];
350             for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
351                 test_atomic_integral_<atomic_type, Integral>( a[i] );
352             }
353         }
354
355         template <class AtomicBool>
356         void do_test_atomic_bool( AtomicBool& a )
357         {
358             EXPECT_ATOMIC_IS_LOCK_FREE( a );
359             a.store( false );
360             EXPECT_FALSE( a );
361             EXPECT_FALSE( a.load());
362
363             EXPECT_FALSE( a.exchange( true ));
364             EXPECT_TRUE( a.load());
365             EXPECT_TRUE( a.exchange( false ));
366             EXPECT_FALSE( a.load());
367
368             bool expected = false;
369             EXPECT_TRUE( a.compare_exchange_weak( expected, true));
370             EXPECT_FALSE( expected );
371             EXPECT_FALSE( a.compare_exchange_weak( expected, false));
372             EXPECT_TRUE( expected );
373             EXPECT_TRUE( a.load());
374
375             a.store( false );
376
377             expected = false;
378             EXPECT_TRUE( a.compare_exchange_strong( expected, true));
379             EXPECT_FALSE( expected );
380             EXPECT_FALSE( a.compare_exchange_strong( expected, false));
381             EXPECT_TRUE( expected );
382
383             EXPECT_TRUE( a.load());
384
385             EXPECT_TRUE( a.exchange( false ));
386         }
387
388         template <class AtomicBool>
389         void do_test_atomic_bool( AtomicBool& a, atomics::memory_order order )
390         {
391             const atomics::memory_order oLoad = convert_to_load_order( order );
392             const atomics::memory_order oStore = convert_to_store_order( order );
393             const atomics::memory_order oExchange = convert_to_exchange_order( order );
394
395             EXPECT_ATOMIC_IS_LOCK_FREE( a );
396             a.store( false, oStore );
397             EXPECT_FALSE( a );
398             EXPECT_FALSE( a.load( oLoad ));
399
400             EXPECT_FALSE( a.exchange( true, oExchange ));
401             EXPECT_TRUE( a.load( oLoad ));
402             EXPECT_TRUE( a.exchange( false, oExchange ));
403             EXPECT_FALSE( a.load( oLoad ));
404
405             bool expected = false;
406             EXPECT_TRUE( a.compare_exchange_weak( expected, true, order, atomics::memory_order_relaxed));
407             EXPECT_FALSE( expected );
408             EXPECT_FALSE( a.compare_exchange_weak( expected, false, order, atomics::memory_order_relaxed));
409             EXPECT_TRUE( expected );
410             EXPECT_TRUE( a.load( oLoad ));
411
412             //a = bool(false);
413             a.store( false, oStore );
414
415             expected = false;
416             EXPECT_TRUE( a.compare_exchange_strong( expected, true, order, atomics::memory_order_relaxed));
417             EXPECT_FALSE( expected );
418             EXPECT_FALSE( a.compare_exchange_strong( expected, false, order, atomics::memory_order_relaxed));
419             EXPECT_TRUE( expected );
420
421             EXPECT_TRUE( a.load( oLoad ));
422
423             EXPECT_TRUE( a.exchange( false, oExchange ));
424         }
425
426
427         template <typename Atomic>
428         void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, atomics::memory_order order )
429         {
430             atomics::memory_order oLoad = convert_to_load_order(order);
431             atomics::memory_order oStore = convert_to_store_order(order);
432             void *  p;
433
434             a.store( (void *) arr, oStore );
435             EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), 1 );
436
437             p = arr;
438             EXPECT_TRUE( a.compare_exchange_weak( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
439             EXPECT_EQ( p, arr + 0 );
440             EXPECT_EQ( *reinterpret_cast<char *>(p), 1 );
441             EXPECT_FALSE( a.compare_exchange_weak( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
442             EXPECT_EQ( p, arr + 5 );
443             EXPECT_EQ( *reinterpret_cast<char *>(p), 6 );
444
445             EXPECT_TRUE( a.compare_exchange_strong( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
446             EXPECT_EQ( p, arr + 5 );
447             EXPECT_EQ( *reinterpret_cast<char *>(p), 6 );
448             EXPECT_FALSE( a.compare_exchange_strong( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
449             EXPECT_EQ( p, arr + 3 );
450             EXPECT_EQ( *reinterpret_cast<char *>(p), 4 );
451
452             EXPECT_EQ( reinterpret_cast<char *>(a.exchange( (void *) arr, order )), arr + 3 );
453             EXPECT_EQ( reinterpret_cast<char *>(a.load( oLoad )), arr );
454             EXPECT_EQ( *reinterpret_cast<char *>(a.load( oLoad )), 1 );
455         }
456
457         template <bool Volatile>
458         void do_test_atomic_pointer_void()
459         {
460             typedef typename add_volatile<atomics::atomic< void *>, Volatile>::type    atomic_pointer;
461
462             char   arr[8];
463             const char aSize = sizeof(arr)/sizeof(arr[0]);
464             for ( char i = 0; i < aSize; ++i ) {
465                 arr[static_cast<unsigned>( i )] = i + 1;
466             }
467
468             atomic_pointer  a;
469             void *  p;
470
471             a.store( (void *) arr );
472             EXPECT_EQ( *reinterpret_cast<char *>(a.load()), 1 );
473
474             p = arr;
475             EXPECT_TRUE( a.compare_exchange_weak( p, (void *)(arr + 5)));
476             EXPECT_EQ( p, arr + 0 );
477             EXPECT_FALSE( a.compare_exchange_weak( p, (void *)(arr + 3)));
478             EXPECT_EQ( p, arr + 5 );
479
480             EXPECT_TRUE( a.compare_exchange_strong( p, (void *)(arr + 3)));
481             EXPECT_EQ( p, arr + 5 );
482             EXPECT_FALSE( a.compare_exchange_strong( p, (void *)(arr + 5)));
483             EXPECT_EQ( p, arr + 3 );
484
485             EXPECT_EQ( reinterpret_cast<char *>( a.exchange( (void *) arr )), arr + 3 );
486             EXPECT_EQ( reinterpret_cast<char *>( a.load()), arr );
487             EXPECT_EQ( *reinterpret_cast<char *>( a.load()), 1 );
488
489             do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_relaxed );
490             do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acquire );
491             do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_release );
492             do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acq_rel );
493             do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_seq_cst );
494         }
495
496         template <typename Atomic, typename Integral>
497         void test_atomic_pointer_for_( Atomic& a, Integral * arr, Integral aSize, atomics::memory_order order )
498         {
499             typedef Integral integral_type;
500             atomics::memory_order oLoad = convert_to_load_order(order);
501             atomics::memory_order oStore = convert_to_store_order(order);
502             integral_type *  p;
503
504             a.store( arr, oStore );
505             EXPECT_EQ( *a.load( oLoad ), 1 );
506
507             p = arr;
508             EXPECT_TRUE( a.compare_exchange_weak( p, arr + 5, order, atomics::memory_order_relaxed ));
509             EXPECT_EQ( p, arr + 0 );
510             EXPECT_EQ( *p, 1 );
511             EXPECT_FALSE( a.compare_exchange_weak( p, arr + 3, order, atomics::memory_order_relaxed ));
512             EXPECT_EQ( p, arr + 5 );
513             EXPECT_EQ( *p, 6 );
514
515             EXPECT_TRUE( a.compare_exchange_strong( p, arr + 3, order, atomics::memory_order_relaxed ));
516             EXPECT_EQ( p, arr + 5 );
517             EXPECT_EQ( *p, 6 );
518             EXPECT_FALSE( a.compare_exchange_strong( p, arr + 5, order, atomics::memory_order_relaxed ));
519             EXPECT_EQ( p, arr + 3 );
520             EXPECT_EQ( *p, 4 );
521
522             EXPECT_EQ( a.exchange( arr, order ), arr + 3 );
523             EXPECT_EQ( a.load( oLoad ), arr );
524             EXPECT_EQ( *a.load( oLoad ), 1 );
525
526             for ( integral_type i = 1; i < aSize; ++i ) {
527                 integral_type * p = a.load();
528                 EXPECT_EQ( *p, i );
529                 EXPECT_EQ( a.fetch_add( 1, order ), p );
530                 EXPECT_EQ( *a.load( oLoad ), i + 1 );
531             }
532
533             for ( integral_type i = aSize; i > 1; --i ) {
534                 integral_type * p = a.load();
535                 EXPECT_EQ( *p, i  );
536                 EXPECT_EQ( a.fetch_sub( 1, order ), p );
537                 EXPECT_EQ( *a.load( oLoad ), i - 1 );
538             }
539         }
540
541         template <typename Integral, bool Volatile>
542         void test_atomic_pointer_for()
543         {
544             typedef Integral integral_type;
545             typedef typename add_volatile<atomics::atomic< integral_type *>, Volatile>::type    atomic_pointer;
546
547             integral_type   arr[8];
548             const integral_type aSize = sizeof(arr)/sizeof(arr[0]);
549             for ( integral_type i = 0; i < aSize; ++i ) {
550                 arr[static_cast<size_t>(i)] = i + 1;
551             }
552
553             atomic_pointer  a;
554             integral_type *  p;
555
556             a.store( arr );
557             EXPECT_EQ( *a.load(), 1 );
558
559             p = arr;
560             EXPECT_TRUE( a.compare_exchange_weak( p, arr + 5 ));
561             EXPECT_EQ( p, arr + 0 );
562             EXPECT_EQ( *p, 1 );
563             EXPECT_FALSE( a.compare_exchange_weak( p, arr + 3 ));
564             EXPECT_EQ( p, arr + 5 );
565             EXPECT_EQ( *p, 6 );
566
567             EXPECT_TRUE( a.compare_exchange_strong( p, arr + 3 ));
568             EXPECT_EQ( p, arr + 5 );
569             EXPECT_EQ( *p, 6 );
570             EXPECT_FALSE( a.compare_exchange_strong( p, arr + 5 ));
571             EXPECT_EQ( p, arr + 3 );
572             EXPECT_EQ( *p, 4 );
573
574             EXPECT_EQ( a.exchange( arr ), arr + 3 );
575             EXPECT_EQ( a.load(), arr );
576             EXPECT_EQ( *a.load(), 1 );
577
578             for ( integral_type i = 1; i < aSize; ++i ) {
579                 integral_type * p = a.load();
580                 EXPECT_EQ( *p, i );
581                 integral_type * pa = a.fetch_add( 1 );
582                 EXPECT_EQ( pa, p );
583                 EXPECT_EQ( *a.load(), i + 1 );
584             }
585
586             for ( integral_type i = aSize; i > 1; --i ) {
587                 integral_type * p = a.load();
588                 EXPECT_EQ( *p, i  );
589                 EXPECT_EQ( a.fetch_sub( 1 ), p );
590                 EXPECT_EQ( *a.load(), i - 1 );
591             }
592
593             test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_relaxed );
594             test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acquire );
595             test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_release );
596             test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acq_rel );
597             test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_seq_cst );
598         }
599
600     public:
601         void test_atomic_flag()
602         {
603             // Array to test different alignment
604
605             atomics::atomic_flag flags[8];
606             for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
607                 do_test_atomic_flag( flags[i] );
608         }
609
610         void test_atomic_flag_volatile()
611         {
612             // Array to test different alignment
613
614             atomics::atomic_flag volatile flags[8];
615             for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
616                 do_test_atomic_flag( flags[i] );
617         }
618
619         template <typename AtomicBool>
620         void test_atomic_bool_()
621         {
622             // Array to test different alignment
623             AtomicBool  a[8];
624
625             for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
626                 do_test_atomic_bool( a[i] );
627
628                 do_test_atomic_bool( a[i], atomics::memory_order_relaxed );
629                 //do_test_atomic_bool( a[i], atomics::memory_order_consume );
630                 do_test_atomic_bool( a[i], atomics::memory_order_acquire );
631                 do_test_atomic_bool( a[i], atomics::memory_order_release );
632                 do_test_atomic_bool( a[i], atomics::memory_order_acq_rel );
633                 do_test_atomic_bool( a[i], atomics::memory_order_seq_cst );
634             }
635         }
636
637         void test_atomic_bool()
638         {
639             test_atomic_bool_< atomics::atomic<bool> >();
640         }
641         void test_atomic_bool_volatile()
642         {
643             test_atomic_bool_< atomics::atomic<bool> volatile >();
644         }
645     };
646
647     TEST_F( cxx11_atomic_class, atomic_char )
648     {
649         test_atomic_integral<char>();
650     }
651
652     TEST_F( cxx11_atomic_class, atomic_signed_char )
653     {
654         test_atomic_integral<signed char>();
655     }
656
657     TEST_F( cxx11_atomic_class, atomic_unsigned_char )
658     {
659         test_atomic_integral<unsigned char>();
660     }
661
662     TEST_F( cxx11_atomic_class, atomic_short_int )
663     {
664         test_atomic_integral<short int>();
665     }
666
667     TEST_F( cxx11_atomic_class, atomic_signed_short_int )
668     {
669         test_atomic_integral<signed short int>();
670     }
671
672     TEST_F( cxx11_atomic_class, atomic_unsigned_short_int )
673     {
674         test_atomic_integral<unsigned short int>();
675     }
676
677     TEST_F( cxx11_atomic_class, atomic_int )
678     {
679         test_atomic_integral<int>();
680     }
681
682     TEST_F( cxx11_atomic_class, atomic_unsigned_int )
683     {
684         test_atomic_integral<unsigned int>();
685     }
686
687     TEST_F( cxx11_atomic_class, atomic_long )
688     {
689         test_atomic_integral<long>();
690     }
691
692     TEST_F( cxx11_atomic_class, atomic_unsigned_long )
693     {
694         test_atomic_integral<unsigned long>();
695     }
696
697     TEST_F( cxx11_atomic_class, atomic_long_long )
698     {
699         test_atomic_integral<long long>();
700     }
701
702     TEST_F( cxx11_atomic_class, atomic_unsigned_long_long )
703     {
704         test_atomic_integral<unsigned long long>();
705     }
706
707     TEST_F( cxx11_atomic_class, atomic_char_volatile )
708     {
709         test_atomic_integral_volatile<char>();
710     }
711
712     TEST_F( cxx11_atomic_class, atomic_signed_char_volatile )
713     {
714         test_atomic_integral_volatile<signed char>();
715     }
716
717     TEST_F( cxx11_atomic_class, atomic_unsigned_char_volatile )
718     {
719         test_atomic_integral_volatile<unsigned char>();
720     }
721
722     TEST_F( cxx11_atomic_class, atomic_short_int_volatile )
723     {
724         test_atomic_integral_volatile<short int>();
725     }
726
727     TEST_F( cxx11_atomic_class, atomic_signed_short_int_volatile )
728     {
729         test_atomic_integral_volatile<signed short int>();
730     }
731
732     TEST_F( cxx11_atomic_class, atomic_unsigned_short_int_volatile )
733     {
734         test_atomic_integral_volatile<unsigned short int>();
735     }
736
737     TEST_F( cxx11_atomic_class, atomic_int_volatile )
738     {
739         test_atomic_integral_volatile<int>();
740     }
741
742     TEST_F( cxx11_atomic_class, atomic_unsigned_int_volatile )
743     {
744         test_atomic_integral_volatile<unsigned int>();
745     }
746
747     TEST_F( cxx11_atomic_class, atomic_long_volatile )
748     {
749         test_atomic_integral_volatile<long>();
750     }
751
752     TEST_F( cxx11_atomic_class, atomic_unsigned_long_volatile )
753     {
754         test_atomic_integral_volatile<unsigned long>();
755     }
756
757     TEST_F( cxx11_atomic_class, atomic_long_long_volatile )
758     {
759         test_atomic_integral_volatile<long long>();
760     }
761
762     TEST_F( cxx11_atomic_class, atomic_unsigned_long_long_volatile )
763     {
764         test_atomic_integral_volatile<unsigned long long>();
765     }
766
767     TEST_F( cxx11_atomic_class, atomic_pointer_void )
768     {
769         do_test_atomic_pointer_void<false>();
770     }
771
772     TEST_F( cxx11_atomic_class, atomic_pointer_void_volatile )
773     {
774         do_test_atomic_pointer_void<true>();
775     }
776
777     TEST_F( cxx11_atomic_class, atomic_pointer_char )
778     {
779         test_atomic_pointer_for<char, false>();
780     }
781
782     TEST_F( cxx11_atomic_class, atomic_pointer_char_volatile )
783     {
784         test_atomic_pointer_for<char, true>();
785     }
786
787     TEST_F( cxx11_atomic_class, atomic_pointer_short )
788     {
789         test_atomic_pointer_for<short int, false>();
790     }
791
792     TEST_F( cxx11_atomic_class, atomic_pointer_short_volatile )
793     {
794         test_atomic_pointer_for<short int, true>();
795     }
796
797     TEST_F( cxx11_atomic_class, atomic_pointer_int )
798     {
799         test_atomic_pointer_for<int, false>();
800     }
801
802     TEST_F( cxx11_atomic_class, atomic_pointer_int_volatile )
803     {
804         test_atomic_pointer_for<int, true>();
805     }
806
807     TEST_F( cxx11_atomic_class, atomic_pointer_long )
808     {
809         test_atomic_pointer_for<long, false>();
810     }
811
812     TEST_F( cxx11_atomic_class, atomic_pointer_long_volatile )
813     {
814         test_atomic_pointer_for<long, true>();
815     }
816
817     TEST_F( cxx11_atomic_class, atomic_pointer_long_long )
818     {
819         test_atomic_pointer_for<long long, false>();
820     }
821
822     TEST_F( cxx11_atomic_class, atomic_pointer_long_long_volatile )
823     {
824         test_atomic_pointer_for<long long, true>();
825     }
826 }   // namespace