Fixed Clang incompatibility
[libcds.git] / test/unit/misc/cxx11_atomic_func.cpp
/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <gtest/gtest.h>
#include <cds/algo/atomic.h>

#ifndef CDS_USE_BOOST_ATOMIC
// Skip this test for boost.atomic
// Boost.atomic has no free atomic functions implementation.
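// For context, a minimal sketch of the free-function form exercised throughout this file
// (assuming the atomics namespace resolves to an implementation that provides the C++11
// free functions, e.g. std::):
//
//     atomics::atomic<int> x;
//     atomics::atomic_store( &x, 1 );        // free function, instead of x.store( 1 )
//     int v = atomics::atomic_load( &x );    // free function, instead of x.load()
//
// Boost.atomic provides only the member-function form, hence the guard above.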

#include "cxx11_convert_memory_order.h"

#if CDS_COMPILER == CDS_COMPILER_CLANG && !defined( _LIBCPP_VERSION )
    // Clang (at least 3.6) without libc++ does not provide the gcc-specific __atomic_is_lock_free function
#   define EXPECT_ATOMIC_IS_LOCK_FREE( x )
#else
#   define EXPECT_ATOMIC_IS_LOCK_FREE( x ) EXPECT_TRUE( atomics::atomic_is_lock_free( &x ));
#endif
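// The helper header included above supplies convert_to_load_order(), convert_to_store_order()
// and convert_to_exchange_order(), which map a requested memory_order onto one that is valid
// for the given operation. Conceptually the mapping is along these lines (a sketch only; the
// authoritative version lives in cxx11_convert_memory_order.h):
//
//     // a load must not use release semantics
//     atomics::memory_order convert_to_load_order( atomics::memory_order order )
//     {
//         if ( order == atomics::memory_order_release ) return atomics::memory_order_relaxed;
//         if ( order == atomics::memory_order_acq_rel ) return atomics::memory_order_acquire;
//         return order;
//     }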


namespace misc {

    class cxx11_atomic_func: public ::testing::Test
    {
    protected:
        template <typename AtomicFlag>
        void do_test_atomic_flag_mo( AtomicFlag& f, atomics::memory_order order )
        {
            atomics::memory_order mo_clear = convert_to_store_order(order);

            f.clear( convert_to_store_order(order));

            for ( int i = 0; i < 5; ++i ) {
                EXPECT_FALSE( atomics::atomic_flag_test_and_set_explicit( &f, order ));
                EXPECT_TRUE( atomics::atomic_flag_test_and_set_explicit( &f, order ));
                atomics::atomic_flag_clear_explicit( &f, mo_clear );
                atomics::atomic_flag_clear_explicit( &f, mo_clear );
            }
        }

        template <typename AtomicFlag>
        void do_test_atomic_flag( AtomicFlag& f )
        {
            f.clear();

            for ( int i = 0; i < 5; ++i ) {
                EXPECT_FALSE( atomics::atomic_flag_test_and_set( &f ));
                EXPECT_TRUE( atomics::atomic_flag_test_and_set( &f ));
                atomics::atomic_flag_clear(&f);
                atomics::atomic_flag_clear(&f);
            }

            do_test_atomic_flag_mo( f, atomics::memory_order_relaxed );
            do_test_atomic_flag_mo( f, atomics::memory_order_acquire );
            do_test_atomic_flag_mo( f, atomics::memory_order_release );
            do_test_atomic_flag_mo( f, atomics::memory_order_acq_rel );
            do_test_atomic_flag_mo( f, atomics::memory_order_seq_cst );
        }

        template <class Atomic, typename Integral>
        void do_test_atomic_type(Atomic& a )
        {
            typedef Integral    integral_type;

            EXPECT_ATOMIC_IS_LOCK_FREE( a );
            atomics::atomic_store( &a, (integral_type) 0 );
            EXPECT_EQ( atomics::atomic_load( &a ), integral_type( 0 ));

            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
                EXPECT_EQ( atomics::atomic_exchange( &a, n ), (integral_type) 0 );
                EXPECT_EQ( atomics::atomic_load( &a ), n );
                EXPECT_EQ( atomics::atomic_exchange( &a, (integral_type) 0 ), n );
                EXPECT_EQ( atomics::atomic_load( &a ), (integral_type) 0 );
            }

            integral_type prev = atomics::atomic_load( &a );
            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
                integral_type expected = prev;

                EXPECT_TRUE( atomics::atomic_compare_exchange_weak( &a, &expected, n));
                EXPECT_EQ( expected, prev );
                EXPECT_NE( expected, n );
                EXPECT_FALSE( atomics::atomic_compare_exchange_weak( &a, &expected, n));
                EXPECT_EQ( expected, n );

                prev = n;
                EXPECT_EQ( atomics::atomic_load( &a ), n );
            }

            atomics::atomic_store( &a, (integral_type) 0 );

            prev = atomics::atomic_load( &a );
            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
                integral_type expected = prev;

                EXPECT_TRUE( atomics::atomic_compare_exchange_strong( &a, &expected, n));
                EXPECT_EQ( expected, prev );
                EXPECT_FALSE( atomics::atomic_compare_exchange_strong( &a, &expected, n));
                EXPECT_EQ( expected, n );

                prev = n;
                EXPECT_EQ( atomics::atomic_load( &a ), n );
            }

            EXPECT_EQ( atomics::atomic_exchange( &a, (integral_type) 0 ), prev );
        }

        template <class Atomic, typename Integral>
        void do_test_atomic_integral( Atomic& a )
        {
            do_test_atomic_type< Atomic, Integral >( a );

            typedef Integral    integral_type;

            // fetch_xxx testing
            atomics::atomic_store( &a, (integral_type) 0 );

            // fetch_add
            for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
            {
                integral_type prev = atomics::atomic_load( &a );
                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));

                EXPECT_EQ( atomics::atomic_fetch_add( &a, n ), prev );
            }

            // fetch_sub
            for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
            {
                integral_type prev = atomics::atomic_load( &a );
                integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));

                EXPECT_EQ( atomics::atomic_fetch_sub( &a, n ), prev );
            }
            EXPECT_EQ( atomics::atomic_load( &a ), (integral_type) 0 );

            // fetch_or / fetch_xor / fetch_and
            for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
            {
                integral_type prev = atomics::atomic_load( &a );
                integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );

                EXPECT_EQ( atomics::atomic_fetch_or( &a, mask ), prev );
                prev = atomics::atomic_load( &a );
                EXPECT_EQ( ( prev & mask ), mask );

                EXPECT_EQ( atomics::atomic_fetch_and( &a, (integral_type) ~mask ), prev );
                prev = atomics::atomic_load( &a );
                EXPECT_EQ( integral_type(prev & mask), integral_type(0));

                EXPECT_EQ( atomics::atomic_fetch_xor( &a, mask ), prev );
                prev = atomics::atomic_load( &a );
                EXPECT_EQ( ( prev & mask), mask);
            }
            EXPECT_EQ( atomics::atomic_load( &a ), (integral_type) -1 );
        }

        template <class Atomic, typename Integral>
        void do_test_atomic_type( Atomic& a, atomics::memory_order order )
        {
            typedef Integral    integral_type;

            const atomics::memory_order oLoad = convert_to_load_order( order );
            const atomics::memory_order oStore = convert_to_store_order( order );

            EXPECT_ATOMIC_IS_LOCK_FREE( a );
            atomics::atomic_store_explicit( &a, (integral_type) 0, oStore );
            EXPECT_EQ( atomics::atomic_load_explicit( &a, oLoad ), (integral_type) 0 );

            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
                EXPECT_EQ( atomics::atomic_exchange_explicit( &a, n, order ), (integral_type) 0 );
                EXPECT_EQ( atomics::atomic_load_explicit( &a, oLoad ), n );
                EXPECT_EQ( atomics::atomic_exchange_explicit( &a, (integral_type) 0, order ), n );
                EXPECT_EQ( atomics::atomic_load_explicit( &a, oLoad ), (integral_type) 0 );
            }

            integral_type prev = atomics::atomic_load_explicit( &a, oLoad );
            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
                integral_type expected = prev;

                EXPECT_TRUE( atomics::atomic_compare_exchange_weak_explicit( &a, &expected, n, order, atomics::memory_order_relaxed));
                EXPECT_EQ( expected, prev );
                EXPECT_FALSE( atomics::atomic_compare_exchange_weak_explicit( &a, &expected, n, order, atomics::memory_order_relaxed));
                EXPECT_EQ( expected, n );

                prev = n;
                EXPECT_EQ( atomics::atomic_load_explicit( &a, oLoad ), n );
            }

            atomics::atomic_store_explicit( &a, (integral_type) 0, oStore );

            prev = atomics::atomic_load_explicit( &a, oLoad );
            for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));
                integral_type expected = prev;

                EXPECT_TRUE( atomics::atomic_compare_exchange_strong_explicit( &a, &expected, n, order, atomics::memory_order_relaxed));
                EXPECT_EQ( expected, prev );
                EXPECT_FALSE( atomics::atomic_compare_exchange_strong_explicit( &a, &expected, n, order, atomics::memory_order_relaxed));
                EXPECT_EQ( expected, n );

                prev = n;
                EXPECT_EQ( atomics::atomic_load_explicit( &a, oLoad ), n );
            }

            EXPECT_EQ( atomics::atomic_exchange_explicit( &a, (integral_type) 0, order ), prev );
        }

        template <class Atomic, typename Integral>
        void do_test_atomic_integral( Atomic& a, atomics::memory_order order )
        {
            do_test_atomic_type< Atomic, Integral >( a, order );
            typedef Integral    integral_type;

            const atomics::memory_order oLoad = convert_to_load_order( order );
            const atomics::memory_order oStore = convert_to_store_order( order );

            // fetch_xxx testing
            atomics::atomic_store_explicit( &a, (integral_type) 0, oStore );

            // fetch_add
            for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
            {
                integral_type prev = atomics::atomic_load_explicit( &a, oLoad );
                integral_type n = static_cast<integral_type>( integral_type(42) << (nByte * 8));

                EXPECT_EQ( atomics::atomic_fetch_add_explicit( &a, n, order), prev);
            }

            // fetch_sub
            for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
            {
                integral_type prev = atomics::atomic_load_explicit( &a, oLoad );
                integral_type n = static_cast<integral_type>( integral_type(42) << ((nByte - 1) * 8));

                EXPECT_EQ( atomics::atomic_fetch_sub_explicit( &a, n, order ), prev);
            }
            EXPECT_EQ( atomics::atomic_load_explicit( &a, oLoad ), integral_type( 0 ));

            // fetch_or / fetch_xor / fetch_and
            for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
            {
                integral_type prev = atomics::atomic_load_explicit( &a, oLoad );
                integral_type mask = static_cast<integral_type>( integral_type(1) << nBit );

                EXPECT_EQ( atomics::atomic_fetch_or_explicit( &a, mask, order ), prev );
                prev = atomics::atomic_load_explicit( &a, oLoad );
                EXPECT_EQ( ( prev & mask), mask);

                EXPECT_EQ( atomics::atomic_fetch_and_explicit( &a, (integral_type) ~mask, order ), prev );
                prev = atomics::atomic_load_explicit( &a, oLoad );
                EXPECT_EQ( ( prev & mask), integral_type( 0 ));

                EXPECT_EQ( atomics::atomic_fetch_xor_explicit( &a, mask, order ), prev );
                prev = atomics::atomic_load_explicit( &a, oLoad );
                EXPECT_EQ( ( prev & mask), mask);
            }
            EXPECT_EQ( atomics::atomic_load_explicit( &a, oLoad ), (integral_type) -1 );
        }

        template <typename Atomic, typename Integral>
        void test_atomic_integral_(Atomic& a)
        {
            do_test_atomic_integral<Atomic, Integral >(a);

            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_relaxed );
            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acquire );
            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_release );
            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acq_rel );
            do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_seq_cst );
        }

        template <typename Integral>
        void test_atomic_integral()
        {
            typedef atomics::atomic<Integral>    atomic_type;
            atomic_type a[8];
            for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
                test_atomic_integral_<atomic_type, Integral>( a[i] );
            }
        }
        template <typename Integral>
        void test_atomic_integral_volatile()
        {
            typedef atomics::atomic<Integral> volatile atomic_type;
            atomic_type a[8];
            for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
                test_atomic_integral_<atomic_type, Integral>( a[i] );
            }
        }

        template <class AtomicBool>
        void do_test_atomic_bool(AtomicBool& a)
        {
            EXPECT_ATOMIC_IS_LOCK_FREE( a );
            atomics::atomic_store( &a, false );
            EXPECT_FALSE( a );
            EXPECT_FALSE( atomics::atomic_load( &a ));

            EXPECT_FALSE( atomics::atomic_exchange( &a, true ));
            EXPECT_TRUE( atomics::atomic_load( &a ));
            EXPECT_TRUE( atomics::atomic_exchange( &a, false ));
            EXPECT_FALSE( atomics::atomic_load( &a ));

            bool expected = false;
            EXPECT_TRUE( atomics::atomic_compare_exchange_weak( &a, &expected, true));
            EXPECT_FALSE( expected );
            EXPECT_FALSE( atomics::atomic_compare_exchange_weak( &a, &expected, false));
            EXPECT_TRUE( expected );
            EXPECT_TRUE( atomics::atomic_load( &a ));

            atomics::atomic_store( &a, false );

            expected = false;
            EXPECT_TRUE( atomics::atomic_compare_exchange_strong( &a, &expected, true));
            EXPECT_FALSE( expected );
            EXPECT_FALSE( atomics::atomic_compare_exchange_strong( &a, &expected, false));
            EXPECT_TRUE( expected );

            EXPECT_TRUE( atomics::atomic_load( &a ));

            EXPECT_TRUE( atomics::atomic_exchange( &a, false ));
        }

        template <class AtomicBool>
        void do_test_atomic_bool( AtomicBool& a, atomics::memory_order order )
        {
            const atomics::memory_order oLoad = convert_to_load_order( order );
            const atomics::memory_order oStore = convert_to_store_order( order );
            const atomics::memory_order oExchange = convert_to_exchange_order( order );

            EXPECT_ATOMIC_IS_LOCK_FREE( a );
            atomics::atomic_store_explicit( &a, false, oStore );
            EXPECT_FALSE( a );
            EXPECT_FALSE( atomics::atomic_load_explicit( &a, oLoad ));

            EXPECT_FALSE( atomics::atomic_exchange_explicit( &a, true, oExchange ));
            EXPECT_TRUE( atomics::atomic_load_explicit( &a, oLoad ));
            EXPECT_TRUE( atomics::atomic_exchange_explicit( &a, false, oExchange ));
            EXPECT_FALSE( atomics::atomic_load_explicit( &a, oLoad ));

            bool expected = false;
            EXPECT_TRUE( atomics::atomic_compare_exchange_weak_explicit( &a, &expected, true, order, atomics::memory_order_relaxed));
            EXPECT_FALSE( expected );
            EXPECT_FALSE( atomics::atomic_compare_exchange_weak_explicit( &a, &expected, false, order, atomics::memory_order_relaxed));
            EXPECT_TRUE( expected );
            EXPECT_TRUE( atomics::atomic_load_explicit( &a, oLoad ));

            atomics::atomic_store( &a, false );

            expected = false;
            EXPECT_TRUE( atomics::atomic_compare_exchange_strong_explicit( &a, &expected, true, order, atomics::memory_order_relaxed));
            EXPECT_FALSE( expected );
            EXPECT_FALSE( atomics::atomic_compare_exchange_strong_explicit( &a, &expected, false, order, atomics::memory_order_relaxed));
            EXPECT_TRUE( expected );

            EXPECT_TRUE( atomics::atomic_load_explicit( &a, oLoad ));

            EXPECT_TRUE( atomics::atomic_exchange_explicit( &a, false, oExchange ));
        }

        template <typename Atomic, typename Integral>
        void test_atomic_pointer_for_( Atomic& a, Integral * arr, Integral aSize, atomics::memory_order order )
        {
            typedef Integral integral_type;
            atomics::memory_order oLoad = convert_to_load_order(order);
            atomics::memory_order oStore = convert_to_store_order(order);
            integral_type *  p;

            atomics::atomic_store_explicit( &a, arr, oStore );
            EXPECT_EQ( *atomics::atomic_load_explicit( &a, oLoad ), 1 );

            p = arr;
            EXPECT_TRUE( atomics::atomic_compare_exchange_weak_explicit( &a, &p, arr + 5, order, atomics::memory_order_relaxed ));
            EXPECT_EQ( p, arr + 0 );
            EXPECT_EQ( *p, 1 );
            EXPECT_FALSE( atomics::atomic_compare_exchange_weak_explicit( &a, &p, arr + 3, order, atomics::memory_order_relaxed ));
            EXPECT_EQ( p, arr + 5 );
            EXPECT_EQ( *p, 6 );

            EXPECT_TRUE( atomics::atomic_compare_exchange_strong_explicit( &a, &p, arr + 3, order, atomics::memory_order_relaxed ));
            EXPECT_EQ( p, arr + 5 );
            EXPECT_EQ( *p, 6 );
            EXPECT_FALSE( atomics::atomic_compare_exchange_strong_explicit( &a, &p, arr + 5, order, atomics::memory_order_relaxed ));
            EXPECT_EQ( p, arr + 3 );
            EXPECT_EQ( *p, 4 );

            EXPECT_EQ( atomics::atomic_exchange_explicit( &a, arr, order ), arr + 3 );
            EXPECT_EQ( atomics::atomic_load_explicit( &a, oLoad ), arr );
            EXPECT_EQ( *atomics::atomic_load_explicit( &a, oLoad ), 1 );

            for ( integral_type i = 1; i < aSize; ++i ) {
                integral_type * p = atomics::atomic_load_explicit( &a, oLoad );
                EXPECT_EQ( *p, i );
                EXPECT_EQ( atomics::atomic_fetch_add_explicit( &a, 1, order ), p );
                EXPECT_EQ( *atomics::atomic_load_explicit( &a, oLoad ), i + 1 );
            }

            for ( integral_type i = aSize; i > 1; --i ) {
                integral_type * p = atomics::atomic_load_explicit( &a, oLoad );
                EXPECT_EQ( *p, i  );
                EXPECT_EQ( atomics::atomic_fetch_sub_explicit( &a, 1, order ), p );
                EXPECT_EQ( *atomics::atomic_load_explicit( &a, oLoad ), i - 1 );
            }
        }

        template <typename Integral, bool Volatile>
        void test_atomic_pointer_for()
        {
            typedef Integral integral_type;
            typedef typename add_volatile<atomics::atomic< integral_type *>, Volatile>::type    atomic_pointer;

            integral_type   arr[8];
            const integral_type aSize = sizeof(arr)/sizeof(arr[0]);
            for ( integral_type i = 0; i < aSize; ++i ) {
                arr[static_cast<size_t>(i)] = i + 1;
            }

            atomic_pointer  a;
            integral_type *  p;

            atomics::atomic_store( &a, arr );
            EXPECT_EQ( *atomics::atomic_load( &a ), 1 );

            p = arr;
            EXPECT_TRUE( atomics::atomic_compare_exchange_weak( &a, &p, arr + 5 ));
            EXPECT_EQ( p, arr + 0 );
            EXPECT_FALSE( atomics::atomic_compare_exchange_weak( &a, &p, arr + 3 ));
            EXPECT_EQ( p, arr + 5 );

            EXPECT_TRUE( atomics::atomic_compare_exchange_strong( &a, &p, arr + 3 ));
            EXPECT_EQ( p, arr + 5 );
            EXPECT_FALSE( atomics::atomic_compare_exchange_strong( &a, &p, arr + 5 ));
            EXPECT_EQ( p, arr + 3 );

            EXPECT_EQ( atomics::atomic_exchange( &a, arr ), arr + 3 );
            EXPECT_EQ( atomics::atomic_load( &a ), arr );
            EXPECT_EQ( *atomics::atomic_load( &a ), 1 );

            for ( integral_type i = 1; i < aSize; ++i ) {
                integral_type * p = atomics::atomic_load( &a );
                EXPECT_EQ( *p, i );
                EXPECT_EQ( atomics::atomic_fetch_add( &a, 1 ), p );
                EXPECT_EQ( *atomics::atomic_load( &a ), i + 1 );
            }

            for ( integral_type i = aSize; i > 1; --i ) {
                integral_type * p = atomics::atomic_load( &a );
                EXPECT_EQ( *p, i );
                EXPECT_EQ( atomics::atomic_fetch_sub( &a, 1 ), p );
                EXPECT_EQ( *atomics::atomic_load( &a ), i - 1 );
            }

            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_relaxed );
            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acquire );
            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_release );
            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acq_rel );
            test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_seq_cst );

        }

        template <typename Atomic>
        void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, atomics::memory_order order )
        {
            atomics::memory_order oLoad = convert_to_load_order(order);
            atomics::memory_order oStore = convert_to_store_order(order);
            char *  p;

            atomics::atomic_store_explicit( &a, (void *) arr, oStore );
            EXPECT_EQ( *reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )), 1 );

            p = arr;
            EXPECT_TRUE( atomics::atomic_compare_exchange_weak_explicit( &a, (void **) &p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
            EXPECT_EQ( p, arr + 0 );
            EXPECT_EQ( *p, 1 );
            EXPECT_FALSE( atomics::atomic_compare_exchange_weak_explicit( &a, (void **) &p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
            EXPECT_EQ( p, arr + 5 );
            EXPECT_EQ( *p, 6 );

            EXPECT_TRUE( atomics::atomic_compare_exchange_strong_explicit( &a, (void **) &p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
            EXPECT_EQ( p, arr + 5 );
            EXPECT_EQ( *p, 6 );
            EXPECT_FALSE( atomics::atomic_compare_exchange_strong_explicit( &a, (void **) &p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
            EXPECT_EQ( p, arr + 3 );
            EXPECT_EQ( *p, 4 );

            EXPECT_EQ( reinterpret_cast<char *>(atomics::atomic_exchange_explicit( &a, (void *) arr, order )), arr + 3 );
            EXPECT_EQ( reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )), arr );
            EXPECT_EQ( *reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )), 1 );

            for ( char i = 1; i < aSize; ++i ) {
                EXPECT_EQ( *reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )), i );
                atomics::atomic_fetch_add_explicit( &a, 1, order );
                EXPECT_EQ( *reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )), i + 1 );
            }

            for ( char i = aSize; i > 1; --i ) {
                EXPECT_EQ( *reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )), i );
                atomics::atomic_fetch_sub_explicit( &a, 1, order );
                EXPECT_EQ( *reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )), i - 1 );
            }
        }

        template <bool Volatile>
        void do_test_atomic_pointer_void()
        {
            typedef typename add_volatile<atomics::atomic< void *>, Volatile>::type    atomic_pointer;

            char   arr[8];
            const char aSize = sizeof(arr)/sizeof(arr[0]);
            for ( char i = 0; i < aSize; ++i ) {
                arr[static_cast<size_t>(i)] = i + 1;
            }

            atomic_pointer  a;
            char *  p;

            atomics::atomic_store( &a, (void *) arr );
            EXPECT_EQ( *reinterpret_cast<char *>(atomics::atomic_load( &a )), 1 );

            p = arr;
            EXPECT_TRUE( atomics::atomic_compare_exchange_weak( &a, (void **) &p, (void *)(arr + 5)));
            EXPECT_EQ( p, arr + 0 );
            EXPECT_FALSE( atomics::atomic_compare_exchange_weak( &a, (void **) &p, (void *)(arr + 3)));
            EXPECT_EQ( p, arr + 5 );

            EXPECT_TRUE( atomics::atomic_compare_exchange_strong( &a, (void **) &p, (void *)(arr + 3)));
            EXPECT_EQ( p, arr + 5 );
            EXPECT_FALSE( atomics::atomic_compare_exchange_strong( &a, (void **) &p, (void *)(arr + 5)));
            EXPECT_EQ( p, arr + 3 );

            EXPECT_EQ( reinterpret_cast<char *>( atomics::atomic_exchange( &a, (void *) arr )), arr + 3 );
            EXPECT_EQ( reinterpret_cast<char *>( atomics::atomic_load( &a )), arr );
            EXPECT_EQ( *reinterpret_cast<char *>(atomics::atomic_load( &a )), 1 );

            for ( char i = 1; i < aSize; ++i ) {
                EXPECT_EQ( *reinterpret_cast<char *>(atomics::atomic_load( &a )), i );
                atomics::atomic_fetch_add( &a, 1 );
                EXPECT_EQ( *reinterpret_cast<char *>(atomics::atomic_load( &a )), i + 1 );
            }

            for ( char i = aSize; i > 1; --i ) {
                EXPECT_EQ( *reinterpret_cast<char *>(atomics::atomic_load( &a )), i );
                atomics::atomic_fetch_sub( &a, 1 );
                EXPECT_EQ( *reinterpret_cast<char *>(atomics::atomic_load( &a )), i - 1 );
            }

            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_relaxed );
            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acquire );
            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_release );
            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acq_rel );
            do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_seq_cst );
        }

    public:
        void test_atomic_flag()
        {
            atomics::atomic_flag flags[8];
            for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
                do_test_atomic_flag( flags[i] );
        }
        void test_atomic_flag_volatile()
        {
            atomics::atomic_flag volatile flags[8];
            for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
                do_test_atomic_flag( flags[i] );
        }

        template <typename AtomicBool>
        void test_atomic_bool_()
        {
            AtomicBool a[8];
            for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
                do_test_atomic_bool( a[i] );

                do_test_atomic_bool( a[i], atomics::memory_order_relaxed );
                do_test_atomic_bool( a[i], atomics::memory_order_acquire );
                do_test_atomic_bool( a[i], atomics::memory_order_release );
                do_test_atomic_bool( a[i], atomics::memory_order_acq_rel );
                do_test_atomic_bool( a[i], atomics::memory_order_seq_cst );
            }
        }

        void test_atomic_bool()
        {
            test_atomic_bool_<atomics::atomic<bool> >();
        }
        void test_atomic_bool_volatile()
        {
            test_atomic_bool_<atomics::atomic<bool> volatile >();
        }
    };

    TEST_F( cxx11_atomic_func, atomic_char )
    {
        test_atomic_integral<char>();
    }
    TEST_F( cxx11_atomic_func, atomic_char_volatile )
    {
        test_atomic_integral_volatile<char>();
    }
    TEST_F( cxx11_atomic_func, atomic_unsigned_char )
    {
        test_atomic_integral<unsigned char>();
    }
    TEST_F( cxx11_atomic_func, atomic_unsigned_char_volatile )
    {
        test_atomic_integral_volatile<unsigned char>();
    }
    TEST_F( cxx11_atomic_func, atomic_signed_char )
    {
        test_atomic_integral<signed char>();
    }
    TEST_F( cxx11_atomic_func, atomic_signed_char_volatile )
    {
        test_atomic_integral_volatile<signed char>();
    }
    TEST_F( cxx11_atomic_func, atomic_short_int )
    {
        test_atomic_integral<short int>();
    }
    TEST_F( cxx11_atomic_func, atomic_short_int_volatile )
    {
        test_atomic_integral_volatile<short int>();
    }
    TEST_F( cxx11_atomic_func, atomic_unsigned_short_int )
    {
        test_atomic_integral<unsigned short int>();
    }
    TEST_F( cxx11_atomic_func, atomic_unsigned_short_int_volatile )
    {
        test_atomic_integral_volatile<unsigned short int>();
    }
    TEST_F( cxx11_atomic_func, atomic_int )
    {
        test_atomic_integral<int>();
    }
    TEST_F( cxx11_atomic_func, atomic_int_volatile )
    {
        test_atomic_integral_volatile<int>();
    }
    TEST_F( cxx11_atomic_func, atomic_unsigned_int )
    {
        test_atomic_integral<unsigned int>();
    }
    TEST_F( cxx11_atomic_func, atomic_unsigned_int_volatile )
    {
        test_atomic_integral_volatile<unsigned int>();
    }
    TEST_F( cxx11_atomic_func, atomic_long )
    {
        test_atomic_integral<long>();
    }
    TEST_F( cxx11_atomic_func, atomic_long_volatile )
    {
        test_atomic_integral_volatile<long>();
    }
    TEST_F( cxx11_atomic_func, atomic_unsigned_long )
    {
        test_atomic_integral<unsigned long>();
    }
    TEST_F( cxx11_atomic_func, atomic_unsigned_long_volatile )
    {
        test_atomic_integral_volatile<unsigned long>();
    }
    TEST_F( cxx11_atomic_func, atomic_long_long )
    {
        test_atomic_integral<long long>();
    }
    TEST_F( cxx11_atomic_func, atomic_long_long_volatile )
    {
        test_atomic_integral_volatile<long long>();
    }
    TEST_F( cxx11_atomic_func, atomic_unsigned_long_long )
    {
        test_atomic_integral<unsigned long long>();
    }
    TEST_F( cxx11_atomic_func, atomic_unsigned_long_long_volatile )
    {
        test_atomic_integral_volatile<unsigned long long>();
    }

#if !( CDS_COMPILER == CDS_COMPILER_CLANG && CDS_COMPILER_VERSION < 40000 )
    // Clang < 4.0 errors on atomic<void*> fetch_add/fetch_sub
    TEST_F( cxx11_atomic_func, atomic_pointer_void )
    {
        do_test_atomic_pointer_void<false>();
    }
    TEST_F( cxx11_atomic_func, atomic_pointer_void_volatile )
    {
        do_test_atomic_pointer_void<true>();
    }
#endif
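// For reference, the construct the guard above works around is roughly:
//
//     atomics::atomic<void*> p;
//     atomics::atomic_fetch_add( &p, 1 );   // pointer arithmetic on void* is not part of the
//                                           // standard atomic<T*> interface; it relies on a
//                                           // GCC-style extension that older Clang rejects
//
// which is why the void* tests stay disabled for CDS_COMPILER_VERSION < 40000.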

    TEST_F( cxx11_atomic_func, atomic_pointer_char )
    {
        test_atomic_pointer_for<char, false>();
    }
    TEST_F( cxx11_atomic_func, atomic_pointer_char_volatile )
    {
        test_atomic_pointer_for<char, true>();
    }
    TEST_F( cxx11_atomic_func, atomic_pointer_short )
    {
        test_atomic_pointer_for<short, false>();
    }
    TEST_F( cxx11_atomic_func, atomic_pointer_short_volatile )
    {
        test_atomic_pointer_for<short, true>();
    }
    TEST_F( cxx11_atomic_func, atomic_pointer_int )
    {
        test_atomic_pointer_for<int, false>();
    }
    TEST_F( cxx11_atomic_func, atomic_pointer_int_volatile )
    {
        test_atomic_pointer_for<int, true>();
    }
    TEST_F( cxx11_atomic_func, atomic_pointer_long )
    {
        test_atomic_pointer_for<long, false>();
    }
    TEST_F( cxx11_atomic_func, atomic_pointer_long_volatile )
    {
        test_atomic_pointer_for<long, true>();
    }
    TEST_F( cxx11_atomic_func, atomic_pointer_long_long )
    {
        test_atomic_pointer_for<long long, false>();
    }
    TEST_F( cxx11_atomic_func, atomic_pointer_long_long_volatile )
    {
        test_atomic_pointer_for<long long, true>();
    }

    TEST_F( cxx11_atomic_func, test_atomic_fence )
    {
        atomics::atomic_thread_fence(atomics::memory_order_relaxed );
        atomics::atomic_thread_fence(atomics::memory_order_acquire );
        atomics::atomic_thread_fence(atomics::memory_order_release );
        atomics::atomic_thread_fence(atomics::memory_order_acq_rel );
        atomics::atomic_thread_fence(atomics::memory_order_seq_cst );

        atomics::atomic_signal_fence(atomics::memory_order_relaxed );
        atomics::atomic_signal_fence(atomics::memory_order_acquire );
        atomics::atomic_signal_fence(atomics::memory_order_release );
        atomics::atomic_signal_fence(atomics::memory_order_acq_rel );
        atomics::atomic_signal_fence(atomics::memory_order_seq_cst );
    }
}   // namespace misc


#endif // #ifndef CDS_USE_BOOST_ATOMIC