From bca0ba0915c24c008381dca02cb954b725d7817c Mon Sep 17 00:00:00 2001
From: root <root@dw-6.eecs.uci.edu>
Date: Fri, 28 Jun 2019 12:32:51 -0700
Subject: [PATCH] Reformat with uncrustify (tabs, incl. include/) and fix memory issues: NULL-guard ModelExecution::get_cv, use snapshot-allocated snapmutex/snapcondition_variable, build with -O0 for debugging

---
 Makefile                   |    2 +
 common.mk                  |    2 +-
 execution.cc               |    3 +-
 execution.h                |    8 +-
 futex.cc                   |    4 +-
 include/atomic2            |    2 +-
 include/cmodelint.h        |   38 +-
 include/condition_variable |   16 +-
 include/impatomic.h        | 4069 +++++++++++++++++++-----------------
 include/librace.h          |   40 +-
 include/memoryorder.h      |    6 +-
 include/model-assert.h     |    2 +-
 include/modeltypes.h       |    4 +-
 include/mutex.h            |   50 +-
 include/mypthread.h        |   18 +-
 include/stdatomic2.h       |    4 +-
 include/threads.h          |   20 +-
 include/wildcard.h         |   10 +-
 libthreads.cc              |    2 +-
 mutex.cc                   |    3 +-
 mymemory.cc                |    8 +-
 pthread.cc                 |   26 +-
 22 files changed, 2256 insertions(+), 2081 deletions(-)

diff --git a/Makefile b/Makefile
index 771c187a..96fe63be 100644
--- a/Makefile
+++ b/Makefile
@@ -92,3 +92,5 @@ pdfs: $(patsubst %.dot,%.pdf,$(wildcard *.dot))
 tabbing:
 	uncrustify -c C.cfg --no-backup --replace *.cc
 	uncrustify -c C.cfg --no-backup --replace *.h
+	uncrustify -c C.cfg --no-backup --replace include/*
+
diff --git a/common.mk b/common.mk
index bc068dff..aca498c7 100644
--- a/common.mk
+++ b/common.mk
@@ -8,7 +8,7 @@ UNAME := $(shell uname)
 LIB_NAME := model
 LIB_SO := lib$(LIB_NAME).so
 
-CPPFLAGS += -Wall -g -O3
+CPPFLAGS += -Wall -g -O0
 
 # Mac OSX options
 ifeq ($(UNAME), Darwin)
diff --git a/execution.cc b/execution.cc
index 2e744251..b20ad7d5 100644
--- a/execution.cc
+++ b/execution.cc
@@ -1300,7 +1300,8 @@ ModelAction * ModelExecution::get_parent_action(thread_id_t tid) const
  */
 ClockVector * ModelExecution::get_cv(thread_id_t tid) const
 {
-	return get_parent_action(tid)->get_cv();
+	ModelAction *firstaction=get_parent_action(tid);
+	return firstaction != NULL ? firstaction->get_cv() : NULL;
 }
 
 bool valequals(uint64_t val1, uint64_t val2, int size) {
diff --git a/execution.h b/execution.h
index 4d5ef866..fa2b78b8 100644
--- a/execution.h
+++ b/execution.h
@@ -103,8 +103,8 @@ public:
 	action_list_t * get_action_trace() { return &action_trace; }
 	Fuzzer * getFuzzer();
 	CycleGraph * const get_mo_graph() { return mo_graph; }
-	HashTable<pthread_cond_t *, cdsc::condition_variable *, uintptr_t, 4> * getCondMap() {return &cond_map;}
-	HashTable<pthread_mutex_t *, cdsc::mutex *, uintptr_t, 4> * getMutexMap() {return &mutex_map;}
+	HashTable<pthread_cond_t *, cdsc::snapcondition_variable *, uintptr_t, 4> * getCondMap() {return &cond_map;}
+	HashTable<pthread_mutex_t *, cdsc::snapmutex *, uintptr_t, 4> * getMutexMap() {return &mutex_map;}
 	ModelAction * check_current_action(ModelAction *curr);
 
 	SNAPSHOTALLOC
@@ -164,8 +164,8 @@ private:
 
 	HashTable<void *, SnapVector<action_list_t> *, uintptr_t, 4> obj_thrd_map;
 
-	HashTable<pthread_mutex_t *, cdsc::mutex *, uintptr_t, 4> mutex_map;
-	HashTable<pthread_cond_t *, cdsc::condition_variable *, uintptr_t, 4> cond_map;
+	HashTable<pthread_mutex_t *, cdsc::snapmutex *, uintptr_t, 4> mutex_map;
+	HashTable<pthread_cond_t *, cdsc::snapcondition_variable *, uintptr_t, 4> cond_map;
 
 	/**
 	 * List of pending release sequences. Release sequences might be
diff --git a/futex.cc b/futex.cc
index b9298111..0647337b 100644
--- a/futex.cc
+++ b/futex.cc
@@ -45,8 +45,8 @@ namespace std _GLIBCXX_VISIBILITY(default)
 
 		ModelExecution *execution = model->get_execution();
 
-		cdsc::condition_variable *v = new cdsc::condition_variable();
-		cdsc::mutex *m = new cdsc::mutex();
+		cdsc::snapcondition_variable *v = new cdsc::snapcondition_variable();
+		cdsc::snapmutex *m = new cdsc::snapmutex();
 
 		execution->getCondMap()->put( (pthread_cond_t *) __addr, v);
 		execution->getMutexMap()->put( (pthread_mutex_t *) __addr, m);
diff --git a/include/atomic2 b/include/atomic2
index 5984e722..17ab8f28 100644
--- a/include/atomic2
+++ b/include/atomic2
@@ -8,4 +8,4 @@
 
 #include "impatomic.h"
 
-#endif /* __CXX_ATOMIC__ */
+#endif	/* __CXX_ATOMIC__ */
diff --git a/include/cmodelint.h b/include/cmodelint.h
index 78da6bd8..6b180a9f 100644
--- a/include/cmodelint.h
+++ b/include/cmodelint.h
@@ -14,7 +14,7 @@ extern "C" {
 typedef int bool;
 #endif
 
-  
+
 uint64_t model_read_action(void * obj, memory_order ord);
 void model_write_action(void * obj, memory_order ord, uint64_t val);
 void model_init_action(void * obj, uint64_t val);
@@ -25,7 +25,7 @@ void model_rmwc_action(void *obj, memory_order ord);
 void model_fence_action(memory_order ord);
 
 uint64_t model_rmwr_action_helper(void *obj, int atomic_index, const char *position);
-  uint64_t model_rmwrcas_action_helper(void *obj, int atomic_index, uint64_t oval, int size, const char *position);
+uint64_t model_rmwrcas_action_helper(void *obj, int atomic_index, uint64_t oval, int size, const char *position);
 void model_rmw_action_helper(void *obj, uint64_t val, int atomic_index, const char *position);
 void model_rmwc_action_helper(void *obj, int atomic_index, const char *position);
 // void model_fence_action_helper(int atomic_index);
@@ -80,23 +80,23 @@ uint32_t cds_atomic_fetch_xor32(void* addr, uint32_t val, int atomic_index, cons
 uint64_t cds_atomic_fetch_xor64(void* addr, uint64_t val, int atomic_index, const char * position);
 
 // cds atomic compare and exchange (strong)
-uint8_t cds_atomic_compare_exchange8_v1(void* addr, uint8_t expected, uint8_t desire, 
-		int atomic_index_succ, int atomic_index_fail, const char *position);
-uint16_t cds_atomic_compare_exchange16_v1(void* addr, uint16_t expected, uint16_t desire, 
-		int atomic_index_succ, int atomic_index_fail, const char *position);
-uint32_t cds_atomic_compare_exchange32_v1(void* addr, uint32_t expected, uint32_t desire, 
-		int atomic_index_succ, int atomic_index_fail, const char *position);
-uint64_t cds_atomic_compare_exchange64_v1(void* addr, uint64_t expected, uint64_t desire, 
-		int atomic_index_succ, int atomic_index_fail, const char *position);
-
-bool cds_atomic_compare_exchange8_v2(void* addr, uint8_t* expected, uint8_t desired, 
-		int atomic_index_succ, int atomic_index_fail, const char *position);
-bool cds_atomic_compare_exchange16_v2(void* addr, uint16_t* expected, uint16_t desired, 
-		int atomic_index_succ, int atomic_index_fail, const char *position);
-bool cds_atomic_compare_exchange32_v2(void* addr, uint32_t* expected, uint32_t desired, 
-		int atomic_index_succ, int atomic_index_fail, const char *position);
-bool cds_atomic_compare_exchange64_v2(void* addr, uint64_t* expected, uint64_t desired, 
-		int atomic_index_succ, int atomic_index_fail, const char *position);
+uint8_t cds_atomic_compare_exchange8_v1(void* addr, uint8_t expected, uint8_t desire,
+																				int atomic_index_succ, int atomic_index_fail, const char *position);
+uint16_t cds_atomic_compare_exchange16_v1(void* addr, uint16_t expected, uint16_t desire,
+																					int atomic_index_succ, int atomic_index_fail, const char *position);
+uint32_t cds_atomic_compare_exchange32_v1(void* addr, uint32_t expected, uint32_t desire,
+																					int atomic_index_succ, int atomic_index_fail, const char *position);
+uint64_t cds_atomic_compare_exchange64_v1(void* addr, uint64_t expected, uint64_t desire,
+																					int atomic_index_succ, int atomic_index_fail, const char *position);
+
+bool cds_atomic_compare_exchange8_v2(void* addr, uint8_t* expected, uint8_t desired,
+																		 int atomic_index_succ, int atomic_index_fail, const char *position);
+bool cds_atomic_compare_exchange16_v2(void* addr, uint16_t* expected, uint16_t desired,
+																			int atomic_index_succ, int atomic_index_fail, const char *position);
+bool cds_atomic_compare_exchange32_v2(void* addr, uint32_t* expected, uint32_t desired,
+																			int atomic_index_succ, int atomic_index_fail, const char *position);
+bool cds_atomic_compare_exchange64_v2(void* addr, uint64_t* expected, uint64_t desired,
+																			int atomic_index_succ, int atomic_index_fail, const char *position);
 
 // cds atomic thread fence
 void cds_atomic_thread_fence(int atomic_index, const char * position);
diff --git a/include/condition_variable b/include/condition_variable
index d6a70d47..f992dcb1 100644
--- a/include/condition_variable
+++ b/include/condition_variable
@@ -9,16 +9,24 @@ namespace cdsc {
 	};
 
 	class condition_variable {
-	public:
+public:
 		condition_variable();
 		~condition_variable();
 		void notify_one();
 		void notify_all();
 		void wait(mutex& lock);
-		
-	private:
+
+private:
 		struct condition_variable_state state;
 	};
+
+	class snapcondition_variable: public condition_variable {
+public:
+		snapcondition_variable() : condition_variable() {
+		}
+
+		SNAPSHOTALLOC
+	};
 }
 
-#endif /* __CXX_CONDITION_VARIABLE__ */
+#endif	/* __CXX_CONDITION_VARIABLE__ */
diff --git a/include/impatomic.h b/include/impatomic.h
index 70b77de2..02239d5f 100644
--- a/include/impatomic.h
+++ b/include/impatomic.h
@@ -24,16 +24,16 @@ namespace std {
 typedef struct atomic_flag
 {
 #ifdef __cplusplus
-    bool test_and_set( memory_order = memory_order_seq_cst ) volatile;
-    void clear( memory_order = memory_order_seq_cst ) volatile;
+	bool test_and_set( memory_order = memory_order_seq_cst ) volatile;
+	void clear( memory_order = memory_order_seq_cst ) volatile;
 
-    CPP0X( atomic_flag() = default; )
-    CPP0X( atomic_flag( const atomic_flag& ) = delete; )
-    atomic_flag& operator =( const atomic_flag& ) CPP0X(=delete);
+	CPP0X( atomic_flag() = default; )
+	CPP0X( atomic_flag( const atomic_flag& ) = delete; )
+	atomic_flag& operator =( const atomic_flag& ) CPP0X(=delete);
 
-CPP0X(private:)
+	CPP0X(private:)
 #endif
-    bool __f__;
+	bool __f__;
 } atomic_flag;
 
 #define ATOMIC_FLAG_INIT { false }
@@ -44,14 +44,14 @@ extern "C" {
 
 extern bool atomic_flag_test_and_set( volatile atomic_flag* );
 extern bool atomic_flag_test_and_set_explicit
-( volatile atomic_flag*, memory_order );
+	( volatile atomic_flag*, memory_order );
 extern void atomic_flag_clear( volatile atomic_flag* );
 extern void atomic_flag_clear_explicit
-( volatile atomic_flag*, memory_order );
+	( volatile atomic_flag*, memory_order );
 extern void __atomic_flag_wait__
-( volatile atomic_flag* );
+	( volatile atomic_flag* );
 extern void __atomic_flag_wait_explicit__
-( volatile atomic_flag*, memory_order );
+	( volatile atomic_flag*, memory_order );
 
 #ifdef __cplusplus
 }
@@ -78,55 +78,55 @@ inline void atomic_flag::clear( memory_order __x__ ) volatile
         __g__=flag, __m__=modified, __o__=operation, __r__=result,
         __p__=pointer to field, __v__=value (for single evaluation),
         __x__=memory-ordering, and __y__=memory-ordering.
-*/
+ */
 
 #define _ATOMIC_LOAD_( __a__, __x__ )                                         \
-        ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);   \
-                __typeof__((__a__)->__f__) __r__ = (__typeof__((__a__)->__f__))model_read_action((void *)__p__, __x__);  \
-                __r__; })
+	({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);   \
+		 __typeof__((__a__)->__f__)__r__ = (__typeof__((__a__)->__f__))model_read_action((void *)__p__, __x__);  \
+		 __r__; })
 
 #define _ATOMIC_STORE_( __a__, __m__, __x__ )                                 \
-        ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);   \
-                __typeof__(__m__) __v__ = (__m__);                            \
-                model_write_action((void *) __p__,  __x__, (uint64_t) __v__); \
-                __v__ = __v__; /* Silence clang (-Wunused-value) */           \
-         })
+	({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);   \
+		 __typeof__(__m__)__v__ = (__m__);                            \
+		 model_write_action((void *) __p__,  __x__, (uint64_t) __v__); \
+		 __v__ = __v__;	/* Silence clang (-Wunused-value) */           \
+	 })
 
 
 #define _ATOMIC_INIT_( __a__, __m__ )                                         \
-        ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);   \
-                __typeof__(__m__) __v__ = (__m__);                            \
-                model_init_action((void *) __p__,  (uint64_t) __v__);         \
-                __v__ = __v__; /* Silence clang (-Wunused-value) */           \
-         })
+	({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);   \
+		 __typeof__(__m__)__v__ = (__m__);                            \
+		 model_init_action((void *) __p__,  (uint64_t) __v__);         \
+		 __v__ = __v__;	/* Silence clang (-Wunused-value) */           \
+	 })
 
 #define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ )                         \
-        ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);   \
-        __typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__); \
-        __typeof__(__m__) __v__ = (__m__);                                    \
-        __typeof__((__a__)->__f__) __copy__= __old__;                         \
-        __copy__ __o__ __v__;                                                 \
-        model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);          \
-        __old__ = __old__; /* Silence clang (-Wunused-value) */               \
-         })
+	({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);   \
+		 __typeof__((__a__)->__f__)__old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__); \
+		 __typeof__(__m__)__v__ = (__m__);                                    \
+		 __typeof__((__a__)->__f__)__copy__= __old__;                         \
+		 __copy__ __o__ __v__;                                                 \
+		 model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);          \
+		 __old__ = __old__;	/* Silence clang (-Wunused-value) */               \
+	 })
 
 /* No spurious failure for now */
 #define _ATOMIC_CMPSWP_WEAK_ _ATOMIC_CMPSWP_
 
 #define _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ )                         \
-        ({ volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);   \
-                __typeof__(__e__) __q__ = (__e__);                            \
-                __typeof__(__m__) __v__ = (__m__);                            \
-                bool __r__;                                                   \
-                __typeof__((__a__)->__f__) __t__=(__typeof__((__a__)->__f__)) model_rmwrcas_action((void *)__p__, __x__, (uint64_t) * __q__, sizeof((__a__)->__f__)); \
-                if (__t__ == * __q__ ) {;                                     \
-                        model_rmw_action((void *)__p__, __x__, (uint64_t) __v__); __r__ = true; } \
-                else {  model_rmwc_action((void *)__p__, __x__); *__q__ = __t__;  __r__ = false;} \
-                __r__; })
+	({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);   \
+		 __typeof__(__e__)__q__ = (__e__);                            \
+		 __typeof__(__m__)__v__ = (__m__);                            \
+		 bool __r__;                                                   \
+		 __typeof__((__a__)->__f__)__t__=(__typeof__((__a__)->__f__))model_rmwrcas_action((void *)__p__, __x__, (uint64_t) *__q__, sizeof((__a__)->__f__)); \
+		 if (__t__ == *__q__ ) {;                                     \
+														model_rmw_action((void *)__p__, __x__, (uint64_t) __v__); __r__ = true; } \
+		 else {  model_rmwc_action((void *)__p__, __x__); *__q__ = __t__;  __r__ = false;} \
+		 __r__; })
 
 #define _ATOMIC_FENCE_( __x__ ) \
 	({ model_fence_action(__x__);})
- 
+
 
 #define ATOMIC_CHAR_LOCK_FREE 1
 #define ATOMIC_CHAR16_T_LOCK_FREE 1
@@ -141,1078 +141,1091 @@ inline void atomic_flag::clear( memory_order __x__ ) volatile
 typedef struct atomic_bool
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( bool, memory_order = memory_order_seq_cst ) volatile;
-    bool load( memory_order = memory_order_seq_cst ) volatile;
-    bool exchange( bool, memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak ( bool&, bool, memory_order, memory_order ) volatile;
-    bool compare_exchange_strong ( bool&, bool, memory_order, memory_order ) volatile;
-    bool compare_exchange_weak ( bool&, bool,
-                        memory_order = memory_order_seq_cst) volatile;
-    bool compare_exchange_strong ( bool&, bool,
-                        memory_order = memory_order_seq_cst) volatile;
-
-    CPP0X( atomic_bool() = delete; )
-    CPP0X( constexpr explicit atomic_bool( bool __v__ ) : __f__( __v__ ) { } )
-    CPP0X( atomic_bool( const atomic_bool& ) = delete; )
-    atomic_bool& operator =( const atomic_bool& ) CPP0X(=delete);
-
-    bool operator =( bool __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_bool*, bool,
-                                       memory_order );
-    friend bool atomic_load_explicit( volatile atomic_bool*, memory_order );
-    friend bool atomic_exchange_explicit( volatile atomic_bool*, bool,
-                                      memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_bool*, bool*, bool,
-                                              memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_bool*, bool*, bool,
-                                              memory_order, memory_order );
-
-CPP0X(private:)
+	bool is_lock_free() const volatile;
+	void store( bool, memory_order = memory_order_seq_cst ) volatile;
+	bool load( memory_order = memory_order_seq_cst ) volatile;
+	bool exchange( bool, memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_weak ( bool&, bool, memory_order, memory_order ) volatile;
+	bool compare_exchange_strong ( bool&, bool, memory_order, memory_order ) volatile;
+	bool compare_exchange_weak ( bool&, bool,
+															 memory_order = memory_order_seq_cst) volatile;
+	bool compare_exchange_strong ( bool&, bool,
+																 memory_order = memory_order_seq_cst) volatile;
+
+	CPP0X( atomic_bool() = delete; )
+	CPP0X( constexpr explicit atomic_bool( bool __v__ ) : __f__( __v__ ) {
+		} )
+	CPP0X( atomic_bool( const atomic_bool& ) = delete; )
+	atomic_bool& operator =( const atomic_bool& ) CPP0X(=delete);
+
+	bool operator =( bool __v__ ) volatile
+	{ store( __v__ ); return __v__; }
+
+	friend void atomic_store_explicit( volatile atomic_bool*, bool,
+																		 memory_order );
+	friend bool atomic_load_explicit( volatile atomic_bool*, memory_order );
+	friend bool atomic_exchange_explicit( volatile atomic_bool*, bool,
+																				memory_order );
+	friend bool atomic_compare_exchange_weak_explicit( volatile atomic_bool*, bool*, bool,
+																										 memory_order, memory_order );
+	friend bool atomic_compare_exchange_strong_explicit( volatile atomic_bool*, bool*, bool,
+																											 memory_order, memory_order );
+
+	CPP0X(private:)
 #endif
-    bool __f__;
+	bool __f__;
 } atomic_bool;
 
 
 typedef struct atomic_address
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( void*, memory_order = memory_order_seq_cst ) volatile;
-    void* load( memory_order = memory_order_seq_cst ) volatile;
-    void* exchange( void*, memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( void*&, void*, memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( void*&, void*, memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( void*&, void*,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( void*&, void*,
-                       memory_order = memory_order_seq_cst ) volatile;
-    void* fetch_add( ptrdiff_t, memory_order = memory_order_seq_cst ) volatile;
-    void* fetch_sub( ptrdiff_t, memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_address() = default; )
-    CPP0X( constexpr explicit atomic_address( void* __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_address( const atomic_address& ) = delete; )
-    atomic_address& operator =( const atomic_address & ) CPP0X(=delete);
-
-    void* operator =( void* __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    void* operator +=( ptrdiff_t __v__ ) volatile
-    { return fetch_add( __v__ ); }
-
-    void* operator -=( ptrdiff_t __v__ ) volatile
-    { return fetch_sub( __v__ ); }
-
-    friend void atomic_store_explicit( volatile atomic_address*, void*,
-                                       memory_order );
-    friend void* atomic_load_explicit( volatile atomic_address*, memory_order );
-    friend void* atomic_exchange_explicit( volatile atomic_address*, void*,
-                                       memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_address*,
-                              void**, void*, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_address*,
-                              void**, void*, memory_order, memory_order );
-    friend void* atomic_fetch_add_explicit( volatile atomic_address*, ptrdiff_t,
-                                            memory_order );
-    friend void* atomic_fetch_sub_explicit( volatile atomic_address*, ptrdiff_t,
-                                            memory_order );
-
-CPP0X(private:)
+	bool is_lock_free() const volatile;
+	void store( void*, memory_order = memory_order_seq_cst ) volatile;
+	void* load( memory_order = memory_order_seq_cst ) volatile;
+	void* exchange( void*, memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_weak( void*&, void*, memory_order, memory_order ) volatile;
+	bool compare_exchange_strong( void*&, void*, memory_order, memory_order ) volatile;
+	bool compare_exchange_weak( void*&, void*,
+															memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_strong( void*&, void*,
+																memory_order = memory_order_seq_cst ) volatile;
+	void* fetch_add( ptrdiff_t, memory_order = memory_order_seq_cst ) volatile;
+	void* fetch_sub( ptrdiff_t, memory_order = memory_order_seq_cst ) volatile;
+
+	CPP0X( atomic_address() = default; )
+	CPP0X( constexpr explicit atomic_address( void* __v__ ) : __f__( __v__) {
+		} )
+	CPP0X( atomic_address( const atomic_address& ) = delete; )
+	atomic_address& operator =( const atomic_address & ) CPP0X(=delete);
+
+	void* operator =( void* __v__ ) volatile
+	{ store( __v__ ); return __v__; }
+
+	void* operator +=( ptrdiff_t __v__ ) volatile
+	{ return fetch_add( __v__ ); }
+
+	void* operator -=( ptrdiff_t __v__ ) volatile
+	{ return fetch_sub( __v__ ); }
+
+	friend void atomic_store_explicit( volatile atomic_address*, void*,
+																		 memory_order );
+	friend void* atomic_load_explicit( volatile atomic_address*, memory_order );
+	friend void* atomic_exchange_explicit( volatile atomic_address*, void*,
+																				 memory_order );
+	friend bool atomic_compare_exchange_weak_explicit( volatile atomic_address*,
+																										 void**, void*, memory_order, memory_order );
+	friend bool atomic_compare_exchange_strong_explicit( volatile atomic_address*,
+																											 void**, void*, memory_order, memory_order );
+	friend void* atomic_fetch_add_explicit( volatile atomic_address*, ptrdiff_t,
+																					memory_order );
+	friend void* atomic_fetch_sub_explicit( volatile atomic_address*, ptrdiff_t,
+																					memory_order );
+
+	CPP0X(private:)
 #endif
-    void* __f__;
+	void* __f__;
 } atomic_address;
 
 
 typedef struct atomic_char
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( char,
-                memory_order = memory_order_seq_cst ) volatile;
-    char load( memory_order = memory_order_seq_cst ) volatile;
-    char exchange( char,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( char&, char,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( char&, char,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( char&, char,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( char&, char,
-                       memory_order = memory_order_seq_cst ) volatile;
-    char fetch_add( char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    char fetch_sub( char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    char fetch_and( char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    char fetch_or( char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    char fetch_xor( char,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_char() = default; )
-    CPP0X( constexpr atomic_char( char __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_char( const atomic_char& ) = delete; )
-    atomic_char& operator =( const atomic_char& ) CPP0X(=delete);
-
-    char operator =( char __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    char operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    char operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    char operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    char operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    char operator +=( char __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    char operator -=( char __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    char operator &=( char __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    char operator |=( char __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    char operator ^=( char __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_char*, char,
-                                       memory_order );
-    friend char atomic_load_explicit( volatile atomic_char*,
-                                             memory_order );
-    friend char atomic_exchange_explicit( volatile atomic_char*,
-                                             char, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_char*,
-                      char*, char, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_char*,
-                      char*, char, memory_order, memory_order );
-    friend char atomic_fetch_add_explicit( volatile atomic_char*,
-                                                  char, memory_order );
-    friend char atomic_fetch_sub_explicit( volatile atomic_char*,
-                                                  char, memory_order );
-    friend char atomic_fetch_and_explicit( volatile atomic_char*,
-                                                  char, memory_order );
-    friend char atomic_fetch_or_explicit(  volatile atomic_char*,
-                                                  char, memory_order );
-    friend char atomic_fetch_xor_explicit( volatile atomic_char*,
-                                                  char, memory_order );
-
-CPP0X(private:)
+	bool is_lock_free() const volatile;
+	void store( char,
+							memory_order = memory_order_seq_cst ) volatile;
+	char load( memory_order = memory_order_seq_cst ) volatile;
+	char exchange( char,
+								 memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_weak( char&, char,
+															memory_order, memory_order ) volatile;
+	bool compare_exchange_strong( char&, char,
+																memory_order, memory_order ) volatile;
+	bool compare_exchange_weak( char&, char,
+															memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_strong( char&, char,
+																memory_order = memory_order_seq_cst ) volatile;
+	char fetch_add( char,
+									memory_order = memory_order_seq_cst ) volatile;
+	char fetch_sub( char,
+									memory_order = memory_order_seq_cst ) volatile;
+	char fetch_and( char,
+									memory_order = memory_order_seq_cst ) volatile;
+	char fetch_or( char,
+								 memory_order = memory_order_seq_cst ) volatile;
+	char fetch_xor( char,
+									memory_order = memory_order_seq_cst ) volatile;
+
+	CPP0X( atomic_char() = default; )
+	CPP0X( constexpr atomic_char( char __v__ ) : __f__( __v__) {
+		} )
+	CPP0X( atomic_char( const atomic_char& ) = delete; )
+	atomic_char& operator =( const atomic_char& ) CPP0X(=delete);
+
+	char operator =( char __v__ ) volatile
+	{ store( __v__ ); return __v__; }
+
+	char operator ++( int ) volatile
+	{ return fetch_add( 1 ); }
+
+	char operator --( int ) volatile
+	{ return fetch_sub( 1 ); }
+
+	char operator ++() volatile
+	{ return fetch_add( 1 ) + 1; }
+
+	char operator --() volatile
+	{ return fetch_sub( 1 ) - 1; }
+
+	char operator +=( char __v__ ) volatile
+	{ return fetch_add( __v__ ) + __v__; }
+
+	char operator -=( char __v__ ) volatile
+	{ return fetch_sub( __v__ ) - __v__; }
+
+	char operator &=( char __v__ ) volatile
+	{ return fetch_and( __v__ ) & __v__; }
+
+	char operator |=( char __v__ ) volatile
+	{ return fetch_or( __v__ ) | __v__; }
+
+	char operator ^=( char __v__ ) volatile
+	{ return fetch_xor( __v__ ) ^ __v__; }
+
+	friend void atomic_store_explicit( volatile atomic_char*, char,
+																		 memory_order );
+	friend char atomic_load_explicit( volatile atomic_char*,
+																		memory_order );
+	friend char atomic_exchange_explicit( volatile atomic_char*,
+																				char, memory_order );
+	friend bool atomic_compare_exchange_weak_explicit( volatile atomic_char*,
+																										 char*, char, memory_order, memory_order );
+	friend bool atomic_compare_exchange_strong_explicit( volatile atomic_char*,
+																											 char*, char, memory_order, memory_order );
+	friend char atomic_fetch_add_explicit( volatile atomic_char*,
+																				 char, memory_order );
+	friend char atomic_fetch_sub_explicit( volatile atomic_char*,
+																				 char, memory_order );
+	friend char atomic_fetch_and_explicit( volatile atomic_char*,
+																				 char, memory_order );
+	friend char atomic_fetch_or_explicit(  volatile atomic_char*,
+																				 char, memory_order );
+	friend char atomic_fetch_xor_explicit( volatile atomic_char*,
+																				 char, memory_order );
+
+	CPP0X(private:)
 #endif
-    char __f__;
+	char __f__;
 } atomic_char;
 
 
 typedef struct atomic_schar
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( signed char,
-                memory_order = memory_order_seq_cst ) volatile;
-    signed char load( memory_order = memory_order_seq_cst ) volatile;
-    signed char exchange( signed char,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( signed char&, signed char,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( signed char&, signed char,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( signed char&, signed char,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( signed char&, signed char,
-                       memory_order = memory_order_seq_cst ) volatile;
-    signed char fetch_add( signed char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    signed char fetch_sub( signed char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    signed char fetch_and( signed char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    signed char fetch_or( signed char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    signed char fetch_xor( signed char,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_schar() = default; )
-    CPP0X( constexpr atomic_schar( signed char __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_schar( const atomic_schar& ) = delete; )
-    atomic_schar& operator =( const atomic_schar& ) CPP0X(=delete);
-
-    signed char operator =( signed char __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    signed char operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    signed char operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    signed char operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    signed char operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    signed char operator +=( signed char __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    signed char operator -=( signed char __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    signed char operator &=( signed char __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    signed char operator |=( signed char __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    signed char operator ^=( signed char __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_schar*, signed char,
-                                       memory_order );
-    friend signed char atomic_load_explicit( volatile atomic_schar*,
-                                             memory_order );
-    friend signed char atomic_exchange_explicit( volatile atomic_schar*,
-                                             signed char, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_schar*,
-                      signed char*, signed char, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_schar*,
-                      signed char*, signed char, memory_order, memory_order );
-    friend signed char atomic_fetch_add_explicit( volatile atomic_schar*,
-                                                  signed char, memory_order );
-    friend signed char atomic_fetch_sub_explicit( volatile atomic_schar*,
-                                                  signed char, memory_order );
-    friend signed char atomic_fetch_and_explicit( volatile atomic_schar*,
-                                                  signed char, memory_order );
-    friend signed char atomic_fetch_or_explicit(  volatile atomic_schar*,
-                                                  signed char, memory_order );
-    friend signed char atomic_fetch_xor_explicit( volatile atomic_schar*,
-                                                  signed char, memory_order );
-
-CPP0X(private:)
+	bool is_lock_free() const volatile;
+	void store( signed char,
+							memory_order = memory_order_seq_cst ) volatile;
+	signed char load( memory_order = memory_order_seq_cst ) volatile;
+	signed char exchange( signed char,
+												memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_weak( signed char&, signed char,
+															memory_order, memory_order ) volatile;
+	bool compare_exchange_strong( signed char&, signed char,
+																memory_order, memory_order ) volatile;
+	bool compare_exchange_weak( signed char&, signed char,
+															memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_strong( signed char&, signed char,
+																memory_order = memory_order_seq_cst ) volatile;
+	signed char fetch_add( signed char,
+												 memory_order = memory_order_seq_cst ) volatile;
+	signed char fetch_sub( signed char,
+												 memory_order = memory_order_seq_cst ) volatile;
+	signed char fetch_and( signed char,
+												 memory_order = memory_order_seq_cst ) volatile;
+	signed char fetch_or( signed char,
+												memory_order = memory_order_seq_cst ) volatile;
+	signed char fetch_xor( signed char,
+												 memory_order = memory_order_seq_cst ) volatile;
+
+	CPP0X( atomic_schar() = default; )
+	CPP0X( constexpr atomic_schar( signed char __v__ ) : __f__( __v__) {
+		} )
+	CPP0X( atomic_schar( const atomic_schar& ) = delete; )
+	atomic_schar& operator =( const atomic_schar& ) CPP0X(=delete);
+
+	signed char operator =( signed char __v__ ) volatile
+	{ store( __v__ ); return __v__; }
+
+	signed char operator ++( int ) volatile
+	{ return fetch_add( 1 ); }
+
+	signed char operator --( int ) volatile
+	{ return fetch_sub( 1 ); }
+
+	signed char operator ++() volatile
+	{ return fetch_add( 1 ) + 1; }
+
+	signed char operator --() volatile
+	{ return fetch_sub( 1 ) - 1; }
+
+	signed char operator +=( signed char __v__ ) volatile
+	{ return fetch_add( __v__ ) + __v__; }
+
+	signed char operator -=( signed char __v__ ) volatile
+	{ return fetch_sub( __v__ ) - __v__; }
+
+	signed char operator &=( signed char __v__ ) volatile
+	{ return fetch_and( __v__ ) & __v__; }
+
+	signed char operator |=( signed char __v__ ) volatile
+	{ return fetch_or( __v__ ) | __v__; }
+
+	signed char operator ^=( signed char __v__ ) volatile
+	{ return fetch_xor( __v__ ) ^ __v__; }
+
+	friend void atomic_store_explicit( volatile atomic_schar*, signed char,
+																		 memory_order );
+	friend signed char atomic_load_explicit( volatile atomic_schar*,
+																					 memory_order );
+	friend signed char atomic_exchange_explicit( volatile atomic_schar*,
+																							 signed char, memory_order );
+	friend bool atomic_compare_exchange_weak_explicit( volatile atomic_schar*,
+																										 signed char*, signed char, memory_order, memory_order );
+	friend bool atomic_compare_exchange_strong_explicit( volatile atomic_schar*,
+																											 signed char*, signed char, memory_order, memory_order );
+	friend signed char atomic_fetch_add_explicit( volatile atomic_schar*,
+																								signed char, memory_order );
+	friend signed char atomic_fetch_sub_explicit( volatile atomic_schar*,
+																								signed char, memory_order );
+	friend signed char atomic_fetch_and_explicit( volatile atomic_schar*,
+																								signed char, memory_order );
+	friend signed char atomic_fetch_or_explicit(  volatile atomic_schar*,
+																								signed char, memory_order );
+	friend signed char atomic_fetch_xor_explicit( volatile atomic_schar*,
+																								signed char, memory_order );
+
+	CPP0X(private:)
 #endif
-    signed char __f__;
+	signed char __f__;
 } atomic_schar;
 
 
 typedef struct atomic_uchar
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( unsigned char,
-                memory_order = memory_order_seq_cst ) volatile;
-    unsigned char load( memory_order = memory_order_seq_cst ) volatile;
-    unsigned char exchange( unsigned char,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( unsigned char&, unsigned char,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( unsigned char&, unsigned char,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( unsigned char&, unsigned char,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( unsigned char&, unsigned char,
-                       memory_order = memory_order_seq_cst ) volatile;
-    unsigned char fetch_add( unsigned char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned char fetch_sub( unsigned char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned char fetch_and( unsigned char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned char fetch_or( unsigned char,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned char fetch_xor( unsigned char,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_uchar() = default; )
-    CPP0X( constexpr atomic_uchar( unsigned char __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_uchar( const atomic_uchar& ) = delete; )
-    atomic_uchar& operator =( const atomic_uchar& ) CPP0X(=delete);
-
-    unsigned char operator =( unsigned char __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    unsigned char operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    unsigned char operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    unsigned char operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    unsigned char operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    unsigned char operator +=( unsigned char __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    unsigned char operator -=( unsigned char __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    unsigned char operator &=( unsigned char __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    unsigned char operator |=( unsigned char __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    unsigned char operator ^=( unsigned char __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_uchar*, unsigned char,
-                                       memory_order );
-    friend unsigned char atomic_load_explicit( volatile atomic_uchar*,
-                                             memory_order );
-    friend unsigned char atomic_exchange_explicit( volatile atomic_uchar*,
-                                             unsigned char, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_uchar*,
-                      unsigned char*, unsigned char, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_uchar*,
-                      unsigned char*, unsigned char, memory_order, memory_order );
-    friend unsigned char atomic_fetch_add_explicit( volatile atomic_uchar*,
-                                                  unsigned char, memory_order );
-    friend unsigned char atomic_fetch_sub_explicit( volatile atomic_uchar*,
-                                                  unsigned char, memory_order );
-    friend unsigned char atomic_fetch_and_explicit( volatile atomic_uchar*,
-                                                  unsigned char, memory_order );
-    friend unsigned char atomic_fetch_or_explicit(  volatile atomic_uchar*,
-                                                  unsigned char, memory_order );
-    friend unsigned char atomic_fetch_xor_explicit( volatile atomic_uchar*,
-                                                  unsigned char, memory_order );
-
-CPP0X(private:)
+	bool is_lock_free() const volatile;
+	void store( unsigned char,
+							memory_order = memory_order_seq_cst ) volatile;
+	unsigned char load( memory_order = memory_order_seq_cst ) volatile;
+	unsigned char exchange( unsigned char,
+													memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_weak( unsigned char&, unsigned char,
+															memory_order, memory_order ) volatile;
+	bool compare_exchange_strong( unsigned char&, unsigned char,
+																memory_order, memory_order ) volatile;
+	bool compare_exchange_weak( unsigned char&, unsigned char,
+															memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_strong( unsigned char&, unsigned char,
+																memory_order = memory_order_seq_cst ) volatile;
+	unsigned char fetch_add( unsigned char,
+													 memory_order = memory_order_seq_cst ) volatile;
+	unsigned char fetch_sub( unsigned char,
+													 memory_order = memory_order_seq_cst ) volatile;
+	unsigned char fetch_and( unsigned char,
+													 memory_order = memory_order_seq_cst ) volatile;
+	unsigned char fetch_or( unsigned char,
+													memory_order = memory_order_seq_cst ) volatile;
+	unsigned char fetch_xor( unsigned char,
+													 memory_order = memory_order_seq_cst ) volatile;
+
+	CPP0X( atomic_uchar() = default; )
+	CPP0X( constexpr atomic_uchar( unsigned char __v__ ) : __f__( __v__) {
+		} )
+	CPP0X( atomic_uchar( const atomic_uchar& ) = delete; )
+	atomic_uchar& operator =( const atomic_uchar& ) CPP0X(=delete);
+
+	unsigned char operator =( unsigned char __v__ ) volatile
+	{ store( __v__ ); return __v__; }
+
+	unsigned char operator ++( int ) volatile
+	{ return fetch_add( 1 ); }
+
+	unsigned char operator --( int ) volatile
+	{ return fetch_sub( 1 ); }
+
+	unsigned char operator ++() volatile
+	{ return fetch_add( 1 ) + 1; }
+
+	unsigned char operator --() volatile
+	{ return fetch_sub( 1 ) - 1; }
+
+	unsigned char operator +=( unsigned char __v__ ) volatile
+	{ return fetch_add( __v__ ) + __v__; }
+
+	unsigned char operator -=( unsigned char __v__ ) volatile
+	{ return fetch_sub( __v__ ) - __v__; }
+
+	unsigned char operator &=( unsigned char __v__ ) volatile
+	{ return fetch_and( __v__ ) & __v__; }
+
+	unsigned char operator |=( unsigned char __v__ ) volatile
+	{ return fetch_or( __v__ ) | __v__; }
+
+	unsigned char operator ^=( unsigned char __v__ ) volatile
+	{ return fetch_xor( __v__ ) ^ __v__; }
+
+	friend void atomic_store_explicit( volatile atomic_uchar*, unsigned char,
+																		 memory_order );
+	friend unsigned char atomic_load_explicit( volatile atomic_uchar*,
+																						 memory_order );
+	friend unsigned char atomic_exchange_explicit( volatile atomic_uchar*,
+																								 unsigned char, memory_order );
+	friend bool atomic_compare_exchange_weak_explicit( volatile atomic_uchar*,
+																										 unsigned char*, unsigned char, memory_order, memory_order );
+	friend bool atomic_compare_exchange_strong_explicit( volatile atomic_uchar*,
+																											 unsigned char*, unsigned char, memory_order, memory_order );
+	friend unsigned char atomic_fetch_add_explicit( volatile atomic_uchar*,
+																									unsigned char, memory_order );
+	friend unsigned char atomic_fetch_sub_explicit( volatile atomic_uchar*,
+																									unsigned char, memory_order );
+	friend unsigned char atomic_fetch_and_explicit( volatile atomic_uchar*,
+																									unsigned char, memory_order );
+	friend unsigned char atomic_fetch_or_explicit(  volatile atomic_uchar*,
+																									unsigned char, memory_order );
+	friend unsigned char atomic_fetch_xor_explicit( volatile atomic_uchar*,
+																									unsigned char, memory_order );
+
+	CPP0X(private:)
 #endif
-    unsigned char __f__;
+	unsigned char __f__;
 } atomic_uchar;
 
 
 typedef struct atomic_short
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( short,
-                memory_order = memory_order_seq_cst ) volatile;
-    short load( memory_order = memory_order_seq_cst ) volatile;
-    short exchange( short,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( short&, short,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( short&, short,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( short&, short,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( short&, short,
-                       memory_order = memory_order_seq_cst ) volatile;
-    short fetch_add( short,
-                           memory_order = memory_order_seq_cst ) volatile;
-    short fetch_sub( short,
-                           memory_order = memory_order_seq_cst ) volatile;
-    short fetch_and( short,
-                           memory_order = memory_order_seq_cst ) volatile;
-    short fetch_or( short,
-                           memory_order = memory_order_seq_cst ) volatile;
-    short fetch_xor( short,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_short() = default; )
-    CPP0X( constexpr atomic_short( short __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_short( const atomic_short& ) = delete; )
-    atomic_short& operator =( const atomic_short& ) CPP0X(=delete);
-
-    short operator =( short __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    short operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    short operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    short operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    short operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    short operator +=( short __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    short operator -=( short __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    short operator &=( short __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    short operator |=( short __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    short operator ^=( short __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_short*, short,
-                                       memory_order );
-    friend short atomic_load_explicit( volatile atomic_short*,
-                                             memory_order );
-    friend short atomic_exchange_explicit( volatile atomic_short*,
-                                             short, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_short*,
-                      short*, short, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_short*,
-                      short*, short, memory_order, memory_order );
-    friend short atomic_fetch_add_explicit( volatile atomic_short*,
-                                                  short, memory_order );
-    friend short atomic_fetch_sub_explicit( volatile atomic_short*,
-                                                  short, memory_order );
-    friend short atomic_fetch_and_explicit( volatile atomic_short*,
-                                                  short, memory_order );
-    friend short atomic_fetch_or_explicit(  volatile atomic_short*,
-                                                  short, memory_order );
-    friend short atomic_fetch_xor_explicit( volatile atomic_short*,
-                                                  short, memory_order );
-
-CPP0X(private:)
+	bool is_lock_free() const volatile;
+	void store( short,
+							memory_order = memory_order_seq_cst ) volatile;
+	short load( memory_order = memory_order_seq_cst ) volatile;
+	short exchange( short,
+									memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_weak( short&, short,
+															memory_order, memory_order ) volatile;
+	bool compare_exchange_strong( short&, short,
+																memory_order, memory_order ) volatile;
+	bool compare_exchange_weak( short&, short,
+															memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_strong( short&, short,
+																memory_order = memory_order_seq_cst ) volatile;
+	short fetch_add( short,
+									 memory_order = memory_order_seq_cst ) volatile;
+	short fetch_sub( short,
+									 memory_order = memory_order_seq_cst ) volatile;
+	short fetch_and( short,
+									 memory_order = memory_order_seq_cst ) volatile;
+	short fetch_or( short,
+									memory_order = memory_order_seq_cst ) volatile;
+	short fetch_xor( short,
+									 memory_order = memory_order_seq_cst ) volatile;
+
+	CPP0X( atomic_short() = default; )
+	CPP0X( constexpr atomic_short( short __v__ ) : __f__( __v__) {
+		} )
+	CPP0X( atomic_short( const atomic_short& ) = delete; )
+	atomic_short& operator =( const atomic_short& ) CPP0X(=delete);
+
+	short operator =( short __v__ ) volatile
+	{ store( __v__ ); return __v__; }
+
+	short operator ++( int ) volatile
+	{ return fetch_add( 1 ); }
+
+	short operator --( int ) volatile
+	{ return fetch_sub( 1 ); }
+
+	short operator ++() volatile
+	{ return fetch_add( 1 ) + 1; }
+
+	short operator --() volatile
+	{ return fetch_sub( 1 ) - 1; }
+
+	short operator +=( short __v__ ) volatile
+	{ return fetch_add( __v__ ) + __v__; }
+
+	short operator -=( short __v__ ) volatile
+	{ return fetch_sub( __v__ ) - __v__; }
+
+	short operator &=( short __v__ ) volatile
+	{ return fetch_and( __v__ ) & __v__; }
+
+	short operator |=( short __v__ ) volatile
+	{ return fetch_or( __v__ ) | __v__; }
+
+	short operator ^=( short __v__ ) volatile
+	{ return fetch_xor( __v__ ) ^ __v__; }
+
+	friend void atomic_store_explicit( volatile atomic_short*, short,
+																		 memory_order );
+	friend short atomic_load_explicit( volatile atomic_short*,
+																		 memory_order );
+	friend short atomic_exchange_explicit( volatile atomic_short*,
+																				 short, memory_order );
+	friend bool atomic_compare_exchange_weak_explicit( volatile atomic_short*,
+																										 short*, short, memory_order, memory_order );
+	friend bool atomic_compare_exchange_strong_explicit( volatile atomic_short*,
+																											 short*, short, memory_order, memory_order );
+	friend short atomic_fetch_add_explicit( volatile atomic_short*,
+																					short, memory_order );
+	friend short atomic_fetch_sub_explicit( volatile atomic_short*,
+																					short, memory_order );
+	friend short atomic_fetch_and_explicit( volatile atomic_short*,
+																					short, memory_order );
+	friend short atomic_fetch_or_explicit(  volatile atomic_short*,
+																					short, memory_order );
+	friend short atomic_fetch_xor_explicit( volatile atomic_short*,
+																					short, memory_order );
+
+	CPP0X(private:)
 #endif
-    short __f__;
+	short __f__;
 } atomic_short;
 
 
 typedef struct atomic_ushort
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( unsigned short,
-                memory_order = memory_order_seq_cst ) volatile;
-    unsigned short load( memory_order = memory_order_seq_cst ) volatile;
-    unsigned short exchange( unsigned short,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( unsigned short&, unsigned short,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( unsigned short&, unsigned short,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( unsigned short&, unsigned short,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( unsigned short&, unsigned short,
-                       memory_order = memory_order_seq_cst ) volatile;
-    unsigned short fetch_add( unsigned short,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned short fetch_sub( unsigned short,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned short fetch_and( unsigned short,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned short fetch_or( unsigned short,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned short fetch_xor( unsigned short,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_ushort() = default; )
-    CPP0X( constexpr atomic_ushort( unsigned short __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_ushort( const atomic_ushort& ) = delete; )
-    atomic_ushort& operator =( const atomic_ushort& ) CPP0X(=delete);
-
-    unsigned short operator =( unsigned short __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    unsigned short operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    unsigned short operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    unsigned short operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    unsigned short operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    unsigned short operator +=( unsigned short __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    unsigned short operator -=( unsigned short __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    unsigned short operator &=( unsigned short __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    unsigned short operator |=( unsigned short __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    unsigned short operator ^=( unsigned short __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_ushort*, unsigned short,
-                                       memory_order );
-    friend unsigned short atomic_load_explicit( volatile atomic_ushort*,
-                                             memory_order );
-    friend unsigned short atomic_exchange_explicit( volatile atomic_ushort*,
-                                             unsigned short, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_ushort*,
-                      unsigned short*, unsigned short, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_ushort*,
-                      unsigned short*, unsigned short, memory_order, memory_order );
-    friend unsigned short atomic_fetch_add_explicit( volatile atomic_ushort*,
-                                                  unsigned short, memory_order );
-    friend unsigned short atomic_fetch_sub_explicit( volatile atomic_ushort*,
-                                                  unsigned short, memory_order );
-    friend unsigned short atomic_fetch_and_explicit( volatile atomic_ushort*,
-                                                  unsigned short, memory_order );
-    friend unsigned short atomic_fetch_or_explicit(  volatile atomic_ushort*,
-                                                  unsigned short, memory_order );
-    friend unsigned short atomic_fetch_xor_explicit( volatile atomic_ushort*,
-                                                  unsigned short, memory_order );
-
-CPP0X(private:)
+	bool is_lock_free() const volatile;
+	void store( unsigned short,
+							memory_order = memory_order_seq_cst ) volatile;
+	unsigned short load( memory_order = memory_order_seq_cst ) volatile;
+	unsigned short exchange( unsigned short,
+													 memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_weak( unsigned short&, unsigned short,
+															memory_order, memory_order ) volatile;
+	bool compare_exchange_strong( unsigned short&, unsigned short,
+																memory_order, memory_order ) volatile;
+	bool compare_exchange_weak( unsigned short&, unsigned short,
+															memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_strong( unsigned short&, unsigned short,
+																memory_order = memory_order_seq_cst ) volatile;
+	unsigned short fetch_add( unsigned short,
+														memory_order = memory_order_seq_cst ) volatile;
+	unsigned short fetch_sub( unsigned short,
+														memory_order = memory_order_seq_cst ) volatile;
+	unsigned short fetch_and( unsigned short,
+														memory_order = memory_order_seq_cst ) volatile;
+	unsigned short fetch_or( unsigned short,
+													 memory_order = memory_order_seq_cst ) volatile;
+	unsigned short fetch_xor( unsigned short,
+														memory_order = memory_order_seq_cst ) volatile;
+
+	CPP0X( atomic_ushort() = default; )
+	CPP0X( constexpr atomic_ushort( unsigned short __v__ ) : __f__( __v__) {
+		} )
+	CPP0X( atomic_ushort( const atomic_ushort& ) = delete; )
+	atomic_ushort& operator =( const atomic_ushort& ) CPP0X(=delete);
+
+	unsigned short operator =( unsigned short __v__ ) volatile
+	{ store( __v__ ); return __v__; }
+
+	unsigned short operator ++( int ) volatile
+	{ return fetch_add( 1 ); }
+
+	unsigned short operator --( int ) volatile
+	{ return fetch_sub( 1 ); }
+
+	unsigned short operator ++() volatile
+	{ return fetch_add( 1 ) + 1; }
+
+	unsigned short operator --() volatile
+	{ return fetch_sub( 1 ) - 1; }
+
+	unsigned short operator +=( unsigned short __v__ ) volatile
+	{ return fetch_add( __v__ ) + __v__; }
+
+	unsigned short operator -=( unsigned short __v__ ) volatile
+	{ return fetch_sub( __v__ ) - __v__; }
+
+	unsigned short operator &=( unsigned short __v__ ) volatile
+	{ return fetch_and( __v__ ) & __v__; }
+
+	unsigned short operator |=( unsigned short __v__ ) volatile
+	{ return fetch_or( __v__ ) | __v__; }
+
+	unsigned short operator ^=( unsigned short __v__ ) volatile
+	{ return fetch_xor( __v__ ) ^ __v__; }
+
+	friend void atomic_store_explicit( volatile atomic_ushort*, unsigned short,
+																		 memory_order );
+	friend unsigned short atomic_load_explicit( volatile atomic_ushort*,
+																							memory_order );
+	friend unsigned short atomic_exchange_explicit( volatile atomic_ushort*,
+																									unsigned short, memory_order );
+	friend bool atomic_compare_exchange_weak_explicit( volatile atomic_ushort*,
+																										 unsigned short*, unsigned short, memory_order, memory_order );
+	friend bool atomic_compare_exchange_strong_explicit( volatile atomic_ushort*,
+																											 unsigned short*, unsigned short, memory_order, memory_order );
+	friend unsigned short atomic_fetch_add_explicit( volatile atomic_ushort*,
+																									 unsigned short, memory_order );
+	friend unsigned short atomic_fetch_sub_explicit( volatile atomic_ushort*,
+																									 unsigned short, memory_order );
+	friend unsigned short atomic_fetch_and_explicit( volatile atomic_ushort*,
+																									 unsigned short, memory_order );
+	friend unsigned short atomic_fetch_or_explicit(  volatile atomic_ushort*,
+																									 unsigned short, memory_order );
+	friend unsigned short atomic_fetch_xor_explicit( volatile atomic_ushort*,
+																									 unsigned short, memory_order );
+
+	CPP0X(private:)
 #endif
-    unsigned short __f__;
+	unsigned short __f__;
 } atomic_ushort;
 
 
 typedef struct atomic_int
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( int,
-                memory_order = memory_order_seq_cst ) volatile;
-    int load( memory_order = memory_order_seq_cst ) volatile;
-    int exchange( int,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( int&, int,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( int&, int,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( int&, int,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( int&, int,
-                       memory_order = memory_order_seq_cst ) volatile;
-    int fetch_add( int,
-                           memory_order = memory_order_seq_cst ) volatile;
-    int fetch_sub( int,
-                           memory_order = memory_order_seq_cst ) volatile;
-    int fetch_and( int,
-                           memory_order = memory_order_seq_cst ) volatile;
-    int fetch_or( int,
-                           memory_order = memory_order_seq_cst ) volatile;
-    int fetch_xor( int,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_int() = default; )
-    CPP0X( constexpr atomic_int( int __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_int( const atomic_int& ) = delete; )
-    atomic_int& operator =( const atomic_int& ) CPP0X(=delete);
-
-    int operator =( int __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    int operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    int operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    int operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    int operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    int operator +=( int __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    int operator -=( int __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    int operator &=( int __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    int operator |=( int __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    int operator ^=( int __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_int*, int,
-                                       memory_order );
-    friend int atomic_load_explicit( volatile atomic_int*,
-                                             memory_order );
-    friend int atomic_exchange_explicit( volatile atomic_int*,
-                                             int, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_int*,
-                      int*, int, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_int*,
-                      int*, int, memory_order, memory_order );
-    friend int atomic_fetch_add_explicit( volatile atomic_int*,
-                                                  int, memory_order );
-    friend int atomic_fetch_sub_explicit( volatile atomic_int*,
-                                                  int, memory_order );
-    friend int atomic_fetch_and_explicit( volatile atomic_int*,
-                                                  int, memory_order );
-    friend int atomic_fetch_or_explicit(  volatile atomic_int*,
-                                                  int, memory_order );
-    friend int atomic_fetch_xor_explicit( volatile atomic_int*,
-                                                  int, memory_order );
-
-CPP0X(private:)
+	bool is_lock_free() const volatile;
+	void store( int,
+							memory_order = memory_order_seq_cst ) volatile;
+	int load( memory_order = memory_order_seq_cst ) volatile;
+	int exchange( int,
+								memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_weak( int&, int,
+															memory_order, memory_order ) volatile;
+	bool compare_exchange_strong( int&, int,
+																memory_order, memory_order ) volatile;
+	bool compare_exchange_weak( int&, int,
+															memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_strong( int&, int,
+																memory_order = memory_order_seq_cst ) volatile;
+	int fetch_add( int,
+								 memory_order = memory_order_seq_cst ) volatile;
+	int fetch_sub( int,
+								 memory_order = memory_order_seq_cst ) volatile;
+	int fetch_and( int,
+								 memory_order = memory_order_seq_cst ) volatile;
+	int fetch_or( int,
+								memory_order = memory_order_seq_cst ) volatile;
+	int fetch_xor( int,
+								 memory_order = memory_order_seq_cst ) volatile;
+
+	CPP0X( atomic_int() = default; )
+	CPP0X( constexpr atomic_int( int __v__ ) : __f__( __v__) {
+		} )
+	CPP0X( atomic_int( const atomic_int& ) = delete; )
+	atomic_int& operator =( const atomic_int& ) CPP0X(=delete);
+
+	int operator =( int __v__ ) volatile
+	{ store( __v__ ); return __v__; }
+
+	int operator ++( int ) volatile
+	{ return fetch_add( 1 ); }
+
+	int operator --( int ) volatile
+	{ return fetch_sub( 1 ); }
+
+	int operator ++() volatile
+	{ return fetch_add( 1 ) + 1; }
+
+	int operator --() volatile
+	{ return fetch_sub( 1 ) - 1; }
+
+	int operator +=( int __v__ ) volatile
+	{ return fetch_add( __v__ ) + __v__; }
+
+	int operator -=( int __v__ ) volatile
+	{ return fetch_sub( __v__ ) - __v__; }
+
+	int operator &=( int __v__ ) volatile
+	{ return fetch_and( __v__ ) & __v__; }
+
+	int operator |=( int __v__ ) volatile
+	{ return fetch_or( __v__ ) | __v__; }
+
+	int operator ^=( int __v__ ) volatile
+	{ return fetch_xor( __v__ ) ^ __v__; }
+
+	friend void atomic_store_explicit( volatile atomic_int*, int,
+																		 memory_order );
+	friend int atomic_load_explicit( volatile atomic_int*,
+																	 memory_order );
+	friend int atomic_exchange_explicit( volatile atomic_int*,
+																			 int, memory_order );
+	friend bool atomic_compare_exchange_weak_explicit( volatile atomic_int*,
+																										 int*, int, memory_order, memory_order );
+	friend bool atomic_compare_exchange_strong_explicit( volatile atomic_int*,
+																											 int*, int, memory_order, memory_order );
+	friend int atomic_fetch_add_explicit( volatile atomic_int*,
+																				int, memory_order );
+	friend int atomic_fetch_sub_explicit( volatile atomic_int*,
+																				int, memory_order );
+	friend int atomic_fetch_and_explicit( volatile atomic_int*,
+																				int, memory_order );
+	friend int atomic_fetch_or_explicit(  volatile atomic_int*,
+																				int, memory_order );
+	friend int atomic_fetch_xor_explicit( volatile atomic_int*,
+																				int, memory_order );
+
+	CPP0X(private:)
 #endif
-    int __f__;
+	int __f__;
 } atomic_int;
 
 
 typedef struct atomic_uint
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( unsigned int,
-                memory_order = memory_order_seq_cst ) volatile;
-    unsigned int load( memory_order = memory_order_seq_cst ) volatile;
-    unsigned int exchange( unsigned int,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( unsigned int&, unsigned int,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( unsigned int&, unsigned int,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( unsigned int&, unsigned int,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( unsigned int&, unsigned int,
-                       memory_order = memory_order_seq_cst ) volatile;
-    unsigned int fetch_add( unsigned int,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned int fetch_sub( unsigned int,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned int fetch_and( unsigned int,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned int fetch_or( unsigned int,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned int fetch_xor( unsigned int,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_uint() = default; )
-    CPP0X( constexpr atomic_uint( unsigned int __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_uint( const atomic_uint& ) = delete; )
-    atomic_uint& operator =( const atomic_uint& ) CPP0X(=delete);
-
-    unsigned int operator =( unsigned int __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    unsigned int operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    unsigned int operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    unsigned int operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    unsigned int operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    unsigned int operator +=( unsigned int __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    unsigned int operator -=( unsigned int __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    unsigned int operator &=( unsigned int __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    unsigned int operator |=( unsigned int __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    unsigned int operator ^=( unsigned int __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_uint*, unsigned int,
-                                       memory_order );
-    friend unsigned int atomic_load_explicit( volatile atomic_uint*,
-                                             memory_order );
-    friend unsigned int atomic_exchange_explicit( volatile atomic_uint*,
-                                             unsigned int, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_uint*,
-                      unsigned int*, unsigned int, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_uint*,
-                      unsigned int*, unsigned int, memory_order, memory_order );
-    friend unsigned int atomic_fetch_add_explicit( volatile atomic_uint*,
-                                                  unsigned int, memory_order );
-    friend unsigned int atomic_fetch_sub_explicit( volatile atomic_uint*,
-                                                  unsigned int, memory_order );
-    friend unsigned int atomic_fetch_and_explicit( volatile atomic_uint*,
-                                                  unsigned int, memory_order );
-    friend unsigned int atomic_fetch_or_explicit(  volatile atomic_uint*,
-                                                  unsigned int, memory_order );
-    friend unsigned int atomic_fetch_xor_explicit( volatile atomic_uint*,
-                                                  unsigned int, memory_order );
-
-CPP0X(private:)
+	bool is_lock_free() const volatile;
+	void store( unsigned int,
+							memory_order = memory_order_seq_cst ) volatile;
+	unsigned int load( memory_order = memory_order_seq_cst ) volatile;
+	unsigned int exchange( unsigned int,
+												 memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_weak( unsigned int&, unsigned int,
+															memory_order, memory_order ) volatile;
+	bool compare_exchange_strong( unsigned int&, unsigned int,
+																memory_order, memory_order ) volatile;
+	bool compare_exchange_weak( unsigned int&, unsigned int,
+															memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_strong( unsigned int&, unsigned int,
+																memory_order = memory_order_seq_cst ) volatile;
+	unsigned int fetch_add( unsigned int,
+													memory_order = memory_order_seq_cst ) volatile;
+	unsigned int fetch_sub( unsigned int,
+													memory_order = memory_order_seq_cst ) volatile;
+	unsigned int fetch_and( unsigned int,
+													memory_order = memory_order_seq_cst ) volatile;
+	unsigned int fetch_or( unsigned int,
+												 memory_order = memory_order_seq_cst ) volatile;
+	unsigned int fetch_xor( unsigned int,
+													memory_order = memory_order_seq_cst ) volatile;
+
+	CPP0X( atomic_uint() = default; )
+	CPP0X( constexpr atomic_uint( unsigned int __v__ ) : __f__( __v__) {
+		} )
+	CPP0X( atomic_uint( const atomic_uint& ) = delete; )
+	atomic_uint& operator =( const atomic_uint& ) CPP0X(=delete);
+
+	unsigned int operator =( unsigned int __v__ ) volatile
+	{ store( __v__ ); return __v__; }
+
+	unsigned int operator ++( int ) volatile
+	{ return fetch_add( 1 ); }
+
+	unsigned int operator --( int ) volatile
+	{ return fetch_sub( 1 ); }
+
+	unsigned int operator ++() volatile
+	{ return fetch_add( 1 ) + 1; }
+
+	unsigned int operator --() volatile
+	{ return fetch_sub( 1 ) - 1; }
+
+	unsigned int operator +=( unsigned int __v__ ) volatile
+	{ return fetch_add( __v__ ) + __v__; }
+
+	unsigned int operator -=( unsigned int __v__ ) volatile
+	{ return fetch_sub( __v__ ) - __v__; }
+
+	unsigned int operator &=( unsigned int __v__ ) volatile
+	{ return fetch_and( __v__ ) & __v__; }
+
+	unsigned int operator |=( unsigned int __v__ ) volatile
+	{ return fetch_or( __v__ ) | __v__; }
+
+	unsigned int operator ^=( unsigned int __v__ ) volatile
+	{ return fetch_xor( __v__ ) ^ __v__; }
+
+	friend void atomic_store_explicit( volatile atomic_uint*, unsigned int,
+																		 memory_order );
+	friend unsigned int atomic_load_explicit( volatile atomic_uint*,
+																						memory_order );
+	friend unsigned int atomic_exchange_explicit( volatile atomic_uint*,
+																								unsigned int, memory_order );
+	friend bool atomic_compare_exchange_weak_explicit( volatile atomic_uint*,
+																										 unsigned int*, unsigned int, memory_order, memory_order );
+	friend bool atomic_compare_exchange_strong_explicit( volatile atomic_uint*,
+																											 unsigned int*, unsigned int, memory_order, memory_order );
+	friend unsigned int atomic_fetch_add_explicit( volatile atomic_uint*,
+																								 unsigned int, memory_order );
+	friend unsigned int atomic_fetch_sub_explicit( volatile atomic_uint*,
+																								 unsigned int, memory_order );
+	friend unsigned int atomic_fetch_and_explicit( volatile atomic_uint*,
+																								 unsigned int, memory_order );
+	friend unsigned int atomic_fetch_or_explicit(  volatile atomic_uint*,
+																								 unsigned int, memory_order );
+	friend unsigned int atomic_fetch_xor_explicit( volatile atomic_uint*,
+																								 unsigned int, memory_order );
+
+	CPP0X(private:)
 #endif
-    unsigned int __f__;
+	unsigned int __f__;
 } atomic_uint;
 
 
 typedef struct atomic_long
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( long,
-                memory_order = memory_order_seq_cst ) volatile;
-    long load( memory_order = memory_order_seq_cst ) volatile;
-    long exchange( long,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( long&, long,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( long&, long,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( long&, long,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( long&, long,
-                       memory_order = memory_order_seq_cst ) volatile;
-    long fetch_add( long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    long fetch_sub( long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    long fetch_and( long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    long fetch_or( long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    long fetch_xor( long,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_long() = default; )
-    CPP0X( constexpr atomic_long( long __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_long( const atomic_long& ) = delete; )
-    atomic_long& operator =( const atomic_long& ) CPP0X(=delete);
-
-    long operator =( long __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    long operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    long operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    long operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    long operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    long operator +=( long __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    long operator -=( long __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    long operator &=( long __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    long operator |=( long __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    long operator ^=( long __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_long*, long,
-                                       memory_order );
-    friend long atomic_load_explicit( volatile atomic_long*,
-                                             memory_order );
-    friend long atomic_exchange_explicit( volatile atomic_long*,
-                                             long, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_long*,
-                      long*, long, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_long*,
-                      long*, long, memory_order, memory_order );
-    friend long atomic_fetch_add_explicit( volatile atomic_long*,
-                                                  long, memory_order );
-    friend long atomic_fetch_sub_explicit( volatile atomic_long*,
-                                                  long, memory_order );
-    friend long atomic_fetch_and_explicit( volatile atomic_long*,
-                                                  long, memory_order );
-    friend long atomic_fetch_or_explicit(  volatile atomic_long*,
-                                                  long, memory_order );
-    friend long atomic_fetch_xor_explicit( volatile atomic_long*,
-                                                  long, memory_order );
-
-CPP0X(private:)
+	bool is_lock_free() const volatile;
+	void store( long,
+							memory_order = memory_order_seq_cst ) volatile;
+	long load( memory_order = memory_order_seq_cst ) volatile;
+	long exchange( long,
+								 memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_weak( long&, long,
+															memory_order, memory_order ) volatile;
+	bool compare_exchange_strong( long&, long,
+																memory_order, memory_order ) volatile;
+	bool compare_exchange_weak( long&, long,
+															memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_strong( long&, long,
+																memory_order = memory_order_seq_cst ) volatile;
+	long fetch_add( long,
+									memory_order = memory_order_seq_cst ) volatile;
+	long fetch_sub( long,
+									memory_order = memory_order_seq_cst ) volatile;
+	long fetch_and( long,
+									memory_order = memory_order_seq_cst ) volatile;
+	long fetch_or( long,
+								 memory_order = memory_order_seq_cst ) volatile;
+	long fetch_xor( long,
+									memory_order = memory_order_seq_cst ) volatile;
+
+	CPP0X( atomic_long() = default; )
+	CPP0X( constexpr atomic_long( long __v__ ) : __f__( __v__) {
+		} )
+	CPP0X( atomic_long( const atomic_long& ) = delete; )
+	atomic_long& operator =( const atomic_long& ) CPP0X(=delete);
+
+	long operator =( long __v__ ) volatile
+	{ store( __v__ ); return __v__; }
+
+	long operator ++( int ) volatile
+	{ return fetch_add( 1 ); }
+
+	long operator --( int ) volatile
+	{ return fetch_sub( 1 ); }
+
+	long operator ++() volatile
+	{ return fetch_add( 1 ) + 1; }
+
+	long operator --() volatile
+	{ return fetch_sub( 1 ) - 1; }
+
+	long operator +=( long __v__ ) volatile
+	{ return fetch_add( __v__ ) + __v__; }
+
+	long operator -=( long __v__ ) volatile
+	{ return fetch_sub( __v__ ) - __v__; }
+
+	long operator &=( long __v__ ) volatile
+	{ return fetch_and( __v__ ) & __v__; }
+
+	long operator |=( long __v__ ) volatile
+	{ return fetch_or( __v__ ) | __v__; }
+
+	long operator ^=( long __v__ ) volatile
+	{ return fetch_xor( __v__ ) ^ __v__; }
+
+	friend void atomic_store_explicit( volatile atomic_long*, long,
+																		 memory_order );
+	friend long atomic_load_explicit( volatile atomic_long*,
+																		memory_order );
+	friend long atomic_exchange_explicit( volatile atomic_long*,
+																				long, memory_order );
+	friend bool atomic_compare_exchange_weak_explicit( volatile atomic_long*,
+																										 long*, long, memory_order, memory_order );
+	friend bool atomic_compare_exchange_strong_explicit( volatile atomic_long*,
+																											 long*, long, memory_order, memory_order );
+	friend long atomic_fetch_add_explicit( volatile atomic_long*,
+																				 long, memory_order );
+	friend long atomic_fetch_sub_explicit( volatile atomic_long*,
+																				 long, memory_order );
+	friend long atomic_fetch_and_explicit( volatile atomic_long*,
+																				 long, memory_order );
+	friend long atomic_fetch_or_explicit(  volatile atomic_long*,
+																				 long, memory_order );
+	friend long atomic_fetch_xor_explicit( volatile atomic_long*,
+																				 long, memory_order );
+
+	CPP0X(private:)
 #endif
-    long __f__;
+	long __f__;
 } atomic_long;
 
 
 typedef struct atomic_ulong
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( unsigned long,
-                memory_order = memory_order_seq_cst ) volatile;
-    unsigned long load( memory_order = memory_order_seq_cst ) volatile;
-    unsigned long exchange( unsigned long,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( unsigned long&, unsigned long,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( unsigned long&, unsigned long,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( unsigned long&, unsigned long,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( unsigned long&, unsigned long,
-                       memory_order = memory_order_seq_cst ) volatile;
-    unsigned long fetch_add( unsigned long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned long fetch_sub( unsigned long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned long fetch_and( unsigned long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned long fetch_or( unsigned long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned long fetch_xor( unsigned long,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_ulong() = default; )
-    CPP0X( constexpr atomic_ulong( unsigned long __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_ulong( const atomic_ulong& ) = delete; )
-    atomic_ulong& operator =( const atomic_ulong& ) CPP0X(=delete);
-
-    unsigned long operator =( unsigned long __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    unsigned long operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    unsigned long operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    unsigned long operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    unsigned long operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    unsigned long operator +=( unsigned long __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    unsigned long operator -=( unsigned long __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    unsigned long operator &=( unsigned long __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    unsigned long operator |=( unsigned long __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    unsigned long operator ^=( unsigned long __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_ulong*, unsigned long,
-                                       memory_order );
-    friend unsigned long atomic_load_explicit( volatile atomic_ulong*,
-                                             memory_order );
-    friend unsigned long atomic_exchange_explicit( volatile atomic_ulong*,
-                                             unsigned long, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_ulong*,
-                      unsigned long*, unsigned long, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_ulong*,
-                      unsigned long*, unsigned long, memory_order, memory_order );
-    friend unsigned long atomic_fetch_add_explicit( volatile atomic_ulong*,
-                                                  unsigned long, memory_order );
-    friend unsigned long atomic_fetch_sub_explicit( volatile atomic_ulong*,
-                                                  unsigned long, memory_order );
-    friend unsigned long atomic_fetch_and_explicit( volatile atomic_ulong*,
-                                                  unsigned long, memory_order );
-    friend unsigned long atomic_fetch_or_explicit(  volatile atomic_ulong*,
-                                                  unsigned long, memory_order );
-    friend unsigned long atomic_fetch_xor_explicit( volatile atomic_ulong*,
-                                                  unsigned long, memory_order );
-
-CPP0X(private:)
+	bool is_lock_free() const volatile;
+	void store( unsigned long,
+							memory_order = memory_order_seq_cst ) volatile;
+	unsigned long load( memory_order = memory_order_seq_cst ) volatile;
+	unsigned long exchange( unsigned long,
+													memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_weak( unsigned long&, unsigned long,
+															memory_order, memory_order ) volatile;
+	bool compare_exchange_strong( unsigned long&, unsigned long,
+																memory_order, memory_order ) volatile;
+	bool compare_exchange_weak( unsigned long&, unsigned long,
+															memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_strong( unsigned long&, unsigned long,
+																memory_order = memory_order_seq_cst ) volatile;
+	unsigned long fetch_add( unsigned long,
+													 memory_order = memory_order_seq_cst ) volatile;
+	unsigned long fetch_sub( unsigned long,
+													 memory_order = memory_order_seq_cst ) volatile;
+	unsigned long fetch_and( unsigned long,
+													 memory_order = memory_order_seq_cst ) volatile;
+	unsigned long fetch_or( unsigned long,
+													memory_order = memory_order_seq_cst ) volatile;
+	unsigned long fetch_xor( unsigned long,
+													 memory_order = memory_order_seq_cst ) volatile;
+
+	CPP0X( atomic_ulong() = default; )
+	CPP0X( constexpr atomic_ulong( unsigned long __v__ ) : __f__( __v__) {
+		} )
+	CPP0X( atomic_ulong( const atomic_ulong& ) = delete; )
+	atomic_ulong& operator =( const atomic_ulong& ) CPP0X(=delete);
+
+	unsigned long operator =( unsigned long __v__ ) volatile
+	{ store( __v__ ); return __v__; }
+
+	unsigned long operator ++( int ) volatile
+	{ return fetch_add( 1 ); }
+
+	unsigned long operator --( int ) volatile
+	{ return fetch_sub( 1 ); }
+
+	unsigned long operator ++() volatile
+	{ return fetch_add( 1 ) + 1; }
+
+	unsigned long operator --() volatile
+	{ return fetch_sub( 1 ) - 1; }
+
+	unsigned long operator +=( unsigned long __v__ ) volatile
+	{ return fetch_add( __v__ ) + __v__; }
+
+	unsigned long operator -=( unsigned long __v__ ) volatile
+	{ return fetch_sub( __v__ ) - __v__; }
+
+	unsigned long operator &=( unsigned long __v__ ) volatile
+	{ return fetch_and( __v__ ) & __v__; }
+
+	unsigned long operator |=( unsigned long __v__ ) volatile
+	{ return fetch_or( __v__ ) | __v__; }
+
+	unsigned long operator ^=( unsigned long __v__ ) volatile
+	{ return fetch_xor( __v__ ) ^ __v__; }
+
+	friend void atomic_store_explicit( volatile atomic_ulong*, unsigned long,
+																		 memory_order );
+	friend unsigned long atomic_load_explicit( volatile atomic_ulong*,
+																						 memory_order );
+	friend unsigned long atomic_exchange_explicit( volatile atomic_ulong*,
+																								 unsigned long, memory_order );
+	friend bool atomic_compare_exchange_weak_explicit( volatile atomic_ulong*,
+																										 unsigned long*, unsigned long, memory_order, memory_order );
+	friend bool atomic_compare_exchange_strong_explicit( volatile atomic_ulong*,
+																											 unsigned long*, unsigned long, memory_order, memory_order );
+	friend unsigned long atomic_fetch_add_explicit( volatile atomic_ulong*,
+																									unsigned long, memory_order );
+	friend unsigned long atomic_fetch_sub_explicit( volatile atomic_ulong*,
+																									unsigned long, memory_order );
+	friend unsigned long atomic_fetch_and_explicit( volatile atomic_ulong*,
+																									unsigned long, memory_order );
+	friend unsigned long atomic_fetch_or_explicit(  volatile atomic_ulong*,
+																									unsigned long, memory_order );
+	friend unsigned long atomic_fetch_xor_explicit( volatile atomic_ulong*,
+																									unsigned long, memory_order );
+
+	CPP0X(private:)
 #endif
-    unsigned long __f__;
+	unsigned long __f__;
 } atomic_ulong;
 
 
 typedef struct atomic_llong
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( long long,
-                memory_order = memory_order_seq_cst ) volatile;
-    long long load( memory_order = memory_order_seq_cst ) volatile;
-    long long exchange( long long,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( long long&, long long,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( long long&, long long,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( long long&, long long,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( long long&, long long,
-                       memory_order = memory_order_seq_cst ) volatile;
-    long long fetch_add( long long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    long long fetch_sub( long long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    long long fetch_and( long long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    long long fetch_or( long long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    long long fetch_xor( long long,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_llong() = default; )
-    CPP0X( constexpr atomic_llong( long long __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_llong( const atomic_llong& ) = delete; )
-    atomic_llong& operator =( const atomic_llong& ) CPP0X(=delete);
-
-    long long operator =( long long __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    long long operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    long long operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    long long operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    long long operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    long long operator +=( long long __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    long long operator -=( long long __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    long long operator &=( long long __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    long long operator |=( long long __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    long long operator ^=( long long __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_llong*, long long,
-                                       memory_order );
-    friend long long atomic_load_explicit( volatile atomic_llong*,
-                                             memory_order );
-    friend long long atomic_exchange_explicit( volatile atomic_llong*,
-                                             long long, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_llong*,
-                      long long*, long long, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_llong*,
-                      long long*, long long, memory_order, memory_order );
-    friend long long atomic_fetch_add_explicit( volatile atomic_llong*,
-                                                  long long, memory_order );
-    friend long long atomic_fetch_sub_explicit( volatile atomic_llong*,
-                                                  long long, memory_order );
-    friend long long atomic_fetch_and_explicit( volatile atomic_llong*,
-                                                  long long, memory_order );
-    friend long long atomic_fetch_or_explicit(  volatile atomic_llong*,
-                                                  long long, memory_order );
-    friend long long atomic_fetch_xor_explicit( volatile atomic_llong*,
-                                                  long long, memory_order );
-
-CPP0X(private:)
+	bool is_lock_free() const volatile;
+	void store( long long,
+							memory_order = memory_order_seq_cst ) volatile;
+	long long load( memory_order = memory_order_seq_cst ) volatile;
+	long long exchange( long long,
+											memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_weak( long long&, long long,
+															memory_order, memory_order ) volatile;
+	bool compare_exchange_strong( long long&, long long,
+																memory_order, memory_order ) volatile;
+	bool compare_exchange_weak( long long&, long long,
+															memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_strong( long long&, long long,
+																memory_order = memory_order_seq_cst ) volatile;
+	long long fetch_add( long long,
+											 memory_order = memory_order_seq_cst ) volatile;
+	long long fetch_sub( long long,
+											 memory_order = memory_order_seq_cst ) volatile;
+	long long fetch_and( long long,
+											 memory_order = memory_order_seq_cst ) volatile;
+	long long fetch_or( long long,
+											memory_order = memory_order_seq_cst ) volatile;
+	long long fetch_xor( long long,
+											 memory_order = memory_order_seq_cst ) volatile;
+
+	CPP0X( atomic_llong() = default; )
+	CPP0X( constexpr atomic_llong( long long __v__ ) : __f__( __v__) {
+		} )
+	CPP0X( atomic_llong( const atomic_llong& ) = delete; )
+	atomic_llong& operator =( const atomic_llong& ) CPP0X(=delete);
+
+	long long operator =( long long __v__ ) volatile
+	{ store( __v__ ); return __v__; }
+
+	long long operator ++( int ) volatile
+	{ return fetch_add( 1 ); }
+
+	long long operator --( int ) volatile
+	{ return fetch_sub( 1 ); }
+
+	long long operator ++() volatile
+	{ return fetch_add( 1 ) + 1; }
+
+	long long operator --() volatile
+	{ return fetch_sub( 1 ) - 1; }
+
+	long long operator +=( long long __v__ ) volatile
+	{ return fetch_add( __v__ ) + __v__; }
+
+	long long operator -=( long long __v__ ) volatile
+	{ return fetch_sub( __v__ ) - __v__; }
+
+	long long operator &=( long long __v__ ) volatile
+	{ return fetch_and( __v__ ) & __v__; }
+
+	long long operator |=( long long __v__ ) volatile
+	{ return fetch_or( __v__ ) | __v__; }
+
+	long long operator ^=( long long __v__ ) volatile
+	{ return fetch_xor( __v__ ) ^ __v__; }
+
+	friend void atomic_store_explicit( volatile atomic_llong*, long long,
+																		 memory_order );
+	friend long long atomic_load_explicit( volatile atomic_llong*,
+																				 memory_order );
+	friend long long atomic_exchange_explicit( volatile atomic_llong*,
+																						 long long, memory_order );
+	friend bool atomic_compare_exchange_weak_explicit( volatile atomic_llong*,
+																										 long long*, long long, memory_order, memory_order );
+	friend bool atomic_compare_exchange_strong_explicit( volatile atomic_llong*,
+																											 long long*, long long, memory_order, memory_order );
+	friend long long atomic_fetch_add_explicit( volatile atomic_llong*,
+																							long long, memory_order );
+	friend long long atomic_fetch_sub_explicit( volatile atomic_llong*,
+																							long long, memory_order );
+	friend long long atomic_fetch_and_explicit( volatile atomic_llong*,
+																							long long, memory_order );
+	friend long long atomic_fetch_or_explicit(  volatile atomic_llong*,
+																							long long, memory_order );
+	friend long long atomic_fetch_xor_explicit( volatile atomic_llong*,
+																							long long, memory_order );
+
+	CPP0X(private:)
 #endif
-    long long __f__;
+	long long __f__;
 } atomic_llong;
 
 
 typedef struct atomic_ullong
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( unsigned long long,
-                memory_order = memory_order_seq_cst ) volatile;
-    unsigned long long load( memory_order = memory_order_seq_cst ) volatile;
-    unsigned long long exchange( unsigned long long,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( unsigned long long&, unsigned long long,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( unsigned long long&, unsigned long long,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( unsigned long long&, unsigned long long,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( unsigned long long&, unsigned long long,
-                       memory_order = memory_order_seq_cst ) volatile;
-    unsigned long long fetch_add( unsigned long long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned long long fetch_sub( unsigned long long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned long long fetch_and( unsigned long long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned long long fetch_or( unsigned long long,
-                           memory_order = memory_order_seq_cst ) volatile;
-    unsigned long long fetch_xor( unsigned long long,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_ullong() = default; )
-    CPP0X( constexpr atomic_ullong( unsigned long long __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_ullong( const atomic_ullong& ) = delete; )
-    atomic_ullong& operator =( const atomic_ullong& ) CPP0X(=delete);
-
-    unsigned long long operator =( unsigned long long __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    unsigned long long operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    unsigned long long operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    unsigned long long operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    unsigned long long operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    unsigned long long operator +=( unsigned long long __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    unsigned long long operator -=( unsigned long long __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    unsigned long long operator &=( unsigned long long __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    unsigned long long operator |=( unsigned long long __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    unsigned long long operator ^=( unsigned long long __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_ullong*, unsigned long long,
-                                       memory_order );
-    friend unsigned long long atomic_load_explicit( volatile atomic_ullong*,
-                                             memory_order );
-    friend unsigned long long atomic_exchange_explicit( volatile atomic_ullong*,
-                                             unsigned long long, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_ullong*,
-                      unsigned long long*, unsigned long long, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_ullong*,
-                      unsigned long long*, unsigned long long, memory_order, memory_order );
-    friend unsigned long long atomic_fetch_add_explicit( volatile atomic_ullong*,
-                                                  unsigned long long, memory_order );
-    friend unsigned long long atomic_fetch_sub_explicit( volatile atomic_ullong*,
-                                                  unsigned long long, memory_order );
-    friend unsigned long long atomic_fetch_and_explicit( volatile atomic_ullong*,
-                                                  unsigned long long, memory_order );
-    friend unsigned long long atomic_fetch_or_explicit(  volatile atomic_ullong*,
-                                                  unsigned long long, memory_order );
-    friend unsigned long long atomic_fetch_xor_explicit( volatile atomic_ullong*,
-                                                  unsigned long long, memory_order );
-
-CPP0X(private:)
+	bool is_lock_free() const volatile;
+	void store( unsigned long long,
+							memory_order = memory_order_seq_cst ) volatile;
+	unsigned long long load( memory_order = memory_order_seq_cst ) volatile;
+	unsigned long long exchange( unsigned long long,
+															 memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_weak( unsigned long long&, unsigned long long,
+															memory_order, memory_order ) volatile;
+	bool compare_exchange_strong( unsigned long long&, unsigned long long,
+																memory_order, memory_order ) volatile;
+	bool compare_exchange_weak( unsigned long long&, unsigned long long,
+															memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_strong( unsigned long long&, unsigned long long,
+																memory_order = memory_order_seq_cst ) volatile;
+	unsigned long long fetch_add( unsigned long long,
+																memory_order = memory_order_seq_cst ) volatile;
+	unsigned long long fetch_sub( unsigned long long,
+																memory_order = memory_order_seq_cst ) volatile;
+	unsigned long long fetch_and( unsigned long long,
+																memory_order = memory_order_seq_cst ) volatile;
+	unsigned long long fetch_or( unsigned long long,
+															 memory_order = memory_order_seq_cst ) volatile;
+	unsigned long long fetch_xor( unsigned long long,
+																memory_order = memory_order_seq_cst ) volatile;
+
+	CPP0X( atomic_ullong() = default; )
+	CPP0X( constexpr atomic_ullong( unsigned long long __v__ ) : __f__( __v__) {
+		} )
+	CPP0X( atomic_ullong( const atomic_ullong& ) = delete; )
+	atomic_ullong& operator =( const atomic_ullong& ) CPP0X(=delete);
+
+	unsigned long long operator =( unsigned long long __v__ ) volatile
+	{ store( __v__ ); return __v__; }
+
+	unsigned long long operator ++( int ) volatile
+	{ return fetch_add( 1 ); }
+
+	unsigned long long operator --( int ) volatile
+	{ return fetch_sub( 1 ); }
+
+	unsigned long long operator ++() volatile
+	{ return fetch_add( 1 ) + 1; }
+
+	unsigned long long operator --() volatile
+	{ return fetch_sub( 1 ) - 1; }
+
+	unsigned long long operator +=( unsigned long long __v__ ) volatile
+	{ return fetch_add( __v__ ) + __v__; }
+
+	unsigned long long operator -=( unsigned long long __v__ ) volatile
+	{ return fetch_sub( __v__ ) - __v__; }
+
+	unsigned long long operator &=( unsigned long long __v__ ) volatile
+	{ return fetch_and( __v__ ) & __v__; }
+
+	unsigned long long operator |=( unsigned long long __v__ ) volatile
+	{ return fetch_or( __v__ ) | __v__; }
+
+	unsigned long long operator ^=( unsigned long long __v__ ) volatile
+	{ return fetch_xor( __v__ ) ^ __v__; }
+
+	friend void atomic_store_explicit( volatile atomic_ullong*, unsigned long long,
+																		 memory_order );
+	friend unsigned long long atomic_load_explicit( volatile atomic_ullong*,
+																									memory_order );
+	friend unsigned long long atomic_exchange_explicit( volatile atomic_ullong*,
+																											unsigned long long, memory_order );
+	friend bool atomic_compare_exchange_weak_explicit( volatile atomic_ullong*,
+																										 unsigned long long*, unsigned long long, memory_order, memory_order );
+	friend bool atomic_compare_exchange_strong_explicit( volatile atomic_ullong*,
+																											 unsigned long long*, unsigned long long, memory_order, memory_order );
+	friend unsigned long long atomic_fetch_add_explicit( volatile atomic_ullong*,
+																											 unsigned long long, memory_order );
+	friend unsigned long long atomic_fetch_sub_explicit( volatile atomic_ullong*,
+																											 unsigned long long, memory_order );
+	friend unsigned long long atomic_fetch_and_explicit( volatile atomic_ullong*,
+																											 unsigned long long, memory_order );
+	friend unsigned long long atomic_fetch_or_explicit(  volatile atomic_ullong*,
+																											 unsigned long long, memory_order );
+	friend unsigned long long atomic_fetch_xor_explicit( volatile atomic_ullong*,
+																											 unsigned long long, memory_order );
+
+	CPP0X(private:)
 #endif
-    unsigned long long __f__;
+	unsigned long long __f__;
 } atomic_ullong;
 
 
@@ -1252,89 +1265,90 @@ typedef atomic_ullong atomic_uintmax_t;
 typedef struct atomic_wchar_t
 {
 #ifdef __cplusplus
-    bool is_lock_free() const volatile;
-    void store( wchar_t, memory_order = memory_order_seq_cst ) volatile;
-    wchar_t load( memory_order = memory_order_seq_cst ) volatile;
-    wchar_t exchange( wchar_t,
-                      memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( wchar_t&, wchar_t,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( wchar_t&, wchar_t,
-                       memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( wchar_t&, wchar_t,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( wchar_t&, wchar_t,
-                       memory_order = memory_order_seq_cst ) volatile;
-    wchar_t fetch_add( wchar_t,
-                           memory_order = memory_order_seq_cst ) volatile;
-    wchar_t fetch_sub( wchar_t,
-                           memory_order = memory_order_seq_cst ) volatile;
-    wchar_t fetch_and( wchar_t,
-                           memory_order = memory_order_seq_cst ) volatile;
-    wchar_t fetch_or( wchar_t,
-                           memory_order = memory_order_seq_cst ) volatile;
-    wchar_t fetch_xor( wchar_t,
-                           memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic_wchar_t() = default; )
-    CPP0X( constexpr atomic_wchar_t( wchar_t __v__ ) : __f__( __v__) { } )
-    CPP0X( atomic_wchar_t( const atomic_wchar_t& ) = delete; )
-    atomic_wchar_t& operator =( const atomic_wchar_t& ) CPP0X(=delete);
-
-    wchar_t operator =( wchar_t __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    wchar_t operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    wchar_t operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    wchar_t operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    wchar_t operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    wchar_t operator +=( wchar_t __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    wchar_t operator -=( wchar_t __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
-
-    wchar_t operator &=( wchar_t __v__ ) volatile
-    { return fetch_and( __v__ ) & __v__; }
-
-    wchar_t operator |=( wchar_t __v__ ) volatile
-    { return fetch_or( __v__ ) | __v__; }
-
-    wchar_t operator ^=( wchar_t __v__ ) volatile
-    { return fetch_xor( __v__ ) ^ __v__; }
-
-    friend void atomic_store_explicit( volatile atomic_wchar_t*, wchar_t,
-                                       memory_order );
-    friend wchar_t atomic_load_explicit( volatile atomic_wchar_t*,
-                                             memory_order );
-    friend wchar_t atomic_exchange_explicit( volatile atomic_wchar_t*,
-                                             wchar_t, memory_order );
-    friend bool atomic_compare_exchange_weak_explicit( volatile atomic_wchar_t*,
-                    wchar_t*, wchar_t, memory_order, memory_order );
-    friend bool atomic_compare_exchange_strong_explicit( volatile atomic_wchar_t*,
-                    wchar_t*, wchar_t, memory_order, memory_order );
-    friend wchar_t atomic_fetch_add_explicit( volatile atomic_wchar_t*,
-                                                  wchar_t, memory_order );
-    friend wchar_t atomic_fetch_sub_explicit( volatile atomic_wchar_t*,
-                                                  wchar_t, memory_order );
-    friend wchar_t atomic_fetch_and_explicit( volatile atomic_wchar_t*,
-                                                  wchar_t, memory_order );
-    friend wchar_t atomic_fetch_or_explicit( volatile atomic_wchar_t*,
-                                                  wchar_t, memory_order );
-    friend wchar_t atomic_fetch_xor_explicit( volatile atomic_wchar_t*,
-                                                  wchar_t, memory_order );
-
-CPP0X(private:)
+	bool is_lock_free() const volatile;
+	void store( wchar_t, memory_order = memory_order_seq_cst ) volatile;
+	wchar_t load( memory_order = memory_order_seq_cst ) volatile;
+	wchar_t exchange( wchar_t,
+										memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_weak( wchar_t&, wchar_t,
+															memory_order, memory_order ) volatile;
+	bool compare_exchange_strong( wchar_t&, wchar_t,
+																memory_order, memory_order ) volatile;
+	bool compare_exchange_weak( wchar_t&, wchar_t,
+															memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_strong( wchar_t&, wchar_t,
+																memory_order = memory_order_seq_cst ) volatile;
+	wchar_t fetch_add( wchar_t,
+										 memory_order = memory_order_seq_cst ) volatile;
+	wchar_t fetch_sub( wchar_t,
+										 memory_order = memory_order_seq_cst ) volatile;
+	wchar_t fetch_and( wchar_t,
+										 memory_order = memory_order_seq_cst ) volatile;
+	wchar_t fetch_or( wchar_t,
+										memory_order = memory_order_seq_cst ) volatile;
+	wchar_t fetch_xor( wchar_t,
+										 memory_order = memory_order_seq_cst ) volatile;
+
+	CPP0X( atomic_wchar_t() = default; )
+	CPP0X( constexpr atomic_wchar_t( wchar_t __v__ ) : __f__( __v__) {
+		} )
+	CPP0X( atomic_wchar_t( const atomic_wchar_t& ) = delete; )
+	atomic_wchar_t& operator =( const atomic_wchar_t& ) CPP0X(=delete);
+
+	wchar_t operator =( wchar_t __v__ ) volatile
+	{ store( __v__ ); return __v__; }
+
+	wchar_t operator ++( int ) volatile
+	{ return fetch_add( 1 ); }
+
+	wchar_t operator --( int ) volatile
+	{ return fetch_sub( 1 ); }
+
+	wchar_t operator ++() volatile
+	{ return fetch_add( 1 ) + 1; }
+
+	wchar_t operator --() volatile
+	{ return fetch_sub( 1 ) - 1; }
+
+	wchar_t operator +=( wchar_t __v__ ) volatile
+	{ return fetch_add( __v__ ) + __v__; }
+
+	wchar_t operator -=( wchar_t __v__ ) volatile
+	{ return fetch_sub( __v__ ) - __v__; }
+
+	wchar_t operator &=( wchar_t __v__ ) volatile
+	{ return fetch_and( __v__ ) & __v__; }
+
+	wchar_t operator |=( wchar_t __v__ ) volatile
+	{ return fetch_or( __v__ ) | __v__; }
+
+	wchar_t operator ^=( wchar_t __v__ ) volatile
+	{ return fetch_xor( __v__ ) ^ __v__; }
+
+	friend void atomic_store_explicit( volatile atomic_wchar_t*, wchar_t,
+																		 memory_order );
+	friend wchar_t atomic_load_explicit( volatile atomic_wchar_t*,
+																			 memory_order );
+	friend wchar_t atomic_exchange_explicit( volatile atomic_wchar_t*,
+																					 wchar_t, memory_order );
+	friend bool atomic_compare_exchange_weak_explicit( volatile atomic_wchar_t*,
+																										 wchar_t*, wchar_t, memory_order, memory_order );
+	friend bool atomic_compare_exchange_strong_explicit( volatile atomic_wchar_t*,
+																											 wchar_t*, wchar_t, memory_order, memory_order );
+	friend wchar_t atomic_fetch_add_explicit( volatile atomic_wchar_t*,
+																						wchar_t, memory_order );
+	friend wchar_t atomic_fetch_sub_explicit( volatile atomic_wchar_t*,
+																						wchar_t, memory_order );
+	friend wchar_t atomic_fetch_and_explicit( volatile atomic_wchar_t*,
+																						wchar_t, memory_order );
+	friend wchar_t atomic_fetch_or_explicit( volatile atomic_wchar_t*,
+																					 wchar_t, memory_order );
+	friend wchar_t atomic_fetch_xor_explicit( volatile atomic_wchar_t*,
+																						wchar_t, memory_order );
+
+	CPP0X(private:)
 #endif
-    wchar_t __f__;
+	wchar_t __f__;
 } atomic_wchar_t;
 
 
@@ -1354,26 +1368,27 @@ struct atomic
 {
 #ifdef __cplusplus
 
-    bool is_lock_free() const volatile;
-    void store( T, memory_order = memory_order_seq_cst ) volatile;
-    T load( memory_order = memory_order_seq_cst ) volatile;
-    T exchange( T __v__, memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( T&, T, memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( T&, T, memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( T&, T, memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( T&, T, memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( T __v__ ) : __f__( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    T operator =( T __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-CPP0X(private:)
+	bool is_lock_free() const volatile;
+	void store( T, memory_order = memory_order_seq_cst ) volatile;
+	T load( memory_order = memory_order_seq_cst ) volatile;
+	T exchange( T __v__, memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_weak( T&, T, memory_order, memory_order ) volatile;
+	bool compare_exchange_strong( T&, T, memory_order, memory_order ) volatile;
+	bool compare_exchange_weak( T&, T, memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_strong( T&, T, memory_order = memory_order_seq_cst ) volatile;
+
+	CPP0X( atomic() = default; )
+	CPP0X( constexpr explicit atomic( T __v__ ) : __f__( __v__ ) {
+		} )
+	CPP0X( atomic( const atomic& ) = delete; )
+	atomic& operator =( const atomic& ) CPP0X(=delete);
+
+	T operator =( T __v__ ) volatile
+	{ store( __v__ ); return __v__; }
+
+	CPP0X(private:)
 #endif
-    T __f__;
+	T __f__;
 };
 
 #endif
@@ -1382,42 +1397,43 @@ CPP0X(private:)
 
 template<typename T> struct atomic< T* > : atomic_address
 {
-    T* load( memory_order = memory_order_seq_cst ) volatile;
-    T* exchange( T*, memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_weak( T*&, T*, memory_order, memory_order ) volatile;
-    bool compare_exchange_strong( T*&, T*, memory_order, memory_order ) volatile;
-    bool compare_exchange_weak( T*&, T*,
-                       memory_order = memory_order_seq_cst ) volatile;
-    bool compare_exchange_strong( T*&, T*,
-                       memory_order = memory_order_seq_cst ) volatile;
-    T* fetch_add( ptrdiff_t, memory_order = memory_order_seq_cst ) volatile;
-    T* fetch_sub( ptrdiff_t, memory_order = memory_order_seq_cst ) volatile;
-
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( T __v__ ) : atomic_address( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    T* operator =( T* __v__ ) volatile
-    { store( __v__ ); return __v__; }
-
-    T* operator ++( int ) volatile
-    { return fetch_add( 1 ); }
-
-    T* operator --( int ) volatile
-    { return fetch_sub( 1 ); }
-
-    T* operator ++() volatile
-    { return fetch_add( 1 ) + 1; }
-
-    T* operator --() volatile
-    { return fetch_sub( 1 ) - 1; }
-
-    T* operator +=( T* __v__ ) volatile
-    { return fetch_add( __v__ ) + __v__; }
-
-    T* operator -=( T* __v__ ) volatile
-    { return fetch_sub( __v__ ) - __v__; }
+	T* load( memory_order = memory_order_seq_cst ) volatile;
+	T* exchange( T*, memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_weak( T*&, T*, memory_order, memory_order ) volatile;
+	bool compare_exchange_strong( T*&, T*, memory_order, memory_order ) volatile;
+	bool compare_exchange_weak( T*&, T*,
+															memory_order = memory_order_seq_cst ) volatile;
+	bool compare_exchange_strong( T*&, T*,
+																memory_order = memory_order_seq_cst ) volatile;
+	T* fetch_add( ptrdiff_t, memory_order = memory_order_seq_cst ) volatile;
+	T* fetch_sub( ptrdiff_t, memory_order = memory_order_seq_cst ) volatile;
+
+	CPP0X( atomic() = default; )
+	CPP0X( constexpr explicit atomic( T __v__ ) : atomic_address( __v__ ) {
+		} )
+	CPP0X( atomic( const atomic& ) = delete; )
+	atomic& operator =( const atomic& ) CPP0X(=delete);
+
+	T* operator =( T* __v__ ) volatile
+	{ store( __v__ ); return __v__; }
+
+	T* operator ++( int ) volatile
+	{ return fetch_add( 1 ); }
+
+	T* operator --( int ) volatile
+	{ return fetch_sub( 1 ); }
+
+	T* operator ++() volatile
+	{ return fetch_add( 1 ) + 1; }
+
+	T* operator --() volatile
+	{ return fetch_sub( 1 ) - 1; }
+
+	T* operator +=( T* __v__ ) volatile
+	{ return fetch_add( __v__ ) + __v__; }
+
+	T* operator -=( T* __v__ ) volatile
+	{ return fetch_sub( __v__ ) - __v__; }
 };
 
 #endif
@@ -1427,183 +1443,197 @@ template<typename T> struct atomic< T* > : atomic_address
 
 template<> struct atomic< bool > : atomic_bool
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( bool __v__ )
-    : atomic_bool( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    bool operator =( bool __v__ ) volatile
-    { store( __v__ ); return __v__; }
+	CPP0X( atomic() = default; )
+	CPP0X( constexpr explicit atomic( bool __v__ )
+					 : atomic_bool( __v__ ) {
+		} )
+	CPP0X( atomic( const atomic& ) = delete; )
+	atomic& operator =( const atomic& ) CPP0X(=delete);
+
+	bool operator =( bool __v__ ) volatile
+	{ store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< void* > : atomic_address
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( void* __v__ )
-    : atomic_address( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    void* operator =( void* __v__ ) volatile
-    { store( __v__ ); return __v__; }
+	CPP0X( atomic() = default; )
+	CPP0X( constexpr explicit atomic( void* __v__ )
+					 : atomic_address( __v__ ) {
+		} )
+	CPP0X( atomic( const atomic& ) = delete; )
+	atomic& operator =( const atomic& ) CPP0X(=delete);
+
+	void* operator =( void* __v__ ) volatile
+	{ store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< char > : atomic_char
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( char __v__ )
-    : atomic_char( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    char operator =( char __v__ ) volatile
-    { store( __v__ ); return __v__; }
+	CPP0X( atomic() = default; )
+	CPP0X( constexpr explicit atomic( char __v__ )
+					 : atomic_char( __v__ ) {
+		} )
+	CPP0X( atomic( const atomic& ) = delete; )
+	atomic& operator =( const atomic& ) CPP0X(=delete);
+
+	char operator =( char __v__ ) volatile
+	{ store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< signed char > : atomic_schar
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( signed char __v__ )
-    : atomic_schar( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    signed char operator =( signed char __v__ ) volatile
-    { store( __v__ ); return __v__; }
+	CPP0X( atomic() = default; )
+	CPP0X( constexpr explicit atomic( signed char __v__ )
+					 : atomic_schar( __v__ ) {
+		} )
+	CPP0X( atomic( const atomic& ) = delete; )
+	atomic& operator =( const atomic& ) CPP0X(=delete);
+
+	signed char operator =( signed char __v__ ) volatile
+	{ store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< unsigned char > : atomic_uchar
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( unsigned char __v__ )
-    : atomic_uchar( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    unsigned char operator =( unsigned char __v__ ) volatile
-    { store( __v__ ); return __v__; }
+	CPP0X( atomic() = default; )
+	CPP0X( constexpr explicit atomic( unsigned char __v__ )
+					 : atomic_uchar( __v__ ) {
+		} )
+	CPP0X( atomic( const atomic& ) = delete; )
+	atomic& operator =( const atomic& ) CPP0X(=delete);
+
+	unsigned char operator =( unsigned char __v__ ) volatile
+	{ store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< short > : atomic_short
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( short __v__ )
-    : atomic_short( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    short operator =( short __v__ ) volatile
-    { store( __v__ ); return __v__; }
+	CPP0X( atomic() = default; )
+	CPP0X( constexpr explicit atomic( short __v__ )
+					 : atomic_short( __v__ ) {
+		} )
+	CPP0X( atomic( const atomic& ) = delete; )
+	atomic& operator =( const atomic& ) CPP0X(=delete);
+
+	short operator =( short __v__ ) volatile
+	{ store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< unsigned short > : atomic_ushort
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( unsigned short __v__ )
-    : atomic_ushort( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    unsigned short operator =( unsigned short __v__ ) volatile
-    { store( __v__ ); return __v__; }
+	CPP0X( atomic() = default; )
+	CPP0X( constexpr explicit atomic( unsigned short __v__ )
+					 : atomic_ushort( __v__ ) {
+		} )
+	CPP0X( atomic( const atomic& ) = delete; )
+	atomic& operator =( const atomic& ) CPP0X(=delete);
+
+	unsigned short operator =( unsigned short __v__ ) volatile
+	{ store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< int > : atomic_int
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( int __v__ )
-    : atomic_int( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    int operator =( int __v__ ) volatile
-    { store( __v__ ); return __v__; }
+	CPP0X( atomic() = default; )
+	CPP0X( constexpr explicit atomic( int __v__ )
+					 : atomic_int( __v__ ) {
+		} )
+	CPP0X( atomic( const atomic& ) = delete; )
+	atomic& operator =( const atomic& ) CPP0X(=delete);
+
+	int operator =( int __v__ ) volatile
+	{ store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< unsigned int > : atomic_uint
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( unsigned int __v__ )
-    : atomic_uint( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    unsigned int operator =( unsigned int __v__ ) volatile
-    { store( __v__ ); return __v__; }
+	CPP0X( atomic() = default; )
+	CPP0X( constexpr explicit atomic( unsigned int __v__ )
+					 : atomic_uint( __v__ ) {
+		} )
+	CPP0X( atomic( const atomic& ) = delete; )
+	atomic& operator =( const atomic& ) CPP0X(=delete);
+
+	unsigned int operator =( unsigned int __v__ ) volatile
+	{ store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< long > : atomic_long
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( long __v__ )
-    : atomic_long( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    long operator =( long __v__ ) volatile
-    { store( __v__ ); return __v__; }
+	CPP0X( atomic() = default; )
+	CPP0X( constexpr explicit atomic( long __v__ )
+					 : atomic_long( __v__ ) {
+		} )
+	CPP0X( atomic( const atomic& ) = delete; )
+	atomic& operator =( const atomic& ) CPP0X(=delete);
+
+	long operator =( long __v__ ) volatile
+	{ store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< unsigned long > : atomic_ulong
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( unsigned long __v__ )
-    : atomic_ulong( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    unsigned long operator =( unsigned long __v__ ) volatile
-    { store( __v__ ); return __v__; }
+	CPP0X( atomic() = default; )
+	CPP0X( constexpr explicit atomic( unsigned long __v__ )
+					 : atomic_ulong( __v__ ) {
+		} )
+	CPP0X( atomic( const atomic& ) = delete; )
+	atomic& operator =( const atomic& ) CPP0X(=delete);
+
+	unsigned long operator =( unsigned long __v__ ) volatile
+	{ store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< long long > : atomic_llong
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( long long __v__ )
-    : atomic_llong( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    long long operator =( long long __v__ ) volatile
-    { store( __v__ ); return __v__; }
+	CPP0X( atomic() = default; )
+	CPP0X( constexpr explicit atomic( long long __v__ )
+					 : atomic_llong( __v__ ) {
+		} )
+	CPP0X( atomic( const atomic& ) = delete; )
+	atomic& operator =( const atomic& ) CPP0X(=delete);
+
+	long long operator =( long long __v__ ) volatile
+	{ store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< unsigned long long > : atomic_ullong
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( unsigned long long __v__ )
-    : atomic_ullong( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    unsigned long long operator =( unsigned long long __v__ ) volatile
-    { store( __v__ ); return __v__; }
+	CPP0X( atomic() = default; )
+	CPP0X( constexpr explicit atomic( unsigned long long __v__ )
+					 : atomic_ullong( __v__ ) {
+		} )
+	CPP0X( atomic( const atomic& ) = delete; )
+	atomic& operator =( const atomic& ) CPP0X(=delete);
+
+	unsigned long long operator =( unsigned long long __v__ ) volatile
+	{ store( __v__ ); return __v__; }
 };
 
 
 template<> struct atomic< wchar_t > : atomic_wchar_t
 {
-    CPP0X( atomic() = default; )
-    CPP0X( constexpr explicit atomic( wchar_t __v__ )
-    : atomic_wchar_t( __v__ ) { } )
-    CPP0X( atomic( const atomic& ) = delete; )
-    atomic& operator =( const atomic& ) CPP0X(=delete);
-
-    wchar_t operator =( wchar_t __v__ ) volatile
-    { store( __v__ ); return __v__; }
+	CPP0X( atomic() = default; )
+	CPP0X( constexpr explicit atomic( wchar_t __v__ )
+					 : atomic_wchar_t( __v__ ) {
+		} )
+	CPP0X( atomic( const atomic& ) = delete; )
+	atomic& operator =( const atomic& ) CPP0X(=delete);
+
+	wchar_t operator =( wchar_t __v__ ) volatile
+	{ store( __v__ ); return __v__; }
 };
 
 
@@ -1614,1287 +1644,1344 @@ template<> struct atomic< wchar_t > : atomic_wchar_t
 
 
 inline bool atomic_is_lock_free
-( const volatile atomic_bool* __a__ )
+	( const volatile atomic_bool* __a__ )
 { return false; }
 
 inline bool atomic_load_explicit
-( volatile atomic_bool* __a__, memory_order __x__ )
+	( volatile atomic_bool* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline bool atomic_load
-( volatile atomic_bool* __a__ ) { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
+	( volatile atomic_bool* __a__ ) { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_bool* __a__, bool __m__ )
+	( volatile atomic_bool* __a__, bool __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_bool* __a__, bool __m__, memory_order __x__ )
+	( volatile atomic_bool* __a__, bool __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_bool* __a__, bool __m__ )
+	( volatile atomic_bool* __a__, bool __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_exchange_explicit
-( volatile atomic_bool* __a__, bool __m__, memory_order __x__ )
+	( volatile atomic_bool* __a__, bool __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline bool atomic_exchange
-( volatile atomic_bool* __a__, bool __m__ )
+	( volatile atomic_bool* __a__, bool __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_bool* __a__, bool* __e__, bool __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_bool* __a__, bool* __e__, bool __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_bool* __a__, bool* __e__, bool __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_bool* __a__, bool* __e__, bool __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_bool* __a__, bool* __e__, bool __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_bool* __a__, bool* __e__, bool __m__ )
+{
+	return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+																								memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_bool* __a__, bool* __e__, bool __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_bool* __a__, bool* __e__, bool __m__ )
+{
+	return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+																									memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_address* __a__ )
 { return false; }
 
 inline void* atomic_load_explicit
-( volatile atomic_address* __a__, memory_order __x__ )
+	( volatile atomic_address* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline void* atomic_load( volatile atomic_address* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_address* __a__, void* __m__ )
+	( volatile atomic_address* __a__, void* __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_address* __a__, void* __m__, memory_order __x__ )
+	( volatile atomic_address* __a__, void* __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_address* __a__, void* __m__ )
+	( volatile atomic_address* __a__, void* __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline void* atomic_exchange_explicit
-( volatile atomic_address* __a__, void* __m__, memory_order __x__ )
+	( volatile atomic_address* __a__, void* __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__,  __x__ ); }
 
 inline void* atomic_exchange
-( volatile atomic_address* __a__, void* __m__ )
+	( volatile atomic_address* __a__, void* __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_address* __a__, void** __e__, void* __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_address* __a__, void** __e__, void* __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_address* __a__, void** __e__, void* __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_address* __a__, void** __e__, void* __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_address* __a__, void** __e__, void* __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_address* __a__, void** __e__, void* __m__ )
+{
+	return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+																								memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_address* __a__, void** __e__, void* __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_address* __a__, void** __e__, void* __m__ )
+{
+	return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+																									memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_char* __a__ )
 { return false; }
 
 inline char atomic_load_explicit
-( volatile atomic_char* __a__, memory_order __x__ )
+	( volatile atomic_char* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline char atomic_load( volatile atomic_char* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_char* __a__, char __m__ )
+	( volatile atomic_char* __a__, char __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_char* __a__, char __m__, memory_order __x__ )
+	( volatile atomic_char* __a__, char __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_char* __a__, char __m__ )
+	( volatile atomic_char* __a__, char __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline char atomic_exchange_explicit
-( volatile atomic_char* __a__, char __m__, memory_order __x__ )
+	( volatile atomic_char* __a__, char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline char atomic_exchange
-( volatile atomic_char* __a__, char __m__ )
+	( volatile atomic_char* __a__, char __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_char* __a__, char* __e__, char __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_char* __a__, char* __e__, char __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_char* __a__, char* __e__, char __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_char* __a__, char* __e__, char __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_char* __a__, char* __e__, char __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_char* __a__, char* __e__, char __m__ )
+{
+	return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+																								memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_char* __a__, char* __e__, char __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_char* __a__, char* __e__, char __m__ )
+{
+	return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+																									memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_schar* __a__ )
 { return false; }
 
 inline signed char atomic_load_explicit
-( volatile atomic_schar* __a__, memory_order __x__ )
+	( volatile atomic_schar* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline signed char atomic_load( volatile atomic_schar* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_schar* __a__, signed char __m__ )
+	( volatile atomic_schar* __a__, signed char __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
+	( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_schar* __a__, signed char __m__ )
+	( volatile atomic_schar* __a__, signed char __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline signed char atomic_exchange_explicit
-( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
+	( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline signed char atomic_exchange
-( volatile atomic_schar* __a__, signed char __m__ )
+	( volatile atomic_schar* __a__, signed char __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_schar* __a__, signed char* __e__, signed char __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_schar* __a__, signed char* __e__, signed char __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_schar* __a__, signed char* __e__, signed char __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_schar* __a__, signed char* __e__, signed char __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_schar* __a__, signed char* __e__, signed char __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_schar* __a__, signed char* __e__, signed char __m__ )
+{
+	return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+																								memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_schar* __a__, signed char* __e__, signed char __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_schar* __a__, signed char* __e__, signed char __m__ )
+{
+	return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+																									memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_uchar* __a__ )
 { return false; }
 
 inline unsigned char atomic_load_explicit
-( volatile atomic_uchar* __a__, memory_order __x__ )
+	( volatile atomic_uchar* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline unsigned char atomic_load( volatile atomic_uchar* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_uchar* __a__, unsigned char __m__ )
+	( volatile atomic_uchar* __a__, unsigned char __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
+	( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_uchar* __a__, unsigned char __m__ )
+	( volatile atomic_uchar* __a__, unsigned char __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline unsigned char atomic_exchange_explicit
-( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
+	( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline unsigned char atomic_exchange
-( volatile atomic_uchar* __a__, unsigned char __m__ )
+	( volatile atomic_uchar* __a__, unsigned char __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_uchar* __a__, unsigned char* __e__, unsigned char __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_uchar* __a__, unsigned char* __e__, unsigned char __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_uchar* __a__, unsigned char* __e__, unsigned char __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_uchar* __a__, unsigned char* __e__, unsigned char __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_uchar* __a__, unsigned char* __e__, unsigned char __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_uchar* __a__, unsigned char* __e__, unsigned char __m__ )
+{
+	return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+																								memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_uchar* __a__, unsigned char* __e__, unsigned char __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_uchar* __a__, unsigned char* __e__, unsigned char __m__ )
+{
+	return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+																									memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_short* __a__ )
 { return false; }
 
 inline short atomic_load_explicit
-( volatile atomic_short* __a__, memory_order __x__ )
+	( volatile atomic_short* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline short atomic_load( volatile atomic_short* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_short* __a__, short __m__ )
+	( volatile atomic_short* __a__, short __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_short* __a__, short __m__, memory_order __x__ )
+	( volatile atomic_short* __a__, short __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_short* __a__, short __m__ )
+	( volatile atomic_short* __a__, short __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline short atomic_exchange_explicit
-( volatile atomic_short* __a__, short __m__, memory_order __x__ )
+	( volatile atomic_short* __a__, short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline short atomic_exchange
-( volatile atomic_short* __a__, short __m__ )
+	( volatile atomic_short* __a__, short __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_short* __a__, short* __e__, short __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_short* __a__, short* __e__, short __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_short* __a__, short* __e__, short __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_short* __a__, short* __e__, short __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_short* __a__, short* __e__, short __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_short* __a__, short* __e__, short __m__ )
+{
+	return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+																								memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_short* __a__, short* __e__, short __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_short* __a__, short* __e__, short __m__ )
+{
+	return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+																									memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_ushort* __a__ )
 { return false; }
 
 inline unsigned short atomic_load_explicit
-( volatile atomic_ushort* __a__, memory_order __x__ )
+	( volatile atomic_ushort* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline unsigned short atomic_load( volatile atomic_ushort* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_ushort* __a__, unsigned short __m__ )
+	( volatile atomic_ushort* __a__, unsigned short __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
+	( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_ushort* __a__, unsigned short __m__ )
+	( volatile atomic_ushort* __a__, unsigned short __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline unsigned short atomic_exchange_explicit
-( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
+	( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline unsigned short atomic_exchange
-( volatile atomic_ushort* __a__, unsigned short __m__ )
+	( volatile atomic_ushort* __a__, unsigned short __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_ushort* __a__, unsigned short* __e__, unsigned short __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_ushort* __a__, unsigned short* __e__, unsigned short __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_ushort* __a__, unsigned short* __e__, unsigned short __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_ushort* __a__, unsigned short* __e__, unsigned short __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_ushort* __a__, unsigned short* __e__, unsigned short __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_ushort* __a__, unsigned short* __e__, unsigned short __m__ )
+{
+	return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+																								memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_ushort* __a__, unsigned short* __e__, unsigned short __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_ushort* __a__, unsigned short* __e__, unsigned short __m__ )
+{
+	return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+																									memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_int* __a__ )
 { return false; }
 
 inline int atomic_load_explicit
-( volatile atomic_int* __a__, memory_order __x__ )
+	( volatile atomic_int* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline int atomic_load( volatile atomic_int* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_int* __a__, int __m__ )
+	( volatile atomic_int* __a__, int __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_int* __a__, int __m__, memory_order __x__ )
+	( volatile atomic_int* __a__, int __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_int* __a__, int __m__ )
+	( volatile atomic_int* __a__, int __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline int atomic_exchange_explicit
-( volatile atomic_int* __a__, int __m__, memory_order __x__ )
+	( volatile atomic_int* __a__, int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline int atomic_exchange
-( volatile atomic_int* __a__, int __m__ )
+	( volatile atomic_int* __a__, int __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_int* __a__, int* __e__, int __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_int* __a__, int* __e__, int __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_int* __a__, int* __e__, int __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_int* __a__, int* __e__, int __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_int* __a__, int* __e__, int __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_int* __a__, int* __e__, int __m__ )
+{
+	return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+																								memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_int* __a__, int* __e__, int __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_int* __a__, int* __e__, int __m__ )
+{
+	return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+																									memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_uint* __a__ )
 { return false; }
 
 inline unsigned int atomic_load_explicit
-( volatile atomic_uint* __a__, memory_order __x__ )
+	( volatile atomic_uint* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline unsigned int atomic_load( volatile atomic_uint* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_uint* __a__, unsigned int __m__ )
+	( volatile atomic_uint* __a__, unsigned int __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
+	( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_uint* __a__, unsigned int __m__ )
+	( volatile atomic_uint* __a__, unsigned int __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline unsigned int atomic_exchange_explicit
-( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
+	( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline unsigned int atomic_exchange
-( volatile atomic_uint* __a__, unsigned int __m__ )
+	( volatile atomic_uint* __a__, unsigned int __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_uint* __a__, unsigned int* __e__, unsigned int __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_uint* __a__, unsigned int* __e__, unsigned int __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_uint* __a__, unsigned int* __e__, unsigned int __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_uint* __a__, unsigned int* __e__, unsigned int __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_uint* __a__, unsigned int* __e__, unsigned int __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_uint* __a__, unsigned int* __e__, unsigned int __m__ )
+{
+	return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+																								memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_uint* __a__, unsigned int* __e__, unsigned int __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_uint* __a__, unsigned int* __e__, unsigned int __m__ )
+{
+	return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+																									memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_long* __a__ )
 { return false; }
 
 inline long atomic_load_explicit
-( volatile atomic_long* __a__, memory_order __x__ )
+	( volatile atomic_long* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline long atomic_load( volatile atomic_long* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_long* __a__, long __m__ )
+	( volatile atomic_long* __a__, long __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_long* __a__, long __m__, memory_order __x__ )
+	( volatile atomic_long* __a__, long __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_long* __a__, long __m__ )
+	( volatile atomic_long* __a__, long __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline long atomic_exchange_explicit
-( volatile atomic_long* __a__, long __m__, memory_order __x__ )
+	( volatile atomic_long* __a__, long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline long atomic_exchange
-( volatile atomic_long* __a__, long __m__ )
+	( volatile atomic_long* __a__, long __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_long* __a__, long* __e__, long __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_long* __a__, long* __e__, long __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_long* __a__, long* __e__, long __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_long* __a__, long* __e__, long __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_long* __a__, long* __e__, long __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_long* __a__, long* __e__, long __m__ )
+{
+	return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+																								memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_long* __a__, long* __e__, long __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_long* __a__, long* __e__, long __m__ )
+{
+	return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+																									memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_ulong* __a__ )
 { return false; }
 
 inline unsigned long atomic_load_explicit
-( volatile atomic_ulong* __a__, memory_order __x__ )
+	( volatile atomic_ulong* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline unsigned long atomic_load( volatile atomic_ulong* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_ulong* __a__, unsigned long __m__ )
+	( volatile atomic_ulong* __a__, unsigned long __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
+	( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_ulong* __a__, unsigned long __m__ )
+	( volatile atomic_ulong* __a__, unsigned long __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline unsigned long atomic_exchange_explicit
-( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
+	( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline unsigned long atomic_exchange
-( volatile atomic_ulong* __a__, unsigned long __m__ )
+	( volatile atomic_ulong* __a__, unsigned long __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_ulong* __a__, unsigned long* __e__, unsigned long __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_ulong* __a__, unsigned long* __e__, unsigned long __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_ulong* __a__, unsigned long* __e__, unsigned long __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_ulong* __a__, unsigned long* __e__, unsigned long __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_ulong* __a__, unsigned long* __e__, unsigned long __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_ulong* __a__, unsigned long* __e__, unsigned long __m__ )
+{
+	return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+																								memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_ulong* __a__, unsigned long* __e__, unsigned long __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_ulong* __a__, unsigned long* __e__, unsigned long __m__ )
+{
+	return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+																									memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_llong* __a__ )
 { return false; }
 
 inline long long atomic_load_explicit
-( volatile atomic_llong* __a__, memory_order __x__ )
+	( volatile atomic_llong* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline long long atomic_load( volatile atomic_llong* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_llong* __a__, long long __m__ )
+	( volatile atomic_llong* __a__, long long __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
+	( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_llong* __a__, long long __m__ )
+	( volatile atomic_llong* __a__, long long __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline long long atomic_exchange_explicit
-( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
+	( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline long long atomic_exchange
-( volatile atomic_llong* __a__, long long __m__ )
+	( volatile atomic_llong* __a__, long long __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_llong* __a__, long long* __e__, long long __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_llong* __a__, long long* __e__, long long __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_llong* __a__, long long* __e__, long long __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_llong* __a__, long long* __e__, long long __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_llong* __a__, long long* __e__, long long __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_llong* __a__, long long* __e__, long long __m__ )
+{
+	return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+																								memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_llong* __a__, long long* __e__, long long __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_llong* __a__, long long* __e__, long long __m__ )
+{
+	return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+																									memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_ullong* __a__ )
 { return false; }
 
 inline unsigned long long atomic_load_explicit
-( volatile atomic_ullong* __a__, memory_order __x__ )
+	( volatile atomic_ullong* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline unsigned long long atomic_load( volatile atomic_ullong* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_ullong* __a__, unsigned long long __m__ )
+	( volatile atomic_ullong* __a__, unsigned long long __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
+	( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_ullong* __a__, unsigned long long __m__ )
+	( volatile atomic_ullong* __a__, unsigned long long __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline unsigned long long atomic_exchange_explicit
-( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
+	( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline unsigned long long atomic_exchange
-( volatile atomic_ullong* __a__, unsigned long long __m__ )
+	( volatile atomic_ullong* __a__, unsigned long long __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_ullong* __a__, unsigned long long* __e__, unsigned long long __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_ullong* __a__, unsigned long long* __e__, unsigned long long __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_ullong* __a__, unsigned long long* __e__, unsigned long long __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_ullong* __a__, unsigned long long* __e__, unsigned long long __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_ullong* __a__, unsigned long long* __e__, unsigned long long __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_ullong* __a__, unsigned long long* __e__, unsigned long long __m__ )
+{
+	return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+																								memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_ullong* __a__, unsigned long long* __e__, unsigned long long __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_ullong* __a__, unsigned long long* __e__, unsigned long long __m__ )
+{
+	return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+																									memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline bool atomic_is_lock_free( const volatile atomic_wchar_t* __a__ )
 { return false; }
 
 inline wchar_t atomic_load_explicit
-( volatile atomic_wchar_t* __a__, memory_order __x__ )
+	( volatile atomic_wchar_t* __a__, memory_order __x__ )
 { return _ATOMIC_LOAD_( __a__, __x__ ); }
 
 inline wchar_t atomic_load( volatile atomic_wchar_t* __a__ )
 { return atomic_load_explicit( __a__, memory_order_seq_cst ); }
 
 inline void atomic_init
-( volatile atomic_wchar_t* __a__, wchar_t __m__ )
+	( volatile atomic_wchar_t* __a__, wchar_t __m__ )
 { _ATOMIC_INIT_( __a__, __m__ ); }
 
 inline void atomic_store_explicit
-( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
+	( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
 { _ATOMIC_STORE_( __a__, __m__, __x__ ); }
 
 inline void atomic_store
-( volatile atomic_wchar_t* __a__, wchar_t __m__ )
+	( volatile atomic_wchar_t* __a__, wchar_t __m__ )
 { atomic_store_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline wchar_t atomic_exchange_explicit
-( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
+	( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, =, __m__, __x__ ); }
 
 inline wchar_t atomic_exchange
-( volatile atomic_wchar_t* __a__, wchar_t __m__ )
+	( volatile atomic_wchar_t* __a__, wchar_t __m__ )
 { return atomic_exchange_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline bool atomic_compare_exchange_weak_explicit
-( volatile atomic_wchar_t* __a__, wchar_t* __e__, wchar_t __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_wchar_t* __a__, wchar_t* __e__, wchar_t __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_strong_explicit
-( volatile atomic_wchar_t* __a__, wchar_t* __e__, wchar_t __m__,
-  memory_order __x__, memory_order __y__ )
+	( volatile atomic_wchar_t* __a__, wchar_t* __e__, wchar_t __m__,
+	memory_order __x__, memory_order __y__ )
 { return _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ ); }
 
 inline bool atomic_compare_exchange_weak
-( volatile atomic_wchar_t* __a__, wchar_t* __e__, wchar_t __m__ )
-{ return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_wchar_t* __a__, wchar_t* __e__, wchar_t __m__ )
+{
+	return atomic_compare_exchange_weak_explicit( __a__, __e__, __m__,
+																								memory_order_seq_cst, memory_order_seq_cst );
+}
 
 inline bool atomic_compare_exchange_strong
-( volatile atomic_wchar_t* __a__, wchar_t* __e__, wchar_t __m__ )
-{ return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
-                 memory_order_seq_cst, memory_order_seq_cst ); }
+	( volatile atomic_wchar_t* __a__, wchar_t* __e__, wchar_t __m__ )
+{
+	return atomic_compare_exchange_strong_explicit( __a__, __e__, __m__,
+																									memory_order_seq_cst, memory_order_seq_cst );
+}
 
 
 inline void* atomic_fetch_add_explicit
-( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
+	( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
 {
-	volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);
-	__typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__);
-	__typeof__((__a__)->__f__) __copy__= __old__;
+	volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);
+	__typeof__((__a__)->__f__)__old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__);
+	__typeof__((__a__)->__f__)__copy__= __old__;
 	__copy__ = (void *) (((char *)__copy__) + __m__);
 	model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);
 	return __old__;
 }
 
- inline void* atomic_fetch_add
-( volatile atomic_address* __a__, ptrdiff_t __m__ )
+inline void* atomic_fetch_add
+	( volatile atomic_address* __a__, ptrdiff_t __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline void* atomic_fetch_sub_explicit
-( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
-{	volatile __typeof__((__a__)->__f__)* __p__ = & ((__a__)->__f__);
-	__typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__)) model_rmwr_action((void *)__p__, __x__);
-	__typeof__((__a__)->__f__) __copy__= __old__;
+	( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
+{
+	volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);
+	__typeof__((__a__)->__f__)__old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__);
+	__typeof__((__a__)->__f__)__copy__= __old__;
 	__copy__ = (void *) (((char *)__copy__) - __m__);
 	model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);
 	return __old__;
 }
 
 inline void* atomic_fetch_sub
-( volatile atomic_address* __a__, ptrdiff_t __m__ )
+	( volatile atomic_address* __a__, ptrdiff_t __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 inline char atomic_fetch_add_explicit
-( volatile atomic_char* __a__, char __m__, memory_order __x__ )
+	( volatile atomic_char* __a__, char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline char atomic_fetch_add
-( volatile atomic_char* __a__, char __m__ )
+	( volatile atomic_char* __a__, char __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline char atomic_fetch_sub_explicit
-( volatile atomic_char* __a__, char __m__, memory_order __x__ )
+	( volatile atomic_char* __a__, char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline char atomic_fetch_sub
-( volatile atomic_char* __a__, char __m__ )
+	( volatile atomic_char* __a__, char __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline char atomic_fetch_and_explicit
-( volatile atomic_char* __a__, char __m__, memory_order __x__ )
+	( volatile atomic_char* __a__, char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline char atomic_fetch_and
-( volatile atomic_char* __a__, char __m__ )
+	( volatile atomic_char* __a__, char __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline char atomic_fetch_or_explicit
-( volatile atomic_char* __a__, char __m__, memory_order __x__ )
+	( volatile atomic_char* __a__, char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline char atomic_fetch_or
-( volatile atomic_char* __a__, char __m__ )
+	( volatile atomic_char* __a__, char __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline char atomic_fetch_xor_explicit
-( volatile atomic_char* __a__, char __m__, memory_order __x__ )
+	( volatile atomic_char* __a__, char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline char atomic_fetch_xor
-( volatile atomic_char* __a__, char __m__ )
+	( volatile atomic_char* __a__, char __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline signed char atomic_fetch_add_explicit
-( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
+	( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline signed char atomic_fetch_add
-( volatile atomic_schar* __a__, signed char __m__ )
+	( volatile atomic_schar* __a__, signed char __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline signed char atomic_fetch_sub_explicit
-( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
+	( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline signed char atomic_fetch_sub
-( volatile atomic_schar* __a__, signed char __m__ )
+	( volatile atomic_schar* __a__, signed char __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline signed char atomic_fetch_and_explicit
-( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
+	( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline signed char atomic_fetch_and
-( volatile atomic_schar* __a__, signed char __m__ )
+	( volatile atomic_schar* __a__, signed char __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline signed char atomic_fetch_or_explicit
-( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
+	( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline signed char atomic_fetch_or
-( volatile atomic_schar* __a__, signed char __m__ )
+	( volatile atomic_schar* __a__, signed char __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline signed char atomic_fetch_xor_explicit
-( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
+	( volatile atomic_schar* __a__, signed char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline signed char atomic_fetch_xor
-( volatile atomic_schar* __a__, signed char __m__ )
+	( volatile atomic_schar* __a__, signed char __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned char atomic_fetch_add_explicit
-( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
+	( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline unsigned char atomic_fetch_add
-( volatile atomic_uchar* __a__, unsigned char __m__ )
+	( volatile atomic_uchar* __a__, unsigned char __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned char atomic_fetch_sub_explicit
-( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
+	( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline unsigned char atomic_fetch_sub
-( volatile atomic_uchar* __a__, unsigned char __m__ )
+	( volatile atomic_uchar* __a__, unsigned char __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned char atomic_fetch_and_explicit
-( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
+	( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline unsigned char atomic_fetch_and
-( volatile atomic_uchar* __a__, unsigned char __m__ )
+	( volatile atomic_uchar* __a__, unsigned char __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned char atomic_fetch_or_explicit
-( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
+	( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline unsigned char atomic_fetch_or
-( volatile atomic_uchar* __a__, unsigned char __m__ )
+	( volatile atomic_uchar* __a__, unsigned char __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned char atomic_fetch_xor_explicit
-( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
+	( volatile atomic_uchar* __a__, unsigned char __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline unsigned char atomic_fetch_xor
-( volatile atomic_uchar* __a__, unsigned char __m__ )
+	( volatile atomic_uchar* __a__, unsigned char __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline short atomic_fetch_add_explicit
-( volatile atomic_short* __a__, short __m__, memory_order __x__ )
+	( volatile atomic_short* __a__, short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline short atomic_fetch_add
-( volatile atomic_short* __a__, short __m__ )
+	( volatile atomic_short* __a__, short __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline short atomic_fetch_sub_explicit
-( volatile atomic_short* __a__, short __m__, memory_order __x__ )
+	( volatile atomic_short* __a__, short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline short atomic_fetch_sub
-( volatile atomic_short* __a__, short __m__ )
+	( volatile atomic_short* __a__, short __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline short atomic_fetch_and_explicit
-( volatile atomic_short* __a__, short __m__, memory_order __x__ )
+	( volatile atomic_short* __a__, short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline short atomic_fetch_and
-( volatile atomic_short* __a__, short __m__ )
+	( volatile atomic_short* __a__, short __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline short atomic_fetch_or_explicit
-( volatile atomic_short* __a__, short __m__, memory_order __x__ )
+	( volatile atomic_short* __a__, short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline short atomic_fetch_or
-( volatile atomic_short* __a__, short __m__ )
+	( volatile atomic_short* __a__, short __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline short atomic_fetch_xor_explicit
-( volatile atomic_short* __a__, short __m__, memory_order __x__ )
+	( volatile atomic_short* __a__, short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline short atomic_fetch_xor
-( volatile atomic_short* __a__, short __m__ )
+	( volatile atomic_short* __a__, short __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned short atomic_fetch_add_explicit
-( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
+	( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline unsigned short atomic_fetch_add
-( volatile atomic_ushort* __a__, unsigned short __m__ )
+	( volatile atomic_ushort* __a__, unsigned short __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned short atomic_fetch_sub_explicit
-( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
+	( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline unsigned short atomic_fetch_sub
-( volatile atomic_ushort* __a__, unsigned short __m__ )
+	( volatile atomic_ushort* __a__, unsigned short __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned short atomic_fetch_and_explicit
-( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
+	( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline unsigned short atomic_fetch_and
-( volatile atomic_ushort* __a__, unsigned short __m__ )
+	( volatile atomic_ushort* __a__, unsigned short __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned short atomic_fetch_or_explicit
-( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
+	( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline unsigned short atomic_fetch_or
-( volatile atomic_ushort* __a__, unsigned short __m__ )
+	( volatile atomic_ushort* __a__, unsigned short __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned short atomic_fetch_xor_explicit
-( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
+	( volatile atomic_ushort* __a__, unsigned short __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline unsigned short atomic_fetch_xor
-( volatile atomic_ushort* __a__, unsigned short __m__ )
+	( volatile atomic_ushort* __a__, unsigned short __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline int atomic_fetch_add_explicit
-( volatile atomic_int* __a__, int __m__, memory_order __x__ )
+	( volatile atomic_int* __a__, int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline int atomic_fetch_add
-( volatile atomic_int* __a__, int __m__ )
+	( volatile atomic_int* __a__, int __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline int atomic_fetch_sub_explicit
-( volatile atomic_int* __a__, int __m__, memory_order __x__ )
+	( volatile atomic_int* __a__, int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline int atomic_fetch_sub
-( volatile atomic_int* __a__, int __m__ )
+	( volatile atomic_int* __a__, int __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline int atomic_fetch_and_explicit
-( volatile atomic_int* __a__, int __m__, memory_order __x__ )
+	( volatile atomic_int* __a__, int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline int atomic_fetch_and
-( volatile atomic_int* __a__, int __m__ )
+	( volatile atomic_int* __a__, int __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline int atomic_fetch_or_explicit
-( volatile atomic_int* __a__, int __m__, memory_order __x__ )
+	( volatile atomic_int* __a__, int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline int atomic_fetch_or
-( volatile atomic_int* __a__, int __m__ )
+	( volatile atomic_int* __a__, int __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline int atomic_fetch_xor_explicit
-( volatile atomic_int* __a__, int __m__, memory_order __x__ )
+	( volatile atomic_int* __a__, int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline int atomic_fetch_xor
-( volatile atomic_int* __a__, int __m__ )
+	( volatile atomic_int* __a__, int __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned int atomic_fetch_add_explicit
-( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
+	( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline unsigned int atomic_fetch_add
-( volatile atomic_uint* __a__, unsigned int __m__ )
+	( volatile atomic_uint* __a__, unsigned int __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned int atomic_fetch_sub_explicit
-( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
+	( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline unsigned int atomic_fetch_sub
-( volatile atomic_uint* __a__, unsigned int __m__ )
+	( volatile atomic_uint* __a__, unsigned int __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned int atomic_fetch_and_explicit
-( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
+	( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline unsigned int atomic_fetch_and
-( volatile atomic_uint* __a__, unsigned int __m__ )
+	( volatile atomic_uint* __a__, unsigned int __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned int atomic_fetch_or_explicit
-( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
+	( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline unsigned int atomic_fetch_or
-( volatile atomic_uint* __a__, unsigned int __m__ )
+	( volatile atomic_uint* __a__, unsigned int __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned int atomic_fetch_xor_explicit
-( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
+	( volatile atomic_uint* __a__, unsigned int __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline unsigned int atomic_fetch_xor
-( volatile atomic_uint* __a__, unsigned int __m__ )
+	( volatile atomic_uint* __a__, unsigned int __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline long atomic_fetch_add_explicit
-( volatile atomic_long* __a__, long __m__, memory_order __x__ )
+	( volatile atomic_long* __a__, long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline long atomic_fetch_add
-( volatile atomic_long* __a__, long __m__ )
+	( volatile atomic_long* __a__, long __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline long atomic_fetch_sub_explicit
-( volatile atomic_long* __a__, long __m__, memory_order __x__ )
+	( volatile atomic_long* __a__, long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline long atomic_fetch_sub
-( volatile atomic_long* __a__, long __m__ )
+	( volatile atomic_long* __a__, long __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline long atomic_fetch_and_explicit
-( volatile atomic_long* __a__, long __m__, memory_order __x__ )
+	( volatile atomic_long* __a__, long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline long atomic_fetch_and
-( volatile atomic_long* __a__, long __m__ )
+	( volatile atomic_long* __a__, long __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline long atomic_fetch_or_explicit
-( volatile atomic_long* __a__, long __m__, memory_order __x__ )
+	( volatile atomic_long* __a__, long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline long atomic_fetch_or
-( volatile atomic_long* __a__, long __m__ )
+	( volatile atomic_long* __a__, long __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline long atomic_fetch_xor_explicit
-( volatile atomic_long* __a__, long __m__, memory_order __x__ )
+	( volatile atomic_long* __a__, long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline long atomic_fetch_xor
-( volatile atomic_long* __a__, long __m__ )
+	( volatile atomic_long* __a__, long __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned long atomic_fetch_add_explicit
-( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
+	( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline unsigned long atomic_fetch_add
-( volatile atomic_ulong* __a__, unsigned long __m__ )
+	( volatile atomic_ulong* __a__, unsigned long __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned long atomic_fetch_sub_explicit
-( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
+	( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline unsigned long atomic_fetch_sub
-( volatile atomic_ulong* __a__, unsigned long __m__ )
+	( volatile atomic_ulong* __a__, unsigned long __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned long atomic_fetch_and_explicit
-( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
+	( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline unsigned long atomic_fetch_and
-( volatile atomic_ulong* __a__, unsigned long __m__ )
+	( volatile atomic_ulong* __a__, unsigned long __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned long atomic_fetch_or_explicit
-( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
+	( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline unsigned long atomic_fetch_or
-( volatile atomic_ulong* __a__, unsigned long __m__ )
+	( volatile atomic_ulong* __a__, unsigned long __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned long atomic_fetch_xor_explicit
-( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
+	( volatile atomic_ulong* __a__, unsigned long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline unsigned long atomic_fetch_xor
-( volatile atomic_ulong* __a__, unsigned long __m__ )
+	( volatile atomic_ulong* __a__, unsigned long __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline long long atomic_fetch_add_explicit
-( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
+	( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline long long atomic_fetch_add
-( volatile atomic_llong* __a__, long long __m__ )
+	( volatile atomic_llong* __a__, long long __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline long long atomic_fetch_sub_explicit
-( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
+	( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline long long atomic_fetch_sub
-( volatile atomic_llong* __a__, long long __m__ )
+	( volatile atomic_llong* __a__, long long __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline long long atomic_fetch_and_explicit
-( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
+	( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline long long atomic_fetch_and
-( volatile atomic_llong* __a__, long long __m__ )
+	( volatile atomic_llong* __a__, long long __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline long long atomic_fetch_or_explicit
-( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
+	( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline long long atomic_fetch_or
-( volatile atomic_llong* __a__, long long __m__ )
+	( volatile atomic_llong* __a__, long long __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline long long atomic_fetch_xor_explicit
-( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
+	( volatile atomic_llong* __a__, long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline long long atomic_fetch_xor
-( volatile atomic_llong* __a__, long long __m__ )
+	( volatile atomic_llong* __a__, long long __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned long long atomic_fetch_add_explicit
-( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
+	( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline unsigned long long atomic_fetch_add
-( volatile atomic_ullong* __a__, unsigned long long __m__ )
+	( volatile atomic_ullong* __a__, unsigned long long __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned long long atomic_fetch_sub_explicit
-( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
+	( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline unsigned long long atomic_fetch_sub
-( volatile atomic_ullong* __a__, unsigned long long __m__ )
+	( volatile atomic_ullong* __a__, unsigned long long __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned long long atomic_fetch_and_explicit
-( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
+	( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline unsigned long long atomic_fetch_and
-( volatile atomic_ullong* __a__, unsigned long long __m__ )
+	( volatile atomic_ullong* __a__, unsigned long long __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned long long atomic_fetch_or_explicit
-( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
+	( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline unsigned long long atomic_fetch_or
-( volatile atomic_ullong* __a__, unsigned long long __m__ )
+	( volatile atomic_ullong* __a__, unsigned long long __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline unsigned long long atomic_fetch_xor_explicit
-( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
+	( volatile atomic_ullong* __a__, unsigned long long __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline unsigned long long atomic_fetch_xor
-( volatile atomic_ullong* __a__, unsigned long long __m__ )
+	( volatile atomic_ullong* __a__, unsigned long long __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline wchar_t atomic_fetch_add_explicit
-( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
+	( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, +=, __m__, __x__ ); }
 
 inline wchar_t atomic_fetch_add
-( volatile atomic_wchar_t* __a__, wchar_t __m__ )
+	( volatile atomic_wchar_t* __a__, wchar_t __m__ )
 { return atomic_fetch_add_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline wchar_t atomic_fetch_sub_explicit
-( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
+	( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, -=, __m__, __x__ ); }
 
 inline wchar_t atomic_fetch_sub
-( volatile atomic_wchar_t* __a__, wchar_t __m__ )
+	( volatile atomic_wchar_t* __a__, wchar_t __m__ )
 { return atomic_fetch_sub_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline wchar_t atomic_fetch_and_explicit
-( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
+	( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, &=, __m__, __x__ ); }
 
 inline wchar_t atomic_fetch_and
-( volatile atomic_wchar_t* __a__, wchar_t __m__ )
+	( volatile atomic_wchar_t* __a__, wchar_t __m__ )
 { return atomic_fetch_and_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline wchar_t atomic_fetch_or_explicit
-( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
+	( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, |=, __m__, __x__ ); }
 
 inline wchar_t atomic_fetch_or
-( volatile atomic_wchar_t* __a__, wchar_t __m__ )
+	( volatile atomic_wchar_t* __a__, wchar_t __m__ )
 { return atomic_fetch_or_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
 inline wchar_t atomic_fetch_xor_explicit
-( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
+	( volatile atomic_wchar_t* __a__, wchar_t __m__, memory_order __x__ )
 { return _ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ ); }
 
 inline wchar_t atomic_fetch_xor
-( volatile atomic_wchar_t* __a__, wchar_t __m__ )
+	( volatile atomic_wchar_t* __a__, wchar_t __m__ )
 { return atomic_fetch_xor_explicit( __a__, __m__, memory_order_seq_cst ); }
 
 
@@ -2902,75 +2989,75 @@ inline wchar_t atomic_fetch_xor
 
 
 #define atomic_is_lock_free( __a__ ) \
-false
+	false
 
 #define atomic_load( __a__ ) \
-_ATOMIC_LOAD_( __a__, memory_order_seq_cst )
+	_ATOMIC_LOAD_( __a__, memory_order_seq_cst )
 
 #define atomic_load_explicit( __a__, __x__ ) \
-_ATOMIC_LOAD_( __a__, __x__ )
+	_ATOMIC_LOAD_( __a__, __x__ )
 
 #define atomic_init( __a__, __m__ ) \
-_ATOMIC_INIT_( __a__, __m__ )
+	_ATOMIC_INIT_( __a__, __m__ )
 
 #define atomic_store( __a__, __m__ ) \
-_ATOMIC_STORE_( __a__, __m__, memory_order_seq_cst )
+	_ATOMIC_STORE_( __a__, __m__, memory_order_seq_cst )
 
 #define atomic_store_explicit( __a__, __m__, __x__ ) \
-_ATOMIC_STORE_( __a__, __m__, __x__ )
+	_ATOMIC_STORE_( __a__, __m__, __x__ )
 
 #define atomic_exchange( __a__, __m__ ) \
-_ATOMIC_MODIFY_( __a__, =, __m__, memory_order_seq_cst )
+	_ATOMIC_MODIFY_( __a__, =, __m__, memory_order_seq_cst )
 
 #define atomic_exchange_explicit( __a__, __m__, __x__ ) \
-_ATOMIC_MODIFY_( __a__, =, __m__, __x__ )
+	_ATOMIC_MODIFY_( __a__, =, __m__, __x__ )
 
 #define atomic_compare_exchange_weak( __a__, __e__, __m__ ) \
-_ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, memory_order_seq_cst )
+	_ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, memory_order_seq_cst )
 
 #define atomic_compare_exchange_strong( __a__, __e__, __m__ ) \
-_ATOMIC_CMPSWP_( __a__, __e__, __m__, memory_order_seq_cst )
+	_ATOMIC_CMPSWP_( __a__, __e__, __m__, memory_order_seq_cst )
 
 #define atomic_compare_exchange_weak_explicit( __a__, __e__, __m__, __x__, __y__ ) \
-_ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ )
+	_ATOMIC_CMPSWP_WEAK_( __a__, __e__, __m__, __x__ )
 
 #define atomic_compare_exchange_strong_explicit( __a__, __e__, __m__, __x__, __y__ ) \
-_ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ )
+	_ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ )
 
 
 #define atomic_fetch_add_explicit( __a__, __m__, __x__ ) \
-_ATOMIC_MODIFY_( __a__, +=, __m__, __x__ )
+	_ATOMIC_MODIFY_( __a__, +=, __m__, __x__ )
 
 #define atomic_fetch_add( __a__, __m__ ) \
-_ATOMIC_MODIFY_( __a__, +=, __m__, memory_order_seq_cst )
+	_ATOMIC_MODIFY_( __a__, +=, __m__, memory_order_seq_cst )
 
 
 #define atomic_fetch_sub_explicit( __a__, __m__, __x__ ) \
-_ATOMIC_MODIFY_( __a__, -=, __m__, __x__ )
+	_ATOMIC_MODIFY_( __a__, -=, __m__, __x__ )
 
 #define atomic_fetch_sub( __a__, __m__ ) \
-_ATOMIC_MODIFY_( __a__, -=, __m__, memory_order_seq_cst )
+	_ATOMIC_MODIFY_( __a__, -=, __m__, memory_order_seq_cst )
 
 
 #define atomic_fetch_and_explicit( __a__, __m__, __x__ ) \
-_ATOMIC_MODIFY_( __a__, &=, __m__, __x__ )
+	_ATOMIC_MODIFY_( __a__, &=, __m__, __x__ )
 
 #define atomic_fetch_and( __a__, __m__ ) \
-_ATOMIC_MODIFY_( __a__, &=, __m__, memory_order_seq_cst )
+	_ATOMIC_MODIFY_( __a__, &=, __m__, memory_order_seq_cst )
 
 
 #define atomic_fetch_or_explicit( __a__, __m__, __x__ ) \
-_ATOMIC_MODIFY_( __a__, |=, __m__, __x__ )
+	_ATOMIC_MODIFY_( __a__, |=, __m__, __x__ )
 
 #define atomic_fetch_or( __a__, __m__ ) \
-_ATOMIC_MODIFY_( __a__, |=, __m__, memory_order_seq_cst )
+	_ATOMIC_MODIFY_( __a__, |=, __m__, memory_order_seq_cst )
 
 
 #define atomic_fetch_xor_explicit( __a__, __m__, __x__ ) \
-_ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ )
+	_ATOMIC_MODIFY_( __a__, ^=, __m__, __x__ )
 
 #define atomic_fetch_xor( __a__, __m__ ) \
-_ATOMIC_MODIFY_( __a__, ^=, __m__, memory_order_seq_cst )
+	_ATOMIC_MODIFY_( __a__, ^=, __m__, memory_order_seq_cst )
 
 
 #endif
@@ -2983,532 +3070,588 @@ inline bool atomic_bool::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_bool::store
-( bool __m__, memory_order __x__ ) volatile
+	( bool __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_bool::load
-( memory_order __x__ ) volatile
+	( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline bool atomic_bool::exchange
-( bool __m__, memory_order __x__ ) volatile
+	( bool __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_bool::compare_exchange_weak
-( bool& __e__, bool __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( bool& __e__, bool __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_bool::compare_exchange_strong
-( bool& __e__, bool __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( bool& __e__, bool __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_bool::compare_exchange_weak
-( bool& __e__, bool __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( bool& __e__, bool __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+																								__x__ == memory_order_acq_rel ? memory_order_acquire :
+																								__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_bool::compare_exchange_strong
-( bool& __e__, bool __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( bool& __e__, bool __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+																									__x__ == memory_order_acq_rel ? memory_order_acquire :
+																									__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_address::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_address::store
-( void* __m__, memory_order __x__ ) volatile
+	( void* __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline void* atomic_address::load
-( memory_order __x__ ) volatile
+	( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline void* atomic_address::exchange
-( void* __m__, memory_order __x__ ) volatile
+	( void* __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_address::compare_exchange_weak
-( void*& __e__, void* __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( void*& __e__, void* __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_address::compare_exchange_strong
-( void*& __e__, void* __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( void*& __e__, void* __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_address::compare_exchange_weak
-( void*& __e__, void* __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( void*& __e__, void* __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+																								__x__ == memory_order_acq_rel ? memory_order_acquire :
+																								__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_address::compare_exchange_strong
-( void*& __e__, void* __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( void*& __e__, void* __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+																									__x__ == memory_order_acq_rel ? memory_order_acquire :
+																									__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_char::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_char::store
-( char __m__, memory_order __x__ ) volatile
+	( char __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline char atomic_char::load
-( memory_order __x__ ) volatile
+	( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline char atomic_char::exchange
-( char __m__, memory_order __x__ ) volatile
+	( char __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_char::compare_exchange_weak
-( char& __e__, char __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( char& __e__, char __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_char::compare_exchange_strong
-( char& __e__, char __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( char& __e__, char __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_char::compare_exchange_weak
-( char& __e__, char __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( char& __e__, char __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+																								__x__ == memory_order_acq_rel ? memory_order_acquire :
+																								__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_char::compare_exchange_strong
-( char& __e__, char __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( char& __e__, char __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+																									__x__ == memory_order_acq_rel ? memory_order_acquire :
+																									__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_schar::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_schar::store
-( signed char __m__, memory_order __x__ ) volatile
+	( signed char __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline signed char atomic_schar::load
-( memory_order __x__ ) volatile
+	( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline signed char atomic_schar::exchange
-( signed char __m__, memory_order __x__ ) volatile
+	( signed char __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_schar::compare_exchange_weak
-( signed char& __e__, signed char __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( signed char& __e__, signed char __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_schar::compare_exchange_strong
-( signed char& __e__, signed char __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( signed char& __e__, signed char __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_schar::compare_exchange_weak
-( signed char& __e__, signed char __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( signed char& __e__, signed char __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+																								__x__ == memory_order_acq_rel ? memory_order_acquire :
+																								__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_schar::compare_exchange_strong
-( signed char& __e__, signed char __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( signed char& __e__, signed char __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+																									__x__ == memory_order_acq_rel ? memory_order_acquire :
+																									__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_uchar::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_uchar::store
-( unsigned char __m__, memory_order __x__ ) volatile
+	( unsigned char __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline unsigned char atomic_uchar::load
-( memory_order __x__ ) volatile
+	( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline unsigned char atomic_uchar::exchange
-( unsigned char __m__, memory_order __x__ ) volatile
+	( unsigned char __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_uchar::compare_exchange_weak
-( unsigned char& __e__, unsigned char __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( unsigned char& __e__, unsigned char __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_uchar::compare_exchange_strong
-( unsigned char& __e__, unsigned char __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( unsigned char& __e__, unsigned char __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_uchar::compare_exchange_weak
-( unsigned char& __e__, unsigned char __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( unsigned char& __e__, unsigned char __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+																								__x__ == memory_order_acq_rel ? memory_order_acquire :
+																								__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_uchar::compare_exchange_strong
-( unsigned char& __e__, unsigned char __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( unsigned char& __e__, unsigned char __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+																									__x__ == memory_order_acq_rel ? memory_order_acquire :
+																									__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_short::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_short::store
-( short __m__, memory_order __x__ ) volatile
+	( short __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline short atomic_short::load
-( memory_order __x__ ) volatile
+	( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline short atomic_short::exchange
-( short __m__, memory_order __x__ ) volatile
+	( short __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_short::compare_exchange_weak
-( short& __e__, short __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( short& __e__, short __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_short::compare_exchange_strong
-( short& __e__, short __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( short& __e__, short __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_short::compare_exchange_weak
-( short& __e__, short __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( short& __e__, short __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+																								__x__ == memory_order_acq_rel ? memory_order_acquire :
+																								__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_short::compare_exchange_strong
-( short& __e__, short __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( short& __e__, short __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+																									__x__ == memory_order_acq_rel ? memory_order_acquire :
+																									__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_ushort::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_ushort::store
-( unsigned short __m__, memory_order __x__ ) volatile
+	( unsigned short __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline unsigned short atomic_ushort::load
-( memory_order __x__ ) volatile
+	( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline unsigned short atomic_ushort::exchange
-( unsigned short __m__, memory_order __x__ ) volatile
+	( unsigned short __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_ushort::compare_exchange_weak
-( unsigned short& __e__, unsigned short __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( unsigned short& __e__, unsigned short __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_ushort::compare_exchange_strong
-( unsigned short& __e__, unsigned short __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( unsigned short& __e__, unsigned short __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_ushort::compare_exchange_weak
-( unsigned short& __e__, unsigned short __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( unsigned short& __e__, unsigned short __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+																								__x__ == memory_order_acq_rel ? memory_order_acquire :
+																								__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_ushort::compare_exchange_strong
-( unsigned short& __e__, unsigned short __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( unsigned short& __e__, unsigned short __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+																									__x__ == memory_order_acq_rel ? memory_order_acquire :
+																									__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_int::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_int::store
-( int __m__, memory_order __x__ ) volatile
+	( int __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline int atomic_int::load
-( memory_order __x__ ) volatile
+	( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline int atomic_int::exchange
-( int __m__, memory_order __x__ ) volatile
+	( int __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_int::compare_exchange_weak
-( int& __e__, int __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( int& __e__, int __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_int::compare_exchange_strong
-( int& __e__, int __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( int& __e__, int __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_int::compare_exchange_weak
-( int& __e__, int __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( int& __e__, int __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+																								__x__ == memory_order_acq_rel ? memory_order_acquire :
+																								__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_int::compare_exchange_strong
-( int& __e__, int __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( int& __e__, int __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+																									__x__ == memory_order_acq_rel ? memory_order_acquire :
+																									__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_uint::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_uint::store
-( unsigned int __m__, memory_order __x__ ) volatile
+	( unsigned int __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline unsigned int atomic_uint::load
-( memory_order __x__ ) volatile
+	( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline unsigned int atomic_uint::exchange
-( unsigned int __m__, memory_order __x__ ) volatile
+	( unsigned int __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_uint::compare_exchange_weak
-( unsigned int& __e__, unsigned int __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( unsigned int& __e__, unsigned int __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_uint::compare_exchange_strong
-( unsigned int& __e__, unsigned int __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( unsigned int& __e__, unsigned int __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_uint::compare_exchange_weak
-( unsigned int& __e__, unsigned int __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( unsigned int& __e__, unsigned int __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+																								__x__ == memory_order_acq_rel ? memory_order_acquire :
+																								__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_uint::compare_exchange_strong
-( unsigned int& __e__, unsigned int __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( unsigned int& __e__, unsigned int __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+																									__x__ == memory_order_acq_rel ? memory_order_acquire :
+																									__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_long::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_long::store
-( long __m__, memory_order __x__ ) volatile
+	( long __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline long atomic_long::load
-( memory_order __x__ ) volatile
+	( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline long atomic_long::exchange
-( long __m__, memory_order __x__ ) volatile
+	( long __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_long::compare_exchange_weak
-( long& __e__, long __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( long& __e__, long __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_long::compare_exchange_strong
-( long& __e__, long __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( long& __e__, long __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_long::compare_exchange_weak
-( long& __e__, long __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( long& __e__, long __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+																								__x__ == memory_order_acq_rel ? memory_order_acquire :
+																								__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_long::compare_exchange_strong
-( long& __e__, long __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( long& __e__, long __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+																									__x__ == memory_order_acq_rel ? memory_order_acquire :
+																									__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_ulong::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_ulong::store
-( unsigned long __m__, memory_order __x__ ) volatile
+	( unsigned long __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline unsigned long atomic_ulong::load
-( memory_order __x__ ) volatile
+	( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline unsigned long atomic_ulong::exchange
-( unsigned long __m__, memory_order __x__ ) volatile
+	( unsigned long __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_ulong::compare_exchange_weak
-( unsigned long& __e__, unsigned long __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( unsigned long& __e__, unsigned long __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_ulong::compare_exchange_strong
-( unsigned long& __e__, unsigned long __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( unsigned long& __e__, unsigned long __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_ulong::compare_exchange_weak
-( unsigned long& __e__, unsigned long __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( unsigned long& __e__, unsigned long __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+																								__x__ == memory_order_acq_rel ? memory_order_acquire :
+																								__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_ulong::compare_exchange_strong
-( unsigned long& __e__, unsigned long __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( unsigned long& __e__, unsigned long __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+																									__x__ == memory_order_acq_rel ? memory_order_acquire :
+																									__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_llong::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_llong::store
-( long long __m__, memory_order __x__ ) volatile
+	( long long __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline long long atomic_llong::load
-( memory_order __x__ ) volatile
+	( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline long long atomic_llong::exchange
-( long long __m__, memory_order __x__ ) volatile
+	( long long __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_llong::compare_exchange_weak
-( long long& __e__, long long __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( long long& __e__, long long __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_llong::compare_exchange_strong
-( long long& __e__, long long __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( long long& __e__, long long __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_llong::compare_exchange_weak
-( long long& __e__, long long __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( long long& __e__, long long __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+																								__x__ == memory_order_acq_rel ? memory_order_acquire :
+																								__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_llong::compare_exchange_strong
-( long long& __e__, long long __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( long long& __e__, long long __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+																									__x__ == memory_order_acq_rel ? memory_order_acquire :
+																									__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_ullong::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_ullong::store
-( unsigned long long __m__, memory_order __x__ ) volatile
+	( unsigned long long __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline unsigned long long atomic_ullong::load
-( memory_order __x__ ) volatile
+	( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline unsigned long long atomic_ullong::exchange
-( unsigned long long __m__, memory_order __x__ ) volatile
+	( unsigned long long __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_ullong::compare_exchange_weak
-( unsigned long long& __e__, unsigned long long __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( unsigned long long& __e__, unsigned long long __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_ullong::compare_exchange_strong
-( unsigned long long& __e__, unsigned long long __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( unsigned long long& __e__, unsigned long long __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_ullong::compare_exchange_weak
-( unsigned long long& __e__, unsigned long long __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( unsigned long long& __e__, unsigned long long __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+																								__x__ == memory_order_acq_rel ? memory_order_acquire :
+																								__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_ullong::compare_exchange_strong
-( unsigned long long& __e__, unsigned long long __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( unsigned long long& __e__, unsigned long long __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+																									__x__ == memory_order_acq_rel ? memory_order_acquire :
+																									__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline bool atomic_wchar_t::is_lock_free() const volatile
 { return false; }
 
 inline void atomic_wchar_t::store
-( wchar_t __m__, memory_order __x__ ) volatile
+	( wchar_t __m__, memory_order __x__ ) volatile
 { atomic_store_explicit( this, __m__, __x__ ); }
 
 inline wchar_t atomic_wchar_t::load
-( memory_order __x__ ) volatile
+	( memory_order __x__ ) volatile
 { return atomic_load_explicit( this, __x__ ); }
 
 inline wchar_t atomic_wchar_t::exchange
-( wchar_t __m__, memory_order __x__ ) volatile
+	( wchar_t __m__, memory_order __x__ ) volatile
 { return atomic_exchange_explicit( this, __m__, __x__ ); }
 
 inline bool atomic_wchar_t::compare_exchange_weak
-( wchar_t& __e__, wchar_t __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( wchar_t& __e__, wchar_t __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_wchar_t::compare_exchange_strong
-( wchar_t& __e__, wchar_t __m__,
-  memory_order __x__, memory_order __y__ ) volatile
+	( wchar_t& __e__, wchar_t __m__,
+	memory_order __x__, memory_order __y__ ) volatile
 { return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__, __y__ ); }
 
 inline bool atomic_wchar_t::compare_exchange_weak
-( wchar_t& __e__, wchar_t __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( wchar_t& __e__, wchar_t __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_weak_explicit( this, &__e__, __m__, __x__,
+																								__x__ == memory_order_acq_rel ? memory_order_acquire :
+																								__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 inline bool atomic_wchar_t::compare_exchange_strong
-( wchar_t& __e__, wchar_t __m__, memory_order __x__ ) volatile
-{ return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( wchar_t& __e__, wchar_t __m__, memory_order __x__ ) volatile
+{
+	return atomic_compare_exchange_strong_explicit( this, &__e__, __m__, __x__,
+																									__x__ == memory_order_acq_rel ? memory_order_acquire :
+																									__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 template< typename T >
@@ -3529,335 +3672,339 @@ inline T atomic<T>::exchange( T __v__, memory_order __x__ ) volatile
 
 template< typename T >
 inline bool atomic<T>::compare_exchange_weak
-( T& __r__, T __v__, memory_order __x__, memory_order __y__ ) volatile
+	( T& __r__, T __v__, memory_order __x__, memory_order __y__ ) volatile
 { return _ATOMIC_CMPSWP_WEAK_( this, &__r__, __v__, __x__ ); }
 
 template< typename T >
 inline bool atomic<T>::compare_exchange_strong
-( T& __r__, T __v__, memory_order __x__, memory_order __y__ ) volatile
+	( T& __r__, T __v__, memory_order __x__, memory_order __y__ ) volatile
 { return _ATOMIC_CMPSWP_( this, &__r__, __v__, __x__ ); }
 
 template< typename T >
 inline bool atomic<T>::compare_exchange_weak
-( T& __r__, T __v__, memory_order __x__ ) volatile
-{ return compare_exchange_weak( __r__, __v__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( T& __r__, T __v__, memory_order __x__ ) volatile
+{
+	return compare_exchange_weak( __r__, __v__, __x__,
+																__x__ == memory_order_acq_rel ? memory_order_acquire :
+																__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 template< typename T >
 inline bool atomic<T>::compare_exchange_strong
-( T& __r__, T __v__, memory_order __x__ ) volatile
-{ return compare_exchange_strong( __r__, __v__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( T& __r__, T __v__, memory_order __x__ ) volatile
+{
+	return compare_exchange_strong( __r__, __v__, __x__,
+																	__x__ == memory_order_acq_rel ? memory_order_acquire :
+																	__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 
 inline void* atomic_address::fetch_add
-( ptrdiff_t __m__, memory_order __x__ ) volatile
+	( ptrdiff_t __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 inline void* atomic_address::fetch_sub
-( ptrdiff_t __m__, memory_order __x__ ) volatile
+	( ptrdiff_t __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline char atomic_char::fetch_add
-( char __m__, memory_order __x__ ) volatile
+	( char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline char atomic_char::fetch_sub
-( char __m__, memory_order __x__ ) volatile
+	( char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline char atomic_char::fetch_and
-( char __m__, memory_order __x__ ) volatile
+	( char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline char atomic_char::fetch_or
-( char __m__, memory_order __x__ ) volatile
+	( char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline char atomic_char::fetch_xor
-( char __m__, memory_order __x__ ) volatile
+	( char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
 inline signed char atomic_schar::fetch_add
-( signed char __m__, memory_order __x__ ) volatile
+	( signed char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline signed char atomic_schar::fetch_sub
-( signed char __m__, memory_order __x__ ) volatile
+	( signed char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline signed char atomic_schar::fetch_and
-( signed char __m__, memory_order __x__ ) volatile
+	( signed char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline signed char atomic_schar::fetch_or
-( signed char __m__, memory_order __x__ ) volatile
+	( signed char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline signed char atomic_schar::fetch_xor
-( signed char __m__, memory_order __x__ ) volatile
+	( signed char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned char atomic_uchar::fetch_add
-( unsigned char __m__, memory_order __x__ ) volatile
+	( unsigned char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned char atomic_uchar::fetch_sub
-( unsigned char __m__, memory_order __x__ ) volatile
+	( unsigned char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned char atomic_uchar::fetch_and
-( unsigned char __m__, memory_order __x__ ) volatile
+	( unsigned char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned char atomic_uchar::fetch_or
-( unsigned char __m__, memory_order __x__ ) volatile
+	( unsigned char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned char atomic_uchar::fetch_xor
-( unsigned char __m__, memory_order __x__ ) volatile
+	( unsigned char __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
 inline short atomic_short::fetch_add
-( short __m__, memory_order __x__ ) volatile
+	( short __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline short atomic_short::fetch_sub
-( short __m__, memory_order __x__ ) volatile
+	( short __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline short atomic_short::fetch_and
-( short __m__, memory_order __x__ ) volatile
+	( short __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline short atomic_short::fetch_or
-( short __m__, memory_order __x__ ) volatile
+	( short __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline short atomic_short::fetch_xor
-( short __m__, memory_order __x__ ) volatile
+	( short __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned short atomic_ushort::fetch_add
-( unsigned short __m__, memory_order __x__ ) volatile
+	( unsigned short __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned short atomic_ushort::fetch_sub
-( unsigned short __m__, memory_order __x__ ) volatile
+	( unsigned short __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned short atomic_ushort::fetch_and
-( unsigned short __m__, memory_order __x__ ) volatile
+	( unsigned short __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned short atomic_ushort::fetch_or
-( unsigned short __m__, memory_order __x__ ) volatile
+	( unsigned short __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned short atomic_ushort::fetch_xor
-( unsigned short __m__, memory_order __x__ ) volatile
+	( unsigned short __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
 inline int atomic_int::fetch_add
-( int __m__, memory_order __x__ ) volatile
+	( int __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline int atomic_int::fetch_sub
-( int __m__, memory_order __x__ ) volatile
+	( int __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline int atomic_int::fetch_and
-( int __m__, memory_order __x__ ) volatile
+	( int __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline int atomic_int::fetch_or
-( int __m__, memory_order __x__ ) volatile
+	( int __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline int atomic_int::fetch_xor
-( int __m__, memory_order __x__ ) volatile
+	( int __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned int atomic_uint::fetch_add
-( unsigned int __m__, memory_order __x__ ) volatile
+	( unsigned int __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned int atomic_uint::fetch_sub
-( unsigned int __m__, memory_order __x__ ) volatile
+	( unsigned int __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned int atomic_uint::fetch_and
-( unsigned int __m__, memory_order __x__ ) volatile
+	( unsigned int __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned int atomic_uint::fetch_or
-( unsigned int __m__, memory_order __x__ ) volatile
+	( unsigned int __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned int atomic_uint::fetch_xor
-( unsigned int __m__, memory_order __x__ ) volatile
+	( unsigned int __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
 inline long atomic_long::fetch_add
-( long __m__, memory_order __x__ ) volatile
+	( long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline long atomic_long::fetch_sub
-( long __m__, memory_order __x__ ) volatile
+	( long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline long atomic_long::fetch_and
-( long __m__, memory_order __x__ ) volatile
+	( long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline long atomic_long::fetch_or
-( long __m__, memory_order __x__ ) volatile
+	( long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline long atomic_long::fetch_xor
-( long __m__, memory_order __x__ ) volatile
+	( long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned long atomic_ulong::fetch_add
-( unsigned long __m__, memory_order __x__ ) volatile
+	( unsigned long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned long atomic_ulong::fetch_sub
-( unsigned long __m__, memory_order __x__ ) volatile
+	( unsigned long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned long atomic_ulong::fetch_and
-( unsigned long __m__, memory_order __x__ ) volatile
+	( unsigned long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned long atomic_ulong::fetch_or
-( unsigned long __m__, memory_order __x__ ) volatile
+	( unsigned long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned long atomic_ulong::fetch_xor
-( unsigned long __m__, memory_order __x__ ) volatile
+	( unsigned long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
 inline long long atomic_llong::fetch_add
-( long long __m__, memory_order __x__ ) volatile
+	( long long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline long long atomic_llong::fetch_sub
-( long long __m__, memory_order __x__ ) volatile
+	( long long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline long long atomic_llong::fetch_and
-( long long __m__, memory_order __x__ ) volatile
+	( long long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline long long atomic_llong::fetch_or
-( long long __m__, memory_order __x__ ) volatile
+	( long long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline long long atomic_llong::fetch_xor
-( long long __m__, memory_order __x__ ) volatile
+	( long long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned long long atomic_ullong::fetch_add
-( unsigned long long __m__, memory_order __x__ ) volatile
+	( unsigned long long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned long long atomic_ullong::fetch_sub
-( unsigned long long __m__, memory_order __x__ ) volatile
+	( unsigned long long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned long long atomic_ullong::fetch_and
-( unsigned long long __m__, memory_order __x__ ) volatile
+	( unsigned long long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned long long atomic_ullong::fetch_or
-( unsigned long long __m__, memory_order __x__ ) volatile
+	( unsigned long long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline unsigned long long atomic_ullong::fetch_xor
-( unsigned long long __m__, memory_order __x__ ) volatile
+	( unsigned long long __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
 inline wchar_t atomic_wchar_t::fetch_add
-( wchar_t __m__, memory_order __x__ ) volatile
+	( wchar_t __m__, memory_order __x__ ) volatile
 { return atomic_fetch_add_explicit( this, __m__, __x__ ); }
 
 
 inline wchar_t atomic_wchar_t::fetch_sub
-( wchar_t __m__, memory_order __x__ ) volatile
+	( wchar_t __m__, memory_order __x__ ) volatile
 { return atomic_fetch_sub_explicit( this, __m__, __x__ ); }
 
 
 inline wchar_t atomic_wchar_t::fetch_and
-( wchar_t __m__, memory_order __x__ ) volatile
+	( wchar_t __m__, memory_order __x__ ) volatile
 { return atomic_fetch_and_explicit( this, __m__, __x__ ); }
 
 
 inline wchar_t atomic_wchar_t::fetch_or
-( wchar_t __m__, memory_order __x__ ) volatile
+	( wchar_t __m__, memory_order __x__ ) volatile
 { return atomic_fetch_or_explicit( this, __m__, __x__ ); }
 
 
 inline wchar_t atomic_wchar_t::fetch_xor
-( wchar_t __m__, memory_order __x__ ) volatile
+	( wchar_t __m__, memory_order __x__ ) volatile
 { return atomic_fetch_xor_explicit( this, __m__, __x__ ); }
 
 
@@ -3871,31 +4018,39 @@ T* atomic<T*>::exchange( T* __v__, memory_order __x__ ) volatile
 
 template< typename T >
 bool atomic<T*>::compare_exchange_weak
-( T*& __r__, T* __v__, memory_order __x__, memory_order __y__) volatile
-{ return atomic_address::compare_exchange_weak( *reinterpret_cast<void**>( &__r__ ),
-               static_cast<void*>( __v__ ), __x__, __y__ ); }
+	( T*& __r__, T* __v__, memory_order __x__, memory_order __y__) volatile
+{
+	return atomic_address::compare_exchange_weak( *reinterpret_cast<void**>( &__r__ ),
+																								static_cast<void*>( __v__ ), __x__, __y__ );
+}
 //{ return _ATOMIC_CMPSWP_WEAK_( this, &__r__, __v__, __x__ ); }
 
 template< typename T >
 bool atomic<T*>::compare_exchange_strong
-( T*& __r__, T* __v__, memory_order __x__, memory_order __y__) volatile
-{ return atomic_address::compare_exchange_strong( *reinterpret_cast<void**>( &__r__ ),
-               static_cast<void*>( __v__ ), __x__, __y__ ); }
+	( T*& __r__, T* __v__, memory_order __x__, memory_order __y__) volatile
+{
+	return atomic_address::compare_exchange_strong( *reinterpret_cast<void**>( &__r__ ),
+																									static_cast<void*>( __v__ ), __x__, __y__ );
+}
 //{ return _ATOMIC_CMPSWP_( this, &__r__, __v__, __x__ ); }
 
 template< typename T >
 bool atomic<T*>::compare_exchange_weak
-( T*& __r__, T* __v__, memory_order __x__ ) volatile
-{ return compare_exchange_weak( __r__, __v__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( T*& __r__, T* __v__, memory_order __x__ ) volatile
+{
+	return compare_exchange_weak( __r__, __v__, __x__,
+																__x__ == memory_order_acq_rel ? memory_order_acquire :
+																__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 template< typename T >
 bool atomic<T*>::compare_exchange_strong
-( T*& __r__, T* __v__, memory_order __x__ ) volatile
-{ return compare_exchange_strong( __r__, __v__, __x__,
-      __x__ == memory_order_acq_rel ? memory_order_acquire :
-      __x__ == memory_order_release ? memory_order_relaxed : __x__ ); }
+	( T*& __r__, T* __v__, memory_order __x__ ) volatile
+{
+	return compare_exchange_strong( __r__, __v__, __x__,
+																	__x__ == memory_order_acq_rel ? memory_order_acquire :
+																	__x__ == memory_order_release ? memory_order_relaxed : __x__ );
+}
 
 template< typename T >
 T* atomic<T*>::fetch_add( ptrdiff_t __v__, memory_order __x__ ) volatile
@@ -3916,14 +4071,14 @@ static inline void atomic_thread_fence(memory_order order)
 
 /** @todo Do we want to try to support a user's signal-handler? */
 static inline void atomic_signal_fence(memory_order order)
-{ /* No-op? */ }
+{	 /* No-op? */ }
 #ifdef __cplusplus
 }
 #endif
 
 
 #ifdef __cplusplus
-} // namespace std
+}	// namespace std
 #endif
 
-#endif /* __IMPATOMIC_H__ */
+#endif	/* __IMPATOMIC_H__ */
diff --git a/include/librace.h b/include/librace.h
index 83e05d92..68a6d635 100644
--- a/include/librace.h
+++ b/include/librace.h
@@ -11,28 +11,28 @@
 extern "C" {
 #endif
 
-	void store_8(void *addr, uint8_t val);
-	void store_16(void *addr, uint16_t val);
-	void store_32(void *addr, uint32_t val);
-	void store_64(void *addr, uint64_t val);
-
-	uint8_t load_8(const void *addr);
-	uint16_t load_16(const void *addr);
-	uint32_t load_32(const void *addr);
-	uint64_t load_64(const void *addr);
-
-	void cds_store8(void *addr);
-	void cds_store16(void *addr);
-	void cds_store32(void *addr);
-	void cds_store64(void *addr);
-
-	void cds_load8(const void *addr);
-	void cds_load16(const void *addr);
-	void cds_load32(const void *addr);
-	void cds_load64(const void *addr);
+void store_8(void *addr, uint8_t val);
+void store_16(void *addr, uint16_t val);
+void store_32(void *addr, uint32_t val);
+void store_64(void *addr, uint64_t val);
+
+uint8_t load_8(const void *addr);
+uint16_t load_16(const void *addr);
+uint32_t load_32(const void *addr);
+uint64_t load_64(const void *addr);
+
+void cds_store8(void *addr);
+void cds_store16(void *addr);
+void cds_store32(void *addr);
+void cds_store64(void *addr);
+
+void cds_load8(const void *addr);
+void cds_load16(const void *addr);
+void cds_load32(const void *addr);
+void cds_load64(const void *addr);
 
 #ifdef __cplusplus
 }
 #endif
 
-#endif /* __LIBRACE_H__ */
+#endif	/* __LIBRACE_H__ */
diff --git a/include/memoryorder.h b/include/memoryorder.h
index ca496f1a..2117b5eb 100644
--- a/include/memoryorder.h
+++ b/include/memoryorder.h
@@ -13,10 +13,10 @@ namespace std {
 #endif
 
 typedef enum memory_order {
-    memory_order_relaxed, memory_order_consume, memory_order_acquire, 
-    memory_order_release, memory_order_acq_rel, memory_order_seq_cst
+	memory_order_relaxed, memory_order_consume, memory_order_acquire,
+	memory_order_release, memory_order_acq_rel, memory_order_seq_cst
 } memory_order;
-  
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/include/model-assert.h b/include/model-assert.h
index ddc44278..9180e983 100644
--- a/include/model-assert.h
+++ b/include/model-assert.h
@@ -14,4 +14,4 @@ void model_assert(bool expr, const char *file, int line);
 }
 #endif
 
-#endif /* __MODEL_ASSERT_H__ */
+#endif	/* __MODEL_ASSERT_H__ */
diff --git a/include/modeltypes.h b/include/modeltypes.h
index 34525d2c..c60f0a85 100644
--- a/include/modeltypes.h
+++ b/include/modeltypes.h
@@ -21,8 +21,8 @@
  */
 typedef int thread_id_t;
 
-#define THREAD_ID_T_NONE	-1
+#define THREAD_ID_T_NONE        -1
 
 typedef unsigned int modelclock_t;
 
-#endif /* __MODELTYPES_H__ */
+#endif	/* __MODELTYPES_H__ */
diff --git a/include/mutex.h b/include/mutex.h
index 1903a792..d90d6645 100644
--- a/include/mutex.h
+++ b/include/mutex.h
@@ -7,28 +7,36 @@
 #define __CXX_MUTEX__
 
 #include "modeltypes.h"
+#include "mymemory.h"
 
 namespace cdsc {
-	struct mutex_state {
-		void *locked; /* Thread holding the lock */
-		thread_id_t alloc_tid;
-		modelclock_t alloc_clock;
-		int init; // WL
-	};
+struct mutex_state {
+	void *locked;	/* Thread holding the lock */
+	thread_id_t alloc_tid;
+	modelclock_t alloc_clock;
+	int init;	// WL
+};
 
-	class mutex {
-	public:
-		mutex();
-		~mutex() {}
-		void lock();
-		bool try_lock();
-		void unlock();
-		struct mutex_state * get_state() {return &state;}
-		void initialize() { state.init = 1; } // WL
-		bool is_initialized() { return state.init == 1; }
-		
-	private:
-		struct mutex_state state;
-	};
+class mutex {
+public:
+	mutex();
+	~mutex() {}
+	void lock();
+	bool try_lock();
+	void unlock();
+	struct mutex_state * get_state() {return &state;}
+	void initialize() { state.init = 1; }	// WL
+	bool is_initialized() { return state.init == 1; }
+
+private:
+	struct mutex_state state;
+};
+
+class snapmutex : public mutex {
+public:
+	snapmutex() : mutex()
+	{ }
+	SNAPSHOTALLOC
+};
 }
-#endif /* __CXX_MUTEX__ */
+#endif	/* __CXX_MUTEX__ */
diff --git a/include/mypthread.h b/include/mypthread.h
index c55ce485..cffd8c2d 100644
--- a/include/mypthread.h
+++ b/include/mypthread.h
@@ -12,8 +12,8 @@
 typedef void *(*pthread_start_t)(void *);
 
 struct pthread_params {
-    pthread_start_t func;
-    void *arg;
+	pthread_start_t func;
+	void *arg;
 };
 
 extern "C" {
@@ -28,7 +28,7 @@ int pthread_attr_getdetachstate(const pthread_attr_t *, int *);
 int pthread_attr_getguardsize(const pthread_attr_t *, size_t *);
 int pthread_attr_getinheritsched(const pthread_attr_t *, int *);
 int pthread_attr_getschedparam(const pthread_attr_t *,
-          struct sched_param *);
+															 struct sched_param *);
 int pthread_attr_getschedpolicy(const pthread_attr_t *, int *);
 int pthread_attr_getscope(const pthread_attr_t *, int *);
 int pthread_attr_getstackaddr(const pthread_attr_t *, void **);
@@ -38,7 +38,7 @@ int pthread_attr_setdetachstate(pthread_attr_t *, int);
 int pthread_attr_setguardsize(pthread_attr_t *, size_t);
 int pthread_attr_setinheritsched(pthread_attr_t *, int);
 int pthread_attr_setschedparam(pthread_attr_t *,
-          const struct sched_param *);
+															 const struct sched_param *);
 int pthread_attr_setschedpolicy(pthread_attr_t *, int);
 int pthread_attr_setscope(pthread_attr_t *, int);
 int pthread_attr_setstackaddr(pthread_attr_t *, void *);
@@ -63,7 +63,7 @@ int pthread_mutex_getprioceiling(const pthread_mutex_t *, int *);
 int pthread_mutex_setprioceiling(pthread_mutex_t *, int, int *);
 int pthread_mutexattr_destroy(pthread_mutexattr_t *);
 int pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *,
-          int *);
+																		 int *);
 int pthread_mutexattr_getprotocol(const pthread_mutexattr_t *, int *);
 int pthread_mutexattr_getpshared(const pthread_mutexattr_t *, int *);
 int pthread_mutexattr_gettype(const pthread_mutexattr_t *, int *);
@@ -75,7 +75,7 @@ int pthread_mutexattr_settype(pthread_mutexattr_t *, int);
 int pthread_once(pthread_once_t *, void (*)(void));
 int pthread_rwlock_destroy(pthread_rwlock_t *);
 int pthread_rwlock_init(pthread_rwlock_t *,
-          const pthread_rwlockattr_t *);
+												const pthread_rwlockattr_t *);
 int pthread_rwlock_rdlock(pthread_rwlock_t *);
 int pthread_rwlock_tryrdlock(pthread_rwlock_t *);
 int pthread_rwlock_trywrlock(pthread_rwlock_t *);
@@ -83,14 +83,14 @@ int pthread_rwlock_unlock(pthread_rwlock_t *);
 int pthread_rwlock_wrlock(pthread_rwlock_t *);
 int pthread_rwlockattr_destroy(pthread_rwlockattr_t *);
 int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t *,
-          int *);
+																	int *);
 int pthread_rwlockattr_init(pthread_rwlockattr_t *);
 int pthread_rwlockattr_setpshared(pthread_rwlockattr_t *, int);
 int pthread_setcancelstate(int, int *);
 int pthread_setcanceltype(int, int *);
 int pthread_setconcurrency(int);
-int pthread_setschedparam(pthread_t, int ,
-          const struct sched_param *);
+int pthread_setschedparam(pthread_t, int,
+													const struct sched_param *);
 int pthread_setspecific(pthread_key_t, const void *);
 void pthread_testcancel(void);
 
diff --git a/include/stdatomic2.h b/include/stdatomic2.h
index d4d21984..783e934c 100644
--- a/include/stdatomic2.h
+++ b/include/stdatomic2.h
@@ -67,6 +67,6 @@ using std::memory_order_seq_cst;
 using std::atomic_thread_fence;
 using std::atomic_signal_fence;
 
-#endif /* __cplusplus */
+#endif	/* __cplusplus */
 
-#endif /* __STDATOMIC_H__ */
+#endif	/* __STDATOMIC_H__ */
diff --git a/include/threads.h b/include/threads.h
index f38be0ab..7c84e095 100644
--- a/include/threads.h
+++ b/include/threads.h
@@ -17,21 +17,21 @@ typedef void *__thread_identifier;
 extern "C" {
 #endif
 
-	typedef void (*thrd_start_t)(void *);
+typedef void (*thrd_start_t)(void *);
 
-	typedef struct {
-		__thread_identifier priv;
-	} thrd_t;
+typedef struct {
+	__thread_identifier priv;
+} thrd_t;
 
-	int thrd_create(thrd_t *t, thrd_start_t start_routine, void *arg);
-	int thrd_join(thrd_t);
-	void thrd_yield(void);
-	thrd_t thrd_current(void);
+int thrd_create(thrd_t *t, thrd_start_t start_routine, void *arg);
+int thrd_join(thrd_t);
+void thrd_yield(void);
+thrd_t thrd_current(void);
 
-	int user_main(int, char**);
+int user_main(int, char**);
 
 #ifdef __cplusplus
 }
 #endif
 
-#endif /* __THREADS_H__ */
+#endif	/* __THREADS_H__ */
diff --git a/include/wildcard.h b/include/wildcard.h
index c7b9f265..0eaffd5e 100644
--- a/include/wildcard.h
+++ b/include/wildcard.h
@@ -20,12 +20,12 @@
 #define is_normal_mo_infer(x) ((x >= memory_order_relaxed && x <= memory_order_seq_cst) || x == WILDCARD_NONEXIST || x == memory_order_normal)
 #define is_normal_mo(x) ((x >= memory_order_relaxed && x <= memory_order_seq_cst) || x == memory_order_normal)
 
-#define assert_infer(x) for (int i = 0; i <= wildcardNum; i++)\
-	ASSERT(is_normal_mo_infer((x[i])));
+#define assert_infer(x) for (int i = 0;i <= wildcardNum;i++) \
+		ASSERT(is_normal_mo_infer((x[i])));
 
-#define assert_infers(x) for (ModelList<memory_order *>::iterator iter =\
-	(x)->begin(); iter != (x)->end(); iter++)\
-	assert_infer((*iter));
+#define assert_infers(x) for (ModelList<memory_order *>::iterator iter = \
+																(x)->begin();iter != (x)->end();iter++) \
+		assert_infer((*iter));
 
 #define relaxed memory_order_relaxed
 #define release memory_order_release
diff --git a/libthreads.cc b/libthreads.cc
index ca69fdab..5ff106c8 100644
--- a/libthreads.cc
+++ b/libthreads.cc
@@ -12,7 +12,7 @@
  */
 int thrd_create(thrd_t *t, thrd_start_t start_routine, void *arg)
 {
-  struct thread_params params = { start_routine, arg };
+	struct thread_params params = { start_routine, arg };
 	/* seq_cst is just a 'don't care' parameter */
 	model->switch_to_master(new ModelAction(THREAD_CREATE, std::memory_order_seq_cst, t, (uint64_t)&params));
 	return 0;
diff --git a/mutex.cc b/mutex.cc
index 44f64ff7..0776db8e 100644
--- a/mutex.cc
+++ b/mutex.cc
@@ -13,7 +13,8 @@ mutex::mutex()
 	state.locked = NULL;
 	thread_id_t tid = thread_current()->get_id();
 	state.alloc_tid = tid;
-	state.alloc_clock = model->get_execution()->get_cv(tid)->getClock(tid);
+	ClockVector *cv = model->get_execution()->get_cv(tid);
+	state.alloc_clock = cv == NULL ? 0 : cv->getClock(tid);
 }
 
 void mutex::lock()
diff --git a/mymemory.cc b/mymemory.cc
index e72c1421..a85c48c3 100644
--- a/mymemory.cc
+++ b/mymemory.cc
@@ -75,7 +75,7 @@ void *model_malloc(size_t size)
 /** @brief Snapshotting malloc, for use by model-checker (not user progs) */
 void * snapshot_malloc(size_t size)
 {
-  void *tmp = mspace_malloc(model_snapshot_space, size);
+	void *tmp = mspace_malloc(model_snapshot_space, size);
 	ASSERT(tmp);
 	return tmp;
 }
@@ -83,7 +83,7 @@ void * snapshot_malloc(size_t size)
 /** @brief Snapshotting calloc, for use by model-checker (not user progs) */
 void * snapshot_calloc(size_t count, size_t size)
 {
-  void *tmp = mspace_calloc(model_snapshot_space, count, size);
+	void *tmp = mspace_calloc(model_snapshot_space, count, size);
 	ASSERT(tmp);
 	return tmp;
 }
@@ -91,7 +91,7 @@ void * snapshot_calloc(size_t count, size_t size)
 /** @brief Snapshotting realloc, for use by model-checker (not user progs) */
 void *snapshot_realloc(void *ptr, size_t size)
 {
-  void *tmp = mspace_realloc(model_snapshot_space, ptr, size);
+	void *tmp = mspace_realloc(model_snapshot_space, ptr, size);
 	ASSERT(tmp);
 	return tmp;
 }
@@ -99,7 +99,7 @@ void *snapshot_realloc(void *ptr, size_t size)
 /** @brief Snapshotting free, for use by model-checker (not user progs) */
 void snapshot_free(void *ptr)
 {
-  mspace_free(model_snapshot_space, ptr);
+	mspace_free(model_snapshot_space, ptr);
 }
 
 /** Non-snapshotting free for our use. */
diff --git a/pthread.cc b/pthread.cc
index b8c97510..1df85e13 100644
--- a/pthread.cc
+++ b/pthread.cc
@@ -21,7 +21,7 @@ int pthread_create(pthread_t *t, const pthread_attr_t * attr,
 		model = new ModelChecker();
 		model->startChecker();
 	}
-	
+
 	struct pthread_params params = { start_routine, arg };
 
 	ModelAction *act = new ModelAction(PTHREAD_CREATE, std::memory_order_seq_cst, t, (uint64_t)&params);
@@ -54,7 +54,7 @@ void pthread_exit(void *value_ptr) {
 }
 
 int pthread_mutex_init(pthread_mutex_t *p_mutex, const pthread_mutexattr_t *) {
-	cdsc::mutex *m = new cdsc::mutex();
+	cdsc::snapmutex *m = new cdsc::snapmutex();
 
 	if (!model) {
 		snapshot_system_init(10000, 1024, 1024, 40000);
@@ -74,7 +74,7 @@ int pthread_mutex_lock(pthread_mutex_t *p_mutex) {
 		model = new ModelChecker();
 		model->startChecker();
 	}
-	
+
 
 	ModelExecution *execution = model->get_execution();
 
@@ -85,7 +85,7 @@ int pthread_mutex_lock(pthread_mutex_t *p_mutex) {
 		pthread_mutex_init(p_mutex, NULL);
 	}
 
-	cdsc::mutex *m = execution->getMutexMap()->get(p_mutex);
+	cdsc::snapmutex *m = execution->getMutexMap()->get(p_mutex);
 
 	if (m != NULL) {
 		m->lock();
@@ -98,12 +98,12 @@ int pthread_mutex_lock(pthread_mutex_t *p_mutex) {
 
 int pthread_mutex_trylock(pthread_mutex_t *p_mutex) {
 	ModelExecution *execution = model->get_execution();
-	cdsc::mutex *m = execution->getMutexMap()->get(p_mutex);
+	cdsc::snapmutex *m = execution->getMutexMap()->get(p_mutex);
 	return m->try_lock();
 }
 int pthread_mutex_unlock(pthread_mutex_t *p_mutex) {
 	ModelExecution *execution = model->get_execution();
-	cdsc::mutex *m = execution->getMutexMap()->get(p_mutex);
+	cdsc::snapmutex *m = execution->getMutexMap()->get(p_mutex);
 
 	if (m != NULL) {
 		m->unlock();
@@ -123,7 +123,7 @@ int pthread_mutex_timedlock (pthread_mutex_t *__restrict p_mutex,
         if (!execution->mutex_map.contains(p_mutex)) {
                 pthread_mutex_init(p_mutex, NULL);
         }
-        cdsc::mutex *m = execution->mutex_map.get(p_mutex);
+        cdsc::snapmutex *m = execution->mutex_map.get(p_mutex);
 
         if (m != NULL) {
                 m->lock();
@@ -147,7 +147,7 @@ int pthread_key_delete(pthread_key_t) {
 }
 
 int pthread_cond_init(pthread_cond_t *p_cond, const pthread_condattr_t *attr) {
-	cdsc::condition_variable *v = new cdsc::condition_variable();
+	cdsc::snapcondition_variable *v = new cdsc::snapcondition_variable();
 
 	ModelExecution *execution = model->get_execution();
 	execution->getCondMap()->put(p_cond, v);
@@ -159,8 +159,8 @@ int pthread_cond_wait(pthread_cond_t *p_cond, pthread_mutex_t *p_mutex) {
 	if ( !execution->getCondMap()->contains(p_cond) )
 		pthread_cond_init(p_cond, NULL);
 
-	cdsc::condition_variable *v = execution->getCondMap()->get(p_cond);
-	cdsc::mutex *m = execution->getMutexMap()->get(p_mutex);
+	cdsc::snapcondition_variable *v = execution->getCondMap()->get(p_cond);
+	cdsc::snapmutex *m = execution->getMutexMap()->get(p_mutex);
 
 	v->wait(*m);
 	return 0;
@@ -176,8 +176,8 @@ int pthread_cond_timedwait(pthread_cond_t *p_cond,
 	if ( !execution->getMutexMap()->contains(p_mutex) )
 		pthread_mutex_init(p_mutex, NULL);
 
-	cdsc::condition_variable *v = execution->getCondMap()->get(p_cond);
-	cdsc::mutex *m = execution->getMutexMap()->get(p_mutex);
+	cdsc::snapcondition_variable *v = execution->getCondMap()->get(p_cond);
+	cdsc::snapmutex *m = execution->getMutexMap()->get(p_mutex);
 
 	model->switch_to_master(new ModelAction(NOOP, std::memory_order_seq_cst, v));
 //	v->wait(*m);
@@ -191,7 +191,7 @@ int pthread_cond_signal(pthread_cond_t *p_cond) {
 	if ( !execution->getCondMap()->contains(p_cond) )
 		pthread_cond_init(p_cond, NULL);
 
-	cdsc::condition_variable *v = execution->getCondMap()->get(p_cond);
+	cdsc::snapcondition_variable *v = execution->getCondMap()->get(p_cond);
 
 	v->notify_one();
 	return 0;
-- 
2.34.1