Removed redundant spaces
diff --git a/cds/memory/michael/allocator.h b/cds/memory/michael/allocator.h
index d25fa5ffa50f24bbbb7dab4f505f3c3b87374ee5..4ef0f6d832fccc065eddf53df258d4dd5c66611d 100644
--- a/cds/memory/michael/allocator.h
+++ b/cds/memory/michael/allocator.h
@@ -1,7 +1,35 @@
-//$$CDS-header$$
+/*
+    This file is a part of libcds - Concurrent Data Structures library
+
+    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+    Source code repo: http://github.com/khizmax/libcds/
+    Download: http://sourceforge.net/projects/libcds/files/
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice, this
+      list of conditions and the following disclaimer.
 
-#ifndef __CDS_MEMORY_MICHAEL_ALLOCATOR_TMPL_H
-#define __CDS_MEMORY_MICHAEL_ALLOCATOR_TMPL_H
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSLIB_MEMORY_MICHAEL_ALLOCATOR_TMPL_H
+#define CDSLIB_MEMORY_MICHAEL_ALLOCATOR_TMPL_H
 
 /*
     Michael allocator implementation
@@ -15,6 +43,8 @@
     2011.01.02 khizmax  Created
 */
 
+#include <stdlib.h>
+#include <mutex>        // unique_lock
 #include <cds/init.h>
 #include <cds/memory/michael/options.h>
 #include <cds/memory/michael/bound_check.h>
 
 #include <cds/os/topology.h>
 #include <cds/os/alloc_aligned.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
 #include <cds/details/type_padding.h>
 #include <cds/details/marked_ptr.h>
 #include <cds/container/vyukov_mpmc_cycle_queue.h>
 #include <cds/user_setup/cache_line.h>
 #include <cds/details/lib.h>
 
-#include <stdlib.h>
 #include <boost/intrusive/list.hpp>
 
 namespace cds {
@@ -59,7 +88,8 @@ namespace michael {
         /// Allocates memory block of \p nSize bytes (\p malloc wrapper)
         static void * alloc( size_t nSize )
         {
-            return ::malloc( nSize );
+            void * p = ::malloc( nSize );
+            return p;
         }
         /// Returning memory block to the system (\p free wrapper)
         static void free( void * p )
@@ -144,14 +174,14 @@ namespace michael {
             }
         };
 #endif
-
-        typedef container::VyukovMPMCCycleQueue<
-            void *,
-            opt::buffer< opt::v::static_buffer<void *, FreeListCapacity> >
+        struct free_list_traits : public cds::container::vyukov_queue::traits
+        {
+            typedef opt::v::initialized_static_buffer<void *, FreeListCapacity> buffer;
 #ifdef _DEBUG
-            , opt::value_cleaner< make_null_ptr >
+            typedef make_null_ptr value_cleaner;
 #endif
-        >   free_list;
+        };
+        typedef container::VyukovMPMCCycleQueue< void *, free_list_traits > free_list;
 
         free_list   m_FreeList;
         //@endcond
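
Note on this hunk: the page free-list now configures VyukovMPMCCycleQueue through a traits struct instead of the old option-list form. A minimal standalone sketch of the same traits-based declaration and its bounded push/pop use; the capacity constant and the helper functions are assumptions (pop's bool-returning signature is taken from this hunk, push is assumed symmetric):

    #include <cds/container/vyukov_mpmc_cycle_queue.h>
    #include <cds/opt/buffer.h>
    #include <cstdlib>

    // traits struct plays the same role as free_list_traits above
    struct page_list_traits: public cds::container::vyukov_queue::traits
    {
        // fixed-capacity buffer allocated once; 256 is an arbitrary stand-in for FreeListCapacity
        typedef cds::opt::v::initialized_static_buffer< void *, 256 > buffer;
    };
    typedef cds::container::VyukovMPMCCycleQueue< void *, page_list_traits > page_list;

    void cache_page( page_list& fl, void * pPage )
    {
        if ( !fl.push( pPage ))          // bounded queue: push fails when full
            std::free( pPage );          // in this toy the page simply goes back to malloc
    }

    void * reuse_page( page_list& fl )
    {
        void * p;
        return fl.pop( p ) ? p : nullptr;   // nullptr when the cache is empty
    }
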
@@ -169,7 +199,7 @@ namespace michael {
         ~page_cached_allocator()
         {
             void * pPage;
-            while ( m_FreeList.pop(pPage) )
+            while ( m_FreeList.pop(pPage))
                 base_class::free( pPage );
         }
         //@endcond
@@ -178,7 +208,7 @@ namespace michael {
         void * alloc()
         {
             void * pPage;
-            if ( !m_FreeList.pop( pPage ) )
+            if ( !m_FreeList.pop( pPage ))
                 pPage = base_class::alloc();
             return pPage;
         }
@@ -275,7 +305,7 @@ namespace michael {
         /// Gets details::size_class struct for size-class index \p nIndex
         static const size_class * at( sizeclass_index nIndex )
         {
-            assert( nIndex < size() );
+            assert( nIndex < size());
             return m_szClass + nIndex;
         }
     };
@@ -306,7 +336,7 @@ namespace michael {
         typedef details::free_list_locked_hook item_hook;
         typedef Lock lock_type;
     protected:
-        typedef cds::lock::scoped_lock<lock_type>   auto_lock;
+        typedef std::unique_lock<lock_type>   auto_lock;
 
         mutable lock_type   m_access;
         //@endcond
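
Note on this hunk: here (and in partial_list_locked below) the home-grown cds::lock::scoped_lock guard is replaced by std::unique_lock from <mutex>. A tiny self-contained illustration of the RAII pattern the typedef provides, using std::mutex as a stand-in for the spin-lock template parameter (assumed example, not part of the patch):

    #include <mutex>

    struct locked_box
    {
        typedef std::mutex                    lock_type;
        typedef std::unique_lock< lock_type > auto_lock;   // same typedef shape as above

        mutable lock_type m_access;
        int               m_value = 0;

        void set( int v )
        {
            auto_lock al( m_access );   // locks in the constructor
            m_value = v;
        }                               // unlocks in the destructor, even on exception
    };
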
@@ -322,7 +352,7 @@ namespace michael {
         /// Push superblock descriptor to free-list
         void push( T * pDesc )
         {
-            assert( base_class::node_algorithms::inited( static_cast<item_hook *>(pDesc) ) );
+            assert( base_class::node_algorithms::inited( static_cast<item_hook *>(pDesc)));
             auto_lock al(m_access);
             base_class::push_back( *pDesc );
         }
@@ -331,11 +361,11 @@ namespace michael {
         T *   pop()
         {
             auto_lock al(m_access);
-            if ( base_class::empty() )
+            if ( base_class::empty())
                 return nullptr;
             T& rDesc = base_class::front();
             base_class::pop_front();
-            assert( base_class::node_algorithms::inited( static_cast<item_hook *>(&rDesc) ) );
+            assert( base_class::node_algorithms::inited( static_cast<item_hook *>(&rDesc)));
             return &rDesc;
         }
 
@@ -360,7 +390,7 @@ namespace michael {
         typedef details::partial_list_locked_hook item_hook;
         typedef Lock    lock_type;
     protected:
-        typedef cds::lock::scoped_lock<lock_type>   auto_lock;
+        typedef std::unique_lock<lock_type>   auto_lock;
 
         mutable lock_type   m_access;
         //@endcond
@@ -377,7 +407,7 @@ namespace michael {
         void    push( T * pDesc )
         {
             auto_lock al( m_access );
-            assert( base_class::node_algorithms::inited( static_cast<item_hook *>(pDesc) ) );
+            assert( base_class::node_algorithms::inited( static_cast<item_hook *>(pDesc)));
             base_class::push_back( *pDesc );
         }
 
@@ -385,11 +415,11 @@ namespace michael {
         T * pop()
         {
             auto_lock al( m_access );
-            if ( base_class::empty() )
+            if ( base_class::empty())
                 return nullptr;
             T& rDesc = base_class::front();
             base_class::pop_front();
-            assert( base_class::node_algorithms::inited( static_cast<item_hook *>(&rDesc) ) );
+            assert( base_class::node_algorithms::inited( static_cast<item_hook *>(&rDesc)));
             return &rDesc;
         }
 
@@ -399,8 +429,8 @@ namespace michael {
             assert(pDesc != nullptr);
             auto_lock al( m_access );
             // !inited(pDesc) is equal to "pDesc is being linked to partial list"
-            if ( !base_class::node_algorithms::inited( static_cast<item_hook *>(pDesc) ) ) {
-                base_class::erase( base_class::iterator_to( *pDesc ) );
+            if ( !base_class::node_algorithms::inited( static_cast<item_hook *>(pDesc))) {
+                base_class::erase( base_class::iterator_to( *pDesc ));
                 return true;
             }
             return false;
@@ -428,13 +458,13 @@ namespace michael {
         size_t      nPageDeallocCount   ;  ///< Count of page (superblock) deallocated
         size_t      nDescAllocCount     ;  ///< Count of superblock descriptors
         size_t      nDescFull           ;  ///< Count of full superblock
-        atomic64u_t nBytesAllocated     ;  ///< Count of allocated bytes (for heap managed memory blocks)
-        atomic64u_t nBytesDeallocated   ;  ///< Count of deallocated bytes (for heap managed memory blocks)
+        uint64_t    nBytesAllocated     ;  ///< Count of allocated bytes (for heap managed memory blocks)
+        uint64_t    nBytesDeallocated   ;  ///< Count of deallocated bytes (for heap managed memory blocks)
 
         size_t      nSysAllocCount      ;  ///< Count of \p alloc and \p alloc_aligned function call (for large memory blocks that allocated directly from OS)
         size_t      nSysFreeCount       ;  ///< Count of \p free and \p free_aligned function call (for large memory blocks that allocated directly from OS)
-        atomic64u_t nSysBytesAllocated  ;  ///< Count of allocated bytes (for large memory blocks that allocated directly from OS)
-        atomic64_t  nSysBytesDeallocated;  ///< Count of deallocated bytes (for large memory blocks that allocated directly from OS)
+        uint64_t    nSysBytesAllocated  ;  ///< Count of allocated bytes (for large memory blocks that allocated directly from OS)
+        int64_t     nSysBytesDeallocated;  ///< Count of deallocated bytes (for large memory blocks that allocated directly from OS)
 
         // Internal contention indicators
         /// CAS failure counter for updating active field of active block of \p alloc_from_active Heap internal function
@@ -702,8 +732,8 @@ namespace michael {
             typedef page_cached_allocator<>     page_heap;
             typedef aligned_malloc_heap         aligned_heap;
             typedef default_sizeclass_selector  sizeclass_selector;
-            typedef free_list_locked<cds::lock::Spin>  free_list;
-            typedef partial_list_locked<cds::lock::Spin>    partial_list;
+            typedef free_list_locked<cds::sync::spin>    free_list;
+            typedef partial_list_locked<cds::sync::spin> partial_list;
             typedef procheap_empty_stat         procheap_stat;
             typedef os_allocated_empty          os_allocated_stat;
             typedef cds::opt::none              check_bounds;
@@ -829,12 +859,12 @@ namespace michael {
 
             union {
                 superblock_desc *   pDesc       ;   // pointer to superblock descriptor
-                atomic32u_t         nSize       ;   // block size (allocated form OS)
+                uint32_t         nSize       ;   // block size (allocated form OS)
             };
-            atomic32u_t         nFlags;
+            uint32_t         nFlags;
 
         public:
-            void  set( superblock_desc * pdesc, atomic32u_t isAligned )
+            void  set( superblock_desc * pdesc, uint32_t isAligned )
             {
                 pDesc = pdesc;
                 nFlags = isAligned ? bitAligned : 0;
@@ -869,7 +899,7 @@ namespace michael {
 
             size_t getOSAllocSize() const
             {
-                assert( isOSAllocated() );
+                assert( isOSAllocated());
                 return nSize;
             }
 
@@ -888,20 +918,20 @@ namespace michael {
             // allocated from OS
             marked_desc_ptr     pDesc;
         public:
-            void  set( superblock_desc * pdesc, atomic32u_t isAligned )
+            void  set( superblock_desc * pdesc, uint32_t isAligned )
             {
                 pDesc = marked_desc_ptr( pdesc, isAligned );
             }
 
             superblock_desc * desc()
             {
-                assert( !isOSAllocated() );
-                return (pDesc.bits() & bitAligned) ? reinterpret_cast<block_header *>( pDesc.ptr() )->desc() : pDesc.ptr();
+                assert( !isOSAllocated());
+                return (pDesc.bits() & bitAligned) ? reinterpret_cast<block_header *>( pDesc.ptr())->desc() : pDesc.ptr();
             }
 
             block_header * begin()
             {
-                return (pDesc.bits() & bitAligned) ? reinterpret_cast<block_header *>( pDesc.ptr() ) : this;
+                return (pDesc.bits() & bitAligned) ? reinterpret_cast<block_header *>( pDesc.ptr()) : this;
             }
 
             bool isAligned() const
@@ -922,8 +952,8 @@ namespace michael {
 
             size_t getOSAllocSize() const
             {
-                assert( isOSAllocated() );
-                return reinterpret_cast<uptr_atomic_t>( pDesc.ptr() ) >> 2;
+                assert( isOSAllocated());
+                return reinterpret_cast<uintptr_t>( pDesc.ptr()) >> 2;
             }
 
         };
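
Note on this hunk: both block_header variants pack flags (and, for OS-allocated blocks, the size) into the low bits of an aligned pointer, hence the uintptr_t casts and the >> 2. A standalone sketch of that low-bit tagging idiom, assuming at least 4-byte alignment (names below are illustrative, not libcds identifiers):

    #include <cassert>
    #include <cstdint>

    struct desc;                                            // opaque descriptor

    inline std::uintptr_t make_tagged( desc * p, unsigned bits )
    {
        assert(( reinterpret_cast<std::uintptr_t>( p ) & 3 ) == 0 );  // alignment frees 2 bits
        assert( bits < 4 );
        return reinterpret_cast<std::uintptr_t>( p ) | bits;
    }

    inline desc * ptr_of( std::uintptr_t tagged )
    {
        return reinterpret_cast<desc *>( tagged & ~std::uintptr_t( 3 ));
    }

    inline unsigned flags_of( std::uintptr_t tagged )
    {
        return static_cast<unsigned>( tagged & 3 );
    }
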
@@ -956,7 +986,7 @@ namespace michael {
         class active_tag {
         //@cond
             superblock_desc *       pDesc;
-            atomic32u_t             nCredits;
+            uint32_t             nCredits;
 
         public:
             static const unsigned int c_nMaxCredits = 0 - 1;
@@ -967,12 +997,12 @@ namespace michael {
                 , nCredits(0)
             {}
 
-            active_tag( active_tag const& ) CDS_NOEXCEPT_DEFAULTED = default;
-            ~active_tag() CDS_NOEXCEPT_DEFAULTED = default;
-            active_tag& operator=(active_tag const& ) CDS_NOEXCEPT_DEFAULTED = default;
-#       if defined(CDS_MOVE_SEMANTICS_SUPPORT) && !defined(CDS_DISABLE_DEFAULT_MOVE_CTOR)
-            active_tag( active_tag&& ) CDS_NOEXCEPT_DEFAULTED = default;
-            active_tag& operator=(active_tag&&) CDS_NOEXCEPT_DEFAULTED = default;
+            active_tag( active_tag const& ) CDS_NOEXCEPT = default;
+            ~active_tag() CDS_NOEXCEPT = default;
+            active_tag& operator=(active_tag const& ) CDS_NOEXCEPT = default;
+#       if !defined(CDS_DISABLE_DEFAULT_MOVE_CTOR)
+            active_tag( active_tag&& ) CDS_NOEXCEPT = default;
+            active_tag& operator=(active_tag&&) CDS_NOEXCEPT = default;
 #       endif
 
             /// Returns pointer to superblock descriptor
@@ -1025,13 +1055,13 @@ namespace michael {
                 : pDesc( nullptr )
             {}
             // Clang 3.1: error: first argument to atomic operation must be a pointer to a trivially-copyable type
-            //active_tag() CDS_NOEXCEPT_DEFAULTED = default;
-            active_tag( active_tag const& ) CDS_NOEXCEPT_DEFAULTED = default;
-            ~active_tag() CDS_NOEXCEPT_DEFAULTED = default;
-            active_tag& operator=(active_tag const&) CDS_NOEXCEPT_DEFAULTED = default;
-#       if defined(CDS_MOVE_SEMANTICS_SUPPORT) && !defined(CDS_DISABLE_DEFAULT_MOVE_CTOR)
-            active_tag( active_tag&& ) CDS_NOEXCEPT_DEFAULTED = default;
-            active_tag& operator=(active_tag&&) CDS_NOEXCEPT_DEFAULTED = default;
+            //active_tag() CDS_NOEXCEPT = default;
+            active_tag( active_tag const& ) CDS_NOEXCEPT = default;
+            ~active_tag() CDS_NOEXCEPT = default;
+            active_tag& operator=(active_tag const&) CDS_NOEXCEPT = default;
+#       if !defined(CDS_DISABLE_DEFAULT_MOVE_CTOR)
+            active_tag( active_tag&& ) CDS_NOEXCEPT = default;
+            active_tag& operator=(active_tag&&) CDS_NOEXCEPT = default;
 #       endif
             superblock_desc *    ptr() const
             {
@@ -1040,7 +1070,7 @@ namespace michael {
 
             void ptr( superblock_desc * p )
             {
-                assert( (reinterpret_cast<uptr_atomic_t>(p) & c_nMaxCredits) == 0 );
+                assert( (reinterpret_cast<uintptr_t>(p) & c_nMaxCredits) == 0 );
                 pDesc = marked_desc_ptr( p, pDesc.bits());
             }
 
@@ -1062,7 +1092,7 @@ namespace michael {
 
             void set( superblock_desc * pSB, unsigned int n )
             {
-                assert( (reinterpret_cast<uptr_atomic_t>(pSB) & c_nMaxCredits) == 0 );
+                assert( (reinterpret_cast<uintptr_t>(pSB) & c_nMaxCredits) == 0 );
                 pDesc = marked_desc_ptr( pSB, n );
             }
 
@@ -1100,7 +1130,7 @@ namespace michael {
                 , pSizeClass( nullptr )
                 , pPartial( nullptr )
             {
-                assert( (reinterpret_cast<uptr_atomic_t>(this) & (c_nAlignment - 1)) == 0 );
+                assert( (reinterpret_cast<uintptr_t>(this) & (c_nAlignment - 1)) == 0 );
             }
             //@endcond
 
@@ -1113,10 +1143,10 @@ namespace michael {
                         pDesc =  partialList.pop();
                         break;
                     }
-                } while ( !pPartial.compare_exchange_weak( pDesc, nullptr, atomics::memory_order_release, atomics::memory_order_relaxed ) );
+                } while ( !pPartial.compare_exchange_weak( pDesc, nullptr, atomics::memory_order_release, atomics::memory_order_relaxed ));
 
-                //assert( pDesc == nullptr || free_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_free_list_hook *>(pDesc) ));
-                //assert( pDesc == nullptr || partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc) ) );
+                //assert( pDesc == nullptr || free_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_free_list_hook *>(pDesc)));
+                //assert( pDesc == nullptr || partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc)));
                 return pDesc;
             }
 
@@ -1124,10 +1154,10 @@ namespace michael {
             void add_partial( superblock_desc * pDesc )
             {
                 assert( pPartial != pDesc );
-                //assert( partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc) ) );
+                //assert( partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc)));
 
                 superblock_desc * pCur = nullptr;
-                if ( !pPartial.compare_exchange_strong(pCur, pDesc, atomics::memory_order_acq_rel, atomics::memory_order_relaxed) )
+                if ( !pPartial.compare_exchange_strong(pCur, pDesc, atomics::memory_order_acq_rel, atomics::memory_order_relaxed))
                     partialList.push( pDesc );
             }
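
Note on these two hunks: get_partial()/add_partial() implement a one-element cache in front of the locked partial list; a CAS tries to claim or install the pPartial slot, and only on failure does the code fall back to the list. A simplified, self-contained sketch of that pattern with std::atomic (the deque-plus-mutex stand-in for partial_list and all names below are assumptions):

    #include <atomic>
    #include <deque>
    #include <mutex>

    struct slot_cache
    {
        std::atomic<int *>  slot { nullptr };   // plays the role of pPartial
        std::deque<int *>   overflow;           // stands in for partialList
        std::mutex          guard;

        void put( int * p )
        {
            int * expected = nullptr;
            if ( !slot.compare_exchange_strong( expected, p,
                    std::memory_order_acq_rel, std::memory_order_relaxed )) {
                std::lock_guard<std::mutex> lk( guard );
                overflow.push_back( p );        // slot already occupied
            }
        }

        int * get()
        {
            int * p = slot.load( std::memory_order_acquire );
            do {
                if ( !p ) {                     // slot empty - take from the list
                    std::lock_guard<std::mutex> lk( guard );
                    if ( overflow.empty())
                        return nullptr;
                    p = overflow.front();
                    overflow.pop_front();
                    return p;
                }
            } while ( !slot.compare_exchange_weak( p, nullptr,
                        std::memory_order_release, std::memory_order_relaxed ));
            return p;                           // successfully emptied the slot
        }
    };
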
 
@@ -1175,7 +1205,7 @@ namespace michael {
         /// Allocates large block from system memory
         block_header * alloc_from_OS( size_t nSize )
         {
-            block_header * p = reinterpret_cast<block_header *>( m_LargeHeap.alloc( nSize ) );
+            block_header * p = reinterpret_cast<block_header *>( m_LargeHeap.alloc( nSize ));
             m_OSAllocStat.incBytesAllocated( nSize );
             p->setOSAllocated( nSize );
             return p;
@@ -1191,7 +1221,7 @@ namespace michael {
             while ( true ) {
                 ++nCollision;
                 oldActive = pProcHeap->active.load(atomics::memory_order_acquire);
-                if ( !oldActive.ptr() )
+                if ( !oldActive.ptr())
                     return nullptr;
                 unsigned int nCredits = oldActive.credits();
                 active_tag  newActive   ; // default = 0
@@ -1221,7 +1251,11 @@ namespace michael {
 
                 assert( oldAnchor.avail < pDesc->nCapacity );
                 pAddr = pDesc->pSB + oldAnchor.avail * (unsigned long long) pDesc->nBlockSize;
+
+                // TSan reports data race if the block contained atomic ops before
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
                 newAnchor.avail = reinterpret_cast<free_block_header *>( pAddr )->nNextFree;
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
                 newAnchor.tag += 1;
 
                 if ( oldActive.credits() == 0 ) {
@@ -1250,8 +1284,8 @@ namespace michael {
             // block_header fields is not needed to setup
             // It was set in alloc_from_new_superblock
             assert( reinterpret_cast<block_header *>( pAddr )->desc() == pDesc );
-            assert( !reinterpret_cast<block_header *>( pAddr )->isOSAllocated() );
-            assert( !reinterpret_cast<block_header *>( pAddr )->isAligned() );
+            assert( !reinterpret_cast<block_header *>( pAddr )->isOSAllocated());
+            assert( !reinterpret_cast<block_header *>( pAddr )->isAligned());
 
             return reinterpret_cast<block_header *>( pAddr );
         }
@@ -1284,7 +1318,7 @@ namespace michael {
                 newAnchor.count -= nMoreCredits + 1;
                 newAnchor.state = (nMoreCredits > 0) ? SBSTATE_ACTIVE : SBSTATE_FULL;
                 newAnchor.tag += 1;
-            } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed) );
+            } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed));
 
             if ( nCollision )
                 pProcHeap->stat.incPartialDescCASFailureCount( nCollision );
@@ -1305,7 +1339,7 @@ namespace michael {
                 pAddr = pDesc->pSB + oldAnchor.avail * pDesc->nBlockSize;
                 newAnchor.avail = reinterpret_cast<free_block_header *>( pAddr )->nNextFree;
                 ++newAnchor.tag;
-            } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed) );
+            } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed));
 
             if ( nCollision )
                 pProcHeap->stat.incPartialAnchorCASFailureCount( nCollision );
@@ -1320,8 +1354,8 @@ namespace michael {
             // block_header fields is not needed to setup
             // It was set in alloc_from_new_superblock
             assert( reinterpret_cast<block_header *>( pAddr )->desc() == pDesc );
-            assert( !reinterpret_cast<block_header *>( pAddr )->isAligned() );
-            assert( !reinterpret_cast<block_header *>( pAddr )->isOSAllocated() );
+            assert( !reinterpret_cast<block_header *>( pAddr )->isAligned());
+            assert( !reinterpret_cast<block_header *>( pAddr )->isOSAllocated());
 
             return reinterpret_cast<block_header *>( pAddr );
         }
@@ -1367,7 +1401,7 @@ namespace michael {
         /// Find appropriate processor heap based on size-class selected
         processor_heap * find_heap( typename sizeclass_selector::sizeclass_index nSizeClassIndex )
         {
-            assert( nSizeClassIndex < m_SizeClassSelector.size() );
+            assert( nSizeClassIndex < m_SizeClassSelector.size());
 
             unsigned int nProcessorId = m_Topology.current_processor();
             assert( nProcessorId < m_nProcessorCount );
@@ -1379,7 +1413,7 @@ namespace michael {
             while ( !pDesc ) {
 
                 processor_desc * pNewDesc = new_processor_desc( nProcessorId );
-                if ( m_arrProcDesc[nProcessorId].compare_exchange_strong( pDesc, pNewDesc, atomics::memory_order_release, atomics::memory_order_relaxed ) ) {
+                if ( m_arrProcDesc[nProcessorId].compare_exchange_strong( pDesc, pNewDesc, atomics::memory_order_release, atomics::memory_order_relaxed )) {
                     pDesc = pNewDesc;
                     break;
                 }
@@ -1398,7 +1432,7 @@ namespace michael {
             active_tag  newActive;
             newActive.set( pDesc, nCredits - 1 );
 
-            if ( pProcHeap->active.compare_exchange_strong( nullActive, newActive, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
+            if ( pProcHeap->active.compare_exchange_strong( nullActive, newActive, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ))
                 return;
 
             // Someone installed another active superblock.
@@ -1419,6 +1453,7 @@ namespace michael {
         /// Allocates new processor descriptor
         processor_desc * new_processor_desc( unsigned int nProcessorId )
         {
+            CDS_UNUSED( nProcessorId );
             processor_desc * pDesc;
             const size_t nPageHeapCount = m_SizeClassSelector.pageTypeCount();
 
@@ -1448,7 +1483,10 @@ namespace michael {
 
             static_assert( (sizeof(processor_heap) % c_nAlignment) == 0, "sizeof(processor_heap) error" );
 
-            pDesc = new( m_AlignedHeap.alloc( szTotal, c_nAlignment ) ) processor_desc;
+            // TSan false positive: a new descriptor will be linked further with release fence
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+
+            pDesc = new( m_AlignedHeap.alloc( szTotal, c_nAlignment )) processor_desc;
 
             pDesc->pageHeaps = reinterpret_cast<page_heap *>( pDesc + 1 );
             for ( size_t i = 0; i < nPageHeapCount; ++i )
@@ -1457,8 +1495,8 @@ namespace michael {
             // initialize processor heaps
             pDesc->arrProcHeap =
                 reinterpret_cast<processor_heap *>(
-                    reinterpret_cast<uptr_atomic_t>(reinterpret_cast<byte *>(pDesc + 1) + sizeof(pDesc->pageHeaps[0]) * nPageHeapCount + c_nAlignment - 1)
-                    & ~(uptr_atomic_t(c_nAlignment) - 1)
+                    reinterpret_cast<uintptr_t>(reinterpret_cast<byte *>(pDesc + 1) + sizeof(pDesc->pageHeaps[0]) * nPageHeapCount + c_nAlignment - 1)
+                    & ~(uintptr_t(c_nAlignment) - 1)
                 );
 
             processor_heap * pProcHeap = pDesc->arrProcHeap;
@@ -1472,54 +1510,34 @@ namespace michael {
                 else
                     pProcHeap->nPageIdx = pProcHeap->pSizeClass->nSBSizeIdx;
             }
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
             return pDesc;
         }
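
Note on this hunk: new_processor_desc() places arrProcHeap right after the page-heap array inside a single aligned allocation, rounding the address up with the usual add-and-mask idiom (now spelled with uintptr_t instead of uptr_atomic_t). A minimal sketch of that align-up step, assuming the alignment is a power of two (helper name is illustrative):

    #include <cstdint>

    inline void * align_up( void * p, std::uintptr_t alignment )
    {
        std::uintptr_t const mask = alignment - 1;           // power of two assumed
        return reinterpret_cast<void *>(
            ( reinterpret_cast<std::uintptr_t>( p ) + mask ) & ~mask );
    }
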
 
 
         void free_processor_heap( processor_heap * pProcHeap )
         {
-            if ( pProcHeap->nPageIdx == processor_heap::c_nPageSelfAllocation ) {
-                superblock_desc * pDesc;
-
-                for ( pDesc = pProcHeap->partialList.pop(); pDesc; pDesc = pProcHeap->partialList.pop()) {
-                    free( pDesc->pSB );
-                    m_AlignedHeap.free( pDesc );
-                }
+            assert( pProcHeap->nPageIdx != processor_heap::c_nPageSelfAllocation );
 
-                superblock_desc * pPartial = pProcHeap->pPartial.load(atomics::memory_order_relaxed);
-                if ( pPartial ) {
-                    free( pPartial->pSB );
-                    m_AlignedHeap.free( pPartial );
-                }
+            page_heap& pageHeap = pProcHeap->pProcDesc->pageHeaps[pProcHeap->nPageIdx];
+            superblock_desc * pDesc;
 
-                pDesc = pProcHeap->active.load(atomics::memory_order_relaxed).ptr();
-                if ( pDesc ) {
-                    free( pDesc->pSB );
-                    m_AlignedHeap.free( pDesc );
-                }
+            for ( pDesc = pProcHeap->partialList.pop(); pDesc; pDesc = pProcHeap->partialList.pop()) {
+                pageHeap.free( pDesc->pSB );
+                m_AlignedHeap.free( pDesc );
             }
-            else {
-                page_heap& pageHeap = pProcHeap->pProcDesc->pageHeaps[pProcHeap->nPageIdx];
-                superblock_desc * pDesc;
 
-                for ( pDesc = pProcHeap->partialList.pop(); pDesc; pDesc = pProcHeap->partialList.pop()) {
-                    pageHeap.free( pDesc->pSB );
-                    m_AlignedHeap.free( pDesc );
-                }
-
-                superblock_desc * pPartial = pProcHeap->pPartial.load(atomics::memory_order_relaxed);
-                if ( pPartial ) {
-                    pageHeap.free( pPartial->pSB );
-                    m_AlignedHeap.free( pPartial );
-                }
+            superblock_desc * pPartial = pProcHeap->pPartial.load(atomics::memory_order_relaxed);
+            if ( pPartial ) {
+                pageHeap.free( pPartial->pSB );
+                m_AlignedHeap.free( pPartial );
+            }
 
-                pDesc = pProcHeap->active.load(atomics::memory_order_relaxed).ptr();
-                if ( pDesc ) {
-                    pageHeap.free( pDesc->pSB );
-                    m_AlignedHeap.free( pDesc );
-                }
+            pDesc = pProcHeap->active.load(atomics::memory_order_relaxed).ptr();
+            if ( pDesc ) {
+                pageHeap.free( pDesc->pSB );
+                m_AlignedHeap.free( pDesc );
             }
-            pProcHeap->~processor_heap();
         }
 
         /// Frees processor descriptor
@@ -1527,8 +1545,17 @@ namespace michael {
         {
             const size_t nPageHeapCount = m_SizeClassSelector.pageTypeCount();
 
-            for (unsigned int j = 0; j < m_SizeClassSelector.size(); ++j )
-                free_processor_heap( pDesc->arrProcHeap + j );
+            {
+                processor_heap * const pProcHeapEnd = pDesc->arrProcHeap + m_SizeClassSelector.size();
+
+                // free large blocks only
+                for ( processor_heap * pProcHeap = pDesc->arrProcHeap; pProcHeap < pProcHeapEnd; ++pProcHeap ) {
+                    if ( pProcHeap->nPageIdx != processor_heap::c_nPageSelfAllocation )
+                        free_processor_heap( pProcHeap );
+
+                    pProcHeap->~processor_heap();
+                }
+            }
 
             for ( superblock_desc * pSBDesc = pDesc->listSBDescFree.pop(); pSBDesc; pSBDesc = pDesc->listSBDescFree.pop())
                 m_AlignedHeap.free( pSBDesc );
@@ -1536,7 +1563,6 @@ namespace michael {
             for (size_t i = 0; i < nPageHeapCount; ++i )
                 (pDesc->pageHeaps + i)->page_heap::~page_heap();
 
-            //m_IntHeap.free( pDesc->pageHeaps );
             pDesc->pageHeaps = nullptr;
 
             pDesc->processor_desc::~processor_desc();
@@ -1549,8 +1575,8 @@ namespace michael {
             anchor_tag anchor;
             superblock_desc * pDesc = pProcHeap->pProcDesc->listSBDescFree.pop();
             if ( pDesc == nullptr ) {
-                pDesc = new( m_AlignedHeap.alloc(sizeof(superblock_desc), c_nAlignment ) ) superblock_desc;
-                assert( (uptr_atomic_t(pDesc) & (c_nAlignment - 1)) == 0 );
+                pDesc = new( m_AlignedHeap.alloc(sizeof(superblock_desc), c_nAlignment )) superblock_desc;
+                assert( (uintptr_t(pDesc) & (c_nAlignment - 1)) == 0 );
 
                 anchor = pDesc->anchor.load( atomics::memory_order_relaxed );
                 anchor.tag = 0;
@@ -1588,12 +1614,10 @@ namespace michael {
             pDesc->pProcHeap->stat.incBlockDeallocated();
             processor_desc * pProcDesc = pDesc->pProcHeap->pProcDesc;
             if ( pDesc->pSB ) {
-                if ( pDesc->pProcHeap->nPageIdx == processor_heap::c_nPageSelfAllocation ) {
+                if ( pDesc->pProcHeap->nPageIdx == processor_heap::c_nPageSelfAllocation )
                     free( pDesc->pSB );
-                }
-                else {
+                else
                     pProcDesc->pageHeaps[pDesc->pProcHeap->nPageIdx].free( pDesc->pSB );
-                }
             }
             pProcDesc->listSBDescFree.push( pDesc );
         }
@@ -1607,7 +1631,7 @@ namespace michael {
             if ( nSizeClassIndex == sizeclass_selector::c_nNoSizeClass ) {
                 return alloc_from_OS( nSize );
             }
-            assert( nSizeClassIndex < m_SizeClassSelector.size() );
+            assert( nSizeClassIndex < m_SizeClassSelector.size());
 
             block_header * pBlock;
             processor_heap * pProcHeap;
@@ -1670,7 +1694,7 @@ namespace michael {
             block_header * pBlock = int_alloc( nSize + sizeof(block_header) + bound_checker::trailer_size );
 
             // Bound checking is only for our blocks
-            if ( !pBlock->isOSAllocated() ) {
+            if ( !pBlock->isOSAllocated()) {
                 // the block is allocated from our heap - bound checker is applicable
                 m_BoundChecker.make_trailer(
                     reinterpret_cast<byte *>(pBlock + 1),
@@ -1679,6 +1703,7 @@ namespace michael {
                 );
             }
 
+            CDS_TSAN_ANNOTATE_NEW_MEMORY( pBlock + 1, nSize );
             return pBlock + 1;
         }
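
Note on this hunk: alloc() returns pBlock + 1, and free() in the next hunk steps back with reinterpret_cast<block_header *>( pMemory ) - 1, the classic header-before-payload layout. A toy, self-contained version of that layout (heavily simplified; the real block_header carries far more state than this stand-in):

    #include <cstdlib>

    struct hdr { std::size_t size; };                // stand-in for block_header

    void * toy_alloc( std::size_t n )
    {
        hdr * h = static_cast<hdr *>( std::malloc( sizeof( hdr ) + n ));
        if ( !h )
            return nullptr;
        h->size = n;
        return h + 1;                                // user pointer is just past the header
    }

    void toy_free( void * p )
    {
        if ( p )
            std::free( static_cast<hdr *>( p ) - 1 );  // step back to the header
    }
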
 
@@ -1693,14 +1718,14 @@ namespace michael {
             block_header * pRedirect = (reinterpret_cast<block_header *>( pMemory ) - 1);
             block_header * pBlock = pRedirect->begin();
 
-            if ( pBlock->isOSAllocated() ) {
+            if ( pBlock->isOSAllocated()) {
                 // Block has been allocated from OS
-                m_OSAllocStat.incBytesDeallocated( pBlock->getOSAllocSize() );
+                m_OSAllocStat.incBytesDeallocated( pBlock->getOSAllocSize());
                 m_LargeHeap.free( pBlock );
                 return;
             }
 
-            assert( !pBlock->isAligned() );
+            assert( !pBlock->isAligned());
             superblock_desc * pDesc = pBlock->desc();
 
             m_BoundChecker.check_bounds(
@@ -1735,7 +1760,7 @@ namespace michael {
                 }
                 else
                     newAnchor.count += 1;
-            } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed ) );
+            } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed ));
 
             pProcHeap->stat.incFreeCount();
 
@@ -1777,12 +1802,12 @@ namespace michael {
             block_header * pBlock = reinterpret_cast<block_header *>( pMemory ) - 1;
 
             // Reallocation of aligned block is not possible
-            if ( pBlock->isAligned() ) {
+            if ( pBlock->isAligned()) {
                 assert( false );
                 return nullptr;
             }
 
-            if ( pBlock->isOSAllocated() ) {
+            if ( pBlock->isOSAllocated()) {
                 // The block has been allocated from OS
                 size_t nCurSize = pBlock->getOSAllocSize();
 
@@ -1792,7 +1817,7 @@ namespace michael {
                 // Grow block size
                 void * pNewBuf = alloc( nOrigSize );
                 if ( pNewBuf ) {
-                    memcpy( pNewBuf, pMemory, nCurSize - sizeof(block_header) );
+                    memcpy( pNewBuf, pMemory, nCurSize - sizeof(block_header));
                     free( pMemory );
                 }
                 return pNewBuf;
@@ -1812,7 +1837,7 @@ namespace michael {
 
             void * pNew = alloc( nNewSize );
             if ( pNew ) {
-                memcpy( pNew, pMemory, pDesc->nBlockSize - sizeof(block_header) );
+                memcpy( pNew, pMemory, pDesc->nBlockSize - sizeof(block_header));
                 free( pMemory );
                 return pNew;
             }
@@ -1828,26 +1853,26 @@ namespace michael {
         {
             if ( nAlignment <= c_nDefaultBlockAlignment ) {
                 void * p = alloc( nSize );
-                assert( (reinterpret_cast<uptr_atomic_t>(p) & (nAlignment - 1)) == 0 );
+                assert( (reinterpret_cast<uintptr_t>(p) & (nAlignment - 1)) == 0 );
                 return p;
             }
 
             block_header * pBlock = int_alloc( nSize + nAlignment + sizeof(block_header) + bound_checker::trailer_size );
 
             block_header * pRedirect;
-            if ( (reinterpret_cast<uptr_atomic_t>( pBlock + 1) & (nAlignment - 1)) != 0 ) {
-                pRedirect = reinterpret_cast<block_header *>( (reinterpret_cast<uptr_atomic_t>( pBlock ) & ~(nAlignment - 1)) + nAlignment ) - 1;
+            if ( (reinterpret_cast<uintptr_t>( pBlock + 1) & (nAlignment - 1)) != 0 ) {
+                pRedirect = reinterpret_cast<block_header *>( (reinterpret_cast<uintptr_t>( pBlock ) & ~(nAlignment - 1)) + nAlignment ) - 1;
                 assert( pRedirect != pBlock );
                 pRedirect->set( reinterpret_cast<superblock_desc *>(pBlock), 1 );
 
-                assert( (reinterpret_cast<uptr_atomic_t>(pRedirect + 1) & (nAlignment - 1)) == 0 );
+                assert( (reinterpret_cast<uintptr_t>(pRedirect + 1) & (nAlignment - 1)) == 0 );
             }
             else
                 pRedirect = pBlock;
 
 
             // Bound checking is only for our blocks
-            if ( !pBlock->isOSAllocated() ) {
+            if ( !pBlock->isOSAllocated()) {
                 // the block is allocated from our heap - bound checker is applicable
                 m_BoundChecker.make_trailer(
                     reinterpret_cast<byte *>(pRedirect + 1),
@@ -1891,4 +1916,4 @@ namespace michael {
 
 }}} // namespace cds::memory::michael
 
-#endif // __CDS_MEMORY_MICHAEL_ALLOCATOR_TMPL_H
+#endif // CDSLIB_MEMORY_MICHAEL_ALLOCATOR_TMPL_H