Removed redundant spaces
diff --git a/cds/memory/michael/allocator.h b/cds/memory/michael/allocator.h
index 0271139504076b99a6f9d7b2e55414780b5d40c0..4ef0f6d832fccc065eddf53df258d4dd5c66611d 100644
--- a/cds/memory/michael/allocator.h
+++ b/cds/memory/michael/allocator.h
@@ -1,4 +1,32 @@
-//$$CDS-header$$
+/*
+    This file is a part of libcds - Concurrent Data Structures library
+
+    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+    Source code repo: http://github.com/khizmax/libcds/
+    Download: http://sourceforge.net/projects/libcds/files/
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice, this
+      list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
 
 #ifndef CDSLIB_MEMORY_MICHAEL_ALLOCATOR_TMPL_H
 #define CDSLIB_MEMORY_MICHAEL_ALLOCATOR_TMPL_H
@@ -148,7 +176,7 @@ namespace michael {
 #endif
         struct free_list_traits : public cds::container::vyukov_queue::traits
         {
-            typedef opt::v::static_buffer<void *, FreeListCapacity> buffer;
+            typedef opt::v::initialized_static_buffer<void *, FreeListCapacity> buffer;
 #ifdef _DEBUG
             typedef make_null_ptr value_cleaner;
 #endif
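
This hunk swaps the free list's storage policy for the initialized variant, presumably so that every slot of the fixed-capacity buffer starts out value-initialized. A minimal standalone sketch of what value-initialized fixed-capacity storage looks like (plain C++; the names below are illustrative, not the libcds opt::v classes):

    // Sketch only: raw vs. value-initialized fixed-capacity storage, which is the
    // distinction an "initialized" static buffer implies. Illustrative names.
    #include <cstddef>

    template <typename T, std::size_t Capacity>
    class initialized_buffer_sketch {
        T m_items[Capacity] = {};   // every slot is value-initialized (nullptr for T = void*)
    public:
        T* buffer() { return m_items; }
        constexpr std::size_t capacity() const { return Capacity; }
    };

    // With T = void*, a debug value_cleaner such as make_null_ptr can then keep
    // consumed slots null, matching the all-null initial state.
    initialized_buffer_sketch<void*, 1024> page_slots;
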
@@ -171,7 +199,7 @@ namespace michael {
         ~page_cached_allocator()
         {
             void * pPage;
-            while ( m_FreeList.pop(pPage) )
+            while ( m_FreeList.pop(pPage))
                 base_class::free( pPage );
         }
         //@endcond
@@ -180,7 +208,7 @@ namespace michael {
         void * alloc()
         {
             void * pPage;
-            if ( !m_FreeList.pop( pPage ) )
+            if ( !m_FreeList.pop( pPage ))
                 pPage = base_class::alloc();
             return pPage;
         }
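
For context, the page-cached allocator above keeps freed pages in a bounded free list and falls back to the base page allocator on a miss; the destructor drains the cache back to the base class. A simplified sketch of that caching shape, with a mutex-guarded stack standing in for the lock-free queue (illustrative only, assuming a fixed page size):

    // Sketch only: reuse a cached page if one is available, otherwise allocate a
    // fresh one; drain the cache on destruction. Not the libcds implementation.
    #include <cstdlib>
    #include <mutex>
    #include <vector>

    class page_cache_sketch {
        std::vector<void*> m_cache;
        std::mutex         m_lock;
        static const std::size_t c_pageSize = 64 * 1024;
    public:
        void* alloc()
        {
            {
                std::lock_guard<std::mutex> g( m_lock );
                if ( !m_cache.empty()) {            // reuse a cached page if possible
                    void* p = m_cache.back();
                    m_cache.pop_back();
                    return p;
                }
            }
            return std::malloc( c_pageSize );       // miss: fall back to the base allocator
        }
        void free( void* p )
        {
            std::lock_guard<std::mutex> g( m_lock );
            m_cache.push_back( p );                 // the real allocator presumably frees to the base heap when the cache is full
        }
        ~page_cache_sketch()
        {
            for ( void* p : m_cache )               // drain the cache back to the base allocator
                std::free( p );
        }
    };
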
@@ -277,7 +305,7 @@ namespace michael {
         /// Gets details::size_class struct for size-class index \p nIndex
         static const size_class * at( sizeclass_index nIndex )
         {
-            assert( nIndex < size() );
+            assert( nIndex < size());
             return m_szClass + nIndex;
         }
     };
@@ -324,7 +352,7 @@ namespace michael {
         /// Push superblock descriptor to free-list
         void push( T * pDesc )
         {
-            assert( base_class::node_algorithms::inited( static_cast<item_hook *>(pDesc) ) );
+            assert( base_class::node_algorithms::inited( static_cast<item_hook *>(pDesc)));
             auto_lock al(m_access);
             base_class::push_back( *pDesc );
         }
@@ -333,11 +361,11 @@ namespace michael {
         T *   pop()
         {
             auto_lock al(m_access);
-            if ( base_class::empty() )
+            if ( base_class::empty())
                 return nullptr;
             T& rDesc = base_class::front();
             base_class::pop_front();
-            assert( base_class::node_algorithms::inited( static_cast<item_hook *>(&rDesc) ) );
+            assert( base_class::node_algorithms::inited( static_cast<item_hook *>(&rDesc)));
             return &rDesc;
         }
 
@@ -379,7 +407,7 @@ namespace michael {
         void    push( T * pDesc )
         {
             auto_lock al( m_access );
-            assert( base_class::node_algorithms::inited( static_cast<item_hook *>(pDesc) ) );
+            assert( base_class::node_algorithms::inited( static_cast<item_hook *>(pDesc)));
             base_class::push_back( *pDesc );
         }
 
@@ -387,11 +415,11 @@ namespace michael {
         T * pop()
         {
             auto_lock al( m_access );
-            if ( base_class::empty() )
+            if ( base_class::empty())
                 return nullptr;
             T& rDesc = base_class::front();
             base_class::pop_front();
-            assert( base_class::node_algorithms::inited( static_cast<item_hook *>(&rDesc) ) );
+            assert( base_class::node_algorithms::inited( static_cast<item_hook *>(&rDesc)));
             return &rDesc;
         }
 
@@ -401,8 +429,8 @@ namespace michael {
             assert(pDesc != nullptr);
             auto_lock al( m_access );
             // !inited(pDesc) means that pDesc is still linked into the partial list
-            if ( !base_class::node_algorithms::inited( static_cast<item_hook *>(pDesc) ) ) {
-                base_class::erase( base_class::iterator_to( *pDesc ) );
+            if ( !base_class::node_algorithms::inited( static_cast<item_hook *>(pDesc))) {
+                base_class::erase( base_class::iterator_to( *pDesc ));
                 return true;
             }
             return false;
@@ -430,13 +458,13 @@ namespace michael {
         size_t      nPageDeallocCount   ;  ///< Count of pages (superblocks) deallocated
         size_t      nDescAllocCount     ;  ///< Count of superblock descriptors
         size_t      nDescFull           ;  ///< Count of full superblocks
-        atomic64u_t nBytesAllocated     ;  ///< Count of allocated bytes (for heap managed memory blocks)
-        atomic64u_t nBytesDeallocated   ;  ///< Count of deallocated bytes (for heap managed memory blocks)
+        uint64_t    nBytesAllocated     ;  ///< Count of allocated bytes (for heap managed memory blocks)
+        uint64_t    nBytesDeallocated   ;  ///< Count of deallocated bytes (for heap managed memory blocks)
 
         size_t      nSysAllocCount      ;  ///< Count of \p alloc and \p alloc_aligned function calls (for large memory blocks allocated directly from the OS)
         size_t      nSysFreeCount       ;  ///< Count of \p free and \p free_aligned function calls (for large memory blocks allocated directly from the OS)
-        atomic64u_t nSysBytesAllocated  ;  ///< Count of allocated bytes (for large memory blocks that allocated directly from OS)
-        atomic64_t  nSysBytesDeallocated;  ///< Count of deallocated bytes (for large memory blocks that allocated directly from OS)
+        uint64_t    nSysBytesAllocated  ;  ///< Count of allocated bytes (for large memory blocks allocated directly from the OS)
+        int64_t     nSysBytesDeallocated;  ///< Count of deallocated bytes (for large memory blocks allocated directly from the OS)
 
         // Internal contention indicators
         /// CAS failure counter for updating active field of active block of \p alloc_from_active Heap internal function
@@ -871,7 +899,7 @@ namespace michael {
 
             size_t getOSAllocSize() const
             {
-                assert( isOSAllocated() );
+                assert( isOSAllocated());
                 return nSize;
             }
 
@@ -897,13 +925,13 @@ namespace michael {
 
             superblock_desc * desc()
             {
-                assert( !isOSAllocated() );
-                return (pDesc.bits() & bitAligned) ? reinterpret_cast<block_header *>( pDesc.ptr() )->desc() : pDesc.ptr();
+                assert( !isOSAllocated());
+                return (pDesc.bits() & bitAligned) ? reinterpret_cast<block_header *>( pDesc.ptr())->desc() : pDesc.ptr();
             }
 
             block_header * begin()
             {
-                return (pDesc.bits() & bitAligned) ? reinterpret_cast<block_header *>( pDesc.ptr() ) : this;
+                return (pDesc.bits() & bitAligned) ? reinterpret_cast<block_header *>( pDesc.ptr()) : this;
             }
 
             bool isAligned() const
@@ -924,8 +952,8 @@ namespace michael {
 
             size_t getOSAllocSize() const
             {
-                assert( isOSAllocated() );
-                return reinterpret_cast<uintptr_t>( pDesc.ptr() ) >> 2;
+                assert( isOSAllocated());
+                return reinterpret_cast<uintptr_t>( pDesc.ptr()) >> 2;
             }
 
         };
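
The block_header above packs a descriptor pointer, an "aligned" bit and an "OS-allocated" bit into a single word, and reuses the pointer field as a size shifted past those bits when the block came straight from the OS. A standalone sketch of that encoding (illustrative names, assuming the stored pointer is at least 4-byte aligned; the real code uses a marked-pointer helper):

    // Sketch only: one word multiplexing a pointer, two tag bits, and a size.
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    class tagged_header_sketch {
        std::uintptr_t m_word = 0;
        static const std::uintptr_t bitAligned     = 1;
        static const std::uintptr_t bitOSAllocated = 2;
    public:
        void set_descriptor( void* pDesc )           // pDesc must be at least 4-byte aligned
        {
            assert( (reinterpret_cast<std::uintptr_t>( pDesc ) & 3) == 0 );
            m_word = reinterpret_cast<std::uintptr_t>( pDesc );
        }
        void set_os_allocated( std::size_t nSize )   // the pointer field doubles as size << 2
        {
            m_word = (static_cast<std::uintptr_t>( nSize ) << 2) | bitOSAllocated;
        }
        bool is_os_allocated() const { return (m_word & bitOSAllocated) != 0; }
        bool is_aligned() const      { return (m_word & bitAligned) != 0; }
        void* descriptor() const
        {
            assert( !is_os_allocated());
            return reinterpret_cast<void*>( m_word & ~std::uintptr_t( 3 ));
        }
        std::size_t os_alloc_size() const
        {
            assert( is_os_allocated());
            return static_cast<std::size_t>( m_word >> 2 );
        }
    };
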
@@ -1115,10 +1143,10 @@ namespace michael {
                         pDesc =  partialList.pop();
                         break;
                     }
-                } while ( !pPartial.compare_exchange_weak( pDesc, nullptr, atomics::memory_order_release, atomics::memory_order_relaxed ) );
+                } while ( !pPartial.compare_exchange_weak( pDesc, nullptr, atomics::memory_order_release, atomics::memory_order_relaxed ));
 
-                //assert( pDesc == nullptr || free_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_free_list_hook *>(pDesc) ));
-                //assert( pDesc == nullptr || partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc) ) );
+                //assert( pDesc == nullptr || free_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_free_list_hook *>(pDesc)));
+                //assert( pDesc == nullptr || partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc)));
                 return pDesc;
             }
 
@@ -1126,10 +1154,10 @@ namespace michael {
             void add_partial( superblock_desc * pDesc )
             {
                 assert( pPartial != pDesc );
-                //assert( partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc) ) );
+                //assert( partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc)));
 
                 superblock_desc * pCur = nullptr;
-                if ( !pPartial.compare_exchange_strong(pCur, pDesc, atomics::memory_order_acq_rel, atomics::memory_order_relaxed) )
+                if ( !pPartial.compare_exchange_strong(pCur, pDesc, atomics::memory_order_acq_rel, atomics::memory_order_relaxed))
                     partialList.push( pDesc );
             }
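
pPartial acts as a one-element hot slot in front of the partial-descriptor list: add_partial tries to park the descriptor in the empty slot and overflows to the list, while pop_partial drains the slot first and falls back to the list. A simplified standalone sketch of that shape, with std::atomic and a mutex-guarded list standing in for the intrusive lock-free structures:

    // Sketch only: a single atomic "hot" slot backed by an overflow list.
    #include <atomic>
    #include <deque>
    #include <mutex>

    template <typename T>
    class partial_slot_sketch {
        std::atomic<T*>  m_slot { nullptr };
        std::mutex       m_listLock;
        std::deque<T*>   m_list;
    public:
        void push( T* p )
        {
            T* expected = nullptr;
            // Try to park the descriptor in the empty slot; otherwise fall back to the list.
            if ( !m_slot.compare_exchange_strong( expected, p, std::memory_order_acq_rel,
                                                  std::memory_order_relaxed )) {
                std::lock_guard<std::mutex> g( m_listLock );
                m_list.push_back( p );
            }
        }
        T* pop()
        {
            // Take whatever is in the slot; if it is empty, try the overflow list.
            T* p = m_slot.exchange( nullptr, std::memory_order_acq_rel );
            if ( p )
                return p;
            std::lock_guard<std::mutex> g( m_listLock );
            if ( m_list.empty())
                return nullptr;
            p = m_list.front();
            m_list.pop_front();
            return p;
        }
    };
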
 
@@ -1177,7 +1205,7 @@ namespace michael {
         /// Allocates large block from system memory
         block_header * alloc_from_OS( size_t nSize )
         {
-            block_header * p = reinterpret_cast<block_header *>( m_LargeHeap.alloc( nSize ) );
+            block_header * p = reinterpret_cast<block_header *>( m_LargeHeap.alloc( nSize ));
             m_OSAllocStat.incBytesAllocated( nSize );
             p->setOSAllocated( nSize );
             return p;
@@ -1193,7 +1221,7 @@ namespace michael {
             while ( true ) {
                 ++nCollision;
                 oldActive = pProcHeap->active.load(atomics::memory_order_acquire);
-                if ( !oldActive.ptr() )
+                if ( !oldActive.ptr())
                     return nullptr;
                 unsigned int nCredits = oldActive.credits();
                 active_tag  newActive   ; // default = 0
@@ -1256,8 +1284,8 @@ namespace michael {
             // block_header fields do not need to be set up here;
             // they were set in alloc_from_new_superblock
             assert( reinterpret_cast<block_header *>( pAddr )->desc() == pDesc );
-            assert( !reinterpret_cast<block_header *>( pAddr )->isOSAllocated() );
-            assert( !reinterpret_cast<block_header *>( pAddr )->isAligned() );
+            assert( !reinterpret_cast<block_header *>( pAddr )->isOSAllocated());
+            assert( !reinterpret_cast<block_header *>( pAddr )->isAligned());
 
             return reinterpret_cast<block_header *>( pAddr );
         }
@@ -1290,7 +1318,7 @@ namespace michael {
                 newAnchor.count -= nMoreCredits + 1;
                 newAnchor.state = (nMoreCredits > 0) ? SBSTATE_ACTIVE : SBSTATE_FULL;
                 newAnchor.tag += 1;
-            } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed) );
+            } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed));
 
             if ( nCollision )
                 pProcHeap->stat.incPartialDescCASFailureCount( nCollision );
@@ -1311,7 +1339,7 @@ namespace michael {
                 pAddr = pDesc->pSB + oldAnchor.avail * pDesc->nBlockSize;
                 newAnchor.avail = reinterpret_cast<free_block_header *>( pAddr )->nNextFree;
                 ++newAnchor.tag;
-            } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed) );
+            } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed));
 
             if ( nCollision )
                 pProcHeap->stat.incPartialAnchorCASFailureCount( nCollision );
@@ -1326,8 +1354,8 @@ namespace michael {
             // block_header fields do not need to be set up here;
             // they were set in alloc_from_new_superblock
             assert( reinterpret_cast<block_header *>( pAddr )->desc() == pDesc );
-            assert( !reinterpret_cast<block_header *>( pAddr )->isAligned() );
-            assert( !reinterpret_cast<block_header *>( pAddr )->isOSAllocated() );
+            assert( !reinterpret_cast<block_header *>( pAddr )->isAligned());
+            assert( !reinterpret_cast<block_header *>( pAddr )->isOSAllocated());
 
             return reinterpret_cast<block_header *>( pAddr );
         }
@@ -1373,7 +1401,7 @@ namespace michael {
         /// Find appropriate processor heap based on size-class selected
         processor_heap * find_heap( typename sizeclass_selector::sizeclass_index nSizeClassIndex )
         {
-            assert( nSizeClassIndex < m_SizeClassSelector.size() );
+            assert( nSizeClassIndex < m_SizeClassSelector.size());
 
             unsigned int nProcessorId = m_Topology.current_processor();
             assert( nProcessorId < m_nProcessorCount );
@@ -1385,7 +1413,7 @@ namespace michael {
             while ( !pDesc ) {
 
                 processor_desc * pNewDesc = new_processor_desc( nProcessorId );
-                if ( m_arrProcDesc[nProcessorId].compare_exchange_strong( pDesc, pNewDesc, atomics::memory_order_release, atomics::memory_order_relaxed ) ) {
+                if ( m_arrProcDesc[nProcessorId].compare_exchange_strong( pDesc, pNewDesc, atomics::memory_order_release, atomics::memory_order_relaxed )) {
                     pDesc = pNewDesc;
                     break;
                 }
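
find_heap lazily creates the per-processor descriptor and installs it with a single compare_exchange_strong; the loser of the race discards its copy and uses the winner's pointer. A minimal sketch of that install-once pattern with std::atomic (illustrative types; the real code releases the extra descriptor through free_processor_desc rather than delete):

    // Sketch only: create once, install with CAS, loser frees its copy.
    #include <atomic>

    struct proc_desc_sketch { /* per-processor state */ };

    proc_desc_sketch* get_or_create( std::atomic<proc_desc_sketch*>& slot )
    {
        proc_desc_sketch* pDesc = slot.load( std::memory_order_acquire );
        while ( !pDesc ) {
            proc_desc_sketch* pNew = new proc_desc_sketch;
            if ( slot.compare_exchange_strong( pDesc, pNew, std::memory_order_acq_rel,
                                               std::memory_order_acquire )) {
                pDesc = pNew;       // we installed our descriptor
                break;
            }
            delete pNew;            // another thread won the race; pDesc now holds its pointer
        }
        return pDesc;
    }
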
@@ -1404,7 +1432,7 @@ namespace michael {
             active_tag  newActive;
             newActive.set( pDesc, nCredits - 1 );
 
-            if ( pProcHeap->active.compare_exchange_strong( nullActive, newActive, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
+            if ( pProcHeap->active.compare_exchange_strong( nullActive, newActive, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ))
                 return;
 
             // Someone installed another active superblock.
@@ -1458,7 +1486,7 @@ namespace michael {
             // TSan false positive: a new descriptor will be linked further with release fence
             CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
 
-            pDesc = new( m_AlignedHeap.alloc( szTotal, c_nAlignment ) ) processor_desc;
+            pDesc = new( m_AlignedHeap.alloc( szTotal, c_nAlignment )) processor_desc;
 
             pDesc->pageHeaps = reinterpret_cast<page_heap *>( pDesc + 1 );
             for ( size_t i = 0; i < nPageHeapCount; ++i )
@@ -1489,48 +1517,27 @@ namespace michael {
 
         void free_processor_heap( processor_heap * pProcHeap )
         {
-            if ( pProcHeap->nPageIdx == processor_heap::c_nPageSelfAllocation ) {
-                superblock_desc * pDesc;
+            assert( pProcHeap->nPageIdx != processor_heap::c_nPageSelfAllocation );
 
-                for ( pDesc = pProcHeap->partialList.pop(); pDesc; pDesc = pProcHeap->partialList.pop()) {
-                    free( pDesc->pSB );
-                    m_AlignedHeap.free( pDesc );
-                }
+            page_heap& pageHeap = pProcHeap->pProcDesc->pageHeaps[pProcHeap->nPageIdx];
+            superblock_desc * pDesc;
 
-                superblock_desc * pPartial = pProcHeap->pPartial.load(atomics::memory_order_relaxed);
-                if ( pPartial ) {
-                    free( pPartial->pSB );
-                    m_AlignedHeap.free( pPartial );
-                }
-
-                pDesc = pProcHeap->active.load(atomics::memory_order_relaxed).ptr();
-                if ( pDesc ) {
-                    free( pDesc->pSB );
-                    m_AlignedHeap.free( pDesc );
-                }
+            for ( pDesc = pProcHeap->partialList.pop(); pDesc; pDesc = pProcHeap->partialList.pop()) {
+                pageHeap.free( pDesc->pSB );
+                m_AlignedHeap.free( pDesc );
             }
-            else {
-                page_heap& pageHeap = pProcHeap->pProcDesc->pageHeaps[pProcHeap->nPageIdx];
-                superblock_desc * pDesc;
-
-                for ( pDesc = pProcHeap->partialList.pop(); pDesc; pDesc = pProcHeap->partialList.pop()) {
-                    pageHeap.free( pDesc->pSB );
-                    m_AlignedHeap.free( pDesc );
-                }
 
-                superblock_desc * pPartial = pProcHeap->pPartial.load(atomics::memory_order_relaxed);
-                if ( pPartial ) {
-                    pageHeap.free( pPartial->pSB );
-                    m_AlignedHeap.free( pPartial );
-                }
+            superblock_desc * pPartial = pProcHeap->pPartial.load(atomics::memory_order_relaxed);
+            if ( pPartial ) {
+                pageHeap.free( pPartial->pSB );
+                m_AlignedHeap.free( pPartial );
+            }
 
-                pDesc = pProcHeap->active.load(atomics::memory_order_relaxed).ptr();
-                if ( pDesc ) {
-                    pageHeap.free( pDesc->pSB );
-                    m_AlignedHeap.free( pDesc );
-                }
+            pDesc = pProcHeap->active.load(atomics::memory_order_relaxed).ptr();
+            if ( pDesc ) {
+                pageHeap.free( pDesc->pSB );
+                m_AlignedHeap.free( pDesc );
             }
-            pProcHeap->~processor_heap();
         }
 
         /// Frees processor descriptor
@@ -1538,8 +1545,17 @@ namespace michael {
         {
             const size_t nPageHeapCount = m_SizeClassSelector.pageTypeCount();
 
-            for (unsigned int j = 0; j < m_SizeClassSelector.size(); ++j )
-                free_processor_heap( pDesc->arrProcHeap + j );
+            {
+                processor_heap * const pProcHeapEnd = pDesc->arrProcHeap + m_SizeClassSelector.size();
+
+                // free large blocks only
+                for ( processor_heap * pProcHeap = pDesc->arrProcHeap; pProcHeap < pProcHeapEnd; ++pProcHeap ) {
+                    if ( pProcHeap->nPageIdx != processor_heap::c_nPageSelfAllocation )
+                        free_processor_heap( pProcHeap );
+
+                    pProcHeap->~processor_heap();
+                }
+            }
 
             for ( superblock_desc * pSBDesc = pDesc->listSBDescFree.pop(); pSBDesc; pSBDesc = pDesc->listSBDescFree.pop())
                 m_AlignedHeap.free( pSBDesc );
@@ -1547,7 +1563,6 @@ namespace michael {
             for (size_t i = 0; i < nPageHeapCount; ++i )
                 (pDesc->pageHeaps + i)->page_heap::~page_heap();
 
-            //m_IntHeap.free( pDesc->pageHeaps );
             pDesc->pageHeaps = nullptr;
 
             pDesc->processor_desc::~processor_desc();
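
Taken together, the two hunks above split the teardown: free_processor_heap now handles only heaps whose superblocks come from a page heap (it asserts on the self-allocation case), while free_processor_desc skips self-allocating heaps but still runs every heap's destructor. A rough standalone sketch of that shape, with stand-in types (the real heaps are placement-constructed inside the processor descriptor):

    // Sketch only: conditionally release superblocks, always run the destructor.
    #include <cstddef>
    #include <cstdlib>

    struct heap_sketch {
        bool  self_allocating;
        void* superblock;
        void  release_superblock() { std::free( superblock ); superblock = nullptr; }
        ~heap_sketch() {}
    };

    void destroy_heaps( heap_sketch* arr, std::size_t count )
    {
        for ( heap_sketch* h = arr; h != arr + count; ++h ) {
            if ( !h->self_allocating )      // mirrors nPageIdx != c_nPageSelfAllocation
                h->release_superblock();    // give the superblock back to its page heap
            h->~heap_sketch();              // every heap object is destroyed either way
        }
    }
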
@@ -1560,7 +1575,7 @@ namespace michael {
             anchor_tag anchor;
             superblock_desc * pDesc = pProcHeap->pProcDesc->listSBDescFree.pop();
             if ( pDesc == nullptr ) {
-                pDesc = new( m_AlignedHeap.alloc(sizeof(superblock_desc), c_nAlignment ) ) superblock_desc;
+                pDesc = new( m_AlignedHeap.alloc(sizeof(superblock_desc), c_nAlignment )) superblock_desc;
                 assert( (uintptr_t(pDesc) & (c_nAlignment - 1)) == 0 );
 
                 anchor = pDesc->anchor.load( atomics::memory_order_relaxed );
@@ -1599,12 +1614,10 @@ namespace michael {
             pDesc->pProcHeap->stat.incBlockDeallocated();
             processor_desc * pProcDesc = pDesc->pProcHeap->pProcDesc;
             if ( pDesc->pSB ) {
-                if ( pDesc->pProcHeap->nPageIdx == processor_heap::c_nPageSelfAllocation ) {
+                if ( pDesc->pProcHeap->nPageIdx == processor_heap::c_nPageSelfAllocation )
                     free( pDesc->pSB );
-                }
-                else {
+                else
                     pProcDesc->pageHeaps[pDesc->pProcHeap->nPageIdx].free( pDesc->pSB );
-                }
             }
             pProcDesc->listSBDescFree.push( pDesc );
         }
@@ -1618,7 +1631,7 @@ namespace michael {
             if ( nSizeClassIndex == sizeclass_selector::c_nNoSizeClass ) {
                 return alloc_from_OS( nSize );
             }
-            assert( nSizeClassIndex < m_SizeClassSelector.size() );
+            assert( nSizeClassIndex < m_SizeClassSelector.size());
 
             block_header * pBlock;
             processor_heap * pProcHeap;
@@ -1681,7 +1694,7 @@ namespace michael {
             block_header * pBlock = int_alloc( nSize + sizeof(block_header) + bound_checker::trailer_size );
 
             // Bound checking is only for our blocks
-            if ( !pBlock->isOSAllocated() ) {
+            if ( !pBlock->isOSAllocated()) {
                 // the block is allocated from our heap - bound checker is applicable
                 m_BoundChecker.make_trailer(
                     reinterpret_cast<byte *>(pBlock + 1),
@@ -1705,14 +1718,14 @@ namespace michael {
             block_header * pRedirect = (reinterpret_cast<block_header *>( pMemory ) - 1);
             block_header * pBlock = pRedirect->begin();
 
-            if ( pBlock->isOSAllocated() ) {
+            if ( pBlock->isOSAllocated()) {
                 // Block has been allocated from OS
-                m_OSAllocStat.incBytesDeallocated( pBlock->getOSAllocSize() );
+                m_OSAllocStat.incBytesDeallocated( pBlock->getOSAllocSize());
                 m_LargeHeap.free( pBlock );
                 return;
             }
 
-            assert( !pBlock->isAligned() );
+            assert( !pBlock->isAligned());
             superblock_desc * pDesc = pBlock->desc();
 
             m_BoundChecker.check_bounds(
@@ -1747,7 +1760,7 @@ namespace michael {
                 }
                 else
                     newAnchor.count += 1;
-            } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed ) );
+            } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed ));
 
             pProcHeap->stat.incFreeCount();
 
@@ -1789,12 +1802,12 @@ namespace michael {
             block_header * pBlock = reinterpret_cast<block_header *>( pMemory ) - 1;
 
             // Reallocation of aligned block is not possible
-            if ( pBlock->isAligned() ) {
+            if ( pBlock->isAligned()) {
                 assert( false );
                 return nullptr;
             }
 
-            if ( pBlock->isOSAllocated() ) {
+            if ( pBlock->isOSAllocated()) {
                 // The block has been allocated from OS
                 size_t nCurSize = pBlock->getOSAllocSize();
 
@@ -1804,7 +1817,7 @@ namespace michael {
                 // Grow block size
                 void * pNewBuf = alloc( nOrigSize );
                 if ( pNewBuf ) {
-                    memcpy( pNewBuf, pMemory, nCurSize - sizeof(block_header) );
+                    memcpy( pNewBuf, pMemory, nCurSize - sizeof(block_header));
                     free( pMemory );
                 }
                 return pNewBuf;
@@ -1824,7 +1837,7 @@ namespace michael {
 
             void * pNew = alloc( nNewSize );
             if ( pNew ) {
-                memcpy( pNew, pMemory, pDesc->nBlockSize - sizeof(block_header) );
+                memcpy( pNew, pMemory, pDesc->nBlockSize - sizeof(block_header));
                 free( pMemory );
                 return pNew;
             }
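
The grow path of realloc shown above is the classic allocate, copy, free sequence: the old usable payload (block size minus the header) is copied into the new block, and the old block is released only if the new allocation succeeded. A standalone sketch with plain malloc/free standing in for the heap's alloc/free:

    // Sketch only: grow a block by allocating a larger one and copying the payload.
    #include <cstddef>
    #include <cstdlib>
    #include <cstring>

    void* grow_sketch( void* pOld, std::size_t nOldPayload, std::size_t nNewSize )
    {
        void* pNew = std::malloc( nNewSize );
        if ( pNew ) {
            std::memcpy( pNew, pOld, nOldPayload );  // old payload excludes the block header
            std::free( pOld );                       // only freed after a successful copy
        }
        return pNew;                                 // nullptr: the old block is left intact
    }
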
@@ -1859,7 +1872,7 @@ namespace michael {
 
 
             // Bound checking is only for our blocks
-            if ( !pBlock->isOSAllocated() ) {
+            if ( !pBlock->isOSAllocated()) {
                 // the block is allocated from our heap - bound checker is applicable
                 m_BoundChecker.make_trailer(
                     reinterpret_cast<byte *>(pRedirect + 1),