-//$$CDS-header$$
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
-#ifndef __CDS_MEMORY_MICHAEL_ALLOCATOR_TMPL_H
-#define __CDS_MEMORY_MICHAEL_ALLOCATOR_TMPL_H
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSLIB_MEMORY_MICHAEL_ALLOCATOR_TMPL_H
+#define CDSLIB_MEMORY_MICHAEL_ALLOCATOR_TMPL_H
/*
Michael allocator implementation
/// Allocates memory block of \p nSize bytes (\p malloc wrapper)
static void * alloc( size_t nSize )
{
- return ::malloc( nSize );
+ void * p = ::malloc( nSize );
+ return p;
}
/// Returns memory block to the system (\p free wrapper)
static void free( void * p )
#endif
struct free_list_traits : public cds::container::vyukov_queue::traits
{
- typedef opt::v::static_buffer<void *, FreeListCapacity> buffer;
+ typedef opt::v::initialized_static_buffer<void *, FreeListCapacity> buffer;
#ifdef _DEBUG
typedef make_null_ptr value_cleaner;
#endif
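// Note: the page free list configured above is a bounded Vyukov MPMC queue
// (an initialized static buffer of FreeListCapacity slots), so freed superblock
// pages can be cached and reused without going back to the underlying page
// allocator. In debug builds make_null_ptr presumably clears popped slots to
// help catch stale-pointer reuse.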
~page_cached_allocator()
{
void * pPage;
- while ( m_FreeList.pop(pPage) )
+ while ( m_FreeList.pop(pPage))
base_class::free( pPage );
}
//@endcond
void * alloc()
{
void * pPage;
- if ( !m_FreeList.pop( pPage ) )
+ if ( !m_FreeList.pop( pPage ))
pPage = base_class::alloc();
return pPage;
}
/// Gets details::size_class struct for size-class index \p nIndex
static const size_class * at( sizeclass_index nIndex )
{
- assert( nIndex < size() );
+ assert( nIndex < size());
return m_szClass + nIndex;
}
};
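// The size-class selector appears to work off the static table m_szClass: a request
// size is mapped to an index, and at() simply returns the table entry describing that
// class (block size, superblock size, and related parameters).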
/// Push superblock descriptor to free-list
void push( T * pDesc )
{
- assert( base_class::node_algorithms::inited( static_cast<item_hook *>(pDesc) ) );
+ assert( base_class::node_algorithms::inited( static_cast<item_hook *>(pDesc)));
auto_lock al(m_access);
base_class::push_back( *pDesc );
}
T * pop()
{
auto_lock al(m_access);
- if ( base_class::empty() )
+ if ( base_class::empty())
return nullptr;
T& rDesc = base_class::front();
base_class::pop_front();
- assert( base_class::node_algorithms::inited( static_cast<item_hook *>(&rDesc) ) );
+ assert( base_class::node_algorithms::inited( static_cast<item_hook *>(&rDesc)));
return &rDesc;
}
void push( T * pDesc )
{
auto_lock al( m_access );
- assert( base_class::node_algorithms::inited( static_cast<item_hook *>(pDesc) ) );
+ assert( base_class::node_algorithms::inited( static_cast<item_hook *>(pDesc)));
base_class::push_back( *pDesc );
}
T * pop()
{
auto_lock al( m_access );
- if ( base_class::empty() )
+ if ( base_class::empty())
return nullptr;
T& rDesc = base_class::front();
base_class::pop_front();
- assert( base_class::node_algorithms::inited( static_cast<item_hook *>(&rDesc) ) );
+ assert( base_class::node_algorithms::inited( static_cast<item_hook *>(&rDesc)));
return &rDesc;
}
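// Both descriptor lists above are intrusive doubly linked lists guarded by a plain
// lock (auto_lock on m_access). The inited() assertions check the hook state: a node
// must be unlinked before push, and pop_front() appears to re-initialize the hook,
// which the assertion after it relies on.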
assert(pDesc != nullptr);
auto_lock al( m_access );
// !inited(pDesc) is equivalent to "pDesc is linked into the partial list"
- if ( !base_class::node_algorithms::inited( static_cast<item_hook *>(pDesc) ) ) {
- base_class::erase( base_class::iterator_to( *pDesc ) );
+ if ( !base_class::node_algorithms::inited( static_cast<item_hook *>(pDesc))) {
+ base_class::erase( base_class::iterator_to( *pDesc ));
return true;
}
return false;
size_t nPageDeallocCount ; ///< Count of pages (superblocks) deallocated
size_t nDescAllocCount ; ///< Count of superblock descriptors allocated
size_t nDescFull ; ///< Count of full superblocks
- atomic64u_t nBytesAllocated ; ///< Count of allocated bytes (for heap managed memory blocks)
- atomic64u_t nBytesDeallocated ; ///< Count of deallocated bytes (for heap managed memory blocks)
+ uint64_t nBytesAllocated ; ///< Count of allocated bytes (for heap managed memory blocks)
+ uint64_t nBytesDeallocated ; ///< Count of deallocated bytes (for heap managed memory blocks)
size_t nSysAllocCount ; ///< Count of \p alloc and \p alloc_aligned calls (for large memory blocks allocated directly from the OS)
size_t nSysFreeCount ; ///< Count of \p free and \p free_aligned calls (for large memory blocks allocated directly from the OS)
- atomic64u_t nSysBytesAllocated ; ///< Count of allocated bytes (for large memory blocks that allocated directly from OS)
- atomic64_t nSysBytesDeallocated; ///< Count of deallocated bytes (for large memory blocks that allocated directly from OS)
+ uint64_t nSysBytesAllocated ; ///< Count of allocated bytes (for large memory blocks allocated directly from the OS)
+ int64_t nSysBytesDeallocated; ///< Count of deallocated bytes (for large memory blocks allocated directly from the OS)
// Internal contention indicators
/// CAS failure counter for updating the \p active field of the active block in the \p alloc_from_active internal Heap function
size_t getOSAllocSize() const
{
- assert( isOSAllocated() );
+ assert( isOSAllocated());
return nSize;
}
superblock_desc * desc()
{
- assert( !isOSAllocated() );
- return (pDesc.bits() & bitAligned) ? reinterpret_cast<block_header *>( pDesc.ptr() )->desc() : pDesc.ptr();
+ assert( !isOSAllocated());
+ return (pDesc.bits() & bitAligned) ? reinterpret_cast<block_header *>( pDesc.ptr())->desc() : pDesc.ptr();
}
block_header * begin()
{
- return (pDesc.bits() & bitAligned) ? reinterpret_cast<block_header *>( pDesc.ptr() ) : this;
+ return (pDesc.bits() & bitAligned) ? reinterpret_cast<block_header *>( pDesc.ptr()) : this;
}
bool isAligned() const
size_t getOSAllocSize() const
{
- assert( isOSAllocated() );
- return reinterpret_cast<uintptr_t>( pDesc.ptr() ) >> 2;
+ assert( isOSAllocated());
+ return reinterpret_cast<uintptr_t>( pDesc.ptr()) >> 2;
}
};
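// block_header packs its bookkeeping into a single marked pointer (pDesc):
// - aligned bit set: this header is a redirect; pDesc points to the real block_header
//   placed in front of the aligned payload (see begin() and desc()).
// - OS-allocated bit set: the block came straight from the OS; instead of a descriptor,
//   pDesc stores the allocation size shifted left by two, and getOSAllocSize() undoes
//   the shift.
// The first getOSAllocSize() overload above apparently belongs to another block_header
// variant that keeps an explicit nSize field instead of the packed encoding.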
pDesc = partialList.pop();
break;
}
- } while ( !pPartial.compare_exchange_weak( pDesc, nullptr, atomics::memory_order_release, atomics::memory_order_relaxed ) );
+ } while ( !pPartial.compare_exchange_weak( pDesc, nullptr, atomics::memory_order_release, atomics::memory_order_relaxed ));
- //assert( pDesc == nullptr || free_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_free_list_hook *>(pDesc) ));
- //assert( pDesc == nullptr || partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc) ) );
+ //assert( pDesc == nullptr || free_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_free_list_hook *>(pDesc)));
+ //assert( pDesc == nullptr || partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc)));
return pDesc;
}
void add_partial( superblock_desc * pDesc )
{
assert( pPartial != pDesc );
- //assert( partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc) ) );
+ //assert( partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc)));
superblock_desc * pCur = nullptr;
- if ( !pPartial.compare_exchange_strong(pCur, pDesc, atomics::memory_order_acq_rel, atomics::memory_order_relaxed) )
+ if ( !pPartial.compare_exchange_strong(pCur, pDesc, atomics::memory_order_acq_rel, atomics::memory_order_relaxed))
partialList.push( pDesc );
}
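// Partial superblocks are cached in two tiers: pPartial is a single-slot atomic cache
// and partialList is the overflow list. get_partial() first tries to claim the cached
// descriptor by CAS-ing pPartial to nullptr and falls back to partialList.pop();
// add_partial() does the reverse, installing into the empty slot or pushing to the list.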
/// Allocates large block from system memory
block_header * alloc_from_OS( size_t nSize )
{
- block_header * p = reinterpret_cast<block_header *>( m_LargeHeap.alloc( nSize ) );
+ block_header * p = reinterpret_cast<block_header *>( m_LargeHeap.alloc( nSize ));
m_OSAllocStat.incBytesAllocated( nSize );
p->setOSAllocated( nSize );
return p;
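// The loop below is the heart of alloc_from_active: the heap's 'active' word packs a
// superblock descriptor pointer together with a small credit count in its low bits.
// A thread first reserves a block by CAS-ing 'active' (taking one credit, or clearing
// the pointer when the credits run out), and only then pops a concrete block off the
// superblock by CAS-ing the descriptor's anchor (avail/count/state/tag). The tag is
// bumped on every update to avoid ABA on the anchor.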
while ( true ) {
++nCollision;
oldActive = pProcHeap->active.load(atomics::memory_order_acquire);
- if ( !oldActive.ptr() )
+ if ( !oldActive.ptr())
return nullptr;
unsigned int nCredits = oldActive.credits();
active_tag newActive ; // default = 0
assert( oldAnchor.avail < pDesc->nCapacity );
pAddr = pDesc->pSB + oldAnchor.avail * (unsigned long long) pDesc->nBlockSize;
+
+ // TSan reports a data race if the block previously contained atomic ops
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
newAnchor.avail = reinterpret_cast<free_block_header *>( pAddr )->nNextFree;
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
newAnchor.tag += 1;
if ( oldActive.credits() == 0 ) {
// block_header fields need not be set up here:
// they were set in alloc_from_new_superblock
assert( reinterpret_cast<block_header *>( pAddr )->desc() == pDesc );
- assert( !reinterpret_cast<block_header *>( pAddr )->isOSAllocated() );
- assert( !reinterpret_cast<block_header *>( pAddr )->isAligned() );
+ assert( !reinterpret_cast<block_header *>( pAddr )->isOSAllocated());
+ assert( !reinterpret_cast<block_header *>( pAddr )->isAligned());
return reinterpret_cast<block_header *>( pAddr );
}
newAnchor.count -= nMoreCredits + 1;
newAnchor.state = (nMoreCredits > 0) ? SBSTATE_ACTIVE : SBSTATE_FULL;
newAnchor.tag += 1;
- } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed) );
+ } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed));
if ( nCollision )
pProcHeap->stat.incPartialDescCASFailureCount( nCollision );
pAddr = pDesc->pSB + oldAnchor.avail * pDesc->nBlockSize;
newAnchor.avail = reinterpret_cast<free_block_header *>( pAddr )->nNextFree;
++newAnchor.tag;
- } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed) );
+ } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed));
if ( nCollision )
pProcHeap->stat.incPartialAnchorCASFailureCount( nCollision );
// block_header fields need not be set up here:
// they were set in alloc_from_new_superblock
assert( reinterpret_cast<block_header *>( pAddr )->desc() == pDesc );
- assert( !reinterpret_cast<block_header *>( pAddr )->isAligned() );
- assert( !reinterpret_cast<block_header *>( pAddr )->isOSAllocated() );
+ assert( !reinterpret_cast<block_header *>( pAddr )->isAligned());
+ assert( !reinterpret_cast<block_header *>( pAddr )->isOSAllocated());
return reinterpret_cast<block_header *>( pAddr );
}
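// The partial-superblock allocation above needs two anchor CAS loops: the first one
// reserves the block plus nMoreCredits extra credits and moves the superblock to
// ACTIVE (or FULL when no credits remain); the second one actually pops the reserved
// block off the superblock's free list. The collision counters feed the contention
// statistics declared earlier.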
/// Finds the appropriate processor heap for the selected size-class index
processor_heap * find_heap( typename sizeclass_selector::sizeclass_index nSizeClassIndex )
{
- assert( nSizeClassIndex < m_SizeClassSelector.size() );
+ assert( nSizeClassIndex < m_SizeClassSelector.size());
unsigned int nProcessorId = m_Topology.current_processor();
assert( nProcessorId < m_nProcessorCount );
while ( !pDesc ) {
processor_desc * pNewDesc = new_processor_desc( nProcessorId );
- if ( m_arrProcDesc[nProcessorId].compare_exchange_strong( pDesc, pNewDesc, atomics::memory_order_release, atomics::memory_order_relaxed ) ) {
+ if ( m_arrProcDesc[nProcessorId].compare_exchange_strong( pDesc, pNewDesc, atomics::memory_order_release, atomics::memory_order_relaxed )) {
pDesc = pNewDesc;
break;
}
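// Processor descriptors are created lazily: the first thread that runs on a given
// processor builds a processor_desc and tries to publish it with a strong CAS; if the
// CAS fails, another thread has already installed one and the loop picks that up instead.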
active_tag newActive;
newActive.set( pDesc, nCredits - 1 );
- if ( pProcHeap->active.compare_exchange_strong( nullActive, newActive, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
+ if ( pProcHeap->active.compare_exchange_strong( nullActive, newActive, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ))
return;
// Someone installed another active superblock.
static_assert( (sizeof(processor_heap) % c_nAlignment) == 0, "sizeof(processor_heap) error" );
- pDesc = new( m_AlignedHeap.alloc( szTotal, c_nAlignment ) ) processor_desc;
+ // TSan false positive: the new descriptor will be linked later with a release fence
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+
+ pDesc = new( m_AlignedHeap.alloc( szTotal, c_nAlignment )) processor_desc;
pDesc->pageHeaps = reinterpret_cast<page_heap *>( pDesc + 1 );
for ( size_t i = 0; i < nPageHeapCount; ++i )
else
pProcHeap->nPageIdx = pProcHeap->pSizeClass->nSBSizeIdx;
}
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
return pDesc;
}
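// new_processor_desc() places the processor_desc and its page_heap array in one aligned
// allocation: pageHeaps points just past the descriptor object, which is why teardown
// has to run the destructors explicitly before releasing the memory through m_AlignedHeap.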
void free_processor_heap( processor_heap * pProcHeap )
{
- if ( pProcHeap->nPageIdx == processor_heap::c_nPageSelfAllocation ) {
- superblock_desc * pDesc;
-
- for ( pDesc = pProcHeap->partialList.pop(); pDesc; pDesc = pProcHeap->partialList.pop()) {
- free( pDesc->pSB );
- m_AlignedHeap.free( pDesc );
- }
+ assert( pProcHeap->nPageIdx != processor_heap::c_nPageSelfAllocation );
- superblock_desc * pPartial = pProcHeap->pPartial.load(atomics::memory_order_relaxed);
- if ( pPartial ) {
- free( pPartial->pSB );
- m_AlignedHeap.free( pPartial );
- }
+ page_heap& pageHeap = pProcHeap->pProcDesc->pageHeaps[pProcHeap->nPageIdx];
+ superblock_desc * pDesc;
- pDesc = pProcHeap->active.load(atomics::memory_order_relaxed).ptr();
- if ( pDesc ) {
- free( pDesc->pSB );
- m_AlignedHeap.free( pDesc );
- }
+ for ( pDesc = pProcHeap->partialList.pop(); pDesc; pDesc = pProcHeap->partialList.pop()) {
+ pageHeap.free( pDesc->pSB );
+ m_AlignedHeap.free( pDesc );
}
- else {
- page_heap& pageHeap = pProcHeap->pProcDesc->pageHeaps[pProcHeap->nPageIdx];
- superblock_desc * pDesc;
-
- for ( pDesc = pProcHeap->partialList.pop(); pDesc; pDesc = pProcHeap->partialList.pop()) {
- pageHeap.free( pDesc->pSB );
- m_AlignedHeap.free( pDesc );
- }
- superblock_desc * pPartial = pProcHeap->pPartial.load(atomics::memory_order_relaxed);
- if ( pPartial ) {
- pageHeap.free( pPartial->pSB );
- m_AlignedHeap.free( pPartial );
- }
+ superblock_desc * pPartial = pProcHeap->pPartial.load(atomics::memory_order_relaxed);
+ if ( pPartial ) {
+ pageHeap.free( pPartial->pSB );
+ m_AlignedHeap.free( pPartial );
+ }
- pDesc = pProcHeap->active.load(atomics::memory_order_relaxed).ptr();
- if ( pDesc ) {
- pageHeap.free( pDesc->pSB );
- m_AlignedHeap.free( pDesc );
- }
+ pDesc = pProcHeap->active.load(atomics::memory_order_relaxed).ptr();
+ if ( pDesc ) {
+ pageHeap.free( pDesc->pSB );
+ m_AlignedHeap.free( pDesc );
}
- pProcHeap->~processor_heap();
}
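// free_processor_heap() handles only heaps whose superblocks come from the page heaps
// (the self-allocation case is excluded by the assertion above): it drains the partial
// list, the cached partial descriptor and the active superblock, returning each
// superblock to the per-processor page heap and each descriptor to m_AlignedHeap.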
/// Frees processor descriptor
{
const size_t nPageHeapCount = m_SizeClassSelector.pageTypeCount();
- for (unsigned int j = 0; j < m_SizeClassSelector.size(); ++j )
- free_processor_heap( pDesc->arrProcHeap + j );
+ {
+ processor_heap * const pProcHeapEnd = pDesc->arrProcHeap + m_SizeClassSelector.size();
+
+ // free superblocks only for heaps that do not self-allocate their pages
+ for ( processor_heap * pProcHeap = pDesc->arrProcHeap; pProcHeap < pProcHeapEnd; ++pProcHeap ) {
+ if ( pProcHeap->nPageIdx != processor_heap::c_nPageSelfAllocation )
+ free_processor_heap( pProcHeap );
+
+ pProcHeap->~processor_heap();
+ }
+ }
for ( superblock_desc * pSBDesc = pDesc->listSBDescFree.pop(); pSBDesc; pSBDesc = pDesc->listSBDescFree.pop())
m_AlignedHeap.free( pSBDesc );
for (size_t i = 0; i < nPageHeapCount; ++i )
(pDesc->pageHeaps + i)->page_heap::~page_heap();
- //m_IntHeap.free( pDesc->pageHeaps );
pDesc->pageHeaps = nullptr;
pDesc->processor_desc::~processor_desc();
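// Teardown order for a processor descriptor: per-size-class processor heaps first
// (their superblocks are freed only for heaps that do not self-allocate pages), then
// the cached free superblock descriptors, then the page heaps, and finally the
// processor_desc object itself, presumably before the enclosing aligned allocation
// is released.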
anchor_tag anchor;
superblock_desc * pDesc = pProcHeap->pProcDesc->listSBDescFree.pop();
if ( pDesc == nullptr ) {
- pDesc = new( m_AlignedHeap.alloc(sizeof(superblock_desc), c_nAlignment ) ) superblock_desc;
+ pDesc = new( m_AlignedHeap.alloc(sizeof(superblock_desc), c_nAlignment )) superblock_desc;
assert( (uintptr_t(pDesc) & (c_nAlignment - 1)) == 0 );
anchor = pDesc->anchor.load( atomics::memory_order_relaxed );
pDesc->pProcHeap->stat.incBlockDeallocated();
processor_desc * pProcDesc = pDesc->pProcHeap->pProcDesc;
if ( pDesc->pSB ) {
- if ( pDesc->pProcHeap->nPageIdx == processor_heap::c_nPageSelfAllocation ) {
+ if ( pDesc->pProcHeap->nPageIdx == processor_heap::c_nPageSelfAllocation )
free( pDesc->pSB );
- }
- else {
+ else
pProcDesc->pageHeaps[pDesc->pProcHeap->nPageIdx].free( pDesc->pSB );
- }
}
pProcDesc->listSBDescFree.push( pDesc );
}
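// When a superblock is released here, its memory goes back to where it came from
// (the general free() path for self-allocated pages, otherwise the page heap for this
// heap's page index), while the descriptor is recycled through the per-processor
// listSBDescFree rather than being destroyed.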
if ( nSizeClassIndex == sizeclass_selector::c_nNoSizeClass ) {
return alloc_from_OS( nSize );
}
- assert( nSizeClassIndex < m_SizeClassSelector.size() );
+ assert( nSizeClassIndex < m_SizeClassSelector.size());
block_header * pBlock;
processor_heap * pProcHeap;
block_header * pBlock = int_alloc( nSize + sizeof(block_header) + bound_checker::trailer_size );
// Bound checking is only for our blocks
- if ( !pBlock->isOSAllocated() ) {
+ if ( !pBlock->isOSAllocated()) {
// the block is allocated from our heap - bound checker is applicable
m_BoundChecker.make_trailer(
reinterpret_cast<byte *>(pBlock + 1),
);
}
+ CDS_TSAN_ANNOTATE_NEW_MEMORY( pBlock + 1, nSize );
return pBlock + 1;
}
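// Allocation path summary: requests above the largest size class go straight to the OS
// large-block heap; everything else comes from the per-processor heap for the selected
// size class. The user pointer is pBlock + 1 (the payload immediately follows the
// block_header), the optional bound checker writes its trailer behind the payload, and
// the TSan annotation marks the returned bytes as freshly allocated.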
block_header * pRedirect = (reinterpret_cast<block_header *>( pMemory ) - 1);
block_header * pBlock = pRedirect->begin();
- if ( pBlock->isOSAllocated() ) {
+ if ( pBlock->isOSAllocated()) {
// Block has been allocated from OS
- m_OSAllocStat.incBytesDeallocated( pBlock->getOSAllocSize() );
+ m_OSAllocStat.incBytesDeallocated( pBlock->getOSAllocSize());
m_LargeHeap.free( pBlock );
return;
}
- assert( !pBlock->isAligned() );
+ assert( !pBlock->isAligned());
superblock_desc * pDesc = pBlock->desc();
m_BoundChecker.check_bounds(
}
else
newAnchor.count += 1;
- } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed ) );
+ } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed ));
pProcHeap->stat.incFreeCount();
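// The deallocation path mirrors allocation: the block_header sits immediately before
// the user pointer, begin() follows an aligned redirect back to the real header,
// OS-allocated blocks are handed back to the large heap directly, and ordinary blocks
// are pushed back onto their superblock's free list by CAS-ing the descriptor's anchor
// and bumping its free count.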
block_header * pBlock = reinterpret_cast<block_header *>( pMemory ) - 1;
// Reallocation of aligned block is not possible
- if ( pBlock->isAligned() ) {
+ if ( pBlock->isAligned()) {
assert( false );
return nullptr;
}
- if ( pBlock->isOSAllocated() ) {
+ if ( pBlock->isOSAllocated()) {
// The block has been allocated from OS
size_t nCurSize = pBlock->getOSAllocSize();
// Grow block size
void * pNewBuf = alloc( nOrigSize );
if ( pNewBuf ) {
- memcpy( pNewBuf, pMemory, nCurSize - sizeof(block_header) );
+ memcpy( pNewBuf, pMemory, nCurSize - sizeof(block_header));
free( pMemory );
}
return pNewBuf;
void * pNew = alloc( nNewSize );
if ( pNew ) {
- memcpy( pNew, pMemory, pDesc->nBlockSize - sizeof(block_header) );
+ memcpy( pNew, pMemory, pDesc->nBlockSize - sizeof(block_header));
free( pMemory );
return pNew;
}
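// Reallocation constraints: aligned blocks cannot be reallocated at all (hence the
// assert above), OS-allocated blocks are grown by alloc + memcpy + free, and in-heap
// blocks take the same copy path only when the requested size no longer fits the
// block's size class; otherwise the existing block can presumably be returned unchanged.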
// Bound checking is only for our blocks
- if ( !pBlock->isOSAllocated() ) {
+ if ( !pBlock->isOSAllocated()) {
// the block is allocated from our heap - bound checker is applicable
m_BoundChecker.make_trailer(
reinterpret_cast<byte *>(pRedirect + 1),
}}} // namespace cds::memory::michael
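// A minimal usage sketch (hypothetical instantiation; the actual template parameters of
// Heap are declared earlier in this header and may require explicit options):
//
//   cds::memory::michael::Heap<> heap;      // assumed defaulted options
//   void * p = heap.alloc( 128 );           // block_header is hidden in front of p
//   p = heap.realloc( p, 256 );             // may copy into a larger size class
//   heap.free( p );                         // returns the block to its superblock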
-#endif // __CDS_MEMORY_MICHAEL_ALLOCATOR_TMPL_H
+#endif // CDSLIB_MEMORY_MICHAEL_ALLOCATOR_TMPL_H