2011.01.02 khizmax Created
*/
+#include <stdlib.h>
+#include <mutex> // unique_lock
#include <cds/init.h>
#include <cds/memory/michael/options.h>
#include <cds/memory/michael/bound_check.h>
#include <cds/user_setup/cache_line.h>
#include <cds/details/lib.h>
-#include <stdlib.h>
#include <boost/intrusive/list.hpp>
namespace cds {
}
};
#endif
-
- typedef container::VyukovMPMCCycleQueue<
- void *,
- opt::buffer< opt::v::static_buffer<void *, FreeListCapacity> >
+ struct free_list_traits : public cds::container::vyukov_queue::traits
+ {
+ typedef opt::v::static_buffer<void *, FreeListCapacity> buffer;
#ifdef _DEBUG
- , opt::value_cleaner< make_null_ptr >
+ typedef make_null_ptr value_cleaner;
#endif
- > free_list;
+ };
+ typedef container::VyukovMPMCCycleQueue< void *, free_list_traits > free_list;
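// The same traits can also be assembled with the option-based metafunction
// (a sketch; assumes this libcds version provides vyukov_queue::make_traits):
//
//   typedef container::VyukovMPMCCycleQueue< void *,
//       typename cds::container::vyukov_queue::make_traits<
//           opt::buffer< opt::v::static_buffer< void *, FreeListCapacity > >
//       >::type
//   > free_list_alt;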
free_list m_FreeList;
//@endcond
typedef details::free_list_locked_hook item_hook;
typedef Lock lock_type;
protected:
- typedef cds::lock::scoped_lock<lock_type> auto_lock;
+ typedef std::unique_lock<lock_type> auto_lock;
mutable lock_type m_access;
//@endcond
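// Note: std::unique_lock<lock_type> is a drop-in replacement for the removed
// cds::lock::scoped_lock here: auto_lock al( m_access ) locks on construction
// and unlocks on destruction, so call sites need no changes.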
typedef details::partial_list_locked_hook item_hook;
typedef Lock lock_type;
protected:
- typedef cds::lock::scoped_lock<lock_type> auto_lock;
+ typedef std::unique_lock<lock_type> auto_lock;
mutable lock_type m_access;
//@endcond
\endcode
*/
-#ifdef CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT
template <typename... Options>
-#else
- template <
- typename O1 = opt::none,
- typename O2 = opt::none,
- typename O3 = opt::none,
- typename O4 = opt::none,
- typename O5 = opt::none,
- typename O6 = opt::none,
- typename O7 = opt::none,
- typename O8 = opt::none,
- typename O9 = opt::none,
- typename O10= opt::none
- >
-#endif
class Heap {
protected:
//@cond
-#ifdef CDS_CXX11_VARIADIC_TEMPLATE_SUPPORT
typedef typename opt::make_options<default_options, Options...>::type options;
-#else
- typedef typename opt::make_options<default_options, O1, O2, O3, O4, O5, O6, O7, O8, O9, O10 >::type options;
-#endif
//@endcond
//@cond
: public options::free_list::item_hook
, public options::partial_list::item_hook
{
- CDS_ATOMIC::atomic<anchor_tag> anchor ; ///< anchor, see \ref anchor_tag
+ atomics::atomic<anchor_tag> anchor ; ///< anchor, see \ref anchor_tag
byte * pSB ; ///< ptr to superblock
processor_heap_base * pProcHeap ; ///< pointer to owner processor heap
unsigned int nBlockSize ; ///< block size in bytes
/// Processor heap's \p active field
/**
The \p active field in the processor heap structure is primarily a pointer to the descriptor
- of the active superblock owned by the processor heap. If the value of \p active is not \p NULL, it is
+ of the active superblock owned by the processor heap. If the value of \p active is not \p nullptr, it is
guaranteed that the active superblock has at least one block available for reservation.
Since the addresses of superblock descriptors can be guaranteed to be aligned to some power
of 2 (e.g., 64), as an optimization, we can carve a credits subfield to hold the number
of blocks available for reservation in the active superblock less one. That is, if the number
of credits is n, then the active superblock contains n+1 blocks available for reservation
through the \p active field. Note that the number of blocks in a superblock is not limited
to the maximum number of reservations that can be held in the credits subfield. In a typical malloc operation
- (i.e., when \p active != \p NULL and \p credits > 0), the thread reads \p active and then
+ (i.e., when \p active != \p nullptr and \p credits > 0), the thread reads \p active and then
atomically decrements credits while validating that the active superblock is still valid.
*/
class active_tag {
, nCredits(0)
{}
-# ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT
- active_tag( active_tag const& ) CDS_NOEXCEPT_DEFAULTED = default;
- ~active_tag() CDS_NOEXCEPT_DEFAULTED = default;
- active_tag& operator=(active_tag const& ) CDS_NOEXCEPT_DEFAULTED = default;
-# if defined(CDS_MOVE_SEMANTICS_SUPPORT) && !defined(CDS_DISABLE_DEFAULT_MOVE_CTOR)
- active_tag( active_tag&& ) CDS_NOEXCEPT_DEFAULTED = default;
- active_tag& operator=(active_tag&&) CDS_NOEXCEPT_DEFAULTED = default;
+ active_tag( active_tag const& ) CDS_NOEXCEPT = default;
+ ~active_tag() CDS_NOEXCEPT = default;
+ active_tag& operator=(active_tag const& ) CDS_NOEXCEPT = default;
+# if !defined(CDS_DISABLE_DEFAULT_MOVE_CTOR)
+ active_tag( active_tag&& ) CDS_NOEXCEPT = default;
+ active_tag& operator=(active_tag&&) CDS_NOEXCEPT = default;
# endif
-# endif
/// Returns pointer to superblock descriptor
superblock_desc * ptr() const
active_tag() CDS_NOEXCEPT
: pDesc( nullptr )
{}
-# ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT
// Clang 3.1: error: first argument to atomic operation must be a pointer to a trivially-copyable type
- //active_tag() CDS_NOEXCEPT_DEFAULTED = default;
- active_tag( active_tag const& ) CDS_NOEXCEPT_DEFAULTED = default;
- ~active_tag() CDS_NOEXCEPT_DEFAULTED = default;
- active_tag& operator=(active_tag const&) CDS_NOEXCEPT_DEFAULTED = default;
-# if defined(CDS_MOVE_SEMANTICS_SUPPORT) && !defined(CDS_DISABLE_DEFAULT_MOVE_CTOR)
- active_tag( active_tag&& ) CDS_NOEXCEPT_DEFAULTED = default;
- active_tag& operator=(active_tag&&) CDS_NOEXCEPT_DEFAULTED = default;
+ //active_tag() CDS_NOEXCEPT = default;
+ active_tag( active_tag const& ) CDS_NOEXCEPT = default;
+ ~active_tag() CDS_NOEXCEPT = default;
+ active_tag& operator=(active_tag const&) CDS_NOEXCEPT = default;
+# if !defined(CDS_DISABLE_DEFAULT_MOVE_CTOR)
+ active_tag( active_tag&& ) CDS_NOEXCEPT = default;
+ active_tag& operator=(active_tag&&) CDS_NOEXCEPT = default;
# endif
-# endif
superblock_desc * ptr() const
{
return pDesc.ptr();
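// A minimal sketch of the credits trick described above (illustrative only;
// the real active_tag layout may differ). With superblock descriptors aligned
// to 64 bytes, the low 6 bits of the address can hold up to 63 credits:
//
//    uintptr_t pack( superblock_desc * p, unsigned int nCredits ) {
//        assert( nCredits < 64 );  // credits must fit into the alignment bits
//        return reinterpret_cast<uintptr_t>( p ) | nCredits;
//    }
//    superblock_desc * unpack_ptr( uintptr_t v ) {
//        return reinterpret_cast<superblock_desc *>( v & ~uintptr_t( 63 ));
//    }
//    unsigned int unpack_credits( uintptr_t v ) {
//        return static_cast<unsigned int>( v & 63 );
//    }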
/// Processor heap
struct processor_heap_base
{
- CDS_DATA_ALIGNMENT(8) CDS_ATOMIC::atomic<active_tag> active; ///< pointer to the descriptor of active superblock owned by processor heap
+ CDS_DATA_ALIGNMENT(8) atomics::atomic<active_tag> active; ///< pointer to the descriptor of active superblock owned by processor heap
processor_desc * pProcDesc ; ///< pointer to parent processor descriptor
const size_class * pSizeClass ; ///< pointer to size class
- CDS_ATOMIC::atomic<superblock_desc *> pPartial ; ///< pointer to partial filled superblock (may be NULL)
+ atomics::atomic<superblock_desc *> pPartial ; ///< pointer to a partially filled superblock (may be \p nullptr)
partial_list partialList ; ///< list of partially filled superblocks owned by the processor heap
unsigned int nPageIdx ; ///< page size-class index; the value \ref c_nPageSelfAllocation means a "small page"
/// Get partial superblock owned by the processor heap
superblock_desc * get_partial()
{
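// Take exclusive ownership of the cached partial superblock by CAS-ing
// pPartial to nullptr; if nothing is cached, fall back to the lock-based
// partial list.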
- superblock_desc * pDesc = pPartial.load(CDS_ATOMIC::memory_order_acquire);
+ superblock_desc * pDesc = pPartial.load(atomics::memory_order_acquire);
do {
if ( !pDesc ) {
pDesc = partialList.pop();
break;
}
- } while ( !pPartial.compare_exchange_weak( pDesc, nullptr, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) );
+ } while ( !pPartial.compare_exchange_weak( pDesc, nullptr, atomics::memory_order_release, atomics::memory_order_relaxed ) );
- //assert( pDesc == NULL || free_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_free_list_hook *>(pDesc) ));
- //assert( pDesc == NULL || partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc) ) );
+ //assert( pDesc == nullptr || free_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_free_list_hook *>(pDesc) ));
+ //assert( pDesc == nullptr || partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc) ) );
return pDesc;
}
//assert( partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc) ) );
superblock_desc * pCur = nullptr;
- if ( !pPartial.compare_exchange_strong(pCur, pDesc, CDS_ATOMIC::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed) )
+ if ( !pPartial.compare_exchange_strong(pCur, pDesc, atomics::memory_order_acq_rel, atomics::memory_order_relaxed) )
partialList.push( pDesc );
}
system_heap m_LargeHeap ; ///< Heap for large blocks
aligned_heap m_AlignedHeap ; ///< Internal aligned heap
sizeclass_selector m_SizeClassSelector ; ///< Size-class selector
- CDS_ATOMIC::atomic<processor_desc *> * m_arrProcDesc ; ///< array of pointers to the processor descriptors
+ atomics::atomic<processor_desc *> * m_arrProcDesc ; ///< array of pointers to the processor descriptors
unsigned int m_nProcessorCount ; ///< Processor count
bound_checker m_BoundChecker ; ///< Bound checker
// Reserve block
while ( true ) {
++nCollision;
- oldActive = pProcHeap->active.load(CDS_ATOMIC::memory_order_acquire);
+ oldActive = pProcHeap->active.load(atomics::memory_order_acquire);
if ( !oldActive.ptr() )
return nullptr;
unsigned int nCredits = oldActive.credits();
newActive = oldActive;
newActive.credits( nCredits - 1 );
}
- if ( pProcHeap->active.compare_exchange_strong( oldActive, newActive, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ if ( pProcHeap->active.compare_exchange_strong( oldActive, newActive, atomics::memory_order_release, atomics::memory_order_relaxed ))
break;
}
nCollision = -1;
do {
++nCollision;
- newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire);
+ newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
assert( oldAnchor.avail < pDesc->nCapacity );
pAddr = pDesc->pSB + oldAnchor.avail * (unsigned long long) pDesc->nBlockSize;
newAnchor.count -= nMoreCredits;
}
}
- } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+ } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed ));
if ( nCollision )
pProcHeap->stat.incActiveAnchorCASFailureCount( nCollision );
do {
++nCollision;
- newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire);
+ newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
if ( oldAnchor.state == SBSTATE_EMPTY ) {
free_superblock( pDesc );
goto retry;
newAnchor.count -= nMoreCredits + 1;
newAnchor.state = (nMoreCredits > 0) ? SBSTATE_ACTIVE : SBSTATE_FULL;
newAnchor.tag += 1;
- } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed) );
+ } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed) );
if ( nCollision )
pProcHeap->stat.incPartialDescCASFailureCount( nCollision );
do {
++nCollision;
- newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire);
+ newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
assert( oldAnchor.avail < pDesc->nCapacity );
pAddr = pDesc->pSB + oldAnchor.avail * pDesc->nBlockSize;
newAnchor.avail = reinterpret_cast<free_block_header *>( pAddr )->nNextFree;
++newAnchor.tag;
- } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed) );
+ } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed) );
if ( nCollision )
pProcHeap->stat.incPartialAnchorCASFailureCount( nCollision );
assert( pDesc != nullptr );
pDesc->pSB = new_superblock_buffer( pProcHeap );
- anchor_tag anchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_relaxed);
+ anchor_tag anchor = pDesc->anchor.load(atomics::memory_order_relaxed);
anchor.tag += 1;
// Make single-linked list of free blocks in superblock
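// Illustrative sketch of this step (the exact loop is elided here): every
// free block stores the index of the next free block in its header, so
// anchor.avail always names the next block available for reservation.
//
//    byte * p = pDesc->pSB;
//    for ( unsigned int i = 1; i < pDesc->nCapacity; ++i, p += pDesc->nBlockSize )
//        reinterpret_cast<free_block_header *>( p )->nNextFree = i;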
anchor.count = pDesc->nCapacity - 1 - (newActive.credits() + 1);
anchor.state = SBSTATE_ACTIVE;
- pDesc->anchor.store(anchor, CDS_ATOMIC::memory_order_relaxed);
+ pDesc->anchor.store(anchor, atomics::memory_order_relaxed);
active_tag curActive;
- if ( pProcHeap->active.compare_exchange_strong( curActive, newActive, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
+ if ( pProcHeap->active.compare_exchange_strong( curActive, newActive, atomics::memory_order_release, atomics::memory_order_relaxed )) {
pProcHeap->stat.incAllocFromNew();
//reinterpret_cast<block_header *>( pDesc->pSB )->set( pDesc, 0 );
return reinterpret_cast<block_header *>( pDesc->pSB );
if ( nProcessorId >= m_nProcessorCount )
nProcessorId = 0;
- processor_desc * pDesc = m_arrProcDesc[ nProcessorId ].load( CDS_ATOMIC::memory_order_relaxed );
+ processor_desc * pDesc = m_arrProcDesc[ nProcessorId ].load( atomics::memory_order_relaxed );
while ( !pDesc ) {
processor_desc * pNewDesc = new_processor_desc( nProcessorId );
- if ( m_arrProcDesc[nProcessorId].compare_exchange_strong( pDesc, pNewDesc, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ) {
+ if ( m_arrProcDesc[nProcessorId].compare_exchange_strong( pDesc, pNewDesc, atomics::memory_order_release, atomics::memory_order_relaxed ) ) {
pDesc = pNewDesc;
break;
}
active_tag newActive;
newActive.set( pDesc, nCredits - 1 );
- if ( pProcHeap->active.compare_exchange_strong( nullActive, newActive, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ) )
+ if ( pProcHeap->active.compare_exchange_strong( nullActive, newActive, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
return;
// Someone installed another active superblock.
anchor_tag newAnchor;
do {
- newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire);
+ newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
newAnchor.count += nCredits;
newAnchor.state = SBSTATE_PARTIAL;
- } while ( !pDesc->anchor.compare_exchange_weak( oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+ } while ( !pDesc->anchor.compare_exchange_weak( oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed ));
pDesc->pProcHeap->add_partial( pDesc );
}
/// Allocates new processor descriptor
processor_desc * new_processor_desc( unsigned int nProcessorId )
{
+ CDS_UNUSED( nProcessorId );
processor_desc * pDesc;
const size_t nPageHeapCount = m_SizeClassSelector.pageTypeCount();
m_AlignedHeap.free( pDesc );
}
- superblock_desc * pPartial = pProcHeap->pPartial.load(CDS_ATOMIC::memory_order_relaxed);
+ superblock_desc * pPartial = pProcHeap->pPartial.load(atomics::memory_order_relaxed);
if ( pPartial ) {
free( pPartial->pSB );
m_AlignedHeap.free( pPartial );
}
- pDesc = pProcHeap->active.load(CDS_ATOMIC::memory_order_relaxed).ptr();
+ pDesc = pProcHeap->active.load(atomics::memory_order_relaxed).ptr();
if ( pDesc ) {
free( pDesc->pSB );
m_AlignedHeap.free( pDesc );
m_AlignedHeap.free( pDesc );
}
- superblock_desc * pPartial = pProcHeap->pPartial.load(CDS_ATOMIC::memory_order_relaxed);
+ superblock_desc * pPartial = pProcHeap->pPartial.load(atomics::memory_order_relaxed);
if ( pPartial ) {
pageHeap.free( pPartial->pSB );
m_AlignedHeap.free( pPartial );
}
- pDesc = pProcHeap->active.load(CDS_ATOMIC::memory_order_relaxed).ptr();
+ pDesc = pProcHeap->active.load(atomics::memory_order_relaxed).ptr();
if ( pDesc ) {
pageHeap.free( pDesc->pSB );
m_AlignedHeap.free( pDesc );
pDesc = new( m_AlignedHeap.alloc(sizeof(superblock_desc), c_nAlignment ) ) superblock_desc;
assert( (uptr_atomic_t(pDesc) & (c_nAlignment - 1)) == 0 );
- anchor = pDesc->anchor.load( CDS_ATOMIC::memory_order_relaxed );
+ anchor = pDesc->anchor.load( atomics::memory_order_relaxed );
anchor.tag = 0;
- pDesc->anchor.store( anchor, CDS_ATOMIC::memory_order_relaxed );
+ pDesc->anchor.store( anchor, atomics::memory_order_relaxed );
pProcHeap->stat.incDescAllocCount();
}
assert( pDesc->nCapacity <= c_nMaxBlockInSuperBlock );
pDesc->pProcHeap = pProcHeap;
- anchor = pDesc->anchor.load( CDS_ATOMIC::memory_order_relaxed );
+ anchor = pDesc->anchor.load( atomics::memory_order_relaxed );
anchor.avail = 1;
- pDesc->anchor.store( anchor, CDS_ATOMIC::memory_order_relaxed );
+ pDesc->anchor.store( anchor, atomics::memory_order_relaxed );
return pDesc;
}
m_nProcessorCount = m_Topology.processor_count();
m_arrProcDesc = new( m_AlignedHeap.alloc(sizeof(processor_desc *) * m_nProcessorCount, c_nAlignment ))
- CDS_ATOMIC::atomic<processor_desc *>[ m_nProcessorCount ];
+ atomics::atomic<processor_desc *>[ m_nProcessorCount ];
memset( m_arrProcDesc, 0, sizeof(processor_desc *) * m_nProcessorCount ) ; // ?? memset for atomic<>
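// A memset-free alternative sketch (assumes no other thread can observe the
// array before the constructor returns, so relaxed stores suffice):
//
//    for ( unsigned int i = 0; i < m_nProcessorCount; ++i )
//        m_arrProcDesc[i].store( nullptr, atomics::memory_order_relaxed );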
}
~Heap()
{
for ( unsigned int i = 0; i < m_nProcessorCount; ++i ) {
- processor_desc * pDesc = m_arrProcDesc[i].load(CDS_ATOMIC::memory_order_relaxed);
+ processor_desc * pDesc = m_arrProcDesc[i].load(atomics::memory_order_relaxed);
if ( pDesc )
free_processor_desc( pDesc );
}
pProcHeap->stat.incDeallocatedBytes( pDesc->nBlockSize );
- oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire);
+ oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
do {
newAnchor = oldAnchor;
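// Link the freed block at the head of the superblock's free list:
// the block records the current head index (oldAnchor.avail) as its
// next-free link, making it ready to become the new head.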
reinterpret_cast<free_block_header *>( pBlock )->nNextFree = oldAnchor.avail;
}
else
newAnchor.count += 1;
- } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) );
+ } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed ) );
pProcHeap->stat.incFreeCount();
/// Reallocate memory block
/**
If \p nNewSize is zero, then the block pointed to by \p pMemory is freed;
- the return value is \p NULL, and \p pMemory is left pointing at a freed block.
+ the return value is \p nullptr, and \p pMemory is left pointing at a freed block.
If there is not enough available memory to expand the block to the given size,
- the original block is left unchanged, and \p NULL is returned.
+ the original block is left unchanged, and \p nullptr is returned.
An aligned memory block cannot be realloc'ed: if \p pMemory has been allocated by \ref alloc_aligned,
- then the return value is \p NULL and the original block is left unchanged.
+ then the return value is \p nullptr and the original block is left unchanged.
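\code
// Illustrative usage sketch (alloc/free member names as used elsewhere in this class):
void * p = heap.alloc( 100 );
void * q = heap.realloc( p, 200 ); // nullptr on failure
if ( q )
    p = q;      // success: q is the (possibly moved) block
heap.free( p ); // on failure, the original block is still valid
\endcode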
*/
void * realloc(
void * pMemory, ///< Pointer to previously allocated memory block
{
size_t nProcHeapCount = m_SizeClassSelector.size();
for ( unsigned int nProcessor = 0; nProcessor < m_nProcessorCount; ++nProcessor ) {
- processor_desc * pProcDesc = m_arrProcDesc[nProcessor].load(CDS_ATOMIC::memory_order_relaxed);
+ processor_desc * pProcDesc = m_arrProcDesc[nProcessor].load(atomics::memory_order_relaxed);
if ( pProcDesc ) {
for ( unsigned int i = 0; i < nProcHeapCount; ++i ) {
processor_heap_base * pProcHeap = pProcDesc->arrProcHeap + i;