: arr_{ nullptr }
{}
- constexpr size_t capacity() const
+ static constexpr size_t capacity()
{
return c_nCapacity;
}
: next_( nullptr )
{}
- retired_ptr* first()
+ retired_ptr* first() const
{
- return reinterpret_cast<retired_ptr*>( this + 1 );
+ return reinterpret_cast<retired_ptr*>( const_cast<retired_block*>( this ) + 1 );
}
- retired_ptr* last()
+ retired_ptr* last() const
{
return first() + c_capacity;
}
atomics::atomic<unsigned int> sync_; ///< dummy var to introduce synchronizes-with relationship between threads
char pad2_[cds::c_nCacheLineSize];
+ // CppCheck warning: pad1_ and pad2_ are uninitialized in the ctor
+ // cppcheck-suppress uninitMemberVar
thread_data( guard* guards, size_t guard_count )
: hazards_( guards, guard_count )
, sync_( 0 )
}
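For reference, a minimal standalone sketch (not part of the patch) of the synchronizes-with idea behind sync_, written with a plain std::atomic and illustrative names: a release store on the dummy atomic in one thread pairs with an acquire load in another, which makes the earlier plain writes visible.

    #include <atomic>

    std::atomic<unsigned int> sync{ 0 };
    int payload = 0;

    void writer()
    {
        payload = 42;                                  // plain (non-atomic) write
        sync.store( 1, std::memory_order_release );    // release: publishes payload
    }

    void reader()
    {
        while ( sync.load( std::memory_order_acquire ) == 0 )
            ;   // the acquire load that reads 1 synchronizes-with the release store
        // payload == 42 is now guaranteed to be visible here
    }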
private:
- CDS_EXPORT_API smr(
+ CDS_EXPORT_API explicit smr(
size_t nInitialHazardPtrCount
);
private:
//@cond
CDS_EXPORT_API thread_record* create_thread_data();
- CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );
+ static CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );
/// Allocates Hazard Pointer SMR thread private data
CDS_EXPORT_API thread_record* alloc_thread_data();
When needed, the local thread's guard pool grows automatically from the common pool.
When the thread terminates, its guard pool is returned to the common GC pool.
*/
- DHP(
+ explicit DHP(
size_t nInitialHazardPtrCount = 16 ///< Initial number of hazard pointers per thread
)
{
free_head_ = gList;
}
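As a hedged usage sketch (not part of the patch), the now-explicit DHP constructor is typically invoked once per process to create the SMR singleton; cds::Initialize/Terminate and cds::threading::Manager::attachThread/detachThread are the usual libcds entry points, and the value 32 below is arbitrary.

    #include <cds/init.h>
    #include <cds/gc/dhp.h>
    #include <cds/threading/model.h>

    int main()
    {
        cds::Initialize();                              // initialize the library infrastructure
        {
            cds::gc::DHP gc( 32 );                      // explicit ctor: 32 initial hazard pointers per thread
            cds::threading::Manager::attachThread();    // attach the current thread to the SMR

            // ... work with DHP-based containers here ...

            cds::threading::Manager::detachThread();    // thread's guard pool goes back to the common pool
        }                                               // DHP singleton destroyed here
        cds::Terminate();
        return 0;
    }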
+ // cppcheck-suppress functionConst
void clear()
{
for ( guard* cur = array_, *last = array_ + capacity(); cur < last; ++cur )
atomics::atomic<unsigned int> sync_; ///< dummy var to introduce synchronizes-with relationship between threads
char pad2_[cds::c_nCacheLineSize];
+ // CppCheck warning: pad1_ and pad2_ are uninitialized in the ctor
+ // cppcheck-suppress uninitMemberVar
thread_data( guard* guards, size_t guard_count, retired_ptr* retired_arr, size_t retired_capacity )
: hazards_( guards, guard_count )
, retired_( retired_arr, retired_capacity )
private:
//@cond
CDS_EXPORT_API thread_record* create_thread_data();
- CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );
+ static CDS_EXPORT_API void destroy_thread_data( thread_record* pRec );
/// Allocates Hazard Pointer SMR thread private data
CDS_EXPORT_API thread_record* alloc_thread_data();
allocator() {}
allocator( allocator const& ) {}
template <class U>
- allocator( allocator<U> const& ) {}
+ explicit allocator( allocator<U> const& ) {}
- T* allocate( size_t nCount )
+ static T* allocate( size_t nCount )
{
return reinterpret_cast<T*>( s_alloc_memory( sizeof( value_type ) * nCount ));
}
- void deallocate( T* p, size_t /*nCount*/ )
+ static void deallocate( T* p, size_t /*nCount*/ )
{
s_free_memory( reinterpret_cast<void*>( p ));
}
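For context, a hedged sketch (not the library's actual call site) of how such an internal allocator is meant to be used: allocate hands out raw memory from s_alloc_memory, so callers pair it with placement new and pair deallocate with an explicit destructor call. The foo type and helper functions below are illustrative, and the sketch assumes the allocator template shown above is in scope.

    #include <new>      // placement new

    struct foo { int x; explicit foo( int v ): x( v ) {} };

    foo* make_foo( int v )
    {
        allocator<foo> al;
        return new( al.allocate( 1 )) foo( v );    // raw bytes from s_alloc_memory + placement new
    }

    void destroy_foo( foo* p )
    {
        p->~foo();                                 // destroy explicitly
        allocator<foo>::deallocate( p, 1 );        // static after this patch; frees via s_free_memory
    }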
);
}
- CDS_EXPORT_API void smr::destroy_thread_data( thread_record* pRec )
+ /*static*/ CDS_EXPORT_API void smr::destroy_thread_data( thread_record* pRec )
{
// all retired pointers must be freed
pRec->~thread_record();
allocator() {}
allocator( allocator const& ) {}
template <class U>
- allocator( allocator<U> const& ) {}
+ explicit allocator( allocator<U> const& ) {}
- T* allocate( size_t nCount )
+ static T* allocate( size_t nCount )
{
return reinterpret_cast<T*>( s_alloc_memory( sizeof( value_type ) * nCount ) );
}
- void deallocate( T* p, size_t /*nCount*/ )
+ static void deallocate( T* p, size_t /*nCount*/ )
{
s_free_memory( reinterpret_cast<void*>( p ) );
}
);
}
- CDS_EXPORT_API void smr::destroy_thread_data( thread_record* pRec )
+ /*static*/ CDS_EXPORT_API void smr::destroy_thread_data( thread_record* pRec )
{
// all retired pointers must be freed
assert( pRec->retired_.size() == 0 );
}
}
+ // cppcheck-suppress functionConst
CDS_EXPORT_API void smr::classic_scan( thread_data* pThreadRec )
{
thread_record* pRec = static_cast<thread_record*>( pThreadRec );
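For orientation, a simplified sketch of what a classic Hazard Pointer scan stage does, in the spirit of Michael's algorithm; this is not the libcds implementation, and the container types and helper names are illustrative. The idea: snapshot and sort the published hazard pointers, then reclaim every retired node that none of them still protects.

    #include <algorithm>
    #include <functional>
    #include <vector>

    struct retired_node {
        void* ptr;
        void (*free_fn)( void* );
    };

    // hazards is a snapshot of all currently published hazard pointers
    void classic_scan_sketch( std::vector<void*> hazards, std::vector<retired_node>& retired_list )
    {
        // Stage 1: sort the snapshot so each retired pointer can be looked up in O(log n)
        std::sort( hazards.begin(), hazards.end(), std::less<void*>());

        // Stage 2: a retired node may be reclaimed only if no thread still guards it
        std::vector<retired_node> still_protected;
        for ( retired_node& r : retired_list ) {
            if ( std::binary_search( hazards.begin(), hazards.end(), r.ptr, std::less<void*>()))
                still_protected.push_back( r );   // still guarded - keep it for the next scan
            else
                r.free_fn( r.ptr );               // no hazard pointer refers to it - safe to reclaim
        }
        retired_list.swap( still_protected );
    }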