if(CMAKE_COMPILER_IS_GNUCXX)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Wextra -pedantic -fno-strict-aliasing")
if(CMAKE_TARGET_ARCHITECTURE STREQUAL "x86_64")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mcx16 -m64")
- elseif(CMAKE_TARGET_ARCHITECTURE STREQUAL "i386")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -m32")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mcx16")
endif()
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.9.0")
# gcc 4.8: disable noisy -Wunused-local-typedefs
return nBucket & ~(1 << bitop::MSBnz( nBucket ));
}
- aux_node_type * init_bucket( size_t nBucket )
+ aux_node_type * init_bucket( size_t const nBucket )
{
assert( nBucket > 0 );
size_t nParent = parent_bucket( nBucket );
assert( pParentBucket != nullptr );
- // Allocate a dummy node for new bucket
- aux_node_type * pBucket;
- if ( ( pBucket = m_Buckets.bucket( nBucket )) == nullptr ) {
+ // Allocate an aux node for new bucket
+ aux_node_type * pBucket = m_Buckets.bucket( nBucket );
+
+ back_off bkoff;
+ for ( ;; pBucket = m_Buckets.bucket( nBucket )) {
+ if ( pBucket )
+ return pBucket;
+
pBucket = alloc_aux_node( split_list::dummy_hash( nBucket ) );
if ( pBucket ) {
if ( m_List.insert_aux_node( pParentBucket, pBucket ) ) {
m_Stat.onNewBucket();
return pBucket;
}
- else {
- // Another thread set the bucket. Wait while it done
- free_aux_node( pBucket );
- }
- }
- else {
- // There are no free buckets. It means that the bucket table is full
- // Wait until another thread sets the bucket
- m_Stat.onBucketsExhausted();
+
+ // Another thread set the bucket. Wait until it is done
+ free_aux_node( pBucket );
+ m_Stat.onBucketInitContenton();
+ break;
}
+
+ // There are no free buckets. It means that the bucket table is full
+ // Wait until another thread sets the bucket or a free bucket becomes available
+ m_Stat.onBucketsExhausted();
+ bkoff();
}
- else
- return pBucket;
// Another thread set the bucket. Wait until it is done
-
- // In this point, we must wait while nBucket is empty.
- // The compiler can decide that waiting loop can be "optimized" (stripped)
- // To prevent this situation, we use waiting on volatile bucket_head_ptr pointer.
-
- m_Stat.onBucketInitContenton();
- back_off bkoff;
- while ( true ) {
- aux_node_type volatile * p = m_Buckets.bucket( nBucket );
- if ( p != nullptr )
- return const_cast<aux_node_type *>(p);
+ for ( pBucket = m_Buckets.bucket( nBucket ); pBucket == nullptr; pBucket = m_Buckets.bucket( nBucket )) {
bkoff();
m_Stat.onBusyWaitBucketInit();
}
+
+ return pBucket;
}
aux_node_type * get_bucket( size_t nHash )
return nBucket & ~(1 << bitop::MSBnz( nBucket ));
}
- aux_node_type * init_bucket( size_t nBucket )
+ aux_node_type * init_bucket( size_t const nBucket )
{
assert( nBucket > 0 );
size_t nParent = parent_bucket( nBucket );
assert( pParentBucket != nullptr );
- // Allocate a dummy node for new bucket
- {
- aux_node_type * pBucket = alloc_aux_node( split_list::dummy_hash( nBucket ) );
- if ( m_List.insert_aux_node( pParentBucket, pBucket ) ) {
- m_Buckets.bucket( nBucket, pBucket );
- m_Stat.onNewBucket();
+ // Allocate an aux node for new bucket
+ aux_node_type * pBucket = m_Buckets.bucket( nBucket );
+
+ back_off bkoff;
+ for ( ;; pBucket = m_Buckets.bucket( nBucket ) ) {
+ if ( pBucket )
return pBucket;
+
+ pBucket = alloc_aux_node( split_list::dummy_hash( nBucket ) );
+ if ( pBucket ) {
+ if ( m_List.insert_aux_node( pParentBucket, pBucket ) ) {
+ m_Buckets.bucket( nBucket, pBucket );
+ m_Stat.onNewBucket();
+ return pBucket;
+ }
+
+ // Another thread set the bucket. Wait until it is done
+ free_aux_node( pBucket );
+ m_Stat.onBucketInitContenton();
+ break;
}
- free_aux_node( pBucket );
+
+ // There are no free buckets. It means that the bucket table is full
+ // Wait until another thread sets the bucket or a free bucket becomes available
+ m_Stat.onBucketsExhausted();
+ bkoff();
}
// Another thread set the bucket. Wait until it is done
-
- // In this point, we must wait while nBucket is empty.
- // The compiler can decide that waiting loop can be "optimized" (stripped)
- // To prevent this situation, we use waiting on volatile bucket_head_ptr pointer.
- //
- m_Stat.onBucketInitContenton();
- back_off bkoff;
- while ( true ) {
- aux_node_type volatile * p = m_Buckets.bucket( nBucket );
- if ( p && p != nullptr )
- return const_cast<aux_node_type *>(p);
+ for ( pBucket = m_Buckets.bucket( nBucket ); pBucket == nullptr; pBucket = m_Buckets.bucket( nBucket ) ) {
bkoff();
m_Stat.onBusyWaitBucketInit();
}
+
+ return pBucket;
}
aux_node_type * get_bucket( size_t nHash )
return nBucket & ~( 1 << bitop::MSBnz( nBucket ) );
}
- aux_node_type * init_bucket( size_t nBucket )
+ aux_node_type * init_bucket( size_t const nBucket )
{
assert( nBucket > 0 );
size_t nParent = parent_bucket( nBucket );
assert( pParentBucket != nullptr );
- // Allocate a dummy node for new bucket
- {
- aux_node_type * pBucket = alloc_aux_node( split_list::dummy_hash( nBucket ) );
- if ( m_List.insert_aux_node( pParentBucket, pBucket ) ) {
- m_Buckets.bucket( nBucket, pBucket );
- m_Stat.onNewBucket();
+ // Allocate an aux node for new bucket
+ aux_node_type * pBucket = m_Buckets.bucket( nBucket );
+
+ back_off bkoff;
+ for ( ;; pBucket = m_Buckets.bucket( nBucket ) ) {
+ if ( pBucket )
return pBucket;
+
+ pBucket = alloc_aux_node( split_list::dummy_hash( nBucket ) );
+ if ( pBucket ) {
+ if ( m_List.insert_aux_node( pParentBucket, pBucket ) ) {
+ m_Buckets.bucket( nBucket, pBucket );
+ m_Stat.onNewBucket();
+ return pBucket;
+ }
+
+ // Another thread set the bucket. Wait until it is done
+ free_aux_node( pBucket );
+ m_Stat.onBucketInitContenton();
+ break;
}
- free_aux_node( pBucket );
+
+ // There are no free buckets. It means that the bucket table is full
+ // Wait until another thread sets the bucket or a free bucket becomes available
+ m_Stat.onBucketsExhausted();
+ bkoff();
}
// Another thread set the bucket. Wait until it is done
-
- // In this point, we must wait while nBucket is empty.
- // The compiler can decide that waiting loop can be "optimized" (stripped)
- // To prevent this situation, we use waiting on volatile bucket_head_ptr pointer.
- //
- m_Stat.onBucketInitContenton();
- back_off bkoff;
- while ( true ) {
- aux_node_type volatile * p = m_Buckets.bucket( nBucket );
- if ( p != nullptr )
- return const_cast<aux_node_type *>( p );
+ for ( pBucket = m_Buckets.bucket( nBucket ); pBucket == nullptr; pBucket = m_Buckets.bucket( nBucket ) ) {
bkoff();
m_Stat.onBusyWaitBucketInit();
}
+
+ return pBucket;
}
aux_node_type * get_bucket( size_t nHash )
<UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>\r
<Extensions>h;hh;hpp;hxx;hm;inl;inc;xsd</Extensions>\r
</Filter>\r
- <Filter Include="Resource Files">\r
- <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>\r
- <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>\r
- </Filter>\r
<Filter Include="Source Files\MichaelSet">\r
<UniqueIdentifier>{9f3cc5d8-7142-4a8e-a520-512911312b13}</UniqueIdentifier>\r
</Filter>\r