- \p Traits - tree traits, default is \p bronson_avltree::traits
It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
instead of \p Traits template argument.
There is \ref cds_container_BronsonAVLTreeMap_rcu_ptr "a specialization" for "key -> value pointer" map.
@note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include an appropriate RCU header file,
//TODO: study how to pass a parameter pack to a lambda efficiently using perfect forwarding
// see http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#904 - this is what we need
return base_class::do_update( key, key_comparator(),
- [&args...]( node_type * pNode ) -> mapped_type *
+ [&args...]( node_type * pNode ) -> mapped_type *
{
assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
CDS_UNUSED( pNode );
},
update_flags::allow_insert
) == update_flags::result_inserted;
-# else
+# else
// gcc 4.8 error: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=47226
// workaround (from http://stackoverflow.com/questions/14191989/how-do-i-use-variadic-perfect-forwarding-into-a-lambda)
- auto f = std::bind<mapped_type *>(
+ auto f = std::bind<mapped_type *>(
[]( Args... args) -> mapped_type* { return cxx_allocator().New( std::move(args)...); },
std::forward<Args>(args)...
);
return base_class::do_update( key, key_comparator(),
- [&f]( node_type * pNode ) -> mapped_type *
+ [&f]( node_type * pNode ) -> mapped_type *
{
assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
CDS_UNUSED( pNode );
std::pair<bool, bool> update( K const& key, Func func )
{
int result = base_class::do_update( key, key_comparator(),
- [&func]( node_type * pNode ) -> mapped_type*
+ [&func]( node_type * pNode ) -> mapped_type*
{
mapped_type * pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
if ( !pVal ) {
func( false, pNode->m_key, *pVal );
return pVal;
},
- update_flags::allow_insert | update_flags::allow_update
+ update_flags::allow_insert | update_flags::allow_update
);
return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
}
void operator()( size_t nLevel, size_t hLeft, size_t hRight );
};
\endcode
where
- \p nLevel - the level where the violation is found
- \p hLeft - the height of left subtree
- \p hRight - the height of right subtree
return pNode->parent( order );
}
- // RCU safe disposer
+ // RCU safe disposer
class rcu_disposer
{
node_type * m_pRetiredList; ///< head of retired node list
pNode->m_pNextRemoved = m_pRetiredList;
m_pRetiredList = pNode;
}
-
+
void dispose_value( mapped_type pVal )
{
assert( m_pRetiredValue == nullptr );
m_pRetiredValue = pVal;
}
-
+
private:
struct internal_disposer
{
void clean()
{
assert( !gc::is_locked() );
-
+
// TODO: use RCU::batch_retire
// Dispose nodes
assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
CDS_UNUSED( pNode );
return pVal;
- },
+ },
update_flags::allow_insert
) == update_flags::result_inserted;
}
/// Updates the value pointer for \p key
/**
    The functor passed to \p do_update() unconditionally supplies \p pVal as the
    node's new value. If \p bInsert is \p true, a new node may be created when
    \p key is absent; otherwise only an existing node can be updated.

    Returns a pair where \p first is \p true if the map was changed and
    \p second is \p true if a new node was inserted.
*/
std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
{
    // do_update() returns a bit mask of update_flags values; capture pVal by
    // copy since the functor may run after this frame's locals are reused.
    int result = do_update( key, key_comparator(),
        [pVal]( node_type * ) -> mapped_type
        {
            // Unconditionally install pVal as the node's value
            return pVal;
        },
        update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
    );
    // result != 0                               -> something happened (update or insert)
    // result & update_flags::result_inserted    -> a new node was created
    return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
}
bool erase_with( K const& key, Less pred )
{
CDS_UNUSED( pred );
- return do_remove(
- key,
+ return do_remove(
+ key,
cds::opt::details::make_comparator_from_less<Less>(),
[]( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
);
template <typename K, typename Func>
bool erase( K const& key, Func f )
{
- return do_remove(
- key,
- key_comparator(),
- [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
+ return do_remove(
+ key,
+ key_comparator(),
+ [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
assert( pVal );
- f( key, *pVal );
- disp.dispose_value(pVal);
+ f( key, *pVal );
+ disp.dispose_value(pVal);
return true;
}
);
bool erase_with( K const& key, Less pred, Func f )
{
CDS_UNUSED( pred );
- return do_remove(
- key,
+ return do_remove(
+ key,
cds::opt::details::make_comparator_from_less<Less>(),
- [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
+ [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
assert( pVal );
- f( key, *pVal );
- disp.dispose_value(pVal);
+ f( key, *pVal );
+ disp.dispose_value(pVal);
return true;
}
);
template <typename K, typename Func>
bool find( K const& key, Func f )
{
- return do_find( key, key_comparator(),
+ return do_find( key, key_comparator(),
[&f]( node_type * pNode ) -> bool {
assert( pNode != nullptr );
mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
bool find_with( K const& key, Less pred, Func f )
{
CDS_UNUSED( pred );
- return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
+ return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
[&f]( node_type * pNode ) -> bool {
assert( pNode != nullptr );
mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
return true;
}
return false;
- }
+ }
);
}
void operator()( size_t nLevel, size_t hLeft, size_t hRight );
};
\endcode
where
- \p nLevel - the level where the violation is found
- \p hLeft - the height of left subtree
- \p hRight - the height of right subtree
result = try_update( key, cmp, nFlags, funcUpdate, pChild, nChildVersion, disp );
else
result = update_flags::retry;
- }
+ }
else {
// the tree is empty
if ( nFlags & update_flags::allow_insert ) {
if ( c_bRelaxedInsert ) {
if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
- || child( pNode, nDir ) != nullptr )
+ || child( pNode, nDir ) != nullptr )
{
m_stat.onInsertRetry();
return update_flags::retry;
node_scoped_lock l( m_Monitor, *pNode );
if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
- || child( pNode, nDir ) != nullptr )
+ || child( pNode, nDir ) != nullptr )
{
if ( c_bRelaxedInsert ) {
mapped_type pVal = pNew->m_pValue.load( memory_model::memory_order_relaxed );
if ( !pNode->is_valued( atomics::memory_order_relaxed ) )
return update_flags::failed;
- if ( child( pNode, left_child ) == nullptr || child( pNode, right_child ) == nullptr ) {
+ if ( child( pNode, left_child ) == nullptr || child( pNode, right_child ) == nullptr ) {
node_type * pDamaged;
mapped_type pOld;
{
node_scoped_lock ln( m_Monitor, *pNode );
pOld = pNode->value( memory_model::memory_order_relaxed );
if ( !( pNode->version( memory_model::memory_order_acquire ) == nVersion
- && pOld
+ && pOld
&& try_unlink_locked( pParent, pNode, disp )))
{
return update_flags::retry;