//@cond
namespace details {
// NOTE(review): value_node (in-node, placement-new'd value storage) is being removed
// in favour of heap-allocated values managed by make_map::cxx_allocator below.
- template <typename Key, typename T, typename Lock>
- struct value_node : public bronson_avltree::node< Key, T, Lock >
- {
- T m_data; // placeholder for data
- };
-
// NOTE(review): pointer_oriented_traits still names value_node, which the hunk above
// deletes — it looks dead after this change; confirm it is removed (or its node_type
// updated) in the same commit.
template <typename Key, typename T, typename Traits>
struct pointer_oriented_traits: public Traits
{
typedef value_node<Key, T, typename Traits::lock_type > node_type;
};
+
+ // Metafunction building the pointer-oriented BronsonAVLTreeMap< gc<RCU>, Key, T*, traits >
+ // that backs the value-oriented map specialization. Values of type T are created and
+ // destroyed through cxx_allocator (Traits::allocator applied to mapped_type).
+ template < typename Key, typename T, typename Traits>
+ struct make_map
+ {
+ typedef Key key_type;
+ typedef T mapped_type;
+ typedef Traits original_traits;
+
+ typedef cds::details::Allocator< mapped_type, typename original_traits::allocator > cxx_allocator;
+
+ struct traits : public original_traits
+ {
+ // Disposer frees the heap-allocated value when the base tree discards it.
+ // FIX(review): dropped the nested "template <typename T>" — it redeclared
+ // (shadowed) make_map's own template parameter T, which is ill-formed
+ // ([temp.local]), and was unused anyway since operator() takes mapped_type*.
+ struct disposer {
+ void operator()( mapped_type * p )
+ {
+ cxx_allocator().Delete( p );
+ }
+ };
+ };
+
+ // Metafunction result
+ typedef BronsonAVLTreeMap <
+ cds::urcu::gc<RCU>,
+ Key,
+ T *,
+ traits
+ > type;
+ };
} // namespace details
//@endcond
} // namespace bronson_avltree
#endif
>
class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T, Traits >
- : private BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, bronson_avltree::details::pointer_oriented_traits<Key, T, Traits>>
+#ifdef CDS_DOXYGEN_INVOKED
+ : private BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
+#else
+ // FIX(review): removed the stray ';' that terminated the base-clause — a
+ // base-specifier must be followed directly by the class body '{', not ';'.
+ : private bronson_avltree::details::make_map< Key, T, Traits >::type
+#endif
{
//@cond
// NOTE(review): base class is now produced by the make_map metafunction; values are
// separate heap allocations owned through maker::cxx_allocator, not in-node storage.
- typedef BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, bronson_avltree::details::pointer_oriented_traits<Traits>> base_class;
+ typedef bronson_avltree::details::make_map< Key, T, Traits > maker;
+ typedef typename maker::type base_class;
//@endcond
public:
typedef typename base_class::key_comparator key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
typedef typename traits::item_counter item_counter; ///< Item counting policy
typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
// NOTE(review): traits::allocator now allocates values; internal tree nodes use the
// new traits::node_allocator — keep the two distinct.
- typedef typename traits::allocator allocator_type; ///< allocator for maintaining internal node
+ typedef typename traits::allocator allocator_type; ///< allocator for value
+ typedef typename traits::node_allocator node_allocator_type;///< allocator for maintaining internal nodes
typedef typename traits::stat stat; ///< internal statistics
typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
typedef typename traits::back_off back_off; ///< Back-off strategy
protected:
//@cond
// With value_node gone there is a single node type, so the alloc_node_type /
// base_node_type distinction disappears.
- typedef typename base_class::alloc_node_type node_type;
- typedef typename base_class::node_type base_node_type;
- typedef base_class::node_scoped_lock node_scoped_lock;
+ typedef typename base_class::node_type node_type;
+ typedef typename base_class::node_scoped_lock node_scoped_lock;
+ typedef typename maker::cxx_allocator cxx_allocator;
using base_class::update_flags;
//@endcond
// Inserts \p key with a default-constructed value; returns true iff a new item was
// inserted. The value is now a separate heap allocation (cxx_allocator().New())
// instead of placement-new into the node's in-place m_data slot.
bool insert( K const& key )
{
return base_class::do_update(key, key_comparator(),
- []( base_node_type * pNode ) -> mapped_type*
+ []( node_type * pNode ) -> mapped_type*
{
// Functor is only called for a node that has no value yet.
assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
- node_type * p = static_cast<node_type *>(pNode);
- new (&p->m_data) mapped_type;
- return &p->m_data;
+ return cxx_allocator().New();
},
update_flags::allow_insert
) == update_flags::result_insert;
// Inserts \p key mapped to a copy of \p val; returns true iff a new item was
// inserted. Value is copy-constructed on the heap via cxx_allocator.
bool insert( K const& key, V const& val )
{
return base_class::do_update( key, key_comparator(),
- [&val]( base_node_type * pNode )
+ [&val]( node_type * pNode )
{
// Functor is only called for a node that has no value yet.
assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
- node_type * p = static_cast<node_type *>(pNode);
- new (&p->m_data) mapped_type( val );
- return &p->m_data;
+ return cxx_allocator().New( val );
},
update_flags::allow_insert
) == update_flags::result_insert;
// Inserts \p key and lets \p func initialize the freshly default-constructed value
// (called as func(mapped_type&)); returns true iff a new item was inserted.
bool insert_with( K const& key, Func func )
{
return base_class::do_update( key, key_comparator(),
- [&func]( base_node_type * pNode ) -> mapped_type*
+ [&func]( node_type * pNode ) -> mapped_type*
{
// Functor is only called for a node that has no value yet.
assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
- node_type * p = static_cast<node_type *>(pNode);
- func( p->m_data );
- return &p->m_data;
+ mapped_type * pVal = cxx_allocator().New();
+ func( *pVal );
+ return pVal;
},
update_flags::allow_insert
) == update_flags::result_insert;
// Inserts \p key with a value constructed in place from \p args...; returns true
// iff a new item was inserted. Arguments are perfectly forwarded to the heap
// allocation's constructor.
bool emplace( K&& key, Args&&... args )
{
return base_class::do_update( key, key_comparator(),
- [&]( base_node_type * pNode ) -> mapped_type*
+ [&]( node_type * pNode ) -> mapped_type*
{
// Functor is only called for a node that has no value yet.
assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
- node_type * p = static_cast<node_type *>(pNode);
- new (&p->m_data) mapped_type( std::forward<Args>(args)... );
- return &p->m_data;
+ return cxx_allocator().New( std::forward<Args>(args)...);
},
update_flags::allow_insert
) == update_flags::result_insert;
// Updates the value for \p key, inserting a default-constructed value first if the
// key (or its value) is absent; \p func is called as func(bNew, mapped_type&).
std::pair<bool, bool> update( K const& key, Func func )
{
int result = base_class::do_update( key, key_comparator(),
- [&func]( base_node_type * pNode ) -> mapped_type*
+ [&func]( node_type * pNode ) -> mapped_type*
{
- node_type * p = static_cast<node_type *>(pNode);
- //new (&p->m_data) mapped_type;
- func( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr, p->m_data );
- return &p->m_data;
+ // FIX(review): removed the unbalanced ')' after load(...), and pass the
+ // value by reference (*pVal) — the removed code passed p->m_data (a
+ // mapped_type&) and sibling insert_with() calls func( *pVal ), so the
+ // functor's contract is func( bool bNew, mapped_type& ), not a pointer.
+ mapped_type * pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
+ if ( !pVal ) {
+ pVal = cxx_allocator().New();
+ func( true, *pVal );
+ }
+ else
+ func( false, *pVal );
+ return pVal;
},
update_flags::allow_insert | update_flags::allow_update
);
#include <cds/container/details/base.h>
#include <cds/opt/compare.h>
#include <cds/urcu/options.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
namespace cds { namespace container {
typedef opt::none less;
/// Allocator for internal node
- typedef CDS_DEFAULT_ALLOCATOR allocator;
+ typedef CDS_DEFAULT_ALLOCATOR node_allocator;
/// Disposer (only for pointer-oriented tree specialization)
/**
List of available options see \p opt::rcu_check_deadlock
*/
typedef cds::opt::v::rcu_throw_deadlock rcu_check_deadlock;
-
- //@cond
- // Internal traits, not for direct usage
- typedef opt::none node_type;
- //@endcond
};
/// Metafunction converting option list to BronsonAVLTreeMap traits
- \p opt::compare - key compare functor. No default functor is provided.
If the option is not specified, \p %opt::less is used.
- \p opt::less - specifies binary predicate used for key compare. At least \p %opt::compare or \p %opt::less should be defined.
- - \p opt::allocator - the allocator for internal nodes. Default is \ref CDS_DEFAULT_ALLOCATOR.
+ - \p opt::node_allocator - the allocator for internal nodes. Default is \ref CDS_DEFAULT_ALLOCATOR.
- \ref cds::intrusive::opt::disposer "container::opt::disposer" - the functor used for dispose removed values.
The user-provided disposer is used only for pointer-oriented tree specialization
like \p BronsonAVLTreeMap<GC, Key, T*, Traits>. When the node becomes the routing node without value,
#endif
typedef typename traits::item_counter item_counter; ///< Item counting policy
typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
- typedef typename traits::allocator allocator_type; ///< allocator for maintaining internal node
+ typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
typedef typename traits::stat stat; ///< internal statistics
typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
typedef typename traits::back_off back_off; ///< Back-off strategy
typedef bronson_avltree::node< key_type, mapped_type, lock_type > node_type;
typedef typename node_type::version_type version_type;
- typedef typename std::conditional <
- std::is_same< typename traits::node_type, opt::none >::value,
- bronson_avltree::node< key_type, mapped_type, lock_type >,
- typename traits::node_type
- >::type alloc_node_type;
-
- typedef typename allocator_type::template rebind<alloc_node_type>::other memory_allocator;
- typedef cds::details::Allocator< alloc_node_type, memory_allocator > cxx_allocator;
-
+ typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
enum class find_result
// Allocates and constructs a tree node with the given key, height, version and
// links. With alloc_node_type gone, construction goes through cxx_allocator::New
// in one step instead of raw allocate + placement-new.
template <typename K>
static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
{
- alloc_node_type * pNode = memory_allocator().allocate( 1 );
- return new (static_cast<node_type *>(pNode)) node_type( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
+ return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
}
// Destroys and deallocates a node directly, bypassing the user-provided disposer
// (renamed from internal_free_node; callers such as the relaxed-insert rollback
// path use the new name).
- static void internal_free_node( node_type * pNode )
+ static void free_node( node_type * pNode )
{
// Free node without disposer
- cxx_allocator().Delete( static_cast<alloc_node_type *>(pNode) );
+ cxx_allocator().Delete( pNode );
}
// RCU safe disposer
private:
// Functor handed to gc::retire_ptr: reclaims a retired node once the RCU grace
// period has elapsed. Now delegates to free_node() instead of repeating the
// explicit destructor-call + deallocate sequence.
struct internal_disposer
{
- void operator()( alloc_node_type * p ) const
+ void operator()( node_type * p ) const
{
- static_cast<node_type *>(p)->~node_type();
- memory_allocator().deallocate( p, 1 );
+ free_node( p );
}
};
for ( node_type * p = m_pRetiredList; p; ) {
node_type * pNext = p->m_pNextRemoved;
// Value already disposed
- gc::template retire_ptr<internal_disposer>( static_cast<alloc_node_type *>(p) );
+ gc::template retire_ptr<internal_disposer>( p );
p = pNext;
}
}
|| pNode->child( nDir ).load( memory_model::memory_order_relaxed ) != nullptr )
{
if ( c_RelaxedInsert ) {
- internal_free_node( pNew );
+ free_node( pNew );
m_stat.onRelaxedInsertFailed();
}