-//$$CDS-header$$
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
#ifndef CDSLIB_INTRUSIVE_DETAILS_FELDMAN_HASHSET_BASE_H
#define CDSLIB_INTRUSIVE_DETAILS_FELDMAN_HASHSET_BASE_H
//@endcond
};
+ /// Hash size option
+ /**
+ @copydetails traits::hash_size
+ */
+ template <size_t Size>
+ struct hash_size {
+ //@cond
+ template <typename Base> struct pack: public Base
+ {
+ // Injects the compile-time hash size (in bytes) into the packed traits
+ enum: size_t {
+ hash_size = Size
+ };
+ };
+ //@endcond
+ };
+
/// \p FeldmanHashSet internal statistics
template <typename EventCounter = cds::atomicity::event_counter>
struct stat {
*/
typedef cds::opt::none hash_accessor;
+ /// The size of hash value in bytes
+ /**
+ By default, the size of hash value is <tt>sizeof( hash_type )</tt>.
+ Sometimes it is not correct: for example, for the 6-byte struct below \p sizeof yields 8, not 6, because of alignment padding, so the following \p static_assert will fail:
+ \code
+ struct key_type {
+ uint32_t key1;
+ uint16_t subkey;
+ };
+
+ static_assert( sizeof( key_type ) == 6, "Key type size mismatch" );
+ \endcode
+ For that case you can specify \p hash_size explicitly.
+
+ Value \p 0 means <tt>sizeof( hash_type )</tt>.
+ */
+ static CDS_CONSTEXPR size_t const hash_size = 0;
+
/// Disposer for removing data nodes
typedef cds::intrusive::opt::v::empty_disposer disposer;
/// Array node allocator
/**
- Allocator for array nodes. That allocator is used for creating \p headNode and \p arrayNode when the set grows.
+ Allocator for array nodes. The allocator is used for creating \p headNode and \p arrayNode when the set grows.
Default is \ref CDS_DEFAULT_ALLOCATOR
*/
typedef CDS_DEFAULT_ALLOCATOR node_allocator;
Supported \p Options are:
- \p feldman_hashset::hash_accessor - mandatory option, hash accessor functor.
@copydetails traits::hash_accessor
+ - \p feldman_hashset::hash_size - the size of hash value in bytes.
+ @copydetails traits::hash_size
- \p opt::node_allocator - array node allocator.
@copydetails traits::node_allocator
- \p opt::compare - hash comparison functor. No default functor is provided.
//@cond
namespace details {
- template <typename HashType, typename UInt = size_t >
- using hash_splitter = cds::algo::split_bitstring< HashType, UInt >;
+ // split_bitstring is now parameterized by the hash length in bytes (HashSize)
+ // instead of an integer word type, so a traits::hash_size override propagates here
+ template <typename HashType, size_t HashSize >
+ using hash_splitter = cds::algo::split_bitstring< HashType, HashSize >;
struct metrics {
size_t head_node_size; // power-of-two
/// Hash type deduced from \p hash_accessor return type
typedef typename std::decay<
typename std::remove_reference<
- decltype(hash_accessor()(std::declval<value_type>()))
+ decltype(hash_accessor()(std::declval<value_type>()))
>::type
>::type hash_type;
- //typedef typename std::result_of< hash_accessor( std::declval<value_type>()) >::type hash_type;
static_assert(!std::is_pointer<hash_type>::value, "hash_accessor should return a reference to hash value");
typedef typename cds::opt::details::make_comparator_from<
feldman_hashset::bitwise_compare< hash_type >
>::type hash_comparator;
- typedef feldman_hashset::details::hash_splitter< hash_type > hash_splitter;
+ /// The size of hash_type in bytes, see \p traits::hash_size for explanation
+ static CDS_CONSTEXPR size_t const c_hash_size = traits::hash_size == 0 ? sizeof( hash_type ) : static_cast<size_t>( traits::hash_size );
+
+ // Bit splitter over the hash value, sized by the effective hash size above
+ typedef feldman_hashset::details::hash_splitter< hash_type, c_hash_size > hash_splitter;
- protected:
enum node_flags {
    flag_array_converting = 1, ///< the cell is converting from data node to an array node
    flag_array_node = 2 ///< the cell is a pointer to an array node
};
+ // NOTE(review): node_flags is now declared before the protected section,
+ // widening its visibility — confirm this is intended.
+ protected:
+
typedef cds::details::marked_ptr< value_type, 3 > node_ptr;
typedef atomics::atomic< node_ptr > atomic_node_ptr;
struct traverse_data {
hash_splitter splitter;
array_node * pArr;
- size_t nOffset;
size_t nSlot;
size_t nHeight;
traverse_data( hash_type const& hash, multilevel_array& arr )
: splitter( hash )
- , pArr( arr.head() )
- , nOffset( arr.metrics().head_node_size_log )
- , nSlot(splitter.cut(arr.metrics().head_node_size_log))
- , nHeight( 1 )
- {}
+ {
+ reset( arr );
+ }
+
+ // Re-arms the traversal at the head node: rewinds the bit splitter,
+ // consumes the head-node index bits, and resets the tree height to 1.
+ void reset( multilevel_array& arr )
+ {
+ splitter.reset();
+ pArr = arr.head();
+ nSlot = splitter.cut( arr.metrics().head_node_size_log );
+ nHeight = 1;
+ }
};
protected:
public:
multilevel_array(size_t head_bits, size_t array_bits )
- : m_Metrics(feldman_hashset::details::metrics::make(head_bits, array_bits, sizeof(hash_type)))
+ // metrics are built from the effective hash size (c_hash_size honors traits::hash_size)
+ : m_Metrics(feldman_hashset::details::metrics::make( head_bits, array_bits, c_hash_size ))
, m_Head( alloc_head_node())
{}
if (slot.bits() == flag_array_node) {
// array node, go down the tree
assert(slot.ptr() != nullptr);
+ assert( !pos.splitter.eos());
pos.nSlot = pos.splitter.cut( metrics().array_node_size_log );
assert( pos.nSlot < metrics().array_node_size );
pos.pArr = to_array(slot.ptr());
- pos.nOffset += metrics().array_node_size_log;
++pos.nHeight;
}
else if (slot.bits() == flag_array_converting) {
bool expand_slot( traverse_data& pos, node_ptr current)
{
- return expand_slot( pos.pArr, pos.nSlot, current, pos.nOffset );
+ // take the current bit offset directly from the hash splitter
+ return expand_slot( pos.pArr, pos.nSlot, current, pos.splitter.bit_offset());
}
+ private:
bool expand_slot(array_node * pParent, size_t idxParent, node_ptr current, size_t nOffset)
{
assert(current.bits() == 0);
assert(current.ptr());
- size_t idx = hash_splitter(hash_accessor()(*current.ptr()), nOffset).cut(m_Metrics.array_node_size_log);
array_node * pArr = alloc_array_node(pParent, idxParent);
node_ptr cur(current.ptr());
return false;
}
+ size_t idx = hash_splitter( hash_accessor()(*current.ptr()), nOffset ).cut( m_Metrics.array_node_size_log );
pArr->nodes[idx].store(current, memory_model::memory_order_release);
cur = cur | flag_array_converting;
stats().onArrayNodeCreated();
return true;
}
-
};
//@endcond
} // namespace feldman_hashset