#ifndef _SMALL_UNORDERED_MAP_H_
#define _SMALL_UNORDERED_MAP_H_

#include <stdint.h>

#include <functional>
#include <iterator>
#include <new>
#include <type_traits>
#include <unordered_map>
#include <utility>

#include "counter.h"
#include "macros.h"
#include "ndb_type_traits.h"
namespace private_ {

  template <typename T>
  struct is_eq_expensive { static const bool value = true; };

  struct cheap_eq { static const bool value = false; };

  // equality comparison is cheap for the primitive integer types
  template <> struct is_eq_expensive<bool> : public cheap_eq {};
  template <> struct is_eq_expensive<uint8_t> : public cheap_eq {};
  template <> struct is_eq_expensive<int8_t> : public cheap_eq {};
  template <> struct is_eq_expensive<uint16_t> : public cheap_eq {};
  template <> struct is_eq_expensive<int16_t> : public cheap_eq {};
  template <> struct is_eq_expensive<uint32_t> : public cheap_eq {};
  template <> struct is_eq_expensive<int32_t> : public cheap_eq {};
  template <> struct is_eq_expensive<uint64_t> : public cheap_eq {};
  template <> struct is_eq_expensive<int64_t> : public cheap_eq {};
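  // A user-defined key type with a one-instruction operator== can opt out of
  // the stored-hash pre-check the same way. Illustrative sketch; `tiny_key`
  // is a hypothetical caller-side type (the specialization must be added
  // inside this namespace):
  //
  //   struct tiny_key { uint32_t v; bool operator==(tiny_key o) const { return v == o.v; } };
  //   template <> struct is_eq_expensive<tiny_key> : public cheap_eq {};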
  static event_avg_counter evt_avg_max_unordered_map_chain_length CACHE_ALIGNED
    ("avg_max_unordered_map_chain_length");
  template <typename T>
  struct fast_func_param {
    typedef typename std::conditional<std::is_scalar<T>::value, T, const T &>::type type;
  };

  template <typename T>
  struct myhash {
    inline ALWAYS_INLINE size_t
    operator()(typename fast_func_param<T>::type t) const
    {
      return std::hash<T>()(t);
    }
  };
  template <typename Tp>
  struct myhash<Tp *> {
    inline ALWAYS_INLINE size_t
    operator()(Tp *t) const
    {
      // std::hash for pointers is typically just the raw address, which is a
      // poor hash (the low bits are fixed by allocator alignment), so mix
      // the bits ourselves.
      // tommyhash: http://tommyds.sourceforge.net/doc/tommyhash_8h_source.html
      size_t key = reinterpret_cast<size_t>(t) >> 3; // shift out 8-byte pointer alignment
#ifdef USE_TOMMY_HASH
      key = (~key) + (key << 21); // key = (key << 21) - key - 1;
      key = key ^ (key >> 24);
      key = (key + (key << 3)) + (key << 8); // key * 265
      key = key ^ (key >> 14);
      key = (key + (key << 2)) + (key << 4); // key * 21
      key = key ^ (key >> 28);
      key = key + (key << 31);
#else
      // cheaper two-step prefix of the same mix
      key = (~key) + (key << 21); // key = (key << 21) - key - 1;
      key = key ^ (key >> 24);
#endif
      return key;
    }
  };
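  // Why the mixing matters, sketched with illustrative values: with an
  // identity-style std::hash, same-aligned allocations collide in exactly
  // the low bits that index the table.
  //
  //   int *p = new int;                               // aligned address
  //   size_t raw  = std::hash<int *>()(p) % 8;        // low bits fixed by alignment
  //   size_t good = myhash<int *>()(p) % 8;           // bits mixed first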
  static inline constexpr size_t
  TableSize(size_t small_size)
  {
    return round_up_to_pow2_const(small_size);
  }

} // namespace private_
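// round_up_to_pow2_const comes from the project's utility headers; a minimal
// constexpr equivalent of what private_::TableSize relies on (illustrative
// sketch, not the project's exact definition):
//
//   static constexpr size_t round_up_to_pow2_sketch(size_t v, size_t p = 1)
//   {
//     return p >= v ? p : round_up_to_pow2_sketch(v, p * 2);
//   }
//
// e.g. TableSize(10) == 16, TableSize(16) == 16, TableSize(1) == 1.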
/**
 * For up to SmallSize elements, uses separate chaining over a fixed-size
 * inline array; otherwise, delegates to a regular std::unordered_map.
 *
 * XXX(stephentu): allow custom allocator
 */
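// Example usage (illustrative):
//
//   small_unordered_map<uint64_t, uint64_t, 64> m;
//   m[42] = 1;             // lands in the inline array; no heap allocation
//   auto it = m.find(42);
//   if (it != m.end())
//     it->second++;
//   // inserting a 65th distinct key would migrate all entries into a
//   // heap-allocated std::unordered_map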
template <typename Key,
          typename T,
          size_t SmallSize = SMALL_SIZE_MAP,
          typename Hash = private_::myhash<Key>>
class small_unordered_map {
public:
  typedef Key key_type;
  typedef T mapped_type;
  typedef std::pair<const key_type, mapped_type> value_type;
  typedef size_t size_type;
  typedef T & reference;
  typedef const T & const_reference;

private:
  typedef std::unordered_map<Key, T, Hash> large_table_type;
  typedef std::pair<key_type, mapped_type> bucket_value_type;

  typedef typename large_table_type::iterator large_table_iterator;
  typedef typename large_table_type::const_iterator large_table_const_iterator;

  static const bool is_trivially_destructible =
    private_::is_trivially_destructible<bucket_value_type>::value;

  static const size_t TableSize = private_::TableSize(SmallSize);
  static_assert(SmallSize >= 1, "SmallSize must be at least 1");
  static_assert(TableSize >= 1, "TableSize must be at least 1");
  struct bucket {
    inline ALWAYS_INLINE bucket_value_type *
    ptr()
    {
      return reinterpret_cast<bucket_value_type *>(&buf[0]);
    }

    inline ALWAYS_INLINE const bucket_value_type *
    ptr() const
    {
      return reinterpret_cast<const bucket_value_type *>(&buf[0]);
    }

    inline ALWAYS_INLINE bucket_value_type &
    ref()
    {
      return *ptr();
    }

    inline ALWAYS_INLINE const bucket_value_type &
    ref() const
    {
      return *ptr();
    }

    template <class... Args>
    inline ALWAYS_INLINE void
    construct(size_t hash, Args &&... args)
    {
      h = hash;
      new (ptr()) bucket_value_type(std::forward<Args>(args)...);
    }

    inline ALWAYS_INLINE void
    destroy()
    {
      if (!is_trivially_destructible)
        ref().~bucket_value_type();
    }

    struct bucket *bnext;
    size_t h;
    char buf[sizeof(value_type)];
  };
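  // Design note: a bucket keeps its payload in raw storage (`buf`) rather
  // than as a bucket_value_type member, so a key/value pair exists only
  // between construct() and destroy(); the cached hash `h` is what
  // find_bucket can compare before touching the key itself.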
  // iterators are not stable across mutation
  template <typename SmallIterType,
            typename LargeIterType,
            typename ValueType>
  class iterator_ : public std::iterator<std::forward_iterator_tag, ValueType> {
    friend class small_unordered_map;
  public:
    inline iterator_() : large(false), b(0) {}

    template <typename S, typename L, typename V>
    inline iterator_(const iterator_<S, L, V> &other)
      : large(other.large), b(other.b)
    {
      if (large)
        large_it = other.large_it;
    }

    inline ValueType &
    operator*() const
    {
      if (unlikely(large))
        return *large_it;
      return reinterpret_cast<ValueType &>(b->ref());
    }

    inline ValueType *
    operator->() const
    {
      if (unlikely(large))
        return &(*large_it);
      return reinterpret_cast<ValueType *>(b->ptr());
    }

    inline bool
    operator==(const iterator_ &o) const
    {
      INVARIANT(large == o.large);
      if (unlikely(large))
        return large_it == o.large_it;
      return b == o.b;
    }

    inline bool
    operator!=(const iterator_ &o) const
    {
      return !operator==(o);
    }

    inline iterator_ &
    operator++()
    {
      if (unlikely(large)) {
        ++large_it;
        return *this;
      }
      b++;
      return *this;
    }

    inline iterator_
    operator++(int)
    {
      iterator_ cur = *this;
      ++(*this);
      return cur;
    }

  protected:
    inline iterator_(SmallIterType *b)
      : large(false), b(b) {}
    inline iterator_(LargeIterType large_it)
      : large(true), large_it(large_it) {}

  private:
    bool large;
    SmallIterType *b;
    LargeIterType large_it;
  };
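  // Consequence of the instability noted above (illustrative sketch):
  //
  //   small_unordered_map<int, int, 2> m;
  //   m[1] = 10;
  //   auto it = m.find(1);
  //   m[2] = 20;
  //   m[3] = 30;   // third distinct key triggers small->large migration
  //   // `it` now points into the abandoned inline array; do not use it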
  static inline size_t
  chain_length(bucket *b)
  {
    size_t ret = 0;
    for (; b; b = b->bnext)
      ret++;
    return ret;
  }
public:
  typedef iterator_<bucket,
                    large_table_iterator,
                    value_type>
          iterator;

  typedef iterator_<const bucket,
                    large_table_const_iterator,
                    const value_type>
          const_iterator;
  small_unordered_map()
    : n(0), large_elems(0)
  {
    NDB_MEMSET(&table[0], 0, sizeof(table));
  }

  ~small_unordered_map()
  {
    if (unlikely(large_elems)) {
      delete large_elems;
      return;
    }
    for (size_t i = 0; i < n; i++)
      small_elems[i].destroy();
  }

  small_unordered_map(const small_unordered_map &other)
    : n(0), large_elems(0)
  {
    NDB_MEMSET(&table[0], 0, sizeof(table));
    assignFrom(other);
  }

  small_unordered_map &
  operator=(const small_unordered_map &other)
  {
    // guard against self-assignment; assignFrom() does not
    if (unlikely(this == &other))
      return *this;
    assignFrom(other);
    return *this;
  }
private:
  bucket *
  find_bucket(const key_type &k, size_t *hash_value)
  {
    INVARIANT(!large_elems);
    const size_t h = Hash()(k);
    if (hash_value)
      *hash_value = h;
    const size_t i = h % TableSize;
    bucket *b = table[i];
    while (b) {
      const bool check_hash = private_::is_eq_expensive<key_type>::value;
      if ((!check_hash || b->h == h) && b->ref().first == k)
        return b;
      b = b->bnext;
    }
    return 0;
  }

  inline ALWAYS_INLINE const bucket *
  find_bucket(const key_type &k, size_t *hash_value) const
  {
    return const_cast<small_unordered_map *>(this)->find_bucket(k, hash_value);
  }
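  // Note on find_bucket's check_hash fast path: for expensive-to-compare key
  // types (the is_eq_expensive default), the cached hash is compared first,
  // rejecting most non-matching chain entries with a single integer compare;
  // for integer keys the equality test is itself one instruction, so the
  // extra hash compare would only add work.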
public:
  // XXX(stephentu): template away this stuff

  mapped_type &
  operator[](const key_type &k)
  {
    if (unlikely(large_elems))
      return large_elems->operator[](k);
    size_t h;
    bucket *b = find_bucket(k, &h);
    if (b)
      return b->ref().second;
    if (unlikely(n == SmallSize)) {
      // inline array is full: spill every entry into a freshly allocated
      // large table and service this (and all future) requests there
      large_elems = new large_table_type;
      for (size_t idx = 0; idx < SmallSize; idx++) {
        bucket &b = small_elems[idx];
#ifdef GCC_AT_LEAST_47
        large_elems->emplace(std::move(b.ref()));
#else
        large_elems->operator[](std::move(b.ref().first)) = std::move(b.ref().second);
#endif
        b.destroy();
      }
      n = 0;
      return large_elems->operator[](k);
    }
    INVARIANT(n < SmallSize);
    b = &small_elems[n++];
    b->construct(h, k, mapped_type());
    const size_t i = h % TableSize;
    b->bnext = table[i];
    table[i] = b;
    return b->ref().second;
  }
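  // Migration behavior of operator[], illustrated (sketch):
  //
  //   small_unordered_map<int, int, 4> m;
  //   for (int i = 0; i < 4; i++)
  //     m[i] = i;        // all four entries live in the inline array
  //   m[4] = 4;          // fifth key: allocates the large table, moves the
  //                      // four inline entries into it, then inserts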
  mapped_type &
  operator[](key_type &&k)
  {
    if (unlikely(large_elems))
      return large_elems->operator[](std::move(k));
    size_t h;
    bucket *b = find_bucket(k, &h);
    if (b)
      return b->ref().second;
    if (unlikely(n == SmallSize)) {
      large_elems = new large_table_type;
      for (size_t idx = 0; idx < SmallSize; idx++) {
        bucket &b = small_elems[idx];
#ifdef GCC_AT_LEAST_47
        large_elems->emplace(std::move(b.ref()));
#else
        large_elems->operator[](std::move(b.ref().first)) = std::move(b.ref().second);
#endif
        b.destroy();
      }
      n = 0;
      return large_elems->operator[](std::move(k));
    }
    INVARIANT(n < SmallSize);
    b = &small_elems[n++];
    b->construct(h, std::move(k), mapped_type());
    const size_t i = h % TableSize;
    b->bnext = table[i];
    table[i] = b;
    return b->ref().second;
  }
  inline size_t
  size() const
  {
    if (unlikely(large_elems))
      return large_elems->size();
    return n;
  }
  inline iterator
  begin()
  {
    if (unlikely(large_elems))
      return iterator(large_elems->begin());
    return iterator(&small_elems[0]);
  }

  inline const_iterator
  begin() const
  {
    if (unlikely(large_elems))
      return const_iterator(large_elems->begin());
    return const_iterator(&small_elems[0]);
  }

  inline iterator
  end()
  {
    if (unlikely(large_elems))
      return iterator(large_elems->end());
    return iterator(&small_elems[n]);
  }

  inline const_iterator
  end() const
  {
    if (unlikely(large_elems))
      return const_iterator(large_elems->end());
    return const_iterator(&small_elems[n]);
  }
  inline iterator
  find(const key_type &k)
  {
    if (unlikely(large_elems))
      return iterator(large_elems->find(k));
    bucket * const b = find_bucket(k, 0);
    if (b)
      return iterator(b);
    return end();
  }

  inline const_iterator
  find(const key_type &k) const
  {
    if (unlikely(large_elems))
      return const_iterator(large_elems->find(k));
    const bucket * const b = find_bucket(k, 0);
    if (b)
      return const_iterator(b);
    return end();
  }
  inline void
  clear()
  {
    if (unlikely(large_elems)) {
      delete large_elems;
      large_elems = 0;
      return;
    }
    NDB_MEMSET(&table[0], 0, sizeof(table));
    for (size_t i = 0; i < n; i++)
      small_elems[i].destroy();
    n = 0;
  }

  // non-standard API
  inline bool is_small_type() const { return !large_elems; }
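  // is_small_type() exposes which representation is active, e.g. for a
  // caller validating that SmallSize is large enough for its workload
  // (illustrative sketch):
  //
  //   small_unordered_map<int, int, 16> m;
  //   // ... workload ...
  //   INVARIANT(m.is_small_type()); // never spilled to the heap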
private:
  // doesn't check for self-assignment
  inline void
  assignFrom(const small_unordered_map &that)
  {
    clear();
    if (that.large_elems) {
      large_elems = new large_table_type(*that.large_elems);
      return;
    }
    INVARIANT(!large_elems);
    for (size_t i = 0; i < that.n; i++) {
      bucket * const b = &small_elems[n++];
      const bucket * const that_b = &that.small_elems[i];
      b->construct(that_b->h, that_b->ref().first, that_b->ref().second);
      const size_t idx = b->h % TableSize;
      b->bnext = table[idx];
      table[idx] = b;
    }
  }
  size_t n;
  bucket small_elems[SmallSize];
  bucket *table[TableSize];

  large_table_type *large_elems;
};
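// Footprint note: both the inline bucket array and the chain-head table are
// embedded in the object itself, so the size of a small_unordered_map grows
// with SmallSize; roughly (ignoring padding):
//
//   SmallSize * sizeof(bucket)                              // small_elems
//   + round_up_to_pow2_const(SmallSize) * sizeof(bucket *)  // table
//   + sizeof(size_t) + sizeof(large_table_type *)           // n, large_elems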
#endif /* _SMALL_UNORDERED_MAP_H_ */