/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * Various low-level, bit-manipulation routines.
 *
 * findFirstSet(x)  [constexpr]
 *    find first (least significant) bit set in a value of an integral type,
 *    1-based (like ffs()).  0 = no bits are set (x == 0)
 *
 * findLastSet(x)  [constexpr]
 *    find last (most significant) bit set in a value of an integral type,
 *    1-based.  0 = no bits are set (x == 0)
 *    for x != 0, findLastSet(x) == 1 + floor(log2(x))
 *
 * nextPowTwo(x)  [constexpr]
 *    Finds the next power of two >= x.
 *
 * isPowTwo(x)  [constexpr]
 *    return true iff x is a power of two
 *
 * popcount(x)
 *    return the number of 1 bits in x
 *
 * Endian
 *    convert between native, big, and little endian representation
 *    Endian::big(x)      big <-> native
 *    Endian::little(x)   little <-> native
 *    Endian::swap(x)     big <-> little
 *
 * BitIterator
 *    Wrapper around an iterator over an integral type that iterates
 *    over its underlying bits in LSb to MSb order
 *
 * findFirstSet(BitIterator begin, BitIterator end)
 *    return a BitIterator pointing to the first 1 bit in [begin, end), or
 *    end if all bits in [begin, end) are 0
 *
 * @author Tudor Bosman (tudorb@fb.com)
 */
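// A few illustrative values (sketch only, not part of the documented
// contract), assuming unsigned arguments:
//   findFirstSet(0u) == 0      findFirstSet(0x18u) == 4
//   findLastSet(0u)  == 0      findLastSet(0x18u)  == 5
//   nextPowTwo(5u)   == 8      isPowTwo(12u)       == false
//   popcount(0xF0u)  == 4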
#pragma once

#if !defined(__clang__) && !(defined(_MSC_VER) && (_MSC_VER < 1900))
#define FOLLY_INTRINSIC_CONSTEXPR constexpr
#else
// GCC and MSVC 2015+ are the only compilers with
// intrinsics constexpr.
#define FOLLY_INTRINSIC_CONSTEXPR const
#endif
#include <folly/Portability.h>
#include <folly/portability/Builtins.h>

#include <folly/Assume.h>
#include <folly/detail/BitsDetail.h>
#include <folly/detail/BitIteratorDetail.h>
#include <folly/Likely.h>

#if FOLLY_HAVE_BYTESWAP_H
# include <byteswap.h>
#endif

#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>
#include <type_traits>

#include <boost/iterator/iterator_adaptor.hpp>
namespace folly {

// Generate overloads for findFirstSet as wrappers around
// appropriate ffs, ffsl, ffsll gcc builtins
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) <= sizeof(unsigned int)),
  unsigned int>::type
  findFirstSet(T x) {
  return __builtin_ffs(x);
}
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) > sizeof(unsigned int) &&
   sizeof(T) <= sizeof(unsigned long)),
  unsigned int>::type
  findFirstSet(T x) {
  return __builtin_ffsl(x);
}
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) > sizeof(unsigned long) &&
   sizeof(T) <= sizeof(unsigned long long)),
  unsigned int>::type
  findFirstSet(T x) {
  return __builtin_ffsll(x);
}
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value && std::is_signed<T>::value),
  unsigned int>::type
  findFirstSet(T x) {
  // Note that conversion from a signed type to the corresponding unsigned
  // type is technically implementation-defined, but will likely work
  // on any implementation that uses two's complement.
  return findFirstSet(static_cast<typename std::make_unsigned<T>::type>(x));
}
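// For example, on a two's-complement platform findFirstSet(-2) converts the
// argument to 0xFF...FE, whose lowest set bit is bit 1, so the result is 2.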
// findLastSet: return the 1-based index of the highest bit set
// for x > 0, findLastSet(x) == 1 + floor(log2(x))
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) <= sizeof(unsigned int)),
  unsigned int>::type
  findLastSet(T x) {
  // If X is a power of two and 0 <= Y < X, then X - Y = ((X - 1) ^ Y) + 1.
  // This transformation allows GCC to remove the xor it adds to implement
  // clz using bsr.
  return x ? ((8 * sizeof(unsigned int) - 1) ^ __builtin_clz(x)) + 1 : 0;
}
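// For a 32-bit unsigned int this computes (31 ^ __builtin_clz(x)) + 1, which
// equals 32 - __builtin_clz(x); e.g. x == 1 gives (31 ^ 31) + 1 == 1, and
// x == 0x80000000 gives (31 ^ 0) + 1 == 32.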
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) > sizeof(unsigned int) &&
   sizeof(T) <= sizeof(unsigned long)),
  unsigned int>::type
  findLastSet(T x) {
  return x ? ((8 * sizeof(unsigned long) - 1) ^ __builtin_clzl(x)) + 1 : 0;
}
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) > sizeof(unsigned long) &&
   sizeof(T) <= sizeof(unsigned long long)),
  unsigned int>::type
  findLastSet(T x) {
  return x ? ((8 * sizeof(unsigned long long) - 1) ^ __builtin_clzll(x)) + 1
           : 0;
}
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_signed<T>::value),
  unsigned int>::type
  findLastSet(T x) {
  return findLastSet(static_cast<typename std::make_unsigned<T>::type>(x));
}
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR
typename std::enable_if<
  std::is_integral<T>::value && std::is_unsigned<T>::value,
  T>::type
nextPowTwo(T v) {
  return v ? (T(1) << findLastSet(v - 1)) : 1;
}
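// A quick sanity check: nextPowTwo(0u) == 1, nextPowTwo(5u) == 8 and
// nextPowTwo(8u) == 8. findLastSet(v - 1) is the number of bits needed to
// represent v - 1, so shifting 1 by that amount gives the smallest power of
// two >= v.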
template <class T>
inline FOLLY_INTRINSIC_CONSTEXPR typename std::
    enable_if<std::is_integral<T>::value && std::is_unsigned<T>::value, T>::type
    prevPowTwo(T v) {
  return v ? (T(1) << (findLastSet(v) - 1)) : 0;
}

template <class T>
inline constexpr typename std::enable_if<
  std::is_integral<T>::value && std::is_unsigned<T>::value,
  bool>::type
isPowTwo(T v) {
  return (v != 0) && !(v & (v - 1));
}
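// Similarly, prevPowTwo(5u) == 4 and prevPowTwo(8u) == 8, while isPowTwo
// relies on the classic trick that v & (v - 1) clears the lowest set bit, so
// the result is 0 exactly when v has a single bit set:
//   isPowTwo(8u) == true, isPowTwo(12u) == false, isPowTwo(0u) == false.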
// popcount: return the number of 1 bits in x
template <class T>
inline typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) <= sizeof(unsigned int)),
  size_t>::type
  popcount(T x) {
  return detail::popcount(x);
}
template <class T>
inline typename std::enable_if<
  (std::is_integral<T>::value &&
   std::is_unsigned<T>::value &&
   sizeof(T) > sizeof(unsigned int) &&
   sizeof(T) <= sizeof(unsigned long long)),
  size_t>::type
  popcount(T x) {
  return detail::popcountll(x);
}
/**
 * Endianness detection and manipulation primitives.
 */
namespace detail {

template <class T>
struct EndianIntBase {
 public:
  static T swap(T x);
};

#ifndef _MSC_VER
/**
 * If we have the bswap_16 macro from byteswap.h, use it; otherwise, provide
 * our own definition.
 */
#ifdef bswap_16
# define our_bswap16 bswap_16
#else
template<class Int16>
inline constexpr typename std::enable_if<
  sizeof(Int16) == 2,
  Int16>::type
our_bswap16(Int16 x) {
  return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}
#endif
#endif

#define FB_GEN(t, fn) \
template<> inline t EndianIntBase<t>::swap(t x) { return fn(x); }

// fn(x) expands to (x) if the second argument is empty, which is exactly
// what we want for [u]int8_t. Also, gcc 4.7 on Intel doesn't have
// __builtin_bswap16 for some reason, so we have to provide our own.
FB_GEN( int8_t,)
FB_GEN(uint8_t,)
#ifdef _MSC_VER
FB_GEN( int64_t, _byteswap_uint64)
FB_GEN(uint64_t, _byteswap_uint64)
FB_GEN( int32_t, _byteswap_ulong)
FB_GEN(uint32_t, _byteswap_ulong)
FB_GEN( int16_t, _byteswap_ushort)
FB_GEN(uint16_t, _byteswap_ushort)
#else
FB_GEN( int64_t, __builtin_bswap64)
FB_GEN(uint64_t, __builtin_bswap64)
FB_GEN( int32_t, __builtin_bswap32)
FB_GEN(uint32_t, __builtin_bswap32)
FB_GEN( int16_t, our_bswap16)
FB_GEN(uint16_t, our_bswap16)
#endif

#undef FB_GEN

template <class T>
struct EndianInt : public EndianIntBase<T> {
 public:
  static T big(T x) {
    return kIsLittleEndian ? EndianInt::swap(x) : x;
  }
  static T little(T x) {
    return kIsBigEndian ? EndianInt::swap(x) : x;
  }
};

} // namespace detail
// big* convert between native and big-endian representations
// little* convert between native and little-endian representations
// swap* convert between big-endian and little-endian representations
//
// ntohs, htons == big16
// ntohl, htonl == big32
#define FB_GEN1(fn, t, sz) \
  static t fn##sz(t x) { return fn<t>(x); } \

#define FB_GEN2(t, sz) \
  FB_GEN1(swap, t, sz) \
  FB_GEN1(big, t, sz) \
  FB_GEN1(little, t, sz)

#define FB_GEN(sz) \
  FB_GEN2(uint##sz##_t, sz) \
  FB_GEN2(int##sz##_t, sz)

class Endian {
 public:
  enum class Order : uint8_t {
    LITTLE,
    BIG
  };

  static constexpr Order order = kIsLittleEndian ? Order::LITTLE : Order::BIG;

  template <class T> static T swap(T x) {
    return folly::detail::EndianInt<T>::swap(x);
  }
  template <class T> static T big(T x) {
    return folly::detail::EndianInt<T>::big(x);
  }
  template <class T> static T little(T x) {
    return folly::detail::EndianInt<T>::little(x);
  }

#if !defined(__ANDROID__)
  FB_GEN(64)
  FB_GEN(32)
  FB_GEN(16)
  FB_GEN(8)
#endif
};

#undef FB_GEN
#undef FB_GEN2
#undef FB_GEN1
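// Usage sketch: converting a 32-bit length field to/from network (big-endian)
// byte order. Endian::big() byte-swaps on little-endian hosts and is a no-op
// on big-endian hosts, so the same call converts in either direction:
//   uint32_t wire = Endian::big(hostValue);   // native -> big-endian
//   uint32_t host = Endian::big(wire);        // big-endian -> native
// (Endian::big32(), Endian::little32(), etc. are the fixed-width variants
// generated above, where available.)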
/**
 * Fast bit iteration facility.
 */

template <class BaseIter> class BitIterator;
template <class BaseIter>
BitIterator<BaseIter> findFirstSet(BitIterator<BaseIter>,
                                   BitIterator<BaseIter>);
/**
 * Wrapper around an iterator over an integral type that iterates
 * over its underlying bits in LSb to MSb order.
 *
 * BitIterator models the same iterator concepts as the base iterator.
 */
template <class BaseIter>
class BitIterator
  : public bititerator_detail::BitIteratorBase<BaseIter>::type {
 public:
  /**
   * Return the number of bits in an element of the underlying iterator.
   */
  static unsigned int bitsPerBlock() {
    return std::numeric_limits<
      typename std::make_unsigned<
        typename std::iterator_traits<BaseIter>::value_type
      >::type
    >::digits;
  }

  /**
   * Construct a BitIterator that points at a given bit offset (default 0)
   * in iter.
   */
  explicit BitIterator(const BaseIter& iter, size_t bitOff=0)
    : bititerator_detail::BitIteratorBase<BaseIter>::type(iter),
      bitOffset_(bitOff) {
    assert(bitOffset_ < bitsPerBlock());
  }

  size_t bitOffset() const {
    return bitOffset_;
  }

  void advanceToNextBlock() {
    bitOffset_ = 0;
    ++this->base_reference();
  }

  BitIterator& operator=(const BaseIter& other) {
    this->~BitIterator();
    new (this) BitIterator(other);
    return *this;
  }

 private:
  friend class boost::iterator_core_access;
  friend BitIterator findFirstSet<>(BitIterator, BitIterator);

  typedef bititerator_detail::BitReference<
      typename std::iterator_traits<BaseIter>::reference,
      typename std::iterator_traits<BaseIter>::value_type
    > BitRef;

  void advanceInBlock(size_t n) {
    bitOffset_ += n;
    assert(bitOffset_ < bitsPerBlock());
  }

  BitRef dereference() const {
    return BitRef(*this->base_reference(), bitOffset_);
  }

  void advance(ssize_t n) {
    size_t bpb = bitsPerBlock();
    ssize_t blocks = n / bpb;
    bitOffset_ += n % bpb;
    if (bitOffset_ >= bpb) {
      bitOffset_ -= bpb;
      ++blocks;
    }
    this->base_reference() += blocks;
  }

  void increment() {
    if (++bitOffset_ == bitsPerBlock()) {
      advanceToNextBlock();
    }
  }

  void decrement() {
    if (bitOffset_-- == 0) {
      bitOffset_ = bitsPerBlock() - 1;
      --this->base_reference();
    }
  }

  bool equal(const BitIterator& other) const {
    return (bitOffset_ == other.bitOffset_ &&
            this->base_reference() == other.base_reference());
  }

  ssize_t distance_to(const BitIterator& other) const {
    return
      (other.base_reference() - this->base_reference()) * bitsPerBlock() +
      other.bitOffset_ - bitOffset_;
  }

  unsigned int bitOffset_;
};
/**
 * Helper function, so you can write
 * auto bi = makeBitIterator(container.begin());
 */
template <class BaseIter>
BitIterator<BaseIter> makeBitIterator(const BaseIter& iter) {
  return BitIterator<BaseIter>(iter);
}
/**
 * Find first bit set in a range of bit iterators.
 * 4.5x faster than the obvious std::find(begin, end, true);
 */
template <class BaseIter>
BitIterator<BaseIter> findFirstSet(BitIterator<BaseIter> begin,
                                   BitIterator<BaseIter> end) {
  // shortcut to avoid ugly static_cast<>
  static const typename BaseIter::value_type one = 1;

  while (begin.base() != end.base()) {
    typename BaseIter::value_type v = *begin.base();
    // mask out the bits that don't matter (< begin.bitOffset)
    v &= ~((one << begin.bitOffset()) - 1);
    size_t firstSet = findFirstSet(v);
    if (firstSet) {
      --firstSet;  // now it's 0-based
      assert(firstSet >= begin.bitOffset());
      begin.advanceInBlock(firstSet - begin.bitOffset());
      return begin;
    }
    begin.advanceToNextBlock();
  }

  // now begin points to the same block as end
  if (end.bitOffset() != 0) {  // assume end is dereferenceable
    typename BaseIter::value_type v = *begin.base();
    // mask out the bits that don't matter (< begin.bitOffset)
    v &= ~((one << begin.bitOffset()) - 1);
    // mask out the bits that don't matter (>= end.bitOffset)
    v &= (one << end.bitOffset()) - 1;
    size_t firstSet = findFirstSet(v);
    if (firstSet) {
      --firstSet;  // now it's 0-based
      assert(firstSet >= begin.bitOffset());
      begin.advanceInBlock(firstSet - begin.bitOffset());
      return begin;
    }
  }

  return end;
}
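// Usage sketch: scanning a word-packed bitmap for its first set bit.
//   std::vector<uint64_t> bitmap = {0, 0x10};
//   auto it = findFirstSet(makeBitIterator(bitmap.begin()),
//                          makeBitIterator(bitmap.end()));
//   // it equals the end iterator if no bit is set; here it points at bit 4
//   // of the second word (overall bit index 68).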
template <class T, class Enable=void> struct Unaligned;

/**
 * Representation of an unaligned value of a POD type.
 */
FOLLY_PACK_PUSH
template <class T>
struct Unaligned<
    T,
    typename std::enable_if<std::is_pod<T>::value>::type> {
  Unaligned() = default;  // uninitialized
  /* implicit */ Unaligned(T v) : value(v) { }
  T value;
} FOLLY_PACK_ATTR;
FOLLY_PACK_POP

/**
 * Read an unaligned value of type T and return it.
 */
template <class T>
inline T loadUnaligned(const void* p) {
  static_assert(sizeof(Unaligned<T>) == sizeof(T), "Invalid unaligned size");
  static_assert(alignof(Unaligned<T>) == 1, "Invalid alignment");
  if (kHasUnalignedAccess) {
    return static_cast<const Unaligned<T>*>(p)->value;
  } else {
    T value;
    memcpy(&value, p, sizeof(T));
    return value;
  }
}
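// Usage sketch: decoding a little-endian uint32_t from an arbitrary (possibly
// misaligned) offset in a byte buffer, combining loadUnaligned with Endian.
// parseLE32 is an illustrative helper, not part of this header:
//   uint32_t parseLE32(const uint8_t* p) {
//     return Endian::little(loadUnaligned<uint32_t>(p));
//   }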
/**
 * Write an unaligned value of type T.
 */
template <class T>
inline void storeUnaligned(void* p, T value) {
  static_assert(sizeof(Unaligned<T>) == sizeof(T), "Invalid unaligned size");
  static_assert(alignof(Unaligned<T>) == 1, "Invalid alignment");
  if (kHasUnalignedAccess) {
    // Prior to C++14, the spec says that a placement new like this
    // is required to check that p is not nullptr, and to do nothing
    // if p is a nullptr. By assuming it's not a nullptr, we get a
    // nice loud segfault in optimized builds if p is nullptr, rather
    // than just silently doing nothing.
    folly::assume(p != nullptr);
    new (p) Unaligned<T>(value);
  } else {
    memcpy(p, &value, sizeof(T));
  }
}

} // namespace folly