/*
 * Copyright 2013 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef FOLLY_EXPERIMENTAL_BITS_H_
#define FOLLY_EXPERIMENTAL_BITS_H_

#include <cassert>
#include <cstddef>
#include <limits>
#include <type_traits>

#include "folly/Bits.h"
#include "folly/Range.h"
// As a general rule, bit operations work on unsigned values only;
// right-shift is arithmetic for signed values, and that can lead to
// unpleasant bugs.
36 * Helper class to make Bits<T> (below) work with both aligned values
37 * (T, where T is an unsigned integral type) or unaligned values
38 * (Unaligned<T>, where T is an unsigned integral type)
40 template <class T, class Enable=void> struct BitsTraits;
42 // Partial specialization for Unaligned<T>, where T is unsigned integral
43 // loadRMW is the same as load, but it indicates that it loads for a
44 // read-modify-write operation (we write back the bits we won't change);
45 // silence the GCC warning in that case.
47 struct BitsTraits<Unaligned<T>, typename std::enable_if<
48 (std::is_integral<T>::value && std::is_unsigned<T>::value)>::type> {
49 typedef T UnderlyingType;
50 static T load(const Unaligned<T>& x) { return x.value; }
51 static void store(Unaligned<T>& x, T v) { x.value = v; }
52 static T loadRMW(const Unaligned<T>& x) {
53 #pragma GCC diagnostic push
54 #pragma GCC diagnostic ignored "-Wuninitialized"
55 #pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
57 #pragma GCC diagnostic pop
61 // Partial specialization for T, where T is unsigned integral
63 struct BitsTraits<T, typename std::enable_if<
64 (std::is_integral<T>::value && std::is_unsigned<T>::value)>::type> {
65 typedef T UnderlyingType;
66 static T load(const T& x) { return x; }
67 static void store(T& x, T v) { x = v; }
68 static T loadRMW(const T& x) {
69 #pragma GCC diagnostic push
70 #pragma GCC diagnostic ignored "-Wuninitialized"
71 #pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
73 #pragma GCC diagnostic pop
79 * Wrapper class with static methods for various bit-level operations,
80 * treating an array of T as an array of bits (in little-endian order).
81 * (T is either an unsigned integral type or Unaligned<X>, where X is
82 * an unsigned integral type)
84 template <class T, class Traits=detail::BitsTraits<T>>
86 typedef typename Traits::UnderlyingType UnderlyingType;
88 static_assert(sizeof(T) == sizeof(UnderlyingType), "Size mismatch");
91 * Number of bits in a block.
93 static constexpr size_t bitsPerBlock =
94 std::numeric_limits<UnderlyingType>::digits;
97 * Byte index of the given bit.
99 static constexpr size_t blockIndex(size_t bit) {
100 return bit / bitsPerBlock;
104 * Offset in block of the given bit.
106 static constexpr size_t bitOffset(size_t bit) {
107 return bit % bitsPerBlock;
111 * Number of blocks used by the given number of bits.
113 static constexpr size_t blockCount(size_t nbits) {
114 return nbits / bitsPerBlock + (nbits % bitsPerBlock != 0);
120 static void set(T* p, size_t bit);
123 * Clear the given bit.
125 static void clear(T* p, size_t bit);
128 * Test the given bit.
130 static bool test(const T* p, size_t bit);
133 * Set count contiguous bits starting at bitStart to the values
134 * from the least significant count bits of value; little endian.
135 * (value & 1 becomes the bit at bitStart, etc)
136 * Precondition: count <= sizeof(T) * 8
138 static void set(T* p, size_t bitStart, size_t count, UnderlyingType value);
141 * Get count contiguous bits starting at bitStart.
142 * Precondition: count <= sizeof(T) * 8
144 static UnderlyingType get(const T* p, size_t bitStart, size_t count);
147 * Count the number of bits set in a range of blocks.
149 static size_t count(const T* begin, const T* end);
152 // Same as set, assumes all bits are in the same block.
153 // (bitStart < sizeof(T) * 8, bitStart + count <= sizeof(T) * 8)
154 static void innerSet(T* p, size_t bitStart, size_t count,
155 UnderlyingType value);
157 // Same as get, assumes all bits are in the same block.
158 // (bitStart < sizeof(T) * 8, bitStart + count <= sizeof(T) * 8)
159 static UnderlyingType innerGet(const T* p, size_t bitStart, size_t count);
161 static constexpr UnderlyingType zero = UnderlyingType(0);
162 static constexpr UnderlyingType one = UnderlyingType(1);
164 static constexpr UnderlyingType ones(size_t count) {
165 return count < bitsPerBlock ? (one << count) - 1 : ~zero;
169 // gcc 4.8 needs more -Wmaybe-uninitialized tickling, as it propagates the
170 // taint upstream from loadRMW
172 #pragma GCC diagnostic push
173 #pragma GCC diagnostic ignored "-Wuninitialized"
174 #pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
176 template <class T, class Traits>
177 inline void Bits<T, Traits>::set(T* p, size_t bit) {
178 T& block = p[blockIndex(bit)];
179 Traits::store(block, Traits::loadRMW(block) | (one << bitOffset(bit)));
182 template <class T, class Traits>
183 inline void Bits<T, Traits>::clear(T* p, size_t bit) {
184 T& block = p[blockIndex(bit)];
185 Traits::store(block, Traits::loadRMW(block) & ~(one << bitOffset(bit)));
188 template <class T, class Traits>
189 inline void Bits<T, Traits>::set(T* p, size_t bitStart, size_t count,
190 UnderlyingType value) {
191 assert(count <= sizeof(UnderlyingType) * 8);
192 size_t idx = blockIndex(bitStart);
193 size_t offset = bitOffset(bitStart);
194 if (offset + count <= bitsPerBlock) {
195 innerSet(p + idx, offset, count, value);
197 size_t countInThisBlock = bitsPerBlock - offset;
198 size_t countInNextBlock = count - countInThisBlock;
199 innerSet(p + idx, offset, countInThisBlock,
200 value & ((one << countInThisBlock) - 1));
201 innerSet(p + idx + 1, 0, countInNextBlock, value >> countInThisBlock);
205 template <class T, class Traits>
206 inline void Bits<T, Traits>::innerSet(T* p, size_t offset, size_t count,
207 UnderlyingType value) {
208 // Mask out bits and set new value
209 UnderlyingType v = Traits::loadRMW(*p);
210 v &= ~(ones(count) << offset);
211 v |= (value << offset);
212 Traits::store(*p, v);
215 #pragma GCC diagnostic pop
217 template <class T, class Traits>
218 inline bool Bits<T, Traits>::test(const T* p, size_t bit) {
219 return Traits::load(p[blockIndex(bit)]) & (one << bitOffset(bit));
222 template <class T, class Traits>
223 inline auto Bits<T, Traits>::get(const T* p, size_t bitStart, size_t count)
225 assert(count <= sizeof(UnderlyingType) * 8);
226 size_t idx = blockIndex(bitStart);
227 size_t offset = bitOffset(bitStart);
228 if (offset + count <= bitsPerBlock) {
229 return innerGet(p + idx, offset, count);
231 size_t countInThisBlock = bitsPerBlock - offset;
232 size_t countInNextBlock = count - countInThisBlock;
233 UnderlyingType thisBlockValue = innerGet(p + idx, offset, countInThisBlock);
234 UnderlyingType nextBlockValue = innerGet(p + idx + 1, 0, countInNextBlock);
235 return (nextBlockValue << countInThisBlock) | thisBlockValue;
239 template <class T, class Traits>
240 inline auto Bits<T, Traits>::innerGet(const T* p, size_t offset, size_t count)
242 return (Traits::load(*p) >> offset) & ones(count);
245 template <class T, class Traits>
246 inline size_t Bits<T, Traits>::count(const T* begin, const T* end) {
248 for (; begin != end; ++begin) {
249 n += popcount(Traits::load(*begin));
256 #endif /* FOLLY_EXPERIMENTAL_BITS_H_ */