 * Copyright 2013 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
#ifndef FOLLY_EXPERIMENTAL_BITS_H_
#define FOLLY_EXPERIMENTAL_BITS_H_

#include <cassert>
#include <cstddef>
#include <limits>
#include <type_traits>

#include "folly/Bits.h"
#include "folly/Portability.h"
#include "folly/Range.h"
// As a general rule, bit operations work on unsigned values only;
// right-shift is arithmetic for signed values, and that can lead to
// unpleasant bugs.
/**
 * Helper class to make Bits<T> (below) work with both aligned values
 * (T, where T is an unsigned integral type) or unaligned values
 * (Unaligned<T>, where T is an unsigned integral type).
 *
 * Primary template is only declared; the two partial specializations
 * below provide the actual implementations.
 */
template <class T, class Enable = void> struct BitsTraits;
43 // Partial specialization for Unaligned<T>, where T is unsigned integral
44 // loadRMW is the same as load, but it indicates that it loads for a
45 // read-modify-write operation (we write back the bits we won't change);
46 // silence the GCC warning in that case.
48 struct BitsTraits<Unaligned<T>, typename std::enable_if<
49 (std::is_integral<T>::value && std::is_unsigned<T>::value)>::type> {
50 typedef T UnderlyingType;
51 static T load(const Unaligned<T>& x) { return x.value; }
52 static void store(Unaligned<T>& x, T v) { x.value = v; }
53 static T loadRMW(const Unaligned<T>& x) {
54 #pragma GCC diagnostic push
55 #pragma GCC diagnostic ignored "-Wuninitialized"
56 // make sure we compile without warning on gcc 4.6 with -Wpragmas
57 #if __GNUC_PREREQ(4, 7)
58 #pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
61 #pragma GCC diagnostic pop
65 // Partial specialization for T, where T is unsigned integral
67 struct BitsTraits<T, typename std::enable_if<
68 (std::is_integral<T>::value && std::is_unsigned<T>::value)>::type> {
69 typedef T UnderlyingType;
70 static T load(const T& x) { return x; }
71 static void store(T& x, T v) { x = v; }
72 static T loadRMW(const T& x) {
73 #pragma GCC diagnostic push
74 #pragma GCC diagnostic ignored "-Wuninitialized"
75 #if __GNUC_PREREQ(4, 7)
76 #pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
79 #pragma GCC diagnostic pop
85 * Wrapper class with static methods for various bit-level operations,
86 * treating an array of T as an array of bits (in little-endian order).
87 * (T is either an unsigned integral type or Unaligned<X>, where X is
88 * an unsigned integral type)
90 template <class T, class Traits=detail::BitsTraits<T>>
92 typedef typename Traits::UnderlyingType UnderlyingType;
94 static_assert(sizeof(T) == sizeof(UnderlyingType), "Size mismatch");
97 * Number of bits in a block.
99 static constexpr size_t bitsPerBlock =
100 std::numeric_limits<UnderlyingType>::digits;
103 * Byte index of the given bit.
105 static constexpr size_t blockIndex(size_t bit) {
106 return bit / bitsPerBlock;
110 * Offset in block of the given bit.
112 static constexpr size_t bitOffset(size_t bit) {
113 return bit % bitsPerBlock;
117 * Number of blocks used by the given number of bits.
119 static constexpr size_t blockCount(size_t nbits) {
120 return nbits / bitsPerBlock + (nbits % bitsPerBlock != 0);
126 static void set(T* p, size_t bit);
129 * Clear the given bit.
131 static void clear(T* p, size_t bit);
134 * Test the given bit.
136 static bool test(const T* p, size_t bit);
139 * Set count contiguous bits starting at bitStart to the values
140 * from the least significant count bits of value; little endian.
141 * (value & 1 becomes the bit at bitStart, etc)
142 * Precondition: count <= sizeof(T) * 8
144 static void set(T* p, size_t bitStart, size_t count, UnderlyingType value);
147 * Get count contiguous bits starting at bitStart.
148 * Precondition: count <= sizeof(T) * 8
150 static UnderlyingType get(const T* p, size_t bitStart, size_t count);
153 * Count the number of bits set in a range of blocks.
155 static size_t count(const T* begin, const T* end);
158 // Same as set, assumes all bits are in the same block.
159 // (bitStart < sizeof(T) * 8, bitStart + count <= sizeof(T) * 8)
160 static void innerSet(T* p, size_t bitStart, size_t count,
161 UnderlyingType value);
163 // Same as get, assumes all bits are in the same block.
164 // (bitStart < sizeof(T) * 8, bitStart + count <= sizeof(T) * 8)
165 static UnderlyingType innerGet(const T* p, size_t bitStart, size_t count);
167 static constexpr UnderlyingType zero = UnderlyingType(0);
168 static constexpr UnderlyingType one = UnderlyingType(1);
170 static constexpr UnderlyingType ones(size_t count) {
171 return count < bitsPerBlock ? (one << count) - 1 : ~zero;
175 // gcc 4.8 needs more -Wmaybe-uninitialized tickling, as it propagates the
176 // taint upstream from loadRMW
178 #pragma GCC diagnostic push
179 #pragma GCC diagnostic ignored "-Wuninitialized"
180 #if __GNUC_PREREQ(4, 7)
181 #pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
184 template <class T, class Traits>
185 inline void Bits<T, Traits>::set(T* p, size_t bit) {
186 T& block = p[blockIndex(bit)];
187 Traits::store(block, Traits::loadRMW(block) | (one << bitOffset(bit)));
190 template <class T, class Traits>
191 inline void Bits<T, Traits>::clear(T* p, size_t bit) {
192 T& block = p[blockIndex(bit)];
193 Traits::store(block, Traits::loadRMW(block) & ~(one << bitOffset(bit)));
196 template <class T, class Traits>
197 inline void Bits<T, Traits>::set(T* p, size_t bitStart, size_t count,
198 UnderlyingType value) {
199 assert(count <= sizeof(UnderlyingType) * 8);
200 size_t idx = blockIndex(bitStart);
201 size_t offset = bitOffset(bitStart);
202 if (offset + count <= bitsPerBlock) {
203 innerSet(p + idx, offset, count, value);
205 size_t countInThisBlock = bitsPerBlock - offset;
206 size_t countInNextBlock = count - countInThisBlock;
207 innerSet(p + idx, offset, countInThisBlock,
208 value & ((one << countInThisBlock) - 1));
209 innerSet(p + idx + 1, 0, countInNextBlock, value >> countInThisBlock);
213 template <class T, class Traits>
214 inline void Bits<T, Traits>::innerSet(T* p, size_t offset, size_t count,
215 UnderlyingType value) {
216 // Mask out bits and set new value
217 UnderlyingType v = Traits::loadRMW(*p);
218 v &= ~(ones(count) << offset);
219 v |= (value << offset);
220 Traits::store(*p, v);
223 #pragma GCC diagnostic pop
225 template <class T, class Traits>
226 inline bool Bits<T, Traits>::test(const T* p, size_t bit) {
227 return Traits::load(p[blockIndex(bit)]) & (one << bitOffset(bit));
230 template <class T, class Traits>
231 inline auto Bits<T, Traits>::get(const T* p, size_t bitStart, size_t count)
233 assert(count <= sizeof(UnderlyingType) * 8);
234 size_t idx = blockIndex(bitStart);
235 size_t offset = bitOffset(bitStart);
236 if (offset + count <= bitsPerBlock) {
237 return innerGet(p + idx, offset, count);
239 size_t countInThisBlock = bitsPerBlock - offset;
240 size_t countInNextBlock = count - countInThisBlock;
241 UnderlyingType thisBlockValue = innerGet(p + idx, offset, countInThisBlock);
242 UnderlyingType nextBlockValue = innerGet(p + idx + 1, 0, countInNextBlock);
243 return (nextBlockValue << countInThisBlock) | thisBlockValue;
247 template <class T, class Traits>
248 inline auto Bits<T, Traits>::innerGet(const T* p, size_t offset, size_t count)
250 return (Traits::load(*p) >> offset) & ones(count);
253 template <class T, class Traits>
254 inline size_t Bits<T, Traits>::count(const T* begin, const T* end) {
256 for (; begin != end; ++begin) {
257 n += popcount(Traits::load(*begin));
264 #endif /* FOLLY_EXPERIMENTAL_BITS_H_ */