/*
 * Copyright 2013 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #ifndef FOLLY_EXPERIMENTAL_BITS_H_
18 #define FOLLY_EXPERIMENTAL_BITS_H_
#include <cassert>
#include <cstddef>
#include <limits>
#include <type_traits>

#include "folly/Bits.h"
#include "folly/Range.h"
// As a general rule, bit operations work on unsigned values only;
// right-shift is arithmetic for signed values, and that can lead to
// unhelpful bugs.
/**
 * Helper class to make Bits<T> (below) work with both aligned values
 * (T, where T is an unsigned integral type) or unaligned values
 * (Unaligned<T>, where T is an unsigned integral type)
 */
template <class T, class Enable = void>
struct BitsTraits;
42 // Partial specialization for Unaligned<T>, where T is unsigned integral
43 // loadRMW is the same as load, but it indicates that it loads for a
44 // read-modify-write operation (we write back the bits we won't change);
45 // silence the GCC warning in that case.
47 struct BitsTraits<Unaligned<T>, typename std::enable_if<
48 (std::is_integral<T>::value && std::is_unsigned<T>::value)>::type> {
49 typedef T UnderlyingType;
50 static T load(const Unaligned<T>& x) { return x.value; }
51 static void store(Unaligned<T>& x, T v) { x.value = v; }
52 static T loadRMW(const Unaligned<T>& x) {
53 #pragma GCC diagnostic push
54 #pragma GCC diagnostic ignored "-Wuninitialized"
56 #pragma GCC diagnostic pop
60 // Partial specialization for T, where T is unsigned integral
62 struct BitsTraits<T, typename std::enable_if<
63 (std::is_integral<T>::value && std::is_unsigned<T>::value)>::type> {
64 typedef T UnderlyingType;
65 static T load(const T& x) { return x; }
66 static void store(T& x, T v) { x = v; }
67 static T loadRMW(const T& x) {
68 #pragma GCC diagnostic push
69 #pragma GCC diagnostic ignored "-Wuninitialized"
71 #pragma GCC diagnostic pop
77 * Wrapper class with static methods for various bit-level operations,
78 * treating an array of T as an array of bits (in little-endian order).
79 * (T is either an unsigned integral type or Unaligned<X>, where X is
80 * an unsigned integral type)
82 template <class T, class Traits=detail::BitsTraits<T>>
84 typedef typename Traits::UnderlyingType UnderlyingType;
86 static_assert(sizeof(T) == sizeof(UnderlyingType), "Size mismatch");
89 * Number of bits in a block.
91 static constexpr size_t bitsPerBlock =
92 std::numeric_limits<UnderlyingType>::digits;
95 * Byte index of the given bit.
97 static constexpr size_t blockIndex(size_t bit) {
98 return bit / bitsPerBlock;
102 * Offset in block of the given bit.
104 static constexpr size_t bitOffset(size_t bit) {
105 return bit % bitsPerBlock;
109 * Number of blocks used by the given number of bits.
111 static constexpr size_t blockCount(size_t nbits) {
112 return nbits / bitsPerBlock + (nbits % bitsPerBlock != 0);
118 static void set(T* p, size_t bit);
121 * Clear the given bit.
123 static void clear(T* p, size_t bit);
126 * Test the given bit.
128 static bool test(const T* p, size_t bit);
131 * Set count contiguous bits starting at bitStart to the values
132 * from the least significant count bits of value; little endian.
133 * (value & 1 becomes the bit at bitStart, etc)
134 * Precondition: count <= sizeof(T) * 8
136 static void set(T* p, size_t bitStart, size_t count, UnderlyingType value);
139 * Get count contiguous bits starting at bitStart.
140 * Precondition: count <= sizeof(T) * 8
142 static UnderlyingType get(const T* p, size_t bitStart, size_t count);
145 * Count the number of bits set in a range of blocks.
147 static size_t count(const T* begin, const T* end);
150 // Same as set, assumes all bits are in the same block.
151 // (bitStart < sizeof(T) * 8, bitStart + count <= sizeof(T) * 8)
152 static void innerSet(T* p, size_t bitStart, size_t count,
153 UnderlyingType value);
155 // Same as get, assumes all bits are in the same block.
156 // (bitStart < sizeof(T) * 8, bitStart + count <= sizeof(T) * 8)
157 static UnderlyingType innerGet(const T* p, size_t bitStart, size_t count);
159 static constexpr UnderlyingType zero = UnderlyingType(0);
160 static constexpr UnderlyingType one = UnderlyingType(1);
162 static constexpr UnderlyingType ones(size_t count) {
163 return count < bitsPerBlock ? (one << count) - 1 : ~zero;
167 template <class T, class Traits>
168 inline void Bits<T, Traits>::set(T* p, size_t bit) {
169 T& block = p[blockIndex(bit)];
170 Traits::store(block, Traits::loadRMW(block) | (one << bitOffset(bit)));
173 template <class T, class Traits>
174 inline void Bits<T, Traits>::clear(T* p, size_t bit) {
175 T& block = p[blockIndex(bit)];
176 Traits::store(block, Traits::loadRMW(block) & ~(one << bitOffset(bit)));
179 template <class T, class Traits>
180 inline bool Bits<T, Traits>::test(const T* p, size_t bit) {
181 return Traits::load(p[blockIndex(bit)]) & (one << bitOffset(bit));
184 template <class T, class Traits>
185 inline void Bits<T, Traits>::set(T* p, size_t bitStart, size_t count,
186 UnderlyingType value) {
187 assert(count <= sizeof(UnderlyingType) * 8);
188 size_t idx = blockIndex(bitStart);
189 size_t offset = bitOffset(bitStart);
190 if (offset + count <= bitsPerBlock) {
191 innerSet(p + idx, offset, count, value);
193 size_t countInThisBlock = bitsPerBlock - offset;
194 size_t countInNextBlock = count - countInThisBlock;
195 innerSet(p + idx, offset, countInThisBlock,
196 value & ((one << countInThisBlock) - 1));
197 innerSet(p + idx + 1, 0, countInNextBlock, value >> countInThisBlock);
201 template <class T, class Traits>
202 inline auto Bits<T, Traits>::get(const T* p, size_t bitStart, size_t count)
204 assert(count <= sizeof(UnderlyingType) * 8);
205 size_t idx = blockIndex(bitStart);
206 size_t offset = bitOffset(bitStart);
207 if (offset + count <= bitsPerBlock) {
208 return innerGet(p + idx, offset, count);
210 size_t countInThisBlock = bitsPerBlock - offset;
211 size_t countInNextBlock = count - countInThisBlock;
212 UnderlyingType thisBlockValue = innerGet(p + idx, offset, countInThisBlock);
213 UnderlyingType nextBlockValue = innerGet(p + idx + 1, 0, countInNextBlock);
214 return (nextBlockValue << countInThisBlock) | thisBlockValue;
218 template <class T, class Traits>
219 inline void Bits<T, Traits>::innerSet(T* p, size_t offset, size_t count,
220 UnderlyingType value) {
221 // Mask out bits and set new value
222 UnderlyingType v = Traits::loadRMW(*p);
223 v &= ~(ones(count) << offset);
224 v |= (value << offset);
225 Traits::store(*p, v);
228 template <class T, class Traits>
229 inline auto Bits<T, Traits>::innerGet(const T* p, size_t offset, size_t count)
231 return (Traits::load(*p) >> offset) & ones(count);
234 template <class T, class Traits>
235 inline size_t Bits<T, Traits>::count(const T* begin, const T* end) {
237 for (; begin != end; ++begin) {
238 n += popcount(Traits::load(*begin));
245 #endif /* FOLLY_EXPERIMENTAL_BITS_H_ */