/*
 * Copyright 2017 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @author Philip Pronin (philipp@fb.com)
 *
 * Based on the paper by Sebastiano Vigna,
 * "Quasi-succinct indices" (arxiv:1206.4300).
 */
29 #include <type_traits>
31 #include <folly/Bits.h>
32 #include <folly/Likely.h>
33 #include <folly/Portability.h>
34 #include <folly/Range.h>
35 #include <folly/experimental/CodingDetail.h>
36 #include <folly/experimental/Instructions.h>
37 #include <folly/experimental/Select64.h>
38 #include <folly/lang/Assume.h>
39 #include <glog/logging.h>
42 #error EliasFanoCoding.h requires x86_64
45 namespace folly { namespace compression {
// The codec loads/stores unaligned 64-bit words (see writeBits56 and
// readLowerPart below), so little-endian byte order is required.
47 static_assert(kIsLittleEndian, "EliasFanoCoding.h requires little endianness");
// Prefetch granularity used by prepareSkipTo() in the reader classes below.
// NOTE(review): assumed to match the target CPU's cache line — confirm for
// non-x86 ports (the #error above already restricts this header to x86_64).
49 constexpr size_t kCacheLineSize = 64;
// Non-owning view over an Elias-Fano encoded list. Templated on the pointer
// type so the same layout can be exposed as read-only (const uint8_t*) or
// mutable (uint8_t*); see the typedefs following the struct.
//
// NOTE(review): this extract is missing several original source lines (each
// retained line still carries its original line number). In particular the
// ':' that opens the converting constructor's initializer list and the
// initializers for members other than the ones visible here are not shown —
// do not assume the struct is complete as displayed.
51 template <class Pointer>
52 struct EliasFanoCompressedListBase {
53 EliasFanoCompressedListBase() = default;
// Converting constructor: lets a mutable view decay to a const view (or
// vice versa) by reinterpreting each section pointer.
55 template <class OtherPointer>
56 EliasFanoCompressedListBase(
57 const EliasFanoCompressedListBase<OtherPointer>& other)
59 numLowerBits(other.numLowerBits),
61 skipPointers(reinterpret_cast<Pointer>(other.skipPointers)),
62 forwardPointers(reinterpret_cast<Pointer>(other.forwardPointers)),
63 lower(reinterpret_cast<Pointer>(other.lower)),
64 upper(reinterpret_cast<Pointer>(other.upper)) { }
// Releases the malloc'd buffer backing `data`. SFINAE on ::free(T(nullptr))
// makes this member callable only when Pointer is a mutable pointer type.
66 template <class T = Pointer>
67 auto free() -> decltype(::free(T(nullptr))) {
68 return ::free(data.data());
// Number of bytes occupied by the upper-bits section (it is laid out last,
// so it runs from `upper` to the end of `data`).
71 size_t upperSize() const {
72 return size_t(data.end() - upper);
// Number of bits used per element for the lower-bits section.
76 uint8_t numLowerBits = 0;
78 // WARNING: EliasFanoCompressedList has no ownership of data. The 7
79 // bytes following the last byte should be readable.
80 folly::Range<Pointer> data;
// Section pointers into `data`, in serialized order:
// skip pointers, forward pointers, lower bits, upper bits.
82 Pointer skipPointers = nullptr;
83 Pointer forwardPointers = nullptr;
84 Pointer lower = nullptr;
85 Pointer upper = nullptr;
// Read-only and mutable aliases of the view above.
88 typedef EliasFanoCompressedListBase<const uint8_t*> EliasFanoCompressedList;
89 typedef EliasFanoCompressedListBase<uint8_t*> MutableEliasFanoCompressedList;
// Elias-Fano encoder. Values must be added in non-decreasing order (add()
// CHECKs this). kSkipQuantum / kForwardQuantum enable auxiliary pointer
// tables that accelerate skipTo() / skip() in the reader; 0 disables them.
//
// NOTE(review): the opening of this template's parameter list (the
// `template <class Value, ...` line) and many body lines are missing from
// this extract — retained lines keep their original line numbers.
93 class SkipValue = size_t,
94 size_t kSkipQuantum = 0, // 0 = disabled
95 size_t kForwardQuantum = 0> // 0 = disabled
96 struct EliasFanoEncoderV2 {
97 static_assert(std::is_integral<Value>::value &&
98 std::is_unsigned<Value>::value,
99 "Value should be unsigned integral");
101 typedef EliasFanoCompressedList CompressedList;
102 typedef MutableEliasFanoCompressedList MutableCompressedList;
104 typedef Value ValueType;
105 typedef SkipValue SkipValueType;
108 static constexpr size_t skipQuantum = kSkipQuantum;
109 static constexpr size_t forwardQuantum = kForwardQuantum;
// Picks the number of lower bits per element: approximately
// floor(log2(upperBound / size)), computed without a division by using
// the difference of the two findLastSet results and then correcting the
// possible off-by-one (see the inline derivation below).
111 static uint8_t defaultNumLowerBits(size_t upperBound, size_t size) {
112 if (UNLIKELY(size == 0 || upperBound < size)) {
115 // Result that should be returned is "floor(log(upperBound / size))".
116 // In order to avoid expensive division, we rely on
117 // "floor(a) - floor(b) - 1 <= floor(a - b) <= floor(a) - floor(b)".
118 // Assuming "candidate = floor(log(upperBound)) - floor(log(upperBound))",
119 // then result is either "candidate - 1" or "candidate".
120 auto candidate = folly::findLastSet(upperBound) - folly::findLastSet(size);
121 // NOTE: As size != 0, "candidate" is always < 64.
122 return (size > (upperBound >> candidate)) ? candidate - 1 : candidate;
125 // Requires: input range (begin, end) is sorted (encoding
126 // crashes if it's not).
127 // WARNING: encode() mallocates EliasFanoCompressedList::data. As
128 // EliasFanoCompressedList has no ownership of it, you need to call
129 // free() explicitly.
130 template <class RandomAccessIterator>
131 static MutableCompressedList encode(RandomAccessIterator begin,
132 RandomAccessIterator end) {
134 return MutableCompressedList();
// *(end - 1) is the largest value (range is sorted) and serves as the
// universe upper bound for layout computation.
136 EliasFanoEncoderV2 encoder(size_t(end - begin), *(end - 1));
137 for (; begin != end; ++begin) {
140 return encoder.finish();
// Constructs an encoder writing into a pre-allocated layout; zeroes the
// whole buffer first so the bit-OR writes below start from a clean slate.
143 explicit EliasFanoEncoderV2(const MutableCompressedList& result)
144 : lower_(result.lower),
145 upper_(result.upper),
146 skipPointers_(reinterpret_cast<SkipValueType*>(
147 result.skipPointers)),
148 forwardPointers_(reinterpret_cast<SkipValueType*>(
149 result.forwardPointers)),
151 std::fill(result.data.begin(), result.data.end(), '\0');
// Convenience constructor: computes the layout from (size, upperBound)
// and mallocs the backing buffer (caller must free(), see encode()).
154 EliasFanoEncoderV2(size_t size, ValueType upperBound)
155 : EliasFanoEncoderV2(
156 Layout::fromUpperBoundAndSize(upperBound, size).allocList()) { }
// Appends one value. Must be >= the previously added value.
158 void add(ValueType value) {
159 CHECK_LT(value, std::numeric_limits<ValueType>::max());
160 CHECK_GE(value, lastValue_);
162 const auto numLowerBits = result_.numLowerBits;
163 const ValueType upperBits = value >> numLowerBits;
165 // Upper sequence consists of upperBits 0-bits and (size_ + 1) 1-bits.
166 const size_t pos = upperBits + size_;
167 upper_[pos / 8] |= 1U << (pos % 8);
168 // Append numLowerBits bits to lower sequence.
169 if (numLowerBits != 0) {
170 const ValueType lowerBits = value & ((ValueType(1) << numLowerBits) - 1);
171 writeBits56(lower_, size_ * numLowerBits, numLowerBits, lowerBits);
// Emit a skip pointer for every skipQuantum-th 0-bit crossed.
174 /* static */ if (skipQuantum != 0) {
175 while ((skipPointersSize_ + 1) * skipQuantum <= upperBits) {
176 // Store the number of preceding 1-bits.
177 skipPointers_[skipPointersSize_++] = SkipValue(size_);
// Emit a forward pointer for every forwardQuantum-th 1-bit written.
181 /* static */ if (forwardQuantum != 0) {
182 if ((size_ + 1) % forwardQuantum == 0) {
183 const auto k = size_ / forwardQuantum;
184 // Store the number of preceding 0-bits.
185 forwardPointers_[k] = upperBits;
// Finalizes and returns the encoded list; CHECKs that exactly the
// promised number of values was added.
193 const MutableCompressedList& finish() const {
194 CHECK_EQ(size_, result_.size);
199 // Writes value (with len up to 56 bits) to data starting at pos-th bit.
200 static void writeBits56(unsigned char* data, size_t pos,
201 uint8_t len, uint64_t value) {
202 DCHECK_LE(uint32_t(len), 56);
203 DCHECK_EQ(0, value & ~((uint64_t(1) << len) - 1));
204 unsigned char* const ptr = data + (pos / 8);
// Read-modify-write a little-endian 64-bit word; relies on the 7 extra
// readable bytes past the buffer end (see allocList's WARNING) and on
// the buffer being pre-zeroed, since bits are only OR-ed in.
205 uint64_t ptrv = folly::loadUnaligned<uint64_t>(ptr);
206 ptrv |= value << (pos % 8);
207 folly::storeUnaligned<uint64_t>(ptr, ptrv);
// Write cursors into the output sections.
210 unsigned char* lower_ = nullptr;
211 unsigned char* upper_ = nullptr;
212 SkipValueType* skipPointers_ = nullptr;
213 SkipValueType* forwardPointers_ = nullptr;
// Last value passed to add(), for the monotonicity CHECK.
215 ValueType lastValue_ = 0;
// Number of skip pointers written so far.
217 size_t skipPointersSize_ = 0;
219 MutableCompressedList result_;
// Layout: computes section sizes and offsets of a serialized Elias-Fano
// list, and can allocate/open a buffer laid out accordingly.
//
// NOTE(review): the template header of this out-of-line definition (and a
// number of body lines) are missing from this extract; retained lines keep
// their original line numbers.
226 size_t kForwardQuantum>
227 struct EliasFanoEncoderV2<Value,
230 kForwardQuantum>::Layout {
// Derives the layout from the universe upper bound and element count.
231 static Layout fromUpperBoundAndSize(size_t upperBound, size_t size) {
232 // numLowerBits can be at most 56 because of detail::writeBits56.
233 const uint8_t numLowerBits = std::min(defaultNumLowerBits(upperBound,
237 // Upper bits are stored using unary delta encoding.
238 // For example, (3 5 5 9) will be encoded as 1000011001000_2.
239 const size_t upperSizeBits =
240 (upperBound >> numLowerBits) + // Number of 0-bits to be stored.
242 const size_t upper = (upperSizeBits + 7) / 8;
244 // *** Validity checks.
245 // Shift by numLowerBits must be valid.
246 CHECK_LT(numLowerBits, 8 * sizeof(Value));
247 CHECK_LT(size, std::numeric_limits<SkipValueType>::max());
248 CHECK_LT(upperBound >> numLowerBits,
249 std::numeric_limits<SkipValueType>::max());
251 return fromInternalSizes(numLowerBits, upper, size);
// Fills in per-section byte sizes given the already-computed internals.
254 static Layout fromInternalSizes(uint8_t numLowerBits,
259 layout.numLowerBits = numLowerBits;
// Lower bits: numLowerBits per element, rounded up to whole bytes.
261 layout.lower = (numLowerBits * size + 7) / 8;
262 layout.upper = upper;
264 // *** Skip pointers.
265 // Store (1-indexed) position of every skipQuantum-th
266 // 0-bit in upper bits sequence.
267 /* static */ if (skipQuantum != 0) {
268 // 8 * upper is used here instead of upperSizeBits, as that is
269 // more serialization-friendly way (upperSizeBits doesn't need
270 // to be known by this function, unlike upper).
272 size_t numSkipPointers = (8 * upper - size) / skipQuantum;
273 layout.skipPointers = numSkipPointers * sizeof(SkipValueType);
276 // *** Forward pointers.
277 // Store (1-indexed) position of every forwardQuantum-th
278 // 1-bit in upper bits sequence.
279 /* static */ if (forwardQuantum != 0) {
280 size_t numForwardPointers = size / forwardQuantum;
281 layout.forwardPointers = numForwardPointers * sizeof(SkipValueType);
// Total serialized size in bytes (excluding the 7-byte read slack).
287 size_t bytes() const {
288 return lower + upper + skipPointers + forwardPointers;
// Builds a compressed-list view over `buf`, carving out the sections in
// serialized order: skip pointers, forward pointers, lower, upper.
291 template <class Range>
292 EliasFanoCompressedListBase<typename Range::iterator>
293 openList(Range& buf) const {
294 EliasFanoCompressedListBase<typename Range::iterator> result;
296 result.numLowerBits = numLowerBits;
297 result.data = buf.subpiece(0, bytes());
299 auto advance = [&] (size_t n) {
300 auto begin = buf.data();
305 result.skipPointers = advance(skipPointers);
306 result.forwardPointers = advance(forwardPointers);
307 result.lower = advance(lower);
308 result.upper = advance(upper);
// Mallocs a buffer for this layout and opens it as a mutable list.
// Caller owns the buffer (see CompressedList::free()).
313 MutableCompressedList allocList() const {
314 uint8_t* buf = nullptr;
315 // WARNING: Current read/write logic assumes that the 7 bytes
316 // following the last byte of lower and upper sequences are
317 // readable (stored value doesn't matter and won't be changed), so
318 // we allocate additional 7 bytes, but do not include them in size
319 // of returned value.
321 buf = static_cast<uint8_t*>(malloc(bytes() + 7));
323 folly::MutableByteRange bufRange(buf, bytes());
324 return openList(bufRange);
// Per-section byte sizes.
328 uint8_t numLowerBits = 0;
333 size_t skipPointers = 0;
334 size_t forwardPointers = 0;
// Reader over the unary-encoded upper-bits sequence. Maintains the current
// 1-bit index (position_), the decoded upper value (value_, the number of
// 0-bits before the current 1-bit), and a 64-bit window (block_) into the
// bit stream at byte offset outer_. The empty-base classes provide the
// forwardPointers_/skipPointers_ members only when the quantum is non-zero.
//
// NOTE(review): numerous original lines (closing braces, some statements)
// are missing from this extract; retained lines keep their original line
// numbers. Comments below describe only what the visible code shows.
339 template <class Encoder, class Instructions, class SizeType>
340 class UpperBitsReader : ForwardPointers<Encoder::forwardQuantum>,
341 SkipPointers<Encoder::skipQuantum> {
342 typedef typename Encoder::SkipValueType SkipValueType;
344 typedef typename Encoder::ValueType ValueType;
346 explicit UpperBitsReader(const typename Encoder::CompressedList& list)
347 : ForwardPointers<Encoder::forwardQuantum>(list.forwardPointers),
348 SkipPointers<Encoder::skipQuantum>(list.skipPointers),
// Reset to "before the first element": position_ wraps to 0 on the first
// next() because kInvalidValue-style max + 1 == 0 for unsigned SizeType.
354 block_ = start_ != nullptr ? folly::loadUnaligned<block_t>(start_) : 0;
355 position_ = std::numeric_limits<SizeType>::max();
360 SizeType position() const {
363 ValueType value() const {
// next(): advance to the next 1-bit.
368 // Skip to the first non-zero block.
369 while (block_ == 0) {
370 outer_ += sizeof(block_t);
371 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
// ctz finds the lowest set bit; blsr clears it so the next call resumes
// after it.
375 size_t inner = Instructions::ctz(block_);
376 block_ = Instructions::blsr(block_);
378 return setValue(inner);
// Advance by n 1-bits (n > 0). Uses a forward pointer to jump most of the
// way when n exceeds the quantum, then scans blocks by popcount.
381 ValueType skip(SizeType n) {
384 position_ += n; // n 1-bits will be read.
386 // Use forward pointer.
387 if (Encoder::forwardQuantum > 0 && n > Encoder::forwardQuantum) {
388 const size_t steps = position_ / Encoder::forwardQuantum;
389 const size_t dest = folly::loadUnaligned<SkipValueType>(
390 this->forwardPointers_ + (steps - 1) * sizeof(SkipValueType));
392 reposition(dest + steps * Encoder::forwardQuantum);
393 n = position_ + 1 - steps * Encoder::forwardQuantum; // n is > 0.
397 // Find necessary block.
398 while ((cnt = Instructions::popcount(block_)) < n) {
400 outer_ += sizeof(block_t);
401 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
404 // Skip to the n-th one in the block.
406 size_t inner = select64<Instructions>(block_, n - 1);
// Clear the selected bit and everything below it (double shift avoids
// undefined behavior when inner == 63).
407 block_ &= (block_t(-1) << inner) << 1;
409 return setValue(inner);
412 // Skip to the first element that is >= v and located *after* the current
413 // one (so even if current value equals v, position will be increased by 1).
414 ValueType skipToNext(ValueType v) {
415 DCHECK_GE(v, value_);
// Use a skip pointer to jump past most 0-bits when the gap is large.
418 if (Encoder::skipQuantum > 0 && v >= value_ + Encoder::skipQuantum) {
419 const size_t steps = v / Encoder::skipQuantum;
420 const size_t dest = folly::loadUnaligned<SkipValueType>(
421 this->skipPointers_ + (steps - 1) * sizeof(SkipValueType));
423 reposition(dest + Encoder::skipQuantum * steps);
424 position_ = dest - 1;
426 // Correct value_ will be set during the next() call at the end.
428 // NOTE: Corresponding block of lower bits sequence may be
429 // prefetched here (via __builtin_prefetch), but experiments
430 // didn't show any significant improvements.
// Linear scan: count 0-bits (popcount of the complement) per block until
// the block containing the skip-th remaining 0-bit is found.
435 size_t skip = v - (8 * outer_ - position_ - 1);
437 constexpr size_t kBitsPerBlock = 8 * sizeof(block_t);
438 while ((cnt = Instructions::popcount(~block_)) < skip) {
440 position_ += kBitsPerBlock - cnt;
441 outer_ += sizeof(block_t);
442 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
446 auto inner = select64<Instructions>(~block_, skip - 1);
447 position_ += inner - skip + 1;
448 block_ &= block_t(-1) << inner;
456 * Prepare to skip to `value`. This is a constant-time operation that will
457 * prefetch memory required for a `skipTo(value)` call.
459 * @return position of reader
461 SizeType prepareSkipTo(ValueType v) const {
462 auto position = position_;
// Mirror skipToNext's skip-pointer arithmetic to find the byte offset
// the linear search would start at, without mutating any state.
464 if (Encoder::skipQuantum > 0 && v >= value_ + Encoder::skipQuantum) {
466 const size_t steps = v / Encoder::skipQuantum;
467 const size_t dest = folly::loadUnaligned<SkipValueType>(
468 this->skipPointers_ + (steps - 1) * sizeof(SkipValueType));
471 outer = (dest + Encoder::skipQuantum * steps) / 8;
473 // Prefetch up to the beginning of where we linear search. After that,
474 // hardware prefetching will outperform our own. In addition, this
475 // simplifies calculating what to prefetch as we don't have to calculate
476 // the entire destination address. Two cache lines are prefetched because
477 // this results in fewer cycles used (based on practical results) than
478 // one. However, three cache lines does not have any additional effect.
479 const auto addr = start_ + outer;
480 __builtin_prefetch(addr);
481 __builtin_prefetch(addr + kCacheLineSize);
// Absolute positioning: jump to the n-th 1-bit from scratch when n is
// far enough to benefit from forward pointers; otherwise delegate to the
// relative path (missing lines presumably call skip() — see extract note).
487 ValueType jump(size_t n) {
488 if (Encoder::forwardQuantum == 0 || n <= Encoder::forwardQuantum) {
491 // Avoid reading the head, skip() will reposition.
492 position_ = std::numeric_limits<SizeType>::max();
// Absolute variant of skipToNext(): restart from the beginning when the
// target is beyond the first skip quantum.
497 ValueType jumpToNext(ValueType v) {
498 if (Encoder::skipQuantum == 0 || v < Encoder::skipQuantum) {
501 value_ = 0; // Avoid reading the head, skipToNext() will reposition.
503 return skipToNext(v);
// Decodes the value of the element just before the current one by
// scanning backwards for the previous 1-bit (state is not modified).
506 ValueType previousValue() const {
507 DCHECK_NE(position(), std::numeric_limits<SizeType>::max());
508 DCHECK_GT(position(), 0);
511 auto inner = size_t(value_) - 8 * outer_ + position_;
512 block_t block = folly::loadUnaligned<block_t>(start_ + outer);
// Mask off the current bit and everything above it.
513 block &= (block_t(1) << inner) - 1;
515 while (UNLIKELY(block == 0)) {
517 outer -= std::min<OuterType>(sizeof(block_t), outer);
518 block = folly::loadUnaligned<block_t>(start_ + outer);
521 inner = 8 * sizeof(block_t) - 1 - Instructions::clz(block);
522 return static_cast<ValueType>(8 * outer + inner - (position_ - 1));
525 void setDone(SizeType endPos) {
// Converts the (outer_, inner) bit coordinates into the decoded upper
// value: total bits seen minus the 1-bits (position_) gives the 0-count.
530 ValueType setValue(size_t inner) {
531 value_ = static_cast<ValueType>(8 * outer_ + inner - position_);
// Moves the block window to the byte containing bit `dest` and discards
// bits below dest within that block.
535 void reposition(SizeType dest) {
537 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
538 block_ &= ~((block_t(1) << (dest % 8)) - 1);
541 using block_t = uint64_t;
542 // The size in bytes of the upper bits is limited by n + universe / 8,
543 // so a type that can hold either sizes or values is sufficient.
544 using OuterType = typename std::common_type<ValueType, SizeType>::type;
546 const unsigned char* const start_;
548 SizeType position_; // Index of current value (= #reads - 1).
549 OuterType outer_; // Outer offset: number of consumed bytes in upper.
553 } // namespace detail
555 // If kUnchecked = true the caller must guarantee that all the
556 // operations return valid elements, i.e., they would never return
// Reader over a full Elias-Fano list: combines the upper-bits reader with
// direct extraction of the packed lower bits. An element's value is
// readLowerPart(i) | (upperValue << numLowerBits_).
//
// NOTE(review): the start of this template's parameter list (the
// `template <class Encoder, ...` line) and many body lines are missing
// from this extract; retained lines keep their original line numbers.
560 class Instructions = instructions::Default,
561 bool kUnchecked = false,
562 class SizeType = size_t>
563 class EliasFanoReader {
565 typedef Encoder EncoderType;
566 typedef typename Encoder::ValueType ValueType;
568 explicit EliasFanoReader(const typename Encoder::CompressedList& list)
572 numLowerBits_(list.numLowerBits) {
573 DCHECK(Instructions::supported());
574 // To avoid extra branching during skipTo() while reading
575 // upper sequence we need to know the last element.
576 // If kUnchecked == true, we do not check that skipTo() is called
577 // within the bounds, so we can avoid initializing lastValue_.
578 if (kUnchecked || UNLIKELY(list.size == 0)) {
// Decode the last element directly from the tail of the upper bits:
// total bits minus the size_ 1-bits gives the 0-count, then subtract
// the trailing zero bytes/bits after the last set bit.
582 ValueType lastUpperValue = ValueType(8 * list.upperSize() - size_);
583 auto it = list.upper + list.upperSize() - 1;
585 lastUpperValue -= 8 - folly::findLastSet(*it);
586 lastValue_ = readLowerPart(size_ - 1) | (lastUpperValue << numLowerBits_);
// reset(): return to the before-first-element state.
591 value_ = kInvalidValue;
// next(): advance one element; fails (bounds check) unless kUnchecked.
595 if (!kUnchecked && UNLIKELY(position() + 1 >= size_)) {
599 value_ = readLowerPart(upper_.position()) |
600 (upper_.value() << numLowerBits_);
// skip(n): advance by n elements. Small n uses repeated next()-style
// steps (linear scan); larger n delegates to upper_.skip().
604 bool skip(SizeType n) {
607 if (kUnchecked || LIKELY(position() + n < size_)) {
608 if (LIKELY(n < kLinearScanThreshold)) {
609 for (SizeType i = 0; i < n; ++i) {
615 value_ = readLowerPart(upper_.position()) |
616 (upper_.value() << numLowerBits_);
// skipTo(value): advance to the first element >= value.
623 bool skipTo(ValueType value) {
624 // Also works when value_ == kInvalidValue.
625 if (value != kInvalidValue) { DCHECK_GE(value + 1, value_ + 1); }
627 if (!kUnchecked && value > lastValue_) {
629 } else if (value == value_) {
633 ValueType upperValue = (value >> numLowerBits_);
634 ValueType upperSkip = upperValue - upper_.value();
635 // The average density of ones in upper bits is 1/2.
636 // LIKELY here seems to make things worse, even for small skips.
637 if (upperSkip < 2 * kLinearScanThreshold) {
640 } while (UNLIKELY(upper_.value() < upperValue));
642 upper_.skipToNext(upperValue);
650 * Prepare to skip to `value` by prefetching appropriate memory in both the
651 * upper and lower bits.
653 void prepareSkipTo(ValueType value) const {
654 // Also works when value_ == kInvalidValue.
655 if (value != kInvalidValue) {
656 DCHECK_GE(value + 1, value_ + 1);
// Nothing to prefetch if the skip would be rejected or is a no-op.
659 if ((!kUnchecked && value > lastValue_) || (value == value_)) {
663 // Do minimal computation required to prefetch address used in
664 // `readLowerPart()`.
665 ValueType upperValue = (value >> numLowerBits_);
666 const auto upperPosition = upper_.prepareSkipTo(upperValue);
667 const auto addr = lower_ + (upperPosition * numLowerBits_ / 8);
668 __builtin_prefetch(addr);
669 __builtin_prefetch(addr + kCacheLineSize);
// jump(n): absolute positioning at index n.
672 bool jump(SizeType n) {
673 if (LIKELY(n < size_)) { // Also checks that n != -1.
674 value_ = readLowerPart(n) | (upper_.jump(n + 1) << numLowerBits_);
// jumpTo(value): absolute variant of skipTo().
680 bool jumpTo(ValueType value) {
681 if (!kUnchecked && value > lastValue_) {
685 upper_.jumpToNext(value >> numLowerBits_);
// Value of the element immediately before the current position (does not
// move the reader).
690 ValueType previousValue() const {
691 DCHECK_GT(position(), 0);
692 DCHECK_LT(position(), size());
693 return readLowerPart(upper_.position() - 1) |
694 (upper_.previousValue() << numLowerBits_);
697 SizeType size() const {
702 return position() < size(); // Also checks that position() != -1.
705 SizeType position() const {
706 return upper_.position();
708 ValueType value() const {
714 // Must hold kInvalidValue + 1 == 0.
715 constexpr static ValueType kInvalidValue =
716 std::numeric_limits<ValueType>::max();
// setDone(): park the reader past the last element.
719 value_ = kInvalidValue;
720 upper_.setDone(size_);
// Extracts the numLowerBits_-wide field of element i from the packed
// lower-bits array via one unaligned 64-bit load (valid because the
// encoder guarantees 7 readable bytes past the buffer).
724 ValueType readLowerPart(SizeType i) const {
726 const size_t pos = i * numLowerBits_;
727 const unsigned char* ptr = lower_ + (pos / 8);
728 const uint64_t ptrv = folly::loadUnaligned<uint64_t>(ptr);
729 // This removes the branch in the fallback implementation of
730 // bzhi. The condition is verified at encoding time.
731 assume(numLowerBits_ < sizeof(ValueType) * 8);
732 return Instructions::bzhi(ptrv >> (pos % 8), numLowerBits_);
// Linear-scan helper for skipTo(): steps until value_ >= value.
735 void iterateTo(ValueType value) {
737 value_ = readLowerPart(upper_.position()) |
738 (upper_.value() << numLowerBits_);
739 if (LIKELY(value_ >= value)) {
// Below this skip distance a linear scan beats the pointer machinery.
746 constexpr static size_t kLinearScanThreshold = 8;
748 detail::UpperBitsReader<Encoder, Instructions, SizeType> upper_;
749 const uint8_t* lower_;
751 ValueType value_ = kInvalidValue;
752 ValueType lastValue_;
753 uint8_t numLowerBits_;
756 } // namespace compression