2 * Copyright 2017 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 * @author Philip Pronin (philipp@fb.com)
20 * Based on the paper by Sebastiano Vigna,
21 * "Quasi-succinct indices" (arxiv:1206.4300).
29 #include <type_traits>
31 #include <folly/Assume.h>
32 #include <folly/Bits.h>
33 #include <folly/Likely.h>
34 #include <folly/Portability.h>
35 #include <folly/Range.h>
36 #include <folly/experimental/CodingDetail.h>
37 #include <folly/experimental/Instructions.h>
38 #include <folly/experimental/Select64.h>
39 #include <glog/logging.h>
42 #error EliasFanoCoding.h requires x86_64
45 namespace folly { namespace compression {
47 static_assert(kIsLittleEndian, "EliasFanoCoding.h requires little endianness");
49 template <class Pointer>
// Non-owning view of an Elias-Fano encoded list: layout metadata plus
// pointers into a single contiguous byte buffer (data). Pointer is either
// const uint8_t* (read-only list) or uint8_t* (mutable list).
50 struct EliasFanoCompressedListBase {
51 EliasFanoCompressedListBase() = default;
53 template <class OtherPointer>
// Converting copy ctor: lets a MutableEliasFanoCompressedList (uint8_t*)
// be viewed as an EliasFanoCompressedList (const uint8_t*).
54 EliasFanoCompressedListBase(
55 const EliasFanoCompressedListBase<OtherPointer>& other)
57 numLowerBits(other.numLowerBits),
59 skipPointers(reinterpret_cast<Pointer>(other.skipPointers)),
60 forwardPointers(reinterpret_cast<Pointer>(other.forwardPointers)),
61 lower(reinterpret_cast<Pointer>(other.lower)),
62 upper(reinterpret_cast<Pointer>(other.upper)) { }
64 template <class T = Pointer>
// Frees the underlying buffer. SFINAE on ::free(T(nullptr)) restricts this
// to the mutable (non-const Pointer) instantiation.
65 auto free() -> decltype(::free(T(nullptr))) {
66 return ::free(data.data());
// Number of bytes occupied by the upper-bits sequence, which is laid out
// at the tail of data (from the upper pointer to data.end()).
69 size_t upperSize() const {
70 return size_t(data.end() - upper);
74 uint8_t numLowerBits = 0;
76 // WARNING: EliasFanoCompressedList has no ownership of data. The 7
77 // bytes following the last byte should be readable.
78 folly::Range<Pointer> data;
// Section pointers into data; section order is established by
// Layout::openList (skip pointers, forward pointers, lower, upper).
80 Pointer skipPointers = nullptr;
81 Pointer forwardPointers = nullptr;
82 Pointer lower = nullptr;
83 Pointer upper = nullptr;
86 typedef EliasFanoCompressedListBase<const uint8_t*> EliasFanoCompressedList;
87 typedef EliasFanoCompressedListBase<uint8_t*> MutableEliasFanoCompressedList;
91 class SkipValue = size_t,
92 size_t kSkipQuantum = 0, // 0 = disabled
93 size_t kForwardQuantum = 0> // 0 = disabled
// Encoder for Elias-Fano ("quasi-succinct") representation of a monotone
// sequence: each value is split into low bits (stored packed) and high bits
// (stored in unary). Optional skip/forward pointer tables (quantum > 0)
// accelerate the reader's skipTo()/jump() operations.
94 struct EliasFanoEncoderV2 {
95 static_assert(std::is_integral<Value>::value &&
96 std::is_unsigned<Value>::value,
97 "Value should be unsigned integral");
99 typedef EliasFanoCompressedList CompressedList;
100 typedef MutableEliasFanoCompressedList MutableCompressedList;
102 typedef Value ValueType;
103 typedef SkipValue SkipValueType;
106 static constexpr size_t skipQuantum = kSkipQuantum;
107 static constexpr size_t forwardQuantum = kForwardQuantum;
// Chooses the number of low bits that (approximately) minimizes total
// encoded size for `size` values bounded above by `upperBound`.
109 static uint8_t defaultNumLowerBits(size_t upperBound, size_t size) {
110 if (UNLIKELY(size == 0 || upperBound < size)) {
113 // Result that should be returned is "floor(log(upperBound / size))".
114 // In order to avoid expensive division, we rely on
115 // "floor(a) - floor(b) - 1 <= floor(a - b) <= floor(a) - floor(b)".
116 // Assuming "candidate = floor(log(upperBound)) - floor(log(size))",
117 // then result is either "candidate - 1" or "candidate".
118 auto candidate = folly::findLastSet(upperBound) - folly::findLastSet(size);
119 // NOTE: As size != 0, "candidate" is always < 64.
120 return (size > (upperBound >> candidate)) ? candidate - 1 : candidate;
123 // Requires: input range (begin, end) is sorted (encoding
124 // crashes if it's not).
125 // WARNING: encode() mallocates EliasFanoCompressedList::data. As
126 // EliasFanoCompressedList has no ownership of it, you need to call
127 // free() explicitly.
128 template <class RandomAccessIterator>
129 static MutableCompressedList encode(RandomAccessIterator begin,
130 RandomAccessIterator end) {
132 return MutableCompressedList();
// The last element (*(end - 1)) serves as the upper bound for the layout.
134 EliasFanoEncoderV2 encoder(size_t(end - begin), *(end - 1));
135 for (; begin != end; ++begin) {
138 return encoder.finish();
// Constructs an encoder over an already-allocated, laid-out buffer;
// zero-fills the whole buffer before encoding starts.
141 explicit EliasFanoEncoderV2(const MutableCompressedList& result)
142 : lower_(result.lower),
143 upper_(result.upper),
144 skipPointers_(reinterpret_cast<SkipValueType*>(
145 result.skipPointers)),
146 forwardPointers_(reinterpret_cast<SkipValueType*>(
147 result.forwardPointers)),
149 std::fill(result.data.begin(), result.data.end(), '\0');
// Convenience ctor: computes the layout and allocates the buffer.
152 EliasFanoEncoderV2(size_t size, ValueType upperBound)
153 : EliasFanoEncoderV2(
154 Layout::fromUpperBoundAndSize(upperBound, size).allocList()) { }
// Appends one value. Values must be added in non-decreasing order
// (checked against lastValue_).
156 void add(ValueType value) {
157 CHECK_LT(value, std::numeric_limits<ValueType>::max());
158 CHECK_GE(value, lastValue_);
160 const auto numLowerBits = result_.numLowerBits;
161 const ValueType upperBits = value >> numLowerBits;
163 // Upper sequence consists of upperBits 0-bits and (size_ + 1) 1-bits.
164 const size_t pos = upperBits + size_;
165 upper_[pos / 8] |= 1U << (pos % 8);
166 // Append numLowerBits bits to lower sequence.
167 if (numLowerBits != 0) {
168 const ValueType lowerBits = value & ((ValueType(1) << numLowerBits) - 1);
169 writeBits56(lower_, size_ * numLowerBits, numLowerBits, lowerBits);
// "/* static */ if": the condition is a compile-time constant, so the
// dead branch is optimized away.
172 /* static */ if (skipQuantum != 0) {
173 while ((skipPointersSize_ + 1) * skipQuantum <= upperBits) {
174 // Store the number of preceding 1-bits.
175 skipPointers_[skipPointersSize_++] = SkipValue(size_);
179 /* static */ if (forwardQuantum != 0) {
180 if ((size_ + 1) % forwardQuantum == 0) {
181 const auto k = size_ / forwardQuantum;
182 // Store the number of preceding 0-bits.
183 forwardPointers_[k] = upperBits;
// Finalizes encoding; all `size` values must have been added by now.
191 const MutableCompressedList& finish() const {
192 CHECK_EQ(size_, result_.size);
197 // Writes value (with len up to 56 bits) to data starting at pos-th bit.
// Uses an unaligned 8-byte read-modify-write, which is why 7 readable
// bytes past the end of the buffer are required.
198 static void writeBits56(unsigned char* data, size_t pos,
199 uint8_t len, uint64_t value) {
200 DCHECK_LE(uint32_t(len), 56);
201 DCHECK_EQ(0, value & ~((uint64_t(1) << len) - 1));
202 unsigned char* const ptr = data + (pos / 8);
203 uint64_t ptrv = folly::loadUnaligned<uint64_t>(ptr);
204 ptrv |= value << (pos % 8);
205 folly::storeUnaligned<uint64_t>(ptr, ptrv);
// Write cursors into the respective sections of the output buffer.
208 unsigned char* lower_ = nullptr;
209 unsigned char* upper_ = nullptr;
210 SkipValueType* skipPointers_ = nullptr;
211 SkipValueType* forwardPointers_ = nullptr;
// Last value passed to add(); enforces the non-decreasing precondition.
213 ValueType lastValue_ = 0;
215 size_t skipPointersSize_ = 0;
217 MutableCompressedList result_;
224 size_t kForwardQuantum>
// Layout: byte sizes of each section of the encoded buffer (lower bits,
// upper bits, skip- and forward-pointer tables) plus helpers to allocate
// a buffer and slice it into an EliasFanoCompressedListBase view.
225 struct EliasFanoEncoderV2<Value,
228 kForwardQuantum>::Layout {
// Computes the layout for `size` values with maximum value `upperBound`.
229 static Layout fromUpperBoundAndSize(size_t upperBound, size_t size) {
230 // numLowerBits can be at most 56 because of detail::writeBits56.
231 const uint8_t numLowerBits = std::min(defaultNumLowerBits(upperBound,
235 // Upper bits are stored using unary delta encoding.
236 // For example, (3 5 5 9) will be encoded as 1000011001000_2.
237 const size_t upperSizeBits =
238 (upperBound >> numLowerBits) + // Number of 0-bits to be stored.
240 const size_t upper = (upperSizeBits + 7) / 8;
242 // *** Validity checks.
243 // Shift by numLowerBits must be valid.
244 CHECK_LT(numLowerBits, 8 * sizeof(Value));
245 CHECK_LT(size, std::numeric_limits<SkipValueType>::max());
246 CHECK_LT(upperBound >> numLowerBits,
247 std::numeric_limits<SkipValueType>::max());
249 return fromInternalSizes(numLowerBits, upper, size);
252 static Layout fromInternalSizes(uint8_t numLowerBits,
257 layout.numLowerBits = numLowerBits;
// Lower bits are packed, numLowerBits per value, rounded up to bytes.
259 layout.lower = (numLowerBits * size + 7) / 8;
260 layout.upper = upper;
262 // *** Skip pointers.
263 // Store (1-indexed) position of every skipQuantum-th
264 // 0-bit in upper bits sequence.
265 /* static */ if (skipQuantum != 0) {
266 // 8 * upper is used here instead of upperSizeBits, as that is
267 // more serialization-friendly way (upperSizeBits doesn't need
268 // to be known by this function, unlike upper).
270 size_t numSkipPointers = (8 * upper - size) / skipQuantum;
271 layout.skipPointers = numSkipPointers * sizeof(SkipValueType);
274 // *** Forward pointers.
275 // Store (1-indexed) position of every forwardQuantum-th
276 // 1-bit in upper bits sequence.
277 /* static */ if (forwardQuantum != 0) {
278 size_t numForwardPointers = size / forwardQuantum;
279 layout.forwardPointers = numForwardPointers * sizeof(SkipValueType);
// Total payload size in bytes (does NOT include the 7 padding bytes
// that allocList() adds for unaligned 8-byte reads).
285 size_t bytes() const {
286 return lower + upper + skipPointers + forwardPointers;
// Slices buf into a compressed-list view according to this layout.
289 template <class Range>
290 EliasFanoCompressedListBase<typename Range::iterator>
291 openList(Range& buf) const {
292 EliasFanoCompressedListBase<typename Range::iterator> result;
294 result.numLowerBits = numLowerBits;
295 result.data = buf.subpiece(0, bytes());
297 auto advance = [&] (size_t n) {
298 auto begin = buf.data();
// Sections are laid out in this order: skip pointers, forward
// pointers, lower bits, upper bits.
303 result.skipPointers = advance(skipPointers);
304 result.forwardPointers = advance(forwardPointers);
305 result.lower = advance(lower);
306 result.upper = advance(upper);
// Allocates (via malloc) a buffer sized for this layout and opens it.
// Caller owns the buffer; see encode()'s WARNING about free().
311 MutableCompressedList allocList() const {
312 uint8_t* buf = nullptr;
313 // WARNING: Current read/write logic assumes that the 7 bytes
314 // following the last byte of lower and upper sequences are
315 // readable (stored value doesn't matter and won't be changed), so
316 // we allocate additional 7 bytes, but do not include them in size
317 // of returned value.
319 buf = static_cast<uint8_t*>(malloc(bytes() + 7));
321 folly::MutableByteRange bufRange(buf, bytes());
322 return openList(bufRange);
// Section sizes in bytes.
326 uint8_t numLowerBits = 0;
331 size_t skipPointers = 0;
332 size_t forwardPointers = 0;
337 template <class Encoder, class Instructions, class SizeType>
// Reader for the unary-encoded upper-bits sequence. Iterates over 1-bits
// (one per encoded value), maintaining the current value (number of 0-bits
// read so far) and position (number of 1-bits read so far, minus 1).
// Inherits skip/forward pointer storage; empty bases when quantum == 0.
338 class UpperBitsReader : ForwardPointers<Encoder::forwardQuantum>,
339 SkipPointers<Encoder::skipQuantum> {
340 typedef typename Encoder::SkipValueType SkipValueType;
342 typedef typename Encoder::ValueType ValueType;
344 explicit UpperBitsReader(const typename Encoder::CompressedList& list)
345 : ForwardPointers<Encoder::forwardQuantum>(list.forwardPointers),
346 SkipPointers<Encoder::skipQuantum>(list.skipPointers),
// Empty list: no block to load. position_ = max() acts as "before the
// first element" (it must satisfy position_ + 1 == 0; see kInvalidValue).
352 block_ = start_ != nullptr ? folly::loadUnaligned<block_t>(start_) : 0;
353 position_ = std::numeric_limits<SizeType>::max();
358 SizeType position() const {
361 ValueType value() const {
366 // Skip to the first non-zero block.
367 while (block_ == 0) {
368 outer_ += sizeof(block_t);
369 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
// ctz finds the next 1-bit; blsr clears it so the next call advances.
373 size_t inner = Instructions::ctz(block_);
374 block_ = Instructions::blsr(block_);
376 return setValue(inner);
// Advances over the next n 1-bits (i.e. n values forward).
379 ValueType skip(SizeType n) {
382 position_ += n; // n 1-bits will be read.
384 // Use forward pointer.
385 if (Encoder::forwardQuantum > 0 && n > Encoder::forwardQuantum) {
386 const size_t steps = position_ / Encoder::forwardQuantum;
387 const size_t dest = folly::loadUnaligned<SkipValueType>(
388 this->forwardPointers_ + (steps - 1) * sizeof(SkipValueType));
390 reposition(dest + steps * Encoder::forwardQuantum);
391 n = position_ + 1 - steps * Encoder::forwardQuantum; // n is > 0.
395 // Find necessary block.
396 while ((cnt = Instructions::popcount(block_)) < n) {
398 outer_ += sizeof(block_t);
399 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
402 // Skip to the n-th one in the block.
404 size_t inner = select64<Instructions>(block_, n - 1);
// Clear the selected 1-bit and everything below it.
405 block_ &= (block_t(-1) << inner) << 1;
407 return setValue(inner);
410 // Skip to the first element that is >= v and located *after* the current
411 // one (so even if current value equals v, position will be increased by 1).
412 ValueType skipToNext(ValueType v) {
413 DCHECK_GE(v, value_);
// Use a skip pointer to jump over whole quanta of 0-bits at once.
416 if (Encoder::skipQuantum > 0 && v >= value_ + Encoder::skipQuantum) {
417 const size_t steps = v / Encoder::skipQuantum;
418 const size_t dest = folly::loadUnaligned<SkipValueType>(
419 this->skipPointers_ + (steps - 1) * sizeof(SkipValueType));
421 reposition(dest + Encoder::skipQuantum * steps);
422 position_ = dest - 1;
424 // Correct value_ will be set during the next() call at the end.
426 // NOTE: Corresponding block of lower bits sequence may be
427 // prefetched here (via __builtin_prefetch), but experiments
428 // didn't show any significant improvements.
// Number of 0-bits still to be skipped to reach value v.
433 size_t skip = v - (8 * outer_ - position_ - 1);
435 constexpr size_t kBitsPerBlock = 8 * sizeof(block_t);
// Scan over blocks, counting 0-bits (popcount of the complement).
436 while ((cnt = Instructions::popcount(~block_)) < skip) {
438 position_ += kBitsPerBlock - cnt;
439 outer_ += sizeof(block_t);
440 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
// Select the skip-th 0-bit within the block.
444 auto inner = select64<Instructions>(~block_, skip - 1);
445 position_ += inner - skip + 1;
446 block_ &= block_t(-1) << inner;
// Random access to the n-th 1-bit. Without forward pointers (or for
// small n) this degrades to a reset-and-skip.
453 ValueType jump(size_t n) {
454 if (Encoder::forwardQuantum == 0 || n <= Encoder::forwardQuantum) {
457 // Avoid reading the head, skip() will reposition.
458 position_ = std::numeric_limits<SizeType>::max();
463 ValueType jumpToNext(ValueType v) {
464 if (Encoder::skipQuantum == 0 || v < Encoder::skipQuantum) {
467 value_ = 0; // Avoid reading the head, skipToNext() will reposition.
469 return skipToNext(v);
// Value of the element just before the current one, found by scanning
// backwards for the previous 1-bit. Does not modify reader state.
472 ValueType previousValue() const {
473 DCHECK_NE(position(), std::numeric_limits<SizeType>::max());
474 DCHECK_GT(position(), 0);
477 auto inner = size_t(value_) - 8 * outer_ + position_;
478 block_t block = folly::loadUnaligned<block_t>(start_ + outer);
// Mask off the current bit and everything above it.
479 block &= (block_t(1) << inner) - 1;
481 while (UNLIKELY(block == 0)) {
483 outer -= std::min<OuterType>(sizeof(block_t), outer);
484 block = folly::loadUnaligned<block_t>(start_ + outer);
// Highest set bit in the block is the previous 1-bit.
487 inner = 8 * sizeof(block_t) - 1 - Instructions::clz(block);
488 return static_cast<ValueType>(8 * outer + inner - (position_ - 1));
491 void setDone(SizeType endPos) {
// Converts the just-found bit offset into the decoded upper value
// (total 0-bits seen = absolute bit position minus 1-bits seen).
496 ValueType setValue(size_t inner) {
497 value_ = static_cast<ValueType>(8 * outer_ + inner - position_);
// Repositions the cursor at absolute bit offset dest, discarding bits
// below dest % 8 within the loaded block.
501 void reposition(SizeType dest) {
503 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
504 block_ &= ~((block_t(1) << (dest % 8)) - 1);
507 using block_t = uint64_t;
508 // The size in bytes of the upper bits is limited by n + universe / 8,
509 // so a type that can hold either sizes or values is sufficient.
510 using OuterType = typename std::common_type<ValueType, SizeType>::type;
512 const unsigned char* const start_;
514 SizeType position_; // Index of current value (= #reads - 1).
515 OuterType outer_; // Outer offset: number of consumed bytes in upper.
519 } // namespace detail
521 // If kUnchecked = true the caller must guarantee that all the
522 // operations return valid elements, i.e., they would never return
526 class Instructions = instructions::Default,
527 bool kUnchecked = false,
528 class SizeType = size_t>
// Reader over an Elias-Fano compressed list: combines the upper-bits
// reader with direct indexed access to the packed lower bits.
// NOTE(review): class definition continues beyond this excerpt.
529 class EliasFanoReader {
531 typedef Encoder EncoderType;
532 typedef typename Encoder::ValueType ValueType;
534 explicit EliasFanoReader(const typename Encoder::CompressedList& list)
538 numLowerBits_(list.numLowerBits) {
539 DCHECK(Instructions::supported());
540 // To avoid extra branching during skipTo() while reading
541 // upper sequence we need to know the last element.
542 // If kUnchecked == true, we do not check that skipTo() is called
543 // within the bounds, so we can avoid initializing lastValue_.
544 if (kUnchecked || UNLIKELY(list.size == 0)) {
// Recover the last value: take the position of the last set bit in the
// final upper-bits byte, then combine with its lower bits.
548 ValueType lastUpperValue = ValueType(8 * list.upperSize() - size_);
549 auto it = list.upper + list.upperSize() - 1;
551 lastUpperValue -= 8 - folly::findLastSet(*it);
552 lastValue_ = readLowerPart(size_ - 1) | (lastUpperValue << numLowerBits_);
// Reset to "before the first element".
557 value_ = kInvalidValue;
561 if (!kUnchecked && UNLIKELY(position() + 1 >= size_)) {
565 value_ = readLowerPart(upper_.position()) |
566 (upper_.value() << numLowerBits_);
// Advances by n positions; returns false when that would run past the end.
570 bool skip(SizeType n) {
573 if (kUnchecked || LIKELY(position() + n < size_)) {
// For small n a linear scan of next() beats the select-based skip.
574 if (LIKELY(n < kLinearScanThreshold)) {
575 for (SizeType i = 0; i < n; ++i)
580 value_ = readLowerPart(upper_.position()) |
581 (upper_.value() << numLowerBits_);
// Advances to the first element >= value; returns false if none exists.
588 bool skipTo(ValueType value) {
589 // Also works when value_ == kInvalidValue.
590 if (value != kInvalidValue) { DCHECK_GE(value + 1, value_ + 1); }
592 if (!kUnchecked && value > lastValue_) {
594 } else if (value == value_) {
598 ValueType upperValue = (value >> numLowerBits_);
599 ValueType upperSkip = upperValue - upper_.value();
600 // The average density of ones in upper bits is 1/2.
601 // LIKELY here seems to make things worse, even for small skips.
602 if (upperSkip < 2 * kLinearScanThreshold) {
605 } while (UNLIKELY(upper_.value() < upperValue));
607 upper_.skipToNext(upperValue);
// Random access to the n-th element (0-indexed).
614 bool jump(SizeType n) {
615 if (LIKELY(n < size_)) { // Also checks that n != -1.
616 value_ = readLowerPart(n) | (upper_.jump(n + 1) << numLowerBits_);
// Random-access variant of skipTo: position at first element >= value.
622 bool jumpTo(ValueType value) {
623 if (!kUnchecked && value > lastValue_) {
627 upper_.jumpToNext(value >> numLowerBits_);
// Value of the element preceding the current position (read-only).
632 ValueType previousValue() const {
633 DCHECK_GT(position(), 0);
634 DCHECK_LT(position(), size());
635 return readLowerPart(upper_.position() - 1) |
636 (upper_.previousValue() << numLowerBits_);
639 SizeType size() const {
644 return position() < size(); // Also checks that position() != -1.
647 SizeType position() const {
648 return upper_.position();
650 ValueType value() const {
656 // Must hold kInvalidValue + 1 == 0.
657 constexpr static ValueType kInvalidValue =
658 std::numeric_limits<ValueType>::max();
661 value_ = kInvalidValue;
662 upper_.setDone(size_);
// Extracts the i-th value's low bits from the packed lower sequence via
// an unaligned 8-byte load (relies on the 7 readable padding bytes).
666 ValueType readLowerPart(SizeType i) const {
668 const size_t pos = i * numLowerBits_;
669 const unsigned char* ptr = lower_ + (pos / 8);
670 const uint64_t ptrv = folly::loadUnaligned<uint64_t>(ptr);
671 // This removes the branch in the fallback implementation of
672 // bzhi. The condition is verified at encoding time.
673 assume(numLowerBits_ < sizeof(ValueType) * 8);
674 return Instructions::bzhi(ptrv >> (pos % 8), numLowerBits_);
// Linear scan forward until value_ >= value (used for short skips).
677 void iterateTo(ValueType value) {
679 value_ = readLowerPart(upper_.position()) |
680 (upper_.value() << numLowerBits_);
681 if (LIKELY(value_ >= value)) break;
// Below this skip distance a linear scan is cheaper than select-based skip.
686 constexpr static size_t kLinearScanThreshold = 8;
688 detail::UpperBitsReader<Encoder, Instructions, SizeType> upper_;
689 const uint8_t* lower_;
691 ValueType value_ = kInvalidValue;
// Largest element; only initialized when kUnchecked == false.
692 ValueType lastValue_;
693 uint8_t numLowerBits_;