2 * Copyright 2016 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 * @author Philip Pronin (philipp@fb.com)
20 * Based on the paper by Sebastiano Vigna,
21 * "Quasi-succinct indices" (arxiv:1206.4300).
29 #include <type_traits>
31 #include <folly/Bits.h>
32 #include <folly/Likely.h>
33 #include <folly/Portability.h>
34 #include <folly/Range.h>
35 #include <folly/experimental/Instructions.h>
36 #include <folly/experimental/Select64.h>
37 #include <glog/logging.h>
40 #error EliasFanoCoding.h requires x86_64
43 namespace folly { namespace compression {
45 static_assert(kIsLittleEndian, "EliasFanoCoding.h requires little endianness");
// Non-owning view of an Elias-Fano encoded list. Holds the raw byte
// range plus pointers to the four internal sections (skip pointers,
// forward pointers, lower bits, upper bits). Parameterized on Pointer
// so the same layout serves both a read-only view (const uint8_t*)
// and a mutable view (uint8_t*) -- see the typedefs below.
47 template <class Pointer>
48 struct EliasFanoCompressedListBase {
49 EliasFanoCompressedListBase() = default;
// Converting copy-constructor: lets a read-only (const-pointer) view
// be built from a mutable view. The reinterpret_casts only change
// pointer const-ness/type, not the addresses.
51 template <class OtherPointer>
52 EliasFanoCompressedListBase(
53 const EliasFanoCompressedListBase<OtherPointer>& other)
55 numLowerBits(other.numLowerBits),
57 skipPointers(reinterpret_cast<Pointer>(other.skipPointers)),
58 forwardPointers(reinterpret_cast<Pointer>(other.forwardPointers)),
59 lower(reinterpret_cast<Pointer>(other.lower)),
60 upper(reinterpret_cast<Pointer>(other.upper)) { }
// Releases the malloc'ed buffer backing `data`. SFINAE trick: the
// trailing return type only compiles when ::free accepts Pointer,
// i.e. when Pointer is non-const -- the const (read-only) view has
// no free() member at all.
62 template <class T = Pointer>
63 auto free() -> decltype(::free(T(nullptr))) {
64 return ::free(data.data());
// The upper-bits section is the tail of `data`, so its size is the
// distance from `upper` to the end of the range.
67 size_t upperSize() const { return data.end() - upper; }
70 uint8_t numLowerBits = 0;
72 // WARNING: EliasFanoCompressedList has no ownership of data. The 7
73 // bytes following the last byte should be readable.
74 folly::Range<Pointer> data;
76 Pointer skipPointers = nullptr;
77 Pointer forwardPointers = nullptr;
78 Pointer lower = nullptr;
79 Pointer upper = nullptr;
// Read-only and mutable aliases of the view above.
82 typedef EliasFanoCompressedListBase<const uint8_t*> EliasFanoCompressedList;
83 typedef EliasFanoCompressedListBase<uint8_t*> MutableEliasFanoCompressedList;
// Elias-Fano encoder. Splits each value into numLowerBits low bits
// (stored verbatim in the "lower" section) and the remaining high bits
// (stored unary-delta-encoded in the "upper" section). Optional skip /
// forward pointer sections (quantum 0 = section disabled) accelerate
// skipTo() / jump() in the reader.
85 template <class Value,
86 class SkipValue = size_t,
87 size_t kSkipQuantum = 0, // 0 = disabled
88 size_t kForwardQuantum = 0> // 0 = disabled
89 struct EliasFanoEncoderV2 {
90 static_assert(std::is_integral<Value>::value &&
91 std::is_unsigned<Value>::value,
92 "Value should be unsigned integral");
94 typedef EliasFanoCompressedList CompressedList;
95 typedef MutableEliasFanoCompressedList MutableCompressedList;
97 typedef Value ValueType;
98 typedef SkipValue SkipValueType;
101 static constexpr size_t skipQuantum = kSkipQuantum;
102 static constexpr size_t forwardQuantum = kForwardQuantum;
// Optimal split point for the lower/upper sections:
// floor(log2(upperBound / size)) low bits minimizes total space.
104 static uint8_t defaultNumLowerBits(size_t upperBound, size_t size) {
105 if (size == 0 || upperBound < size) {
108 // floor(log(upperBound / size));
109 return folly::findLastSet(upperBound / size) - 1;
112 // Requires: input range (begin, end) is sorted (encoding
113 // crashes if it's not).
114 // WARNING: encode() mallocates EliasFanoCompressedList::data. As
115 // EliasFanoCompressedList has no ownership of it, you need to call
116 // free() explicitly.
117 template <class RandomAccessIterator>
118 static MutableCompressedList encode(RandomAccessIterator begin,
119 RandomAccessIterator end) {
121 return MutableCompressedList();
// *(end - 1) is the largest element (input is sorted), used as the
// upper bound when sizing the layout.
123 EliasFanoEncoderV2(end - begin, *(end - 1));
124 for (; begin != end; ++begin) {
127 return encoder.finish();
// Constructs an encoder writing into a pre-allocated list; zeroes the
// whole buffer first so bits can be OR'ed in incrementally by add().
130 explicit EliasFanoEncoderV2(const MutableCompressedList& result)
131 : lower_(result.lower),
132 upper_(result.upper),
133 skipPointers_(reinterpret_cast<SkipValueType*>(
134 result.skipPointers)),
135 forwardPointers_(reinterpret_cast<SkipValueType*>(
136 result.forwardPointers)),
138 std::fill(result.data.begin(), result.data.end(), 0);
// Convenience constructor: computes the layout and allocates the
// backing buffer (caller must eventually free() the result).
141 EliasFanoEncoderV2(size_t size, ValueType upperBound)
142 : EliasFanoEncoderV2(
143 Layout::fromUpperBoundAndSize(upperBound, size).allocList()) { }
// Appends one value. Values must be added in non-decreasing order
// (CHECK_GE below enforces this).
145 void add(ValueType value) {
146 CHECK_LT(value, std::numeric_limits<ValueType>::max());
147 CHECK_GE(value, lastValue_);
149 const auto numLowerBits = result_.numLowerBits;
150 const ValueType upperBits = value >> numLowerBits;
152 // Upper sequence consists of upperBits 0-bits and (size_ + 1) 1-bits.
153 const size_t pos = upperBits + size_;
154 upper_[pos / 8] |= 1U << (pos % 8);
155 // Append numLowerBits bits to lower sequence.
156 if (numLowerBits != 0) {
157 const ValueType lowerBits = value & ((ValueType(1) << numLowerBits) - 1);
158 writeBits56(lower_, size_ * numLowerBits, numLowerBits, lowerBits);
// Skip pointers: for every skipQuantum-th 0-bit crossed in the upper
// sequence, record how many 1-bits (i.e. values) precede it.
161 /* static */ if (skipQuantum != 0) {
162 while ((skipPointersSize_ + 1) * skipQuantum <= upperBits) {
163 // Store the number of preceding 1-bits.
164 skipPointers_[skipPointersSize_++] = size_;
// Forward pointers: for every forwardQuantum-th 1-bit written,
// record how many 0-bits precede it.
168 /* static */ if (forwardQuantum != 0) {
169 if ((size_ + 1) % forwardQuantum == 0) {
170 const auto k = size_ / forwardQuantum;
171 // Store the number of preceding 0-bits.
172 forwardPointers_[k] = upperBits;
// Finalizes encoding; all `size` values must have been add()ed.
180 const MutableCompressedList& finish() const {
181 CHECK_EQ(size_, result_.size);
186 // Writes value (with len up to 56 bits) to data starting at pos-th bit.
// Reads/writes a full unaligned 64-bit word, which is why 7 readable
// bytes past the end of the buffer are required (see allocList()).
// len <= 56 guarantees the value fits in the word regardless of the
// in-byte offset (pos % 8 <= 7, and 56 + 7 <= 64).
187 static void writeBits56(unsigned char* data, size_t pos,
188 uint8_t len, uint64_t value) {
189 DCHECK_LE(uint32_t(len), 56);
190 DCHECK_EQ(0, value & ~((uint64_t(1) << len) - 1));
191 unsigned char* const ptr = data + (pos / 8);
192 uint64_t ptrv = folly::loadUnaligned<uint64_t>(ptr);
193 ptrv |= value << (pos % 8);
194 folly::storeUnaligned<uint64_t>(ptr, ptrv);
197 unsigned char* lower_ = nullptr;
198 unsigned char* upper_ = nullptr;
199 SkipValueType* skipPointers_ = nullptr;
200 SkipValueType* forwardPointers_ = nullptr;
// Last value add()ed; used to enforce sorted input.
202 ValueType lastValue_ = 0;
204 size_t skipPointersSize_ = 0;
206 MutableCompressedList result_;
// Layout: computes byte sizes and offsets of the four sections
// (skip pointers, forward pointers, lower bits, upper bits) for a
// given element count and upper bound, and can allocate / open a
// buffer laid out accordingly.
209 template <class Value,
212 size_t kForwardQuantum>
213 struct EliasFanoEncoderV2<Value,
216 kForwardQuantum>::Layout {
217 static Layout fromUpperBoundAndSize(size_t upperBound, size_t size) {
218 // numLowerBits can be at most 56 because of detail::writeBits56.
219 const uint8_t numLowerBits = std::min(defaultNumLowerBits(upperBound,
223 // Upper bits are stored using unary delta encoding.
224 // For example, (3 5 5 9) will be encoded as 1000011001000_2.
225 const size_t upperSizeBits =
226 (upperBound >> numLowerBits) + // Number of 0-bits to be stored.
228 const size_t upper = (upperSizeBits + 7) / 8;
230 // *** Validity checks.
231 // Shift by numLowerBits must be valid.
232 CHECK_LT(numLowerBits, 8 * sizeof(Value));
233 CHECK_LT(size, std::numeric_limits<SkipValueType>::max());
234 CHECK_LT(upperBound >> numLowerBits,
235 std::numeric_limits<SkipValueType>::max());
237 return fromInternalSizes(numLowerBits, upper, size);
240 static Layout fromInternalSizes(uint8_t numLowerBits,
245 layout.numLowerBits = numLowerBits;
// Lower section: size values of numLowerBits bits each, rounded up
// to whole bytes.
247 layout.lower = (numLowerBits * size + 7) / 8;
248 layout.upper = upper;
250 // *** Skip pointers.
251 // Store (1-indexed) position of every skipQuantum-th
252 // 0-bit in upper bits sequence.
253 /* static */ if (skipQuantum != 0) {
254 // 8 * upper is used here instead of upperSizeBits, as that is
255 // more serialization-friendly way (upperSizeBits doesn't need
256 // to be known by this function, unlike upper).
// 8 * upper - size over-approximates the number of 0-bits in the
// upper sequence (total bits minus the `size` 1-bits).
258 size_t numSkipPointers = (8 * upper - size) / skipQuantum;
259 layout.skipPointers = numSkipPointers * sizeof(SkipValueType);
262 // *** Forward pointers.
263 // Store (1-indexed) position of every forwardQuantum-th
264 // 1-bit in upper bits sequence.
265 /* static */ if (forwardQuantum != 0) {
266 size_t numForwardPointers = size / forwardQuantum;
267 layout.forwardPointers = numForwardPointers * sizeof(SkipValueType);
// Total payload size (excludes the 7 extra padding bytes that
// allocList() adds for safe unaligned 64-bit reads).
273 size_t bytes() const {
274 return lower + upper + skipPointers + forwardPointers;
// Carves an existing buffer into a compressed-list view following
// this layout. Section order: skip, forward, lower, upper.
277 template <class Range>
278 EliasFanoCompressedListBase<typename Range::iterator>
279 openList(Range& buf) const {
280 EliasFanoCompressedListBase<typename Range::iterator> result;
282 result.numLowerBits = numLowerBits;
283 result.data = buf.subpiece(0, bytes());
285 auto advance = [&] (size_t n) {
286 auto begin = buf.data();
291 result.skipPointers = advance(skipPointers);
292 result.forwardPointers = advance(forwardPointers);
293 result.lower = advance(lower);
294 result.upper = advance(upper);
299 MutableCompressedList allocList() const {
300 uint8_t* buf = nullptr;
301 // WARNING: Current read/write logic assumes that the 7 bytes
302 // following the last byte of lower and upper sequences are
303 // readable (stored value doesn't matter and won't be changed), so
304 // we allocate additional 7 bytes, but do not include them in size
305 // of returned value.
307 buf = static_cast<uint8_t*>(malloc(bytes() + 7));
309 folly::MutableByteRange bufRange(buf, bytes());
310 return openList(bufRange);
314 uint8_t numLowerBits = 0;
// Per-section sizes in bytes.
319 size_t skipPointers = 0;
320 size_t forwardPointers = 0;
// Iterator over the unary-encoded upper-bits sequence. Maintains a
// byte offset (outer_), a bit offset within the current 64-bit block
// (inner_), the index of the current 1-bit (position_), and the
// decoded upper value (value_ = number of 0-bits before the current
// 1-bit). Uses the encoder's forward/skip pointer sections, when
// enabled, to jump in O(1) instead of scanning.
325 template <class Encoder, class Instructions>
326 class UpperBitsReader {
327 typedef typename Encoder::SkipValueType SkipValueType;
329 typedef typename Encoder::ValueType ValueType;
331 explicit UpperBitsReader(const typename Encoder::CompressedList& list)
332 : forwardPointers_(list.forwardPointers),
333 skipPointers_(list.skipPointers),
// Guard against an empty list: only load the first block if the
// upper sequence exists.
339 block_ = start_ != nullptr ? folly::loadUnaligned<block_t>(start_) : 0;
346 size_t position() const { return position_; }
347 ValueType value() const { return value_; }
// Advance to the next 1-bit: scan forward block by block, then use
// ctz to find the lowest set bit and blsr to clear it.
350 // Skip to the first non-zero block.
351 while (block_ == 0) {
352 outer_ += sizeof(block_t);
353 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
357 inner_ = Instructions::ctz(block_);
358 block_ = Instructions::blsr(block_);
// Advance past the next n 1-bits (n-th next value).
363 ValueType skip(size_t n) {
366 position_ += n; // n 1-bits will be read.
368 // Use forward pointer.
369 if (Encoder::forwardQuantum > 0 && n > Encoder::forwardQuantum) {
370 const size_t steps = position_ / Encoder::forwardQuantum;
372 folly::loadUnaligned<SkipValueType>(
373 forwardPointers_ + (steps - 1) * sizeof(SkipValueType));
375 reposition(dest + steps * Encoder::forwardQuantum);
376 n = position_ + 1 - steps * Encoder::forwardQuantum; // n is > 0.
377 // Correct inner_ will be set at the end.
381 // Find necessary block.
// Scan whole blocks while they contain fewer than n 1-bits.
382 while ((cnt = Instructions::popcount(block_)) < n) {
384 outer_ += sizeof(block_t);
385 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
388 // Skip to the n-th one in the block.
390 inner_ = select64<Instructions>(block_, n - 1);
// Clear the selected bit and everything below it (double shift
// avoids UB when inner_ == 63).
391 block_ &= (block_t(-1) << inner_) << 1;
396 // Skip to the first element that is >= v and located *after* the current
397 // one (so even if current value equals v, position will be increased by 1).
398 ValueType skipToNext(ValueType v) {
399 DCHECK_GE(v, value_);
// Use a skip pointer when the target is at least one quantum of
// 0-bits ahead.
402 if (Encoder::skipQuantum > 0 && v >= value_ + Encoder::skipQuantum) {
403 const size_t steps = v / Encoder::skipQuantum;
405 folly::loadUnaligned<SkipValueType>(
406 skipPointers_ + (steps - 1) * sizeof(SkipValueType));
408 reposition(dest + Encoder::skipQuantum * steps);
409 position_ = dest - 1;
411 // Correct inner_ and value_ will be set during the next()
414 // NOTE: Corresponding block of lower bits sequence may be
415 // prefetched here (via __builtin_prefetch), but experiments
416 // didn't show any significant improvements.
// Number of 0-bits still to skip: v minus the 0-bits already
// consumed (total bits consumed minus 1-bits consumed).
421 size_t skip = v - (8 * outer_ - position_ - 1);
423 constexpr size_t kBitsPerBlock = 8 * sizeof(block_t);
// Scan blocks counting 0-bits (popcount of the complement).
424 while ((cnt = Instructions::popcount(~block_)) < skip) {
426 position_ += kBitsPerBlock - cnt;
427 outer_ += sizeof(block_t);
428 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
// Select the skip-th 0-bit within the block; 1-bits passed along
// the way contribute to position_.
432 auto inner = select64<Instructions>(~block_, skip - 1);
433 position_ += inner - skip + 1;
434 block_ &= block_t(-1) << inner;
// Jump to the n-th 1-bit from the start (absolute), falling back to
// a relative skip() when there are no forward pointers or n is small.
441 ValueType jump(size_t n) {
442 if (Encoder::forwardQuantum == 0 || n <= Encoder::forwardQuantum) {
445 position_ = -1; // Avoid reading the head, skip() will reposition.
// Absolute variant of skipToNext(): restart from the beginning
// unless skip pointers make an absolute jump cheaper.
450 ValueType jumpToNext(ValueType v) {
451 if (Encoder::skipQuantum == 0 || v < Encoder::skipQuantum) {
454 value_ = 0; // Avoid reading the head, skipToNext() will reposition.
456 return skipToNext(v);
// Value of the 1-bit immediately before the current one, found by
// scanning backwards for the previous set bit (clz on the masked
// block). Does not modify reader state.
459 ValueType previousValue() const {
460 DCHECK_NE(position(), -1);
461 DCHECK_GT(position(), 0);
463 size_t outer = outer_;
464 block_t block = folly::loadUnaligned<block_t>(start_ + outer);
// Mask off the current bit and everything above it.
465 block &= (block_t(1) << inner_) - 1;
467 while (UNLIKELY(block == 0)) {
468 DCHECK_GE(outer, sizeof(block_t));
469 outer -= sizeof(block_t);
470 block = folly::loadUnaligned<block_t>(start_ + outer);
473 auto inner = 8 * sizeof(block_t) - 1 - Instructions::clz(block);
474 return static_cast<ValueType>(8 * outer + inner - (position_ - 1));
// Park the reader at the one-past-the-end position.
477 void setDone(size_t endPos) {
// Decode the upper value at the current bit: total bits before the
// current 1-bit minus the 1-bits before it = number of 0-bits.
482 ValueType setValue() {
483 value_ = static_cast<ValueType>(8 * outer_ + inner_ - position_);
// Move to absolute bit offset `dest` in the upper sequence and drop
// the bits below it from the current block.
487 void reposition(size_t dest) {
489 block_ = folly::loadUnaligned<block_t>(start_ + outer_);
490 block_ &= ~((block_t(1) << (dest % 8)) - 1);
493 typedef uint64_t block_t;
494 const unsigned char* const forwardPointers_;
495 const unsigned char* const skipPointers_;
496 const unsigned char* const start_;
498 size_t outer_; // Outer offset: number of consumed bytes in upper.
499 size_t inner_; // Inner offset: (bit) position in current block.
500 size_t position_; // Index of current value (= #reads - 1).
504 } // namespace detail
506 // If kUnchecked = true the caller must guarantee that all the
507 // operations return valid elements, i.e., they would never return
// Reader over an Elias-Fano compressed list: combines the upper-bits
// reader (detail::UpperBitsReader) with direct extraction of the
// fixed-width lower bits, recomposing value = (upper << numLowerBits)
// | lower. Supports sequential next(), relative skip()/skipTo() and
// absolute jump()/jumpTo().
509 template <class Encoder,
510 class Instructions = instructions::Default,
511 bool kUnchecked = false>
512 class EliasFanoReader {
514 typedef Encoder EncoderType;
515 typedef typename Encoder::ValueType ValueType;
517 explicit EliasFanoReader(const typename Encoder::CompressedList& list)
521 lowerMask_((ValueType(1) << list.numLowerBits) - 1),
522 numLowerBits_(list.numLowerBits) {
523 DCHECK(Instructions::supported());
524 // To avoid extra branching during skipTo() while reading
525 // upper sequence we need to know the last element.
526 // If kUnchecked == true, we do not check that skipTo() is called
527 // within the bounds, so we can avoid initializing lastValue_.
528 if (kUnchecked || UNLIKELY(list.size == 0)) {
// Compute the last element's upper part from the tail of the upper
// sequence: total bits minus the `size` 1-bits, then subtract the
// unused bits above the highest set bit of the last byte.
532 ValueType lastUpperValue = 8 * list.upperSize() - size_;
533 auto it = list.upper + list.upperSize() - 1;
535 lastUpperValue -= 8 - folly::findLastSet(*it);
536 lastValue_ = readLowerPart(size_ - 1) | (lastUpperValue << numLowerBits_);
// Reset to the before-the-first-element state.
541 value_ = kInvalidValue;
// Advance to the next element; returns false (via setDone) when the
// checked reader runs past the end.
545 if (!kUnchecked && UNLIKELY(position() + 1 >= size_)) {
549 value_ = readLowerPart(upper_.position()) |
550 (upper_.value() << numLowerBits_);
// Skip n elements forward; small n uses a linear scan of next(),
// larger n delegates to the upper reader's pointer-assisted skip.
554 bool skip(size_t n) {
557 if (kUnchecked || LIKELY(position() + n < size_)) {
558 if (LIKELY(n < kLinearScanThreshold)) {
559 for (size_t i = 0; i < n; ++i) upper_.next();
563 value_ = readLowerPart(upper_.position()) |
564 (upper_.value() << numLowerBits_);
571 bool skipTo(ValueType value) {
572 // Also works when value_ == kInvalidValue.
573 if (value != kInvalidValue) { DCHECK_GE(value + 1, value_ + 1); }
// Out-of-range target fails fast (checked mode); equal target is a
// no-op success.
575 if (!kUnchecked && value > lastValue_) {
577 } else if (value == value_) {
581 size_t upperValue = (value >> numLowerBits_);
582 size_t upperSkip = upperValue - upper_.value();
583 // The average density of ones in upper bits is 1/2.
584 // LIKELY here seems to make things worse, even for small skips.
585 if (upperSkip < 2 * kLinearScanThreshold) {
588 } while (UNLIKELY(upper_.value() < upperValue));
590 upper_.skipToNext(upperValue);
// Absolute positioning: land on the n-th element (0-based).
597 bool jump(size_t n) {
598 if (LIKELY(n < size_)) { // Also checks that n != -1.
599 value_ = readLowerPart(n) | (upper_.jump(n + 1) << numLowerBits_);
// Absolute skipTo: first element >= value, searching from the start.
605 bool jumpTo(ValueType value) {
606 if (!kUnchecked && value > lastValue_) {
610 upper_.jumpToNext(value >> numLowerBits_);
// Value immediately before the current position; does not move the
// reader.
615 ValueType previousValue() const {
616 DCHECK_GT(position(), 0);
617 DCHECK_LT(position(), size());
618 return readLowerPart(upper_.position() - 1) |
619 (upper_.previousValue() << numLowerBits_);
622 size_t size() const { return size_; }
625 return position() < size(); // Also checks that position() != -1.
628 size_t position() const { return upper_.position(); }
629 ValueType value() const {
635 constexpr static ValueType kInvalidValue =
636 std::numeric_limits<ValueType>::max(); // Must hold kInvalidValue + 1 == 0.
// Mark the reader exhausted.
639 value_ = kInvalidValue;
640 upper_.setDone(size_);
// Extract the i-th element's numLowerBits-wide low part via a single
// unaligned 64-bit load (relies on the 7 readable padding bytes).
644 ValueType readLowerPart(size_t i) const {
646 const size_t pos = i * numLowerBits_;
647 const unsigned char* ptr = lower_ + (pos / 8);
648 const uint64_t ptrv = folly::loadUnaligned<uint64_t>(ptr);
649 return lowerMask_ & (ptrv >> (pos % 8));
// Linear-scan helper for skipTo(): step until value_ >= value.
652 void iterateTo(ValueType value) {
654 value_ = readLowerPart(upper_.position()) |
655 (upper_.value() << numLowerBits_);
656 if (LIKELY(value_ >= value)) break;
// Below this many steps, a linear scan beats pointer-assisted skips.
661 constexpr static size_t kLinearScanThreshold = 8;
664 const uint8_t* lower_;
665 detail::UpperBitsReader<Encoder, Instructions> upper_;
666 const ValueType lowerMask_;
667 ValueType value_ = kInvalidValue;
668 ValueType lastValue_;
669 uint8_t numLowerBits_;