2 * Copyright 2014 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 * Nicholas Ormrod (njormrod)
19 * Andrei Alexandrescu (aalexandre)
21 * FBVector is Facebook's drop-in implementation of std::vector. It has special
22 * optimizations for use with relocatable types and jemalloc.
25 #ifndef FOLLY_FBVECTOR_H
26 #define FOLLY_FBVECTOR_H
28 //=============================================================================
36 #include <type_traits>
39 #include "folly/Likely.h"
40 #include "folly/Malloc.h"
41 #include "folly/Traits.h"
43 #include <boost/operators.hpp>
45 // some files expected these from FBVector
47 #include "folly/Foreach.h"
48 #include <boost/type_traits.hpp>
49 #include <boost/utility/enable_if.hpp>
51 //=============================================================================
52 // forward declaration
55 template <class T, class Allocator = std::allocator<T>>
59 //=============================================================================
62 #define FOLLY_FBV_UNROLL_PTR(first, last, OP) do { \
63 for (; (last) - (first) >= 4; (first) += 4) { \
69 for (; (first) != (last); ++(first)) OP((first)); \
72 //=============================================================================
73 ///////////////////////////////////////////////////////////////////////////////
77 ///////////////////////////////////////////////////////////////////////////////
81 template <class T, class Allocator>
82 class fbvector : private boost::totally_ordered<fbvector<T, Allocator>> {
84 //===========================================================================
85 //---------------------------------------------------------------------------
89 typedef std::allocator_traits<Allocator> A;
91 struct Impl : public Allocator {
93 typedef typename A::pointer pointer;
94 typedef typename A::size_type size_type;
100 Impl() : Allocator(), b_(nullptr), e_(nullptr), z_(nullptr) {}
101 Impl(const Allocator& a)
102 : Allocator(a), b_(nullptr), e_(nullptr), z_(nullptr) {}
104 : Allocator(std::move(a)), b_(nullptr), e_(nullptr), z_(nullptr) {}
106 Impl(size_type n, const Allocator& a = Allocator())
111 : Allocator(std::move(other)),
112 b_(other.b_), e_(other.e_), z_(other.z_)
113 { other.b_ = other.e_ = other.z_ = nullptr; }
121 // note that 'allocate' and 'deallocate' are inherited from Allocator
122 T* D_allocate(size_type n) {
123 if (usingStdAllocator::value) {
124 return static_cast<T*>(malloc(n * sizeof(T)));
126 return std::allocator_traits<Allocator>::allocate(*this, n);
130 void D_deallocate(T* p, size_type n) noexcept {
131 if (usingStdAllocator::value) {
134 std::allocator_traits<Allocator>::deallocate(*this, p, n);
// Exchange only the three data pointers (begin/end/capacity) with `other`.
// NOTE(review): allocator state is not touched here — full Impl swap,
// including the allocator, appears to be handled elsewhere (see static swap).
139 void swapData(Impl& other) {
140 std::swap(b_, other.b_);
141 std::swap(e_, other.e_);
142 std::swap(z_, other.z_);
146 inline void destroy() noexcept {
148 // THIS DISPATCH CODE IS DUPLICATED IN fbvector::D_destroy_range_a.
149 // It has been inlined here for speed. It calls the static fbvector
150 // methods to perform the actual destruction.
151 if (usingStdAllocator::value) {
152 S_destroy_range(b_, e_);
154 S_destroy_range_a(*this, b_, e_);
157 D_deallocate(b_, z_ - b_);
161 void init(size_type n) {
162 if (UNLIKELY(n == 0)) {
163 b_ = e_ = z_ = nullptr;
165 size_type sz = folly::goodMallocSize(n * sizeof(T)) / sizeof(T);
173 set(pointer newB, size_type newSize, size_type newCap) {
179 void reset(size_type newCap) {
188 void reset() { // same as reset(0)
190 b_ = e_ = z_ = nullptr;
194 static void swap(Impl& a, Impl& b) {
196 if (!usingStdAllocator::value) swap<Allocator>(a, b);
200 //===========================================================================
201 //---------------------------------------------------------------------------
202 // types and constants
205 typedef T value_type;
206 typedef value_type& reference;
207 typedef const value_type& const_reference;
209 typedef const T* const_iterator;
210 typedef size_t size_type;
211 typedef typename std::make_signed<size_type>::type difference_type;
212 typedef Allocator allocator_type;
213 typedef typename A::pointer pointer;
214 typedef typename A::const_pointer const_pointer;
215 typedef std::reverse_iterator<iterator> reverse_iterator;
216 typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
220 typedef std::integral_constant<bool,
221 boost::has_trivial_copy_constructor<T>::value &&
222 sizeof(T) <= 16 // don't force large structures to be passed by value
223 > should_pass_by_value;
224 typedef typename std::conditional<
225 should_pass_by_value::value, T, const T&>::type VT;
226 typedef typename std::conditional<
227 should_pass_by_value::value, T, T&&>::type MT;
229 typedef std::integral_constant<bool,
230 std::is_same<Allocator, std::allocator<T>>::value> usingStdAllocator;
231 typedef std::integral_constant<bool,
232 usingStdAllocator::value ||
233 A::propagate_on_container_move_assignment::value> moveIsSwap;
235 //===========================================================================
236 //---------------------------------------------------------------------------
240 //---------------------------------------------------------------------------
243 T* M_allocate(size_type n) {
244 return impl_.D_allocate(n);
247 //---------------------------------------------------------------------------
250 void M_deallocate(T* p, size_type n) noexcept {
251 impl_.D_deallocate(p, n);
254 //---------------------------------------------------------------------------
257 // GCC is very sensitive to the exact way that construct is called. For
258 // that reason there are several different specializations of construct.
260 template <typename U, typename... Args>
261 void M_construct(U* p, Args&&... args) {
262 if (usingStdAllocator::value) {
263 new (p) U(std::forward<Args>(args)...);
265 std::allocator_traits<Allocator>::construct(
266 impl_, p, std::forward<Args>(args)...);
270 template <typename U, typename... Args>
271 static void S_construct(U* p, Args&&... args) {
272 new (p) U(std::forward<Args>(args)...);
275 template <typename U, typename... Args>
276 static void S_construct_a(Allocator& a, U* p, Args&&... args) {
277 std::allocator_traits<Allocator>::construct(
278 a, p, std::forward<Args>(args)...);
281 // scalar optimization
282 // TODO we can expand this optimization to: default copyable and assignable
283 template <typename U, typename Enable = typename
284 std::enable_if<std::is_scalar<U>::value>::type>
285 void M_construct(U* p, U arg) {
286 if (usingStdAllocator::value) {
289 std::allocator_traits<Allocator>::construct(impl_, p, arg);
293 template <typename U, typename Enable = typename
294 std::enable_if<std::is_scalar<U>::value>::type>
295 static void S_construct(U* p, U arg) {
299 template <typename U, typename Enable = typename
300 std::enable_if<std::is_scalar<U>::value>::type>
301 static void S_construct_a(Allocator& a, U* p, U arg) {
302 std::allocator_traits<Allocator>::construct(a, p, arg);
305 // const& optimization
306 template <typename U, typename Enable = typename
307 std::enable_if<!std::is_scalar<U>::value>::type>
308 void M_construct(U* p, const U& value) {
309 if (usingStdAllocator::value) {
312 std::allocator_traits<Allocator>::construct(impl_, p, value);
316 template <typename U, typename Enable = typename
317 std::enable_if<!std::is_scalar<U>::value>::type>
318 static void S_construct(U* p, const U& value) {
322 template <typename U, typename Enable = typename
323 std::enable_if<!std::is_scalar<U>::value>::type>
324 static void S_construct_a(Allocator& a, U* p, const U& value) {
325 std::allocator_traits<Allocator>::construct(a, p, value);
328 //---------------------------------------------------------------------------
331 void M_destroy(T* p) noexcept {
332 if (usingStdAllocator::value) {
333 if (!boost::has_trivial_destructor<T>::value) p->~T();
335 std::allocator_traits<Allocator>::destroy(impl_, p);
339 //===========================================================================
340 //---------------------------------------------------------------------------
341 // algorithmic helpers
344 //---------------------------------------------------------------------------
348 void M_destroy_range_e(T* pos) noexcept {
349 D_destroy_range_a(pos, impl_.e_);
354 // THIS DISPATCH CODE IS DUPLICATED IN IMPL. SEE IMPL FOR DETAILS.
355 void D_destroy_range_a(T* first, T* last) noexcept {
356 if (usingStdAllocator::value) {
357 S_destroy_range(first, last);
359 S_destroy_range_a(impl_, first, last);
364 static void S_destroy_range_a(Allocator& a, T* first, T* last) noexcept {
365 for (; first != last; ++first)
366 std::allocator_traits<Allocator>::destroy(a, first);
370 static void S_destroy_range(T* first, T* last) noexcept {
371 if (!boost::has_trivial_destructor<T>::value) {
372 // EXPERIMENTAL DATA on fbvector<vector<int>> (where each vector<int> has
374 // The unrolled version seems to work faster for small to medium sized
375 // fbvectors. It gets a 10% speedup on fbvectors of size 1024, 64, and
377 // The simple loop version seems to work faster for large fbvectors. The
378 // unrolled version is about 6% slower on fbvectors on size 16384.
379 // The two methods seem tied for very large fbvectors. The unrolled
380 // version is about 0.5% slower on size 262144.
382 // for (; first != last; ++first) first->~T();
383 #define FOLLY_FBV_OP(p) (p)->~T()
384 FOLLY_FBV_UNROLL_PTR(first, last, FOLLY_FBV_OP)
389 //---------------------------------------------------------------------------
390 // uninitialized_fill_n
393 void M_uninitialized_fill_n_e(size_type sz) {
394 D_uninitialized_fill_n_a(impl_.e_, sz);
398 void M_uninitialized_fill_n_e(size_type sz, VT value) {
399 D_uninitialized_fill_n_a(impl_.e_, sz, value);
404 void D_uninitialized_fill_n_a(T* dest, size_type sz) {
405 if (usingStdAllocator::value) {
406 S_uninitialized_fill_n(dest, sz);
408 S_uninitialized_fill_n_a(impl_, dest, sz);
412 void D_uninitialized_fill_n_a(T* dest, size_type sz, VT value) {
413 if (usingStdAllocator::value) {
414 S_uninitialized_fill_n(dest, sz, value);
416 S_uninitialized_fill_n_a(impl_, dest, sz, value);
421 template <typename... Args>
422 static void S_uninitialized_fill_n_a(Allocator& a, T* dest,
423 size_type sz, Args&&... args) {
428 std::allocator_traits<Allocator>::construct(a, b,
429 std::forward<Args>(args)...);
431 S_destroy_range_a(a, dest, b);
437 static void S_uninitialized_fill_n(T* dest, size_type n) {
438 if (folly::IsZeroInitializable<T>::value) {
439 std::memset(dest, 0, sizeof(T) * n);
444 for (; b != e; ++b) S_construct(b);
447 for (; b >= dest; --b) b->~T();
453 static void S_uninitialized_fill_n(T* dest, size_type n, const T& value) {
457 for (; b != e; ++b) S_construct(b, value);
459 S_destroy_range(dest, b);
464 //---------------------------------------------------------------------------
465 // uninitialized_copy
467 // it is possible to add an optimization for the case where
468 // It = move(T*) and IsRelocatable<T> and IsZeroInitializable<T>
471 template <typename It>
472 void M_uninitialized_copy_e(It first, It last) {
473 D_uninitialized_copy_a(impl_.e_, first, last);
474 impl_.e_ += std::distance(first, last);
477 template <typename It>
478 void M_uninitialized_move_e(It first, It last) {
479 D_uninitialized_move_a(impl_.e_, first, last);
480 impl_.e_ += std::distance(first, last);
484 template <typename It>
485 void D_uninitialized_copy_a(T* dest, It first, It last) {
486 if (usingStdAllocator::value) {
487 if (folly::IsTriviallyCopyable<T>::value) {
488 S_uninitialized_copy_bits(dest, first, last);
490 S_uninitialized_copy(dest, first, last);
493 S_uninitialized_copy_a(impl_, dest, first, last);
497 template <typename It>
498 void D_uninitialized_move_a(T* dest, It first, It last) {
499 D_uninitialized_copy_a(dest,
500 std::make_move_iterator(first), std::make_move_iterator(last));
504 template <typename It>
506 S_uninitialized_copy_a(Allocator& a, T* dest, It first, It last) {
509 for (; first != last; ++first, ++b)
510 std::allocator_traits<Allocator>::construct(a, b, *first);
512 S_destroy_range_a(a, dest, b);
518 template <typename It>
519 static void S_uninitialized_copy(T* dest, It first, It last) {
522 for (; first != last; ++first, ++b)
523 S_construct(b, *first);
525 S_destroy_range(dest, b);
531 S_uninitialized_copy_bits(T* dest, const T* first, const T* last) {
532 std::memcpy(dest, first, (last - first) * sizeof(T));
536 S_uninitialized_copy_bits(T* dest, std::move_iterator<T*> first,
537 std::move_iterator<T*> last) {
538 T* bFirst = first.base();
539 T* bLast = last.base();
540 std::memcpy(dest, bFirst, (bLast - bFirst) * sizeof(T));
543 template <typename It>
545 S_uninitialized_copy_bits(T* dest, It first, It last) {
546 S_uninitialized_copy(dest, first, last);
549 //---------------------------------------------------------------------------
552 // This function is "unsafe": it assumes that the iterator can be advanced at
553 // least n times. However, as a private function, that unsafety is managed
554 // wholly by fbvector itself.
556 template <typename It>
557 static It S_copy_n(T* dest, It first, size_type n) {
559 for (; dest != e; ++dest, ++first) *dest = *first;
563 static const T* S_copy_n(T* dest, const T* first, size_type n) {
564 if (folly::IsTriviallyCopyable<T>::value) {
565 std::memcpy(dest, first, n * sizeof(T));
568 return S_copy_n<const T*>(dest, first, n);
572 static std::move_iterator<T*>
573 S_copy_n(T* dest, std::move_iterator<T*> mIt, size_type n) {
574 if (folly::IsTriviallyCopyable<T>::value) {
575 T* first = mIt.base();
576 std::memcpy(dest, first, n * sizeof(T));
577 return std::make_move_iterator(first + n);
579 return S_copy_n<std::move_iterator<T*>>(dest, mIt, n);
583 //===========================================================================
584 //---------------------------------------------------------------------------
585 // relocation helpers
588 // Relocation is divided into three parts:
591 // Performs the actual movement of data from point a to point b.
594 // Destroys the old data.
597 // Destroys the new data and restores the old data.
599 // The three steps are used because there may be an exception after part 1
600 // has completed. If that is the case, then relocate_undo can nullify the
601 // initial move. Otherwise, relocate_done performs the last bit of tidying
604 // The relocation trio may use either memcpy, move, or copy. It is decided
605 // by the following case statement:
607 // IsRelocatable && usingStdAllocator -> memcpy
608 // has_nothrow_move && usingStdAllocator -> move
609 // cannot copy -> move
612 // If the class is non-copyable then it must be movable. However, if the
613 // move constructor is not noexcept, i.e. an error could be thrown, then
614 // relocate_undo will be unable to restore the old data, for fear of a
615 // second exception being thrown. This is a known and unavoidable
616 // deficiency. In lieu of a strong exception guarantee, relocate_undo does
617 // the next best thing: it provides a weak exception guarantee by
618 // destroying the new data, but leaving the old data in an indeterminate
619 // state. Note that that indeterminate state will be valid, since the
620 // old data has not been destroyed; it has merely been the source of a
621 // move, which is required to leave the source in a valid state.
624 void M_relocate(T* newB) {
625 relocate_move(newB, impl_.b_, impl_.e_);
626 relocate_done(newB, impl_.b_, impl_.e_);
629 // dispatch type trait
630 typedef std::integral_constant<bool,
631 folly::IsRelocatable<T>::value && usingStdAllocator::value
632 > relocate_use_memcpy;
634 typedef std::integral_constant<bool,
635 (std::is_nothrow_move_constructible<T>::value
636 && usingStdAllocator::value)
637 || !std::is_copy_constructible<T>::value
641 void relocate_move(T* dest, T* first, T* last) {
642 relocate_move_or_memcpy(dest, first, last, relocate_use_memcpy());
645 void relocate_move_or_memcpy(T* dest, T* first, T* last, std::true_type) {
646 std::memcpy(dest, first, (last - first) * sizeof(T));
649 void relocate_move_or_memcpy(T* dest, T* first, T* last, std::false_type) {
650 relocate_move_or_copy(dest, first, last, relocate_use_move());
653 void relocate_move_or_copy(T* dest, T* first, T* last, std::true_type) {
654 D_uninitialized_move_a(dest, first, last);
657 void relocate_move_or_copy(T* dest, T* first, T* last, std::false_type) {
658 D_uninitialized_copy_a(dest, first, last);
662 void relocate_done(T* dest, T* first, T* last) noexcept {
663 if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
664 // used memcpy; data has been relocated, do not call destructor
666 D_destroy_range_a(first, last);
671 void relocate_undo(T* dest, T* first, T* last) noexcept {
672 if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
673 // used memcpy, old data is still valid, nothing to do
674 } else if (std::is_nothrow_move_constructible<T>::value &&
675 usingStdAllocator::value) {
676 // noexcept move everything back, aka relocate_move
677 relocate_move(first, dest, dest + (last - first));
678 } else if (!std::is_copy_constructible<T>::value) {
680 D_destroy_range_a(dest, dest + (last - first));
682 // used copy, old data is still valid
683 D_destroy_range_a(dest, dest + (last - first));
688 //===========================================================================
689 //---------------------------------------------------------------------------
690 // construct/copy/destroy
693 fbvector() = default;
695 explicit fbvector(const Allocator& a) : impl_(a) {}
697 explicit fbvector(size_type n, const Allocator& a = Allocator())
699 { M_uninitialized_fill_n_e(n); }
701 fbvector(size_type n, VT value, const Allocator& a = Allocator())
703 { M_uninitialized_fill_n_e(n, value); }
705 template <class It, class Category = typename
706 std::iterator_traits<It>::iterator_category>
707 fbvector(It first, It last, const Allocator& a = Allocator())
708 : fbvector(first, last, a, Category()) {}
710 fbvector(const fbvector& other)
711 : impl_(other.size(), A::select_on_container_copy_construction(other.impl_))
712 { M_uninitialized_copy_e(other.begin(), other.end()); }
714 fbvector(fbvector&& other) noexcept : impl_(std::move(other.impl_)) {}
716 fbvector(const fbvector& other, const Allocator& a)
717 : fbvector(other.begin(), other.end(), a) {}
719 fbvector(fbvector&& other, const Allocator& a) : impl_(a) {
720 if (impl_ == other.impl_) {
721 impl_.swapData(other.impl_);
723 impl_.init(other.size());
724 M_uninitialized_move_e(other.begin(), other.end());
728 fbvector(std::initializer_list<T> il, const Allocator& a = Allocator())
729 : fbvector(il.begin(), il.end(), a) {}
731 ~fbvector() = default; // the cleanup occurs in impl_
733 fbvector& operator=(const fbvector& other) {
734 if (UNLIKELY(this == &other)) return *this;
736 if (!usingStdAllocator::value &&
737 A::propagate_on_container_copy_assignment::value) {
738 if (impl_ != other.impl_) {
739 // can't use other's different allocator to clean up self
742 (Allocator&)impl_ = (Allocator&)other.impl_;
745 assign(other.begin(), other.end());
749 fbvector& operator=(fbvector&& other) {
750 if (UNLIKELY(this == &other)) return *this;
751 moveFrom(std::move(other), moveIsSwap());
755 fbvector& operator=(std::initializer_list<T> il) {
756 assign(il.begin(), il.end());
760 template <class It, class Category = typename
761 std::iterator_traits<It>::iterator_category>
762 void assign(It first, It last) {
763 assign(first, last, Category());
766 void assign(size_type n, VT value) {
767 if (n > capacity()) {
768 // Not enough space. Do not reserve in place, since we will
769 // discard the old values anyways.
770 if (dataIsInternalAndNotVT(value)) {
771 T copy(std::move(value));
773 M_uninitialized_fill_n_e(n, copy);
776 M_uninitialized_fill_n_e(n, value);
778 } else if (n <= size()) {
779 auto newE = impl_.b_ + n;
780 std::fill(impl_.b_, newE, value);
781 M_destroy_range_e(newE);
783 std::fill(impl_.b_, impl_.e_, value);
784 M_uninitialized_fill_n_e(n - size(), value);
788 void assign(std::initializer_list<T> il) {
789 assign(il.begin(), il.end());
792 allocator_type get_allocator() const noexcept {
798 // contract dispatch for iterator types fbvector(It first, It last)
799 template <class ForwardIterator>
800 fbvector(ForwardIterator first, ForwardIterator last,
801 const Allocator& a, std::forward_iterator_tag)
802 : impl_(std::distance(first, last), a)
803 { M_uninitialized_copy_e(first, last); }
805 template <class InputIterator>
806 fbvector(InputIterator first, InputIterator last,
807 const Allocator& a, std::input_iterator_tag)
809 { for (; first != last; ++first) emplace_back(*first); }
811 // contract dispatch for allocator movement in operator=(fbvector&&)
813 moveFrom(fbvector&& other, std::true_type) {
814 swap(impl_, other.impl_);
816 void moveFrom(fbvector&& other, std::false_type) {
817 if (impl_ == other.impl_) {
818 impl_.swapData(other.impl_);
820 impl_.reset(other.size());
821 M_uninitialized_move_e(other.begin(), other.end());
825 // contract dispatch for iterator types in assign(It first, It last)
826 template <class ForwardIterator>
827 void assign(ForwardIterator first, ForwardIterator last,
828 std::forward_iterator_tag) {
829 auto const newSize = std::distance(first, last);
830 if (newSize > capacity()) {
831 impl_.reset(newSize);
832 M_uninitialized_copy_e(first, last);
833 } else if (newSize <= size()) {
834 auto newEnd = std::copy(first, last, impl_.b_);
835 M_destroy_range_e(newEnd);
837 auto mid = S_copy_n(impl_.b_, first, size());
838 M_uninitialized_copy_e<decltype(last)>(mid, last);
842 template <class InputIterator>
843 void assign(InputIterator first, InputIterator last,
844 std::input_iterator_tag) {
846 for (; first != last && p != impl_.e_; ++first, ++p) {
850 M_destroy_range_e(p);
852 for (; first != last; ++first) emplace_back(*first);
856 // contract dispatch for aliasing under VT optimization
// True iff `t` may alias this vector's own storage under the VT
// optimization. When should_pass_by_value is true, VT == T and arguments
// are received by value, so the argument can never alias an element.
857 bool dataIsInternalAndNotVT(const T& t) {
858 if (should_pass_by_value::value) return false;
859 return dataIsInternal(t);
// True iff `t`'s address lies inside [b_, e_), i.e. it refers to one of
// this vector's live elements. Marked UNLIKELY: self-aliasing arguments
// are expected to be rare.
861 bool dataIsInternal(const T& t) {
862 return UNLIKELY(impl_.b_ <= std::addressof(t) &&
863 std::addressof(t) < impl_.e_);
867 //===========================================================================
868 //---------------------------------------------------------------------------
872 iterator begin() noexcept {
875 const_iterator begin() const noexcept {
878 iterator end() noexcept {
881 const_iterator end() const noexcept {
884 reverse_iterator rbegin() noexcept {
885 return reverse_iterator(end());
887 const_reverse_iterator rbegin() const noexcept {
888 return const_reverse_iterator(end());
890 reverse_iterator rend() noexcept {
891 return reverse_iterator(begin());
893 const_reverse_iterator rend() const noexcept {
894 return const_reverse_iterator(begin());
897 const_iterator cbegin() const noexcept {
900 const_iterator cend() const noexcept {
903 const_reverse_iterator crbegin() const noexcept {
904 return const_reverse_iterator(end());
906 const_reverse_iterator crend() const noexcept {
907 return const_reverse_iterator(begin());
910 //===========================================================================
911 //---------------------------------------------------------------------------
915 size_type size() const noexcept {
916 return impl_.e_ - impl_.b_;
919 size_type max_size() const noexcept {
920 // good luck gettin' there
921 return ~size_type(0);
924 void resize(size_type n) {
926 M_destroy_range_e(impl_.b_ + n);
929 M_uninitialized_fill_n_e(n - size());
933 void resize(size_type n, VT t) {
935 M_destroy_range_e(impl_.b_ + n);
936 } else if (dataIsInternalAndNotVT(t) && n > capacity()) {
939 M_uninitialized_fill_n_e(n - size(), copy);
942 M_uninitialized_fill_n_e(n - size(), t);
946 size_type capacity() const noexcept {
947 return impl_.z_ - impl_.b_;
950 bool empty() const noexcept {
951 return impl_.b_ == impl_.e_;
954 void reserve(size_type n) {
955 if (n <= capacity()) return;
956 if (impl_.b_ && reserve_in_place(n)) return;
958 auto newCap = folly::goodMallocSize(n * sizeof(T)) / sizeof(T);
959 auto newB = M_allocate(newCap);
963 M_deallocate(newB, newCap);
967 M_deallocate(impl_.b_, impl_.z_ - impl_.b_);
968 impl_.z_ = newB + newCap;
969 impl_.e_ = newB + (impl_.e_ - impl_.b_);
973 void shrink_to_fit() noexcept {
974 auto const newCapacityBytes = folly::goodMallocSize(size() * sizeof(T));
975 auto const newCap = newCapacityBytes / sizeof(T);
976 auto const oldCap = capacity();
978 if (newCap >= oldCap) return;
981 if ((rallocm && usingStdAllocator::value) &&
982 newCapacityBytes >= folly::jemallocMinInPlaceExpandable &&
983 rallocm(&p, nullptr, newCapacityBytes, 0, ALLOCM_NO_MOVE)
985 impl_.z_ += newCap - oldCap;
987 T* newB; // intentionally uninitialized
989 newB = M_allocate(newCap);
993 M_deallocate(newB, newCap);
994 return; // swallow the error
1000 M_deallocate(impl_.b_, impl_.z_ - impl_.b_);
1001 impl_.z_ = newB + newCap;
1002 impl_.e_ = newB + (impl_.e_ - impl_.b_);
1009 bool reserve_in_place(size_type n) {
1010 if (!usingStdAllocator::value || !rallocm) return false;
1012 // jemalloc can never grow in place blocks smaller than 4096 bytes.
1013 if ((impl_.z_ - impl_.b_) * sizeof(T) <
1014 folly::jemallocMinInPlaceExpandable) return false;
1016 auto const newCapacityBytes = folly::goodMallocSize(n * sizeof(T));
1018 if (rallocm(&p, nullptr, newCapacityBytes, 0, ALLOCM_NO_MOVE)
1019 == ALLOCM_SUCCESS) {
1020 impl_.z_ = impl_.b_ + newCapacityBytes / sizeof(T);
1026 //===========================================================================
1027 //---------------------------------------------------------------------------
1031 reference operator[](size_type n) {
1035 const_reference operator[](size_type n) const {
1039 const_reference at(size_type n) const {
1040 if (UNLIKELY(n >= size())) {
1041 throw std::out_of_range("fbvector: index is greater than size.");
1045 reference at(size_type n) {
1046 auto const& cThis = *this;
1047 return const_cast<reference>(cThis.at(n));
1053 const_reference front() const {
1059 return impl_.e_[-1];
1061 const_reference back() const {
1063 return impl_.e_[-1];
1066 //===========================================================================
1067 //---------------------------------------------------------------------------
1071 T* data() noexcept {
1074 const T* data() const noexcept {
1078 //===========================================================================
1079 //---------------------------------------------------------------------------
1080 // modifiers (common)
1083 template <class... Args>
1084 void emplace_back(Args&&... args) {
1085 if (impl_.e_ != impl_.z_) {
1086 M_construct(impl_.e_, std::forward<Args>(args)...);
1089 emplace_back_aux(std::forward<Args>(args)...);
1094 push_back(const T& value) {
1095 if (impl_.e_ != impl_.z_) {
1096 M_construct(impl_.e_, value);
1099 emplace_back_aux(value);
1104 push_back(T&& value) {
1105 if (impl_.e_ != impl_.z_) {
1106 M_construct(impl_.e_, std::move(value));
1109 emplace_back_aux(std::move(value));
1116 M_destroy(impl_.e_);
1119 void swap(fbvector& other) noexcept {
1120 if (!usingStdAllocator::value &&
1121 A::propagate_on_container_swap::value)
1122 swap(impl_, other.impl_);
1123 else impl_.swapData(other.impl_);
1126 void clear() noexcept {
1127 M_destroy_range_e(impl_.b_);
1132 // std::vector implements a similar function with a different growth
1133 // strategy: empty() ? 1 : capacity() * 2.
1135 // fbvector grows differently on two counts:
1138 // Instead of growing to size 1 from empty, fbvector allocates at
1139 // least 64 bytes. You may still use reserve to reserve a lesser amount
1142 // For medium-sized vectors, the growth strategy is 1.5x. See the docs
1144 // This does not apply to very small or very large fbvectors. This is a
1146 // A nice addition to fbvector would be the capability of having a user-
1147 // defined growth strategy, probably as part of the allocator.
1150 size_type computePushBackCapacity() const {
1151 return empty() ? std::max(64 / sizeof(T), size_type(1))
1152 : capacity() < folly::jemallocMinInPlaceExpandable / sizeof(T)
1154 : sizeof(T) > folly::jemallocMinInPlaceExpandable / 2 && capacity() == 1
1156 : capacity() > 4096 * 32 / sizeof(T)
1158 : (capacity() * 3 + 1) / 2;
1161 template <class... Args>
1162 void emplace_back_aux(Args&&... args);
1164 //===========================================================================
1165 //---------------------------------------------------------------------------
1166 // modifiers (erase)
1169 iterator erase(const_iterator position) {
1170 return erase(position, position + 1);
1173 iterator erase(const_iterator first, const_iterator last) {
1174 assert(isValid(first) && isValid(last));
1175 assert(first <= last);
1176 if (first != last) {
1177 if (last == end()) {
1178 M_destroy_range_e((iterator)first);
1180 if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
1181 D_destroy_range_a((iterator)first, (iterator)last);
1182 if (last - first >= cend() - last) {
1183 std::memcpy((iterator)first, last, (cend() - last) * sizeof(T));
1185 std::memmove((iterator)first, last, (cend() - last) * sizeof(T));
1187 impl_.e_ -= (last - first);
1189 std::copy(std::make_move_iterator((iterator)last),
1190 std::make_move_iterator(end()), (iterator)first);
1191 auto newEnd = impl_.e_ - std::distance(first, last);
1192 M_destroy_range_e(newEnd);
1196 return (iterator)first;
1199 //===========================================================================
1200 //---------------------------------------------------------------------------
1201 // modifiers (insert)
1202 private: // we have the private section first because it defines some macros
// Debug helper: `it` is valid for this container iff it points into
// [cbegin(), cend()] (the past-the-end position is allowed).
1204 bool isValid(const_iterator it) {
1205 return cbegin() <= it && it <= cend();
// Capacity to allocate when inserting `n` elements: at least the normal
// push_back growth, and at least enough to hold size() + n; then rounded
// to a jemalloc-friendly size via goodMallocSize.
1208 size_type computeInsertCapacity(size_type n) {
1209 size_type nc = std::max(computePushBackCapacity(), size() + n);
1210 size_type ac = folly::goodMallocSize(nc * sizeof(T)) / sizeof(T);
1214 //---------------------------------------------------------------------------
1216 // make_window takes an fbvector, and creates an uninitialized gap (a
1217 // window) at the given position, of the given size. The fbvector must
1218 // have enough capacity.
1220 // Explanation by picture.
1224 // make_window here of size 3
1228 // If something goes wrong and the window must be destroyed, use
1229 // undo_window to provide a weak exception guarantee. It destroys
1234 //---------------------------------------------------------------------------
1236 // wrap_frame takes an inverse window and relocates an fbvector around it.
1237 // The fbvector must have at least as many elements as the left ledge.
1239 // Explanation by picture.
1242 // fbvector: inverse window:
1243 // 123456789______ _____abcde_______
1247 // _______________ 12345abcde6789___
1249 //---------------------------------------------------------------------------
1251 // insert_use_fresh_memory returns true iff the fbvector should use a fresh
1252 // block of memory for the insertion. If the fbvector does not have enough
1253 // spare capacity, then it must return true. Otherwise either true or false
1256 //---------------------------------------------------------------------------
1258 // These three functions, make_window, wrap_frame, and
1259 // insert_use_fresh_memory, can be combined into a uniform interface.
1260 // Since that interface involves a lot of case-work, it is built into
1261 // some macros: FOLLY_FBVECTOR_INSERT_(START|TRY|END)
1262 // Macros are used in an attempt to let GCC perform better optimizations,
1263 // especially control flow optimization.
1266 //---------------------------------------------------------------------------
// Opens an uninitialized gap ("window") of n slots at `position` by
// shifting the tail [position, impl_.e_) right by n. The vector must
// already have capacity for size() + n (asserted below).
// NOTE(review): this listing is elided — the if/else scaffolding that
// selects between the relocate_*, memmove, and move/copy strategies is
// only partially visible; confirm branch structure against the full
// source before relying on it.
1269 void make_window(iterator position, size_type n) {
1270 assert(isValid(position));
1271 assert(size() + n <= capacity());
// Number of elements in the tail that must be shifted right.
1274 auto tail = std::distance(position, impl_.e_);
// relocate_move/relocate_done: project helpers that move the tail into
// raw storage at position + n and finalize (destroy sources if needed).
1277 relocate_move(position + n, position, impl_.e_);
1278 relocate_done(position + n, position, impl_.e_);
// Trivially-relocatable T with std::allocator: a raw byte shift of the
// tail is legal and fastest.
1281 if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
1282 std::memmove(position + n, position, tail * sizeof(T));
// General path: move-construct the last n tail elements into the
// uninitialized area past e_, then move-assign the remainder backward.
1285 D_uninitialized_move_a(impl_.e_, impl_.e_ - n, impl_.e_);
1287 std::copy_backward(std::make_move_iterator(position),
1288 std::make_move_iterator(impl_.e_ - n), impl_.e_);
// Destroy the moved-from slots so the window is truly uninitialized.
1290 D_destroy_range_a(impl_.e_ - n, impl_.e_ + n);
1295 D_destroy_range_a(position, position + n);
// Rolls back make_window when element construction fails: destroys the
// (already shifted) tail past the window and truncates the vector at
// `position`. Gives the weak exception guarantee — the tail elements
// are lost, but the vector stays in a valid, destructible state.
1300 void undo_window(iterator position, size_type n) noexcept {
1301 D_destroy_range_a(position + n, impl_.e_);
1302 impl_.e_ = position;
1305 //---------------------------------------------------------------------------
// Relocates this vector's elements around an "inverse window" in fresh
// memory at `ledge`: the prefix [b_, b_+idx) goes to the left ledge,
// the suffix [b_+idx, e_) goes just past the window (ledge + idx + n).
// relocate_undo un-does the prefix relocation on the failure path;
// relocate_done finalizes both halves on success.
// NOTE(review): the try/catch (or equivalent) that separates the
// relocate_undo path from the relocate_done path is elided in this
// listing — confirm control flow against the full source.
1308 void wrap_frame(T* ledge, size_type idx, size_type n) {
1309 assert(size() >= idx);
1312 relocate_move(ledge, impl_.b_, impl_.b_ + idx);
1314 relocate_move(ledge + idx + n, impl_.b_ + idx, impl_.e_);
1316 relocate_undo(ledge, impl_.b_, impl_.b_ + idx);
1319 relocate_done(ledge, impl_.b_, impl_.b_ + idx);
1320 relocate_done(ledge + idx + n, impl_.b_ + idx, impl_.e_);
1323 //---------------------------------------------------------------------------
// Decides whether an insertion of n elements at cposition should use a
// freshly allocated block (true) or open a window in the existing one
// (false). Appending (cposition == cend()) prefers existing capacity,
// then in-place expansion via reserve_in_place; a mid-vector insert
// that exceeds capacity must use fresh memory.
1326 bool insert_use_fresh(const_iterator cposition, size_type n) {
1327 if (cposition == cend()) {
1328 if (size() + n <= capacity()) return false;
1329 if (reserve_in_place(size() + n)) return false;
1333 if (size() + n > capacity()) return true;
1338 //---------------------------------------------------------------------------
// FOLLY_FBVECTOR_INSERT_START(cpos, n): validates cpos, decides fresh
// allocation vs. in-place window (insert_use_fresh), and establishes
// the locals used by the caller's construction code: `position`, `idx`
// (insert offset), `fresh`, `newCap`, `b` (fresh buffer), and `start`
// (where the n new elements must be constructed).
// NOTE(review): the macro bodies are '\'-continued and this listing is
// elided, so some continuation lines (branches, declarations) are not
// visible here. No comments may appear inside the macros themselves.
1341 #define FOLLY_FBVECTOR_INSERT_START(cpos, n) \
1342 assert(isValid(cpos)); \
1343 T* position = const_cast<T*>(cpos); \
1344 size_type idx = std::distance(impl_.b_, position); \
1345 bool fresh = insert_use_fresh(position, n); \
1347 size_type newCap = 0; \
1350 newCap = computeInsertCapacity(n); \
1351 b = M_allocate(newCap); \
1353 make_window(position, n); \
1357 T* start = b + idx; \
1361 // construct the inserted elements
// FOLLY_FBVECTOR_INSERT_TRY(cpos, n): the rollback seam. If the
// caller's construction code threw, release the fresh buffer or undo
// the window; on success, wrap the old elements around the new frame.
1363 #define FOLLY_FBVECTOR_INSERT_TRY(cpos, n) \
1366 M_deallocate(b, newCap); \
1368 undo_window(position, n); \
1375 wrap_frame(b, idx, n); \
1379 // delete the inserted elements (exception has been thrown)
// FOLLY_FBVECTOR_INSERT_END(cpos, n): commit — free whichever buffer
// is being discarded, install the new begin/size/capacity, and return
// an iterator to the first inserted element.
1381 #define FOLLY_FBVECTOR_INSERT_END(cpos, n) \
1382 M_deallocate(b, newCap); \
1385 if (impl_.b_) M_deallocate(impl_.b_, capacity()); \
1386 impl_.set(b, size() + n, newCap); \
1387 return impl_.b_ + idx; \
1392 //---------------------------------------------------------------------------
// Constructs one element in place at cpos from args. The INSERT macros
// carve the slot (START), roll back on a throwing constructor (TRY),
// and commit + return an iterator to the new element (END).
1396 template <class... Args>
1397 iterator emplace(const_iterator cpos, Args&&... args) {
1398 FOLLY_FBVECTOR_INSERT_START(cpos, 1)
1399 M_construct(start, std::forward<Args>(args)...);
1400 FOLLY_FBVECTOR_INSERT_TRY(cpos, 1)
1402 FOLLY_FBVECTOR_INSERT_END(cpos, 1)
// Inserts a copy of `value` before cpos. If `value` aliases this
// vector's own storage (dataIsInternal), copy it to a temporary first —
// the insertion may relocate the buffer and invalidate the reference.
1405 iterator insert(const_iterator cpos, const T& value) {
1406 if (dataIsInternal(value)) return insert(cpos, T(value));
1408 FOLLY_FBVECTOR_INSERT_START(cpos, 1)
1409 M_construct(start, value);
1410 FOLLY_FBVECTOR_INSERT_TRY(cpos, 1)
1412 FOLLY_FBVECTOR_INSERT_END(cpos, 1)
// Move-inserts `value` before cpos. Same self-aliasing guard as the
// copy overload: move into a temporary first if `value` lives inside
// this vector, since insertion may relocate the buffer.
1415 iterator insert(const_iterator cpos, T&& value) {
1416 if (dataIsInternal(value)) return insert(cpos, T(std::move(value)));
1418 FOLLY_FBVECTOR_INSERT_START(cpos, 1)
1419 M_construct(start, std::move(value));
1420 FOLLY_FBVECTOR_INSERT_TRY(cpos, 1)
1422 FOLLY_FBVECTOR_INSERT_END(cpos, 1)
// Inserts n copies of `value` before cpos. VT is a class-level alias
// declared elsewhere in this class (presumably value-or-const-ref
// depending on T — confirm against the full source). n == 0 is a
// no-op fast path; the aliasing guard copies `value` out first.
1425 iterator insert(const_iterator cpos, size_type n, VT value) {
1426 if (n == 0) return (iterator)cpos;
1427 if (dataIsInternalAndNotVT(value)) return insert(cpos, n, T(value));
1429 FOLLY_FBVECTOR_INSERT_START(cpos, n)
1430 D_uninitialized_fill_n_a(start, n, value);
1431 FOLLY_FBVECTOR_INSERT_TRY(cpos, n)
// Exception path: destroy the n elements constructed above.
1432 D_destroy_range_a(start, start + n);
1433 FOLLY_FBVECTOR_INSERT_END(cpos, n)
// Range insert: tag-dispatches on the iterator category so that
// forward iterators (known distance) and single-pass input iterators
// get different strategies (overloads below).
1436 template <class It, class Category = typename
1437 std::iterator_traits<It>::iterator_category>
1438 iterator insert(const_iterator cpos, It first, It last) {
1439 return insert(cpos, first, last, Category());
// Initializer-list insert: delegates to the iterator-range overload.
1442 iterator insert(const_iterator cpos, std::initializer_list<T> il) {
1443 return insert(cpos, il.begin(), il.end());
1446 //---------------------------------------------------------------------------
1447 // insert dispatch for iterator types
// Forward-iterator insert: the distance is computable up front, so the
// window/fresh-buffer machinery can size everything in one pass.
1450 template <class FIt>
1451 iterator insert(const_iterator cpos, FIt first, FIt last,
1452 std::forward_iterator_tag) {
1453 size_type n = std::distance(first, last);
1454 if (n == 0) return (iterator)cpos;
1456 FOLLY_FBVECTOR_INSERT_START(cpos, n)
1457 D_uninitialized_copy_a(start, first, last);
1458 FOLLY_FBVECTOR_INSERT_TRY(cpos, n)
// Exception path: destroy the partially/fully constructed range.
1459 D_destroy_range_a(start, start + n);
1460 FOLLY_FBVECTOR_INSERT_END(cpos, n)
// Input-iterator insert: the source is single-pass, so the count is
// unknown. Strategy: stash the tail [position, end()) in a temporary
// fbvector (moving it out), destroy the tail, append straight from the
// input range, then move the stashed tail back on the end. Returns an
// iterator to the first inserted element (offset idx).
1463 template <class IIt>
1464 iterator insert(const_iterator cpos, IIt first, IIt last,
1465 std::input_iterator_tag) {
1466 T* position = const_cast<T*>(cpos);
1467 assert(isValid(position));
1468 size_type idx = std::distance(begin(), position);
// Temporary holds the moved-out tail; allocator obtained per the
// container-copy-construction policy.
1470 fbvector storage(std::make_move_iterator(position),
1471 std::make_move_iterator(end()),
1472 A::select_on_container_copy_construction(impl_));
1473 M_destroy_range_e(position);
1474 for (; first != last; ++first) emplace_back(*first);
1475 insert(cend(), std::make_move_iterator(storage.begin()),
1476 std::make_move_iterator(storage.end()));
1477 return impl_.b_ + idx;
1480 //===========================================================================
1481 //---------------------------------------------------------------------------
1482 // lexicographical functions (others from boost::totally_ordered superclass)
// Element-wise equality; size check first avoids walking unequal-length
// vectors. operator!= is supplied by boost::totally_ordered.
1485 bool operator==(const fbvector& other) const {
1486 return size() == other.size() && std::equal(begin(), end(), other.begin());
// Lexicographic less-than; >, <=, >= derive from boost::totally_ordered.
1489 bool operator<(const fbvector& other) const {
1490 return std::lexicographical_compare(
1491 begin(), end(), other.begin(), other.end());
1494 //===========================================================================
1495 //---------------------------------------------------------------------------
1499 template <class _T, class _A>
1500 friend _T* relinquish(fbvector<_T, _A>&);
1502 template <class _T, class _A>
1503 friend void attach(fbvector<_T, _A>&, _T* data, size_t sz, size_t cap);
1505 }; // class fbvector
1508 //=============================================================================
1509 //-----------------------------------------------------------------------------
1510 // outlined functions (gcc, you finicky compiler you)
// Slow path of emplace_back, outlined so the hot in-capacity path can
// inline. First tries jemalloc in-place expansion (rallocm) when using
// std::allocator and the block is large enough to be expandable; if
// that fails, performs a manual reallocation + relocation.
// NOTE(review): this listing is elided — declarations (e.g. p/actual
// for rallocm), the try/catch around the non-relocatable construction,
// and several branch/return lines are not visible; confirm control
// flow against the full source.
1512 template <typename T, typename Allocator>
1513 template <class... Args>
1514 void fbvector<T, Allocator>::emplace_back_aux(Args&&... args) {
// Target byte size for the grown block, rounded to a jemalloc class.
1515 size_type byte_sz = folly::goodMallocSize(
1516 computePushBackCapacity() * sizeof(T));
1517 if (usingStdAllocator::value
1519 && ((impl_.z_ - impl_.b_) * sizeof(T) >=
1520 folly::jemallocMinInPlaceExpandable)) {
1521 // Try to reserve in place.
1522 // Ask rallocm to allocate in place at least size()+1 and at most sz space.
1523 // rallocm will allocate as much as possible within that range, which
1524 // is the best possible outcome: if sz space is available, take it all,
1525 // otherwise take as much as possible. If nothing is available, then fail.
1526 // In this fashion, we never relocate if there is a possibility of
1527 // expanding in place, and we never relocate by less than the desired
1528 // amount unless we cannot expand further. Hence we will not relocate
1529 // sub-optimally twice in a row (modulo the blocking memory being freed).
1530 size_type lower = folly::goodMallocSize(sizeof(T) + size() * sizeof(T));
1531 size_type upper = byte_sz;
1532 size_type extra = upper - lower;
1537 if (rallocm(&p, &actual, lower, extra, ALLOCM_NO_MOVE)
1538 == ALLOCM_SUCCESS) {
// In-place expansion succeeded: bump capacity and construct the new
// element at the end.
1539 impl_.z_ = impl_.b_ + actual / sizeof(T);
1540 M_construct(impl_.e_, std::forward<Args>(args)...);
1546 // Reallocation failed. Perform a manual relocation.
1547 size_type sz = byte_sz / sizeof(T);
1548 auto newB = M_allocate(sz);
1549 auto newE = newB + size();
1551 if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
1552 // For linear memory access, relocate before construction.
1553 // By the test condition, relocate is noexcept.
1554 // Note that there is no cleanup to do if M_construct throws - that's
1555 // one of the beauties of relocation.
1556 // Benchmarks for this code have high variance, and seem to be close.
1557 relocate_move(newB, impl_.b_, impl_.e_);
1558 M_construct(newE, std::forward<Args>(args)...);
// Non-relocatable path: construct the new element first, then (in
// elided code) move the old elements across, cleaning up on failure.
1561 M_construct(newE, std::forward<Args>(args)...);
1566 M_destroy(newE - 1);
1571 M_deallocate(newB, sz);
// NOTE(review): deallocation is passed size() as the element count;
// the usual argument would be capacity() — confirm M_deallocate's
// contract in the full source.
1574 if (impl_.b_) M_deallocate(impl_.b_, size());
1577 impl_.z_ = newB + sz;
1580 //=============================================================================
1581 //-----------------------------------------------------------------------------
1582 // specialized functions
// ADL-found non-member swap for fbvector. Body elided in this listing —
// presumably delegates to the member swap (confirm against the full
// source). noexcept so containers of fbvectors get fast moves.
1584 template <class T, class A>
1585 void swap(fbvector<T, A>& lhs, fbvector<T, A>& rhs) noexcept {
1589 //=============================================================================
1590 //-----------------------------------------------------------------------------
// Resizes *v while aiming for a compact (non-over-allocated) capacity.
// Body elided in this listing — confirm semantics against the full
// source before relying on them.
1593 template <class T, class A>
1594 void compactResize(fbvector<T, A>* v, size_t sz) {
1601 // relinquish and attach are deliberately not member functions, so that it is
1602 // awkward to call them. It is very easy to shoot yourself in the foot with
1605 // If you call relinquish, then it is your responsibility to free the data
1606 // and the storage, both of which may have been generated in a non-standard
1607 // way through the fbvector's allocator.
1609 // If you call attach, it is your responsibility to ensure that the fbvector
1610 // is fresh (size and capacity both zero), and that the supplied data is
1611 // capable of being manipulated by the allocator.
1612 // It is acceptable to supply a stack pointer IF:
1613 // (1) The vector's data does not outlive the stack pointer. This includes
1614 // extension of the data's life through a move operation.
1615 // (2) The pointer has enough capacity that the vector will never be
1617 // (3) Insert is not called on the vector; these functions have leeway to
1618 // relocate the vector even if there is enough capacity.
1619 // (4) A stack pointer is compatible with the fbvector's allocator.
// Detaches the vector's heap buffer and hands ownership to the caller
// (the saved-pointer capture and return are elided in this listing).
// Afterward the vector is empty (b_ == e_ == z_ == nullptr); the
// caller must free both the elements and the storage via the vector's
// allocator conventions — see the warning comment above.
1622 template <class T, class A>
1623 T* relinquish(fbvector<T, A>& v) {
1625 v.impl_.b_ = v.impl_.e_ = v.impl_.z_ = nullptr;
// Adopts an externally supplied buffer: `data` holds `sz` constructed
// elements with room for `cap`. The vector must be fresh (asserted:
// data() == nullptr). The assignment of impl_.b_ = data is elided in
// this listing; end and capacity pointers are set below.
1629 template <class T, class A>
1630 void attach(fbvector<T, A>& v, T* data, size_t sz, size_t cap) {
1631 assert(v.data() == nullptr);
1633 v.impl_.e_ = data + sz;
1634 v.impl_.z_ = data + cap;
1637 } // namespace folly
1639 #endif // FOLLY_FBVECTOR_H