2 * Copyright 2015 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 * Nicholas Ormrod (njormrod)
19 * Andrei Alexandrescu (aalexandre)
21 * FBVector is Facebook's drop-in implementation of std::vector. It has special
22 * optimizations for use with relocatable types and jemalloc.
25 #ifndef FOLLY_FBVECTOR_H
26 #define FOLLY_FBVECTOR_H
28 //=============================================================================
36 #include <type_traits>
39 #include <folly/FormatTraits.h>
40 #include <folly/Likely.h>
41 #include <folly/Malloc.h>
42 #include <folly/Traits.h>
44 #include <boost/operators.hpp>
46 //=============================================================================
47 // forward declaration
// Forward declaration so helper traits/headers can name fbvector before the
// full definition below.
template <class T, class Allocator = std::allocator<T>>
class fbvector;
54 //=============================================================================
// Applies OP to every pointer in [first, last), four elements per iteration
// while at least four remain, then one at a time. `first` is advanced to
// `last` as a side effect. The trailing semicolon is part of the macro, so
// call sites do not add their own.
#define FOLLY_FBV_UNROLL_PTR(first, last, OP) do {  \
  for (; (last) - (first) >= 4; (first) += 4) {     \
    OP(((first) + 0));                              \
    OP(((first) + 1));                              \
    OP(((first) + 2));                              \
    OP(((first) + 3));                              \
  }                                                 \
  for (; (first) != (last); ++(first)) OP((first)); \
} while(0);
67 //=============================================================================
68 ///////////////////////////////////////////////////////////////////////////////
72 ///////////////////////////////////////////////////////////////////////////////
76 template <class T, class Allocator>
77 class fbvector : private boost::totally_ordered<fbvector<T, Allocator>> {
79 //===========================================================================
80 //---------------------------------------------------------------------------
84 typedef std::allocator_traits<Allocator> A;
86 struct Impl : public Allocator {
88 typedef typename A::pointer pointer;
89 typedef typename A::size_type size_type;
95 Impl() : Allocator(), b_(nullptr), e_(nullptr), z_(nullptr) {}
96 /* implicit */ Impl(const Allocator& a)
97 : Allocator(a), b_(nullptr), e_(nullptr), z_(nullptr) {}
98 /* implicit */ Impl(Allocator&& a)
99 : Allocator(std::move(a)), b_(nullptr), e_(nullptr), z_(nullptr) {}
101 /* implicit */ Impl(size_type n, const Allocator& a = Allocator())
105 Impl(Impl&& other) noexcept
106 : Allocator(std::move(other)),
107 b_(other.b_), e_(other.e_), z_(other.z_)
108 { other.b_ = other.e_ = other.z_ = nullptr; }
116 // note that 'allocate' and 'deallocate' are inherited from Allocator
117 T* D_allocate(size_type n) {
118 if (usingStdAllocator::value) {
119 return static_cast<T*>(malloc(n * sizeof(T)));
121 return std::allocator_traits<Allocator>::allocate(*this, n);
125 void D_deallocate(T* p, size_type n) noexcept {
126 if (usingStdAllocator::value) {
129 std::allocator_traits<Allocator>::deallocate(*this, p, n);
134 void swapData(Impl& other) {
135 std::swap(b_, other.b_);
136 std::swap(e_, other.e_);
137 std::swap(z_, other.z_);
141 inline void destroy() noexcept {
143 // THIS DISPATCH CODE IS DUPLICATED IN fbvector::D_destroy_range_a.
144 // It has been inlined here for speed. It calls the static fbvector
145 // methods to perform the actual destruction.
146 if (usingStdAllocator::value) {
147 S_destroy_range(b_, e_);
149 S_destroy_range_a(*this, b_, e_);
152 D_deallocate(b_, z_ - b_);
156 void init(size_type n) {
157 if (UNLIKELY(n == 0)) {
158 b_ = e_ = z_ = nullptr;
160 size_type sz = folly::goodMallocSize(n * sizeof(T)) / sizeof(T);
168 set(pointer newB, size_type newSize, size_type newCap) {
174 void reset(size_type newCap) {
183 void reset() { // same as reset(0)
185 b_ = e_ = z_ = nullptr;
189 static void swap(Impl& a, Impl& b) {
191 if (!usingStdAllocator::value) swap<Allocator>(a, b);
195 //===========================================================================
196 //---------------------------------------------------------------------------
197 // types and constants
200 typedef T value_type;
201 typedef value_type& reference;
202 typedef const value_type& const_reference;
204 typedef const T* const_iterator;
205 typedef size_t size_type;
206 typedef typename std::make_signed<size_type>::type difference_type;
207 typedef Allocator allocator_type;
208 typedef typename A::pointer pointer;
209 typedef typename A::const_pointer const_pointer;
210 typedef std::reverse_iterator<iterator> reverse_iterator;
211 typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
215 typedef std::integral_constant<bool,
216 boost::has_trivial_copy_constructor<T>::value &&
217 sizeof(T) <= 16 // don't force large structures to be passed by value
218 > should_pass_by_value;
219 typedef typename std::conditional<
220 should_pass_by_value::value, T, const T&>::type VT;
221 typedef typename std::conditional<
222 should_pass_by_value::value, T, T&&>::type MT;
224 typedef std::integral_constant<bool,
225 std::is_same<Allocator, std::allocator<T>>::value> usingStdAllocator;
226 typedef std::integral_constant<bool,
227 usingStdAllocator::value ||
228 A::propagate_on_container_move_assignment::value> moveIsSwap;
230 //===========================================================================
231 //---------------------------------------------------------------------------
235 //---------------------------------------------------------------------------
238 T* M_allocate(size_type n) {
239 return impl_.D_allocate(n);
242 //---------------------------------------------------------------------------
245 void M_deallocate(T* p, size_type n) noexcept {
246 impl_.D_deallocate(p, n);
249 //---------------------------------------------------------------------------
252 // GCC is very sensitive to the exact way that construct is called. For
253 // that reason there are several different specializations of construct.
255 template <typename U, typename... Args>
256 void M_construct(U* p, Args&&... args) {
257 if (usingStdAllocator::value) {
258 new (p) U(std::forward<Args>(args)...);
260 std::allocator_traits<Allocator>::construct(
261 impl_, p, std::forward<Args>(args)...);
265 template <typename U, typename... Args>
266 static void S_construct(U* p, Args&&... args) {
267 new (p) U(std::forward<Args>(args)...);
270 template <typename U, typename... Args>
271 static void S_construct_a(Allocator& a, U* p, Args&&... args) {
272 std::allocator_traits<Allocator>::construct(
273 a, p, std::forward<Args>(args)...);
276 // scalar optimization
277 // TODO we can expand this optimization to: default copyable and assignable
278 template <typename U, typename Enable = typename
279 std::enable_if<std::is_scalar<U>::value>::type>
280 void M_construct(U* p, U arg) {
281 if (usingStdAllocator::value) {
284 std::allocator_traits<Allocator>::construct(impl_, p, arg);
288 template <typename U, typename Enable = typename
289 std::enable_if<std::is_scalar<U>::value>::type>
290 static void S_construct(U* p, U arg) {
294 template <typename U, typename Enable = typename
295 std::enable_if<std::is_scalar<U>::value>::type>
296 static void S_construct_a(Allocator& a, U* p, U arg) {
297 std::allocator_traits<Allocator>::construct(a, p, arg);
300 // const& optimization
301 template <typename U, typename Enable = typename
302 std::enable_if<!std::is_scalar<U>::value>::type>
303 void M_construct(U* p, const U& value) {
304 if (usingStdAllocator::value) {
307 std::allocator_traits<Allocator>::construct(impl_, p, value);
311 template <typename U, typename Enable = typename
312 std::enable_if<!std::is_scalar<U>::value>::type>
313 static void S_construct(U* p, const U& value) {
317 template <typename U, typename Enable = typename
318 std::enable_if<!std::is_scalar<U>::value>::type>
319 static void S_construct_a(Allocator& a, U* p, const U& value) {
320 std::allocator_traits<Allocator>::construct(a, p, value);
323 //---------------------------------------------------------------------------
326 void M_destroy(T* p) noexcept {
327 if (usingStdAllocator::value) {
328 if (!boost::has_trivial_destructor<T>::value) p->~T();
330 std::allocator_traits<Allocator>::destroy(impl_, p);
334 //===========================================================================
335 //---------------------------------------------------------------------------
336 // algorithmic helpers
339 //---------------------------------------------------------------------------
343 void M_destroy_range_e(T* pos) noexcept {
344 D_destroy_range_a(pos, impl_.e_);
349 // THIS DISPATCH CODE IS DUPLICATED IN IMPL. SEE IMPL FOR DETAILS.
350 void D_destroy_range_a(T* first, T* last) noexcept {
351 if (usingStdAllocator::value) {
352 S_destroy_range(first, last);
354 S_destroy_range_a(impl_, first, last);
359 static void S_destroy_range_a(Allocator& a, T* first, T* last) noexcept {
360 for (; first != last; ++first)
361 std::allocator_traits<Allocator>::destroy(a, first);
365 static void S_destroy_range(T* first, T* last) noexcept {
366 if (!boost::has_trivial_destructor<T>::value) {
367 // EXPERIMENTAL DATA on fbvector<vector<int>> (where each vector<int> has
369 // The unrolled version seems to work faster for small to medium sized
370 // fbvectors. It gets a 10% speedup on fbvectors of size 1024, 64, and
372 // The simple loop version seems to work faster for large fbvectors. The
373 // unrolled version is about 6% slower on fbvectors on size 16384.
374 // The two methods seem tied for very large fbvectors. The unrolled
375 // version is about 0.5% slower on size 262144.
377 // for (; first != last; ++first) first->~T();
378 #define FOLLY_FBV_OP(p) (p)->~T()
379 FOLLY_FBV_UNROLL_PTR(first, last, FOLLY_FBV_OP)
384 //---------------------------------------------------------------------------
385 // uninitialized_fill_n
388 void M_uninitialized_fill_n_e(size_type sz) {
389 D_uninitialized_fill_n_a(impl_.e_, sz);
393 void M_uninitialized_fill_n_e(size_type sz, VT value) {
394 D_uninitialized_fill_n_a(impl_.e_, sz, value);
399 void D_uninitialized_fill_n_a(T* dest, size_type sz) {
400 if (usingStdAllocator::value) {
401 S_uninitialized_fill_n(dest, sz);
403 S_uninitialized_fill_n_a(impl_, dest, sz);
407 void D_uninitialized_fill_n_a(T* dest, size_type sz, VT value) {
408 if (usingStdAllocator::value) {
409 S_uninitialized_fill_n(dest, sz, value);
411 S_uninitialized_fill_n_a(impl_, dest, sz, value);
416 template <typename... Args>
417 static void S_uninitialized_fill_n_a(Allocator& a, T* dest,
418 size_type sz, Args&&... args) {
423 std::allocator_traits<Allocator>::construct(a, b,
424 std::forward<Args>(args)...);
426 S_destroy_range_a(a, dest, b);
432 static void S_uninitialized_fill_n(T* dest, size_type n) {
433 if (folly::IsZeroInitializable<T>::value) {
434 std::memset(dest, 0, sizeof(T) * n);
439 for (; b != e; ++b) S_construct(b);
442 for (; b >= dest; --b) b->~T();
448 static void S_uninitialized_fill_n(T* dest, size_type n, const T& value) {
452 for (; b != e; ++b) S_construct(b, value);
454 S_destroy_range(dest, b);
459 //---------------------------------------------------------------------------
460 // uninitialized_copy
462 // it is possible to add an optimization for the case where
463 // It = move(T*) and IsRelocatable<T> and Is0Initiailizable<T>
466 template <typename It>
467 void M_uninitialized_copy_e(It first, It last) {
468 D_uninitialized_copy_a(impl_.e_, first, last);
469 impl_.e_ += std::distance(first, last);
472 template <typename It>
473 void M_uninitialized_move_e(It first, It last) {
474 D_uninitialized_move_a(impl_.e_, first, last);
475 impl_.e_ += std::distance(first, last);
479 template <typename It>
480 void D_uninitialized_copy_a(T* dest, It first, It last) {
481 if (usingStdAllocator::value) {
482 if (folly::IsTriviallyCopyable<T>::value) {
483 S_uninitialized_copy_bits(dest, first, last);
485 S_uninitialized_copy(dest, first, last);
488 S_uninitialized_copy_a(impl_, dest, first, last);
492 template <typename It>
493 void D_uninitialized_move_a(T* dest, It first, It last) {
494 D_uninitialized_copy_a(dest,
495 std::make_move_iterator(first), std::make_move_iterator(last));
499 template <typename It>
501 S_uninitialized_copy_a(Allocator& a, T* dest, It first, It last) {
504 for (; first != last; ++first, ++b)
505 std::allocator_traits<Allocator>::construct(a, b, *first);
507 S_destroy_range_a(a, dest, b);
513 template <typename It>
514 static void S_uninitialized_copy(T* dest, It first, It last) {
517 for (; first != last; ++first, ++b)
518 S_construct(b, *first);
520 S_destroy_range(dest, b);
526 S_uninitialized_copy_bits(T* dest, const T* first, const T* last) {
527 std::memcpy((void*)dest, (void*)first, (last - first) * sizeof(T));
531 S_uninitialized_copy_bits(T* dest, std::move_iterator<T*> first,
532 std::move_iterator<T*> last) {
533 T* bFirst = first.base();
534 T* bLast = last.base();
535 std::memcpy((void*)dest, (void*)bFirst, (bLast - bFirst) * sizeof(T));
538 template <typename It>
540 S_uninitialized_copy_bits(T* dest, It first, It last) {
541 S_uninitialized_copy(dest, first, last);
544 //---------------------------------------------------------------------------
547 // This function is "unsafe": it assumes that the iterator can be advanced at
548 // least n times. However, as a private function, that unsafety is managed
549 // wholly by fbvector itself.
551 template <typename It>
552 static It S_copy_n(T* dest, It first, size_type n) {
554 for (; dest != e; ++dest, ++first) *dest = *first;
558 static const T* S_copy_n(T* dest, const T* first, size_type n) {
559 if (folly::IsTriviallyCopyable<T>::value) {
560 std::memcpy((void*)dest, (void*)first, n * sizeof(T));
563 return S_copy_n<const T*>(dest, first, n);
567 static std::move_iterator<T*>
568 S_copy_n(T* dest, std::move_iterator<T*> mIt, size_type n) {
569 if (folly::IsTriviallyCopyable<T>::value) {
570 T* first = mIt.base();
571 std::memcpy((void*)dest, (void*)first, n * sizeof(T));
572 return std::make_move_iterator(first + n);
574 return S_copy_n<std::move_iterator<T*>>(dest, mIt, n);
578 //===========================================================================
579 //---------------------------------------------------------------------------
580 // relocation helpers
583 // Relocation is divided into three parts:
586 // Performs the actual movement of data from point a to point b.
589 // Destroys the old data.
592 // Destoys the new data and restores the old data.
594 // The three steps are used because there may be an exception after part 1
595 // has completed. If that is the case, then relocate_undo can nullify the
596 // initial move. Otherwise, relocate_done performs the last bit of tidying
599 // The relocation trio may use either memcpy, move, or copy. It is decided
600 // by the following case statement:
602 // IsRelocatable && usingStdAllocator -> memcpy
603 // has_nothrow_move && usingStdAllocator -> move
604 // cannot copy -> move
607 // If the class is non-copyable then it must be movable. However, if the
608 // move constructor is not noexcept, i.e. an error could be thrown, then
609 // relocate_undo will be unable to restore the old data, for fear of a
610 // second exception being thrown. This is a known and unavoidable
611 // deficiency. In lieu of a strong exception guarantee, relocate_undo does
612 // the next best thing: it provides a weak exception guarantee by
613 // destorying the new data, but leaving the old data in an indeterminate
614 // state. Note that that indeterminate state will be valid, since the
615 // old data has not been destroyed; it has merely been the source of a
616 // move, which is required to leave the source in a valid state.
619 void M_relocate(T* newB) {
620 relocate_move(newB, impl_.b_, impl_.e_);
621 relocate_done(newB, impl_.b_, impl_.e_);
624 // dispatch type trait
625 typedef std::integral_constant<bool,
626 folly::IsRelocatable<T>::value && usingStdAllocator::value
627 > relocate_use_memcpy;
629 typedef std::integral_constant<bool,
630 (std::is_nothrow_move_constructible<T>::value
631 && usingStdAllocator::value)
632 || !std::is_copy_constructible<T>::value
636 void relocate_move(T* dest, T* first, T* last) {
637 relocate_move_or_memcpy(dest, first, last, relocate_use_memcpy());
640 void relocate_move_or_memcpy(T* dest, T* first, T* last, std::true_type) {
641 std::memcpy((void*)dest, (void*)first, (last - first) * sizeof(T));
644 void relocate_move_or_memcpy(T* dest, T* first, T* last, std::false_type) {
645 relocate_move_or_copy(dest, first, last, relocate_use_move());
648 void relocate_move_or_copy(T* dest, T* first, T* last, std::true_type) {
649 D_uninitialized_move_a(dest, first, last);
652 void relocate_move_or_copy(T* dest, T* first, T* last, std::false_type) {
653 D_uninitialized_copy_a(dest, first, last);
657 void relocate_done(T* /*dest*/, T* first, T* last) noexcept {
658 if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
659 // used memcpy; data has been relocated, do not call destructor
661 D_destroy_range_a(first, last);
666 void relocate_undo(T* dest, T* first, T* last) noexcept {
667 if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
668 // used memcpy, old data is still valid, nothing to do
669 } else if (std::is_nothrow_move_constructible<T>::value &&
670 usingStdAllocator::value) {
671 // noexcept move everything back, aka relocate_move
672 relocate_move(first, dest, dest + (last - first));
673 } else if (!std::is_copy_constructible<T>::value) {
675 D_destroy_range_a(dest, dest + (last - first));
677 // used copy, old data is still valid
678 D_destroy_range_a(dest, dest + (last - first));
683 //===========================================================================
684 //---------------------------------------------------------------------------
685 // construct/copy/destroy
688 fbvector() = default;
690 explicit fbvector(const Allocator& a) : impl_(a) {}
692 explicit fbvector(size_type n, const Allocator& a = Allocator())
694 { M_uninitialized_fill_n_e(n); }
696 fbvector(size_type n, VT value, const Allocator& a = Allocator())
698 { M_uninitialized_fill_n_e(n, value); }
700 template <class It, class Category = typename
701 std::iterator_traits<It>::iterator_category>
702 fbvector(It first, It last, const Allocator& a = Allocator())
703 : fbvector(first, last, a, Category()) {}
705 fbvector(const fbvector& other)
706 : impl_(other.size(), A::select_on_container_copy_construction(other.impl_))
707 { M_uninitialized_copy_e(other.begin(), other.end()); }
709 fbvector(fbvector&& other) noexcept : impl_(std::move(other.impl_)) {}
711 fbvector(const fbvector& other, const Allocator& a)
712 : fbvector(other.begin(), other.end(), a) {}
714 /* may throw */ fbvector(fbvector&& other, const Allocator& a) : impl_(a) {
715 if (impl_ == other.impl_) {
716 impl_.swapData(other.impl_);
718 impl_.init(other.size());
719 M_uninitialized_move_e(other.begin(), other.end());
723 fbvector(std::initializer_list<T> il, const Allocator& a = Allocator())
724 : fbvector(il.begin(), il.end(), a) {}
726 ~fbvector() = default; // the cleanup occurs in impl_
728 fbvector& operator=(const fbvector& other) {
729 if (UNLIKELY(this == &other)) return *this;
731 if (!usingStdAllocator::value &&
732 A::propagate_on_container_copy_assignment::value) {
733 if (impl_ != other.impl_) {
734 // can't use other's different allocator to clean up self
737 (Allocator&)impl_ = (Allocator&)other.impl_;
740 assign(other.begin(), other.end());
744 fbvector& operator=(fbvector&& other) {
745 if (UNLIKELY(this == &other)) return *this;
746 moveFrom(std::move(other), moveIsSwap());
750 fbvector& operator=(std::initializer_list<T> il) {
751 assign(il.begin(), il.end());
755 template <class It, class Category = typename
756 std::iterator_traits<It>::iterator_category>
757 void assign(It first, It last) {
758 assign(first, last, Category());
761 void assign(size_type n, VT value) {
762 if (n > capacity()) {
763 // Not enough space. Do not reserve in place, since we will
764 // discard the old values anyways.
765 if (dataIsInternalAndNotVT(value)) {
766 T copy(std::move(value));
768 M_uninitialized_fill_n_e(n, copy);
771 M_uninitialized_fill_n_e(n, value);
773 } else if (n <= size()) {
774 auto newE = impl_.b_ + n;
775 std::fill(impl_.b_, newE, value);
776 M_destroy_range_e(newE);
778 std::fill(impl_.b_, impl_.e_, value);
779 M_uninitialized_fill_n_e(n - size(), value);
783 void assign(std::initializer_list<T> il) {
784 assign(il.begin(), il.end());
787 allocator_type get_allocator() const noexcept {
793 // contract dispatch for iterator types fbvector(It first, It last)
794 template <class ForwardIterator>
795 fbvector(ForwardIterator first, ForwardIterator last,
796 const Allocator& a, std::forward_iterator_tag)
797 : impl_(std::distance(first, last), a)
798 { M_uninitialized_copy_e(first, last); }
800 template <class InputIterator>
801 fbvector(InputIterator first, InputIterator last,
802 const Allocator& a, std::input_iterator_tag)
804 { for (; first != last; ++first) emplace_back(*first); }
806 // contract dispatch for allocator movement in operator=(fbvector&&)
808 moveFrom(fbvector&& other, std::true_type) {
809 swap(impl_, other.impl_);
811 void moveFrom(fbvector&& other, std::false_type) {
812 if (impl_ == other.impl_) {
813 impl_.swapData(other.impl_);
815 impl_.reset(other.size());
816 M_uninitialized_move_e(other.begin(), other.end());
820 // contract dispatch for iterator types in assign(It first, It last)
821 template <class ForwardIterator>
822 void assign(ForwardIterator first, ForwardIterator last,
823 std::forward_iterator_tag) {
824 const size_t newSize = std::distance(first, last);
825 if (newSize > capacity()) {
826 impl_.reset(newSize);
827 M_uninitialized_copy_e(first, last);
828 } else if (newSize <= size()) {
829 auto newEnd = std::copy(first, last, impl_.b_);
830 M_destroy_range_e(newEnd);
832 auto mid = S_copy_n(impl_.b_, first, size());
833 M_uninitialized_copy_e<decltype(last)>(mid, last);
837 template <class InputIterator>
838 void assign(InputIterator first, InputIterator last,
839 std::input_iterator_tag) {
841 for (; first != last && p != impl_.e_; ++first, ++p) {
845 M_destroy_range_e(p);
847 for (; first != last; ++first) emplace_back(*first);
851 // contract dispatch for aliasing under VT optimization
852 bool dataIsInternalAndNotVT(const T& t) {
853 if (should_pass_by_value::value) return false;
854 return dataIsInternal(t);
856 bool dataIsInternal(const T& t) {
857 return UNLIKELY(impl_.b_ <= std::addressof(t) &&
858 std::addressof(t) < impl_.e_);
862 //===========================================================================
863 //---------------------------------------------------------------------------
867 iterator begin() noexcept {
870 const_iterator begin() const noexcept {
873 iterator end() noexcept {
876 const_iterator end() const noexcept {
879 reverse_iterator rbegin() noexcept {
880 return reverse_iterator(end());
882 const_reverse_iterator rbegin() const noexcept {
883 return const_reverse_iterator(end());
885 reverse_iterator rend() noexcept {
886 return reverse_iterator(begin());
888 const_reverse_iterator rend() const noexcept {
889 return const_reverse_iterator(begin());
892 const_iterator cbegin() const noexcept {
895 const_iterator cend() const noexcept {
898 const_reverse_iterator crbegin() const noexcept {
899 return const_reverse_iterator(end());
901 const_reverse_iterator crend() const noexcept {
902 return const_reverse_iterator(begin());
905 //===========================================================================
906 //---------------------------------------------------------------------------
910 size_type size() const noexcept {
911 return impl_.e_ - impl_.b_;
914 size_type max_size() const noexcept {
915 // good luck gettin' there
916 return ~size_type(0);
919 void resize(size_type n) {
921 M_destroy_range_e(impl_.b_ + n);
924 M_uninitialized_fill_n_e(n - size());
928 void resize(size_type n, VT t) {
930 M_destroy_range_e(impl_.b_ + n);
931 } else if (dataIsInternalAndNotVT(t) && n > capacity()) {
934 M_uninitialized_fill_n_e(n - size(), copy);
937 M_uninitialized_fill_n_e(n - size(), t);
941 size_type capacity() const noexcept {
942 return impl_.z_ - impl_.b_;
945 bool empty() const noexcept {
946 return impl_.b_ == impl_.e_;
949 void reserve(size_type n) {
950 if (n <= capacity()) return;
951 if (impl_.b_ && reserve_in_place(n)) return;
953 auto newCap = folly::goodMallocSize(n * sizeof(T)) / sizeof(T);
954 auto newB = M_allocate(newCap);
958 M_deallocate(newB, newCap);
962 M_deallocate(impl_.b_, impl_.z_ - impl_.b_);
963 impl_.z_ = newB + newCap;
964 impl_.e_ = newB + (impl_.e_ - impl_.b_);
968 void shrink_to_fit() noexcept {
969 auto const newCapacityBytes = folly::goodMallocSize(size() * sizeof(T));
970 auto const newCap = newCapacityBytes / sizeof(T);
971 auto const oldCap = capacity();
973 if (newCap >= oldCap) return;
976 // xallocx() will shrink to precisely newCapacityBytes (which was generated
977 // by goodMallocSize()) if it successfully shrinks in place.
978 if ((usingJEMalloc() && usingStdAllocator::value) &&
979 newCapacityBytes >= folly::jemallocMinInPlaceExpandable &&
980 xallocx(p, newCapacityBytes, 0, 0) == newCapacityBytes) {
981 impl_.z_ += newCap - oldCap;
983 T* newB; // intentionally uninitialized
985 newB = M_allocate(newCap);
989 M_deallocate(newB, newCap);
990 return; // swallow the error
996 M_deallocate(impl_.b_, impl_.z_ - impl_.b_);
997 impl_.z_ = newB + newCap;
998 impl_.e_ = newB + (impl_.e_ - impl_.b_);
1005 bool reserve_in_place(size_type n) {
1006 if (!usingStdAllocator::value || !usingJEMalloc()) return false;
1008 // jemalloc can never grow in place blocks smaller than 4096 bytes.
1009 if ((impl_.z_ - impl_.b_) * sizeof(T) <
1010 folly::jemallocMinInPlaceExpandable) return false;
1012 auto const newCapacityBytes = folly::goodMallocSize(n * sizeof(T));
1014 if (xallocx(p, newCapacityBytes, 0, 0) == newCapacityBytes) {
1015 impl_.z_ = impl_.b_ + newCapacityBytes / sizeof(T);
1021 //===========================================================================
1022 //---------------------------------------------------------------------------
1026 reference operator[](size_type n) {
1030 const_reference operator[](size_type n) const {
1034 const_reference at(size_type n) const {
1035 if (UNLIKELY(n >= size())) {
1036 throw std::out_of_range("fbvector: index is greater than size.");
1040 reference at(size_type n) {
1041 auto const& cThis = *this;
1042 return const_cast<reference>(cThis.at(n));
1048 const_reference front() const {
1054 return impl_.e_[-1];
1056 const_reference back() const {
1058 return impl_.e_[-1];
1061 //===========================================================================
1062 //---------------------------------------------------------------------------
1066 T* data() noexcept {
1069 const T* data() const noexcept {
1073 //===========================================================================
1074 //---------------------------------------------------------------------------
1075 // modifiers (common)
1078 template <class... Args>
1079 void emplace_back(Args&&... args) {
1080 if (impl_.e_ != impl_.z_) {
1081 M_construct(impl_.e_, std::forward<Args>(args)...);
1084 emplace_back_aux(std::forward<Args>(args)...);
1089 push_back(const T& value) {
1090 if (impl_.e_ != impl_.z_) {
1091 M_construct(impl_.e_, value);
1094 emplace_back_aux(value);
1099 push_back(T&& value) {
1100 if (impl_.e_ != impl_.z_) {
1101 M_construct(impl_.e_, std::move(value));
1104 emplace_back_aux(std::move(value));
1111 M_destroy(impl_.e_);
1114 void swap(fbvector& other) noexcept {
1115 if (!usingStdAllocator::value &&
1116 A::propagate_on_container_swap::value)
1117 swap(impl_, other.impl_);
1118 else impl_.swapData(other.impl_);
1121 void clear() noexcept {
1122 M_destroy_range_e(impl_.b_);
1127 // std::vector implements a similar function with a different growth
1128 // strategy: empty() ? 1 : capacity() * 2.
1130 // fbvector grows differently on two counts:
1133 // Instead of grwoing to size 1 from empty, and fbvector allocates at
1134 // least 64 bytes. You may still use reserve to reserve a lesser amount
1137 // For medium-sized vectors, the growth strategy is 1.5x. See the docs
1139 // This does not apply to very small or very large fbvectors. This is a
1141 // A nice addition to fbvector would be the capability of having a user-
1142 // defined growth strategy, probably as part of the allocator.
1145 size_type computePushBackCapacity() const {
1146 if (capacity() == 0) {
1147 return std::max(64 / sizeof(T), size_type(1));
1149 if (capacity() < folly::jemallocMinInPlaceExpandable / sizeof(T)) {
1150 return capacity() * 2;
1152 if (capacity() > 4096 * 32 / sizeof(T)) {
1153 return capacity() * 2;
1155 return (capacity() * 3 + 1) / 2;
1158 template <class... Args>
1159 void emplace_back_aux(Args&&... args);
1161 //===========================================================================
1162 //---------------------------------------------------------------------------
1163 // modifiers (erase)
1166 iterator erase(const_iterator position) {
1167 return erase(position, position + 1);
1170 iterator erase(const_iterator first, const_iterator last) {
1171 assert(isValid(first) && isValid(last));
1172 assert(first <= last);
1173 if (first != last) {
1174 if (last == end()) {
1175 M_destroy_range_e((iterator)first);
1177 if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
1178 D_destroy_range_a((iterator)first, (iterator)last);
1179 if (last - first >= cend() - last) {
1180 std::memcpy((void*)first, (void*)last, (cend() - last) * sizeof(T));
1182 std::memmove((iterator)first, last, (cend() - last) * sizeof(T));
1184 impl_.e_ -= (last - first);
1186 std::copy(std::make_move_iterator((iterator)last),
1187 std::make_move_iterator(end()), (iterator)first);
1188 auto newEnd = impl_.e_ - std::distance(first, last);
1189 M_destroy_range_e(newEnd);
1193 return (iterator)first;
1196 //===========================================================================
1197 //---------------------------------------------------------------------------
1198 // modifiers (insert)
1199 private: // we have the private section first because it defines some macros
1201 bool isValid(const_iterator it) {
1202 return cbegin() <= it && it <= cend();
1205 size_type computeInsertCapacity(size_type n) {
1206 size_type nc = std::max(computePushBackCapacity(), size() + n);
1207 size_type ac = folly::goodMallocSize(nc * sizeof(T)) / sizeof(T);
1211 //---------------------------------------------------------------------------
1213 // make_window takes an fbvector, and creates an uninitialized gap (a
1214 // window) at the given position, of the given size. The fbvector must
1215 // have enough capacity.
1217 // Explanation by picture.
1221 // make_window here of size 3
1225 // If something goes wrong and the window must be destroyed, use
1226 // undo_window to provide a weak exception guarantee. It destroys
1231 //---------------------------------------------------------------------------
1233 // wrap_frame takes an inverse window and relocates an fbvector around it.
1234 // The fbvector must have at least as many elements as the left ledge.
1236 // Explanation by picture.
1239 // fbvector: inverse window:
1240 // 123456789______ _____abcde_______
1244 // _______________ 12345abcde6789___
1246 //---------------------------------------------------------------------------
1248 // insert_use_fresh_memory returns true iff the fbvector should use a fresh
1249 // block of memory for the insertion. If the fbvector does not have enough
1250 // spare capacity, then it must return true. Otherwise either true or false
1253 //---------------------------------------------------------------------------
1255 // These three functions, make_window, wrap_frame, and
1256 // insert_use_fresh_memory, can be combined into a uniform interface.
1257 // Since that interface involves a lot of case-work, it is built into
1258 // some macros: FOLLY_FBVECTOR_INSERT_(START|TRY|END)
1259 // Macros are used in an attempt to let GCC perform better optimizations,
1260 // especially control flow optimization.
1263 //---------------------------------------------------------------------------
// Opens an uninitialized gap ("window") of n slots at `position` by shifting
// the tail [position, e_) right by n. Requires enough spare capacity
// (asserted). NOTE(review): the branch/brace lines selecting between the
// strategies below are elided in this excerpt — confirm control flow against
// upstream before editing.
1266 void make_window(iterator position, size_type n) {
1267 assert(isValid(position));
1268 assert(size() + n <= capacity());
1271 // The result is guaranteed to be non-negative, so use an unsigned type:
1272 size_type tail = std::distance(position, impl_.e_);
// Presumably a small-tail path: relocate the whole tail in one step.
1275 relocate_move(position + n, position, impl_.e_);
1276 relocate_done(position + n, position, impl_.e_);
// Trivially-relocatable fast path: a raw byte move suffices, no
// per-element construction or destruction needed.
1279 if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
1280 std::memmove(position + n, position, tail * sizeof(T));
// General path: move-construct the last n elements into fresh storage past
// e_, then shift the remainder backwards by move-assignment.
1283 D_uninitialized_move_a(impl_.e_, impl_.e_ - n, impl_.e_);
1285 std::copy_backward(std::make_move_iterator(position),
1286 std::make_move_iterator(impl_.e_ - n), impl_.e_);
// Cleanup after a throw mid-shift (exception-handling frame elided here).
1288 D_destroy_range_a(impl_.e_ - n, impl_.e_ + n);
1293 D_destroy_range_a(position, position + n);
// Rolls back a window made by make_window when construction into it fails:
// destroys the shifted tail [position + n, e_) and truncates the vector at
// `position`, leaving a valid (shorter) vector — a weak exception guarantee.
1298 void undo_window(iterator position, size_type n) noexcept {
1299 D_destroy_range_a(position + n, impl_.e_);
1300 impl_.e_ = position;
1303 //---------------------------------------------------------------------------
// Relocates the current elements around an "inverse window": the prefix
// [b_, b_+idx) moves to `ledge`, the suffix [b_+idx, e_) moves to
// ledge + idx + n, framing n already-constructed elements in between.
// NOTE(review): the try/catch lines are elided in this excerpt —
// relocate_undo presumably rolls back the prefix move if the suffix move
// throws; confirm against upstream.
1306 void wrap_frame(T* ledge, size_type idx, size_type n) {
1307 assert(size() >= idx);
1310 relocate_move(ledge, impl_.b_, impl_.b_ + idx);
1312 relocate_move(ledge + idx + n, impl_.b_ + idx, impl_.e_);
1314 relocate_undo(ledge, impl_.b_, impl_.b_ + idx);
1317 relocate_done(ledge, impl_.b_, impl_.b_ + idx);
1318 relocate_done(ledge + idx + n, impl_.b_ + idx, impl_.e_);
1321 //---------------------------------------------------------------------------
// Decides whether an insertion of n elements at cposition should use a
// fresh memory block (true) or shift elements in place (false). Inserting
// at the end prefers in-place: existing capacity, or an in-place expansion
// via reserve_in_place, avoids relocating anything. Otherwise a fresh block
// is mandatory whenever spare capacity is insufficient.
1324 bool insert_use_fresh(const_iterator cposition, size_type n) {
1325 if (cposition == cend()) {
1326 if (size() + n <= capacity()) return false;
1327 if (reserve_in_place(size() + n)) return false;
1331 if (size() + n > capacity()) return true;
1336 //---------------------------------------------------------------------------
// START: validates the position, decides fresh-block vs in-place
// (insert_use_fresh), allocates/opens the window, and defines `start`,
// the first slot to construct into. Several continuation lines are elided
// in this excerpt; no comments may be added inside the macro bodies
// because of the backslash continuations.
1339 #define FOLLY_FBVECTOR_INSERT_START(cpos, n) \
1340 assert(isValid(cpos)); \
1341 T* position = const_cast<T*>(cpos); \
1342 size_type idx = std::distance(impl_.b_, position); \
1343 bool fresh = insert_use_fresh(position, n); \
1345 size_type newCap = 0; \
1348 newCap = computeInsertCapacity(n); \
1349 b = M_allocate(newCap); \
1351 make_window(position, n); \
1355 T* start = b + idx; \
1359 // construct the inserted elements
// TRY: failure cleanup for element construction — deallocate the fresh
// block or undo the in-place window — then (on success) wrap_frame the
// old elements around the freshly constructed ones.
1361 #define FOLLY_FBVECTOR_INSERT_TRY(cpos, n) \
1364 M_deallocate(b, newCap); \
1366 undo_window(position, n); \
1373 wrap_frame(b, idx, n); \
1377 // delete the inserted elements (exception has been thrown)
// END: commits the result — frees whichever block is no longer needed,
// installs the new (b, size, capacity) triple, and returns an iterator to
// the first inserted element.
1379 #define FOLLY_FBVECTOR_INSERT_END(cpos, n) \
1380 M_deallocate(b, newCap); \
1383 if (impl_.b_) M_deallocate(impl_.b_, capacity()); \
1384 impl_.set(b, size() + n, newCap); \
1385 return impl_.b_ + idx; \
1390 //---------------------------------------------------------------------------
// Constructs a single element in place at cpos, forwarding args to T's
// constructor. Built on the INSERT_(START|TRY|END) macro framework above;
// `start` is defined by INSERT_START.
1394 template <class... Args>
1395 iterator emplace(const_iterator cpos, Args&&... args) {
1396 FOLLY_FBVECTOR_INSERT_START(cpos, 1)
1397 M_construct(start, std::forward<Args>(args)...);
1398 FOLLY_FBVECTOR_INSERT_TRY(cpos, 1)
1400 FOLLY_FBVECTOR_INSERT_END(cpos, 1)
// Inserts a copy of `value` at cpos.
1403 iterator insert(const_iterator cpos, const T& value) {
// Self-insert guard: if `value` aliases this vector's own storage, the
// insertion may relocate it mid-operation, so copy it out first.
1404 if (dataIsInternal(value)) return insert(cpos, T(value));
1406 FOLLY_FBVECTOR_INSERT_START(cpos, 1)
1407 M_construct(start, value);
1408 FOLLY_FBVECTOR_INSERT_TRY(cpos, 1)
1410 FOLLY_FBVECTOR_INSERT_END(cpos, 1)
// Inserts `value` at cpos by move.
1413 iterator insert(const_iterator cpos, T&& value) {
// Self-insert guard: move the aliasing element out of the vector's own
// storage before the insertion can relocate it.
1414 if (dataIsInternal(value)) return insert(cpos, T(std::move(value)));
1416 FOLLY_FBVECTOR_INSERT_START(cpos, 1)
1417 M_construct(start, std::move(value));
1418 FOLLY_FBVECTOR_INSERT_TRY(cpos, 1)
1420 FOLLY_FBVECTOR_INSERT_END(cpos, 1)
// Inserts n copies of `value` at cpos. VT is presumably a by-value/
// reference-wrapper parameter typedef declared earlier in the class —
// TODO(review): confirm against the elided declarations.
1423 iterator insert(const_iterator cpos, size_type n, VT value) {
1424 if (n == 0) return (iterator)cpos;
// Self-insert guard, as in the single-element overloads above.
1425 if (dataIsInternalAndNotVT(value)) return insert(cpos, n, T(value));
1427 FOLLY_FBVECTOR_INSERT_START(cpos, n)
1428 D_uninitialized_fill_n_a(start, n, value);
1429 FOLLY_FBVECTOR_INSERT_TRY(cpos, n)
1430 D_destroy_range_a(start, start + n);
1431 FOLLY_FBVECTOR_INSERT_END(cpos, n)
// Range insert entry point: tag-dispatches on the iterator category so
// forward iterators (known distance) and input iterators (single pass)
// take different strategies below.
1434 template <class It, class Category = typename
1435 std::iterator_traits<It>::iterator_category>
1436 iterator insert(const_iterator cpos, It first, It last) {
1437 return insert(cpos, first, last, Category());
// Initializer-list insert: forwards to the range overload; the list's
// iterators are pointers, i.e. random-access, so the forward path is used.
1440 iterator insert(const_iterator cpos, std::initializer_list<T> il) {
1441 return insert(cpos, il.begin(), il.end());
1444 //---------------------------------------------------------------------------
1445 // insert dispatch for iterator types
// Forward-iterator range insert: the distance is computable up front, so
// the window can be sized exactly and filled with a single uninitialized
// copy via the INSERT macro framework.
1448 template <class FIt>
1449 iterator insert(const_iterator cpos, FIt first, FIt last,
1450 std::forward_iterator_tag) {
1451 size_type n = std::distance(first, last);
1452 if (n == 0) return (iterator)cpos;
1454 FOLLY_FBVECTOR_INSERT_START(cpos, n)
1455 D_uninitialized_copy_a(start, first, last);
1456 FOLLY_FBVECTOR_INSERT_TRY(cpos, n)
1457 D_destroy_range_a(start, start + n);
1458 FOLLY_FBVECTOR_INSERT_END(cpos, n)
// Input-iterator range insert: single-pass iterators give no distance in
// advance, so the tail [position, end()) is moved into a temporary vector,
// the new elements are appended one by one, and the stashed tail is then
// moved back onto the end.
1461 template <class IIt>
1462 iterator insert(const_iterator cpos, IIt first, IIt last,
1463 std::input_iterator_tag) {
1464 T* position = const_cast<T*>(cpos);
1465 assert(isValid(position));
1466 size_type idx = std::distance(begin(), position);
// Stash the tail; uses the allocator's copy-construction selection so the
// temporary is compatible with this vector's allocator.
1468 fbvector storage(std::make_move_iterator(position),
1469 std::make_move_iterator(end()),
1470 A::select_on_container_copy_construction(impl_));
1471 M_destroy_range_e(position);
1472 for (; first != last; ++first) emplace_back(*first);
// Re-append the stashed tail; this may relocate, so return b_ + idx rather
// than a possibly-invalidated `position`.
1473 insert(cend(), std::make_move_iterator(storage.begin()),
1474 std::make_move_iterator(storage.end()));
1475 return impl_.b_ + idx;
1478 //===========================================================================
1479 //---------------------------------------------------------------------------
1480 // lexicographical functions (others from boost::totally_ordered superclass)
// Element-wise equality; the size check guards std::equal's assumption
// that `other` has at least as many elements as *this.
1483 bool operator==(const fbvector& other) const {
1484 return size() == other.size() && std::equal(begin(), end(), other.begin());
// Lexicographical ordering; boost::totally_ordered derives the remaining
// comparison operators from this and operator==.
1487 bool operator<(const fbvector& other) const {
1488 return std::lexicographical_compare(
1489 begin(), end(), other.begin(), other.end());
1492 //===========================================================================
1493 //---------------------------------------------------------------------------
1497 template <class _T, class _A>
1498 friend _T* relinquish(fbvector<_T, _A>&);
1500 template <class _T, class _A>
1501 friend void attach(fbvector<_T, _A>&, _T* data, size_t sz, size_t cap);
1503 }; // class fbvector
1506 //=============================================================================
1507 //-----------------------------------------------------------------------------
1508 // outlined functions (gcc, you finicky compiler you)
// Slow path of emplace_back, taken when the vector is full. First tries to
// grow the existing jemalloc allocation in place with xallocx; if that
// fails, allocates a fresh block and relocates. NOTE(review): several
// branch/try-catch/brace lines are elided in this excerpt — confirm the
// exact control flow against upstream before editing.
1510 template <typename T, typename Allocator>
1511 template <class... Args>
1512 void fbvector<T, Allocator>::emplace_back_aux(Args&&... args) {
// Target byte size: the normal growth capacity, rounded to a jemalloc bin.
1513 size_type byte_sz = folly::goodMallocSize(
1514 computePushBackCapacity() * sizeof(T));
// In-place expansion is only worthwhile for blocks already large enough
// for jemalloc to expand (jemallocMinInPlaceExpandable); a condition line
// is elided between 1515 and 1517.
1515 if (usingStdAllocator::value
1517 && ((impl_.z_ - impl_.b_) * sizeof(T) >=
1518 folly::jemallocMinInPlaceExpandable)) {
1519 // Try to reserve in place.
1520 // Ask xallocx to allocate in place at least size()+1 and at most sz space.
1521 // xallocx will allocate as much as possible within that range, which
1522 // is the best possible outcome: if sz space is available, take it all,
1523 // otherwise take as much as possible. If nothing is available, then fail.
1524 // In this fashion, we never relocate if there is a possibility of
1525 // expanding in place, and we never reallocate by less than the desired
1526 // amount unless we cannot expand further. Hence we will not reallocate
1527 // sub-optimally twice in a row (modulo the blocking memory being freed).
1528 size_type lower = folly::goodMallocSize(sizeof(T) + size() * sizeof(T));
1529 size_type upper = byte_sz;
1530 size_type extra = upper - lower;
// xallocx returns the resulting usable size; >= lower means it succeeded
// in expanding the block in place, so only z_ needs updating.
1535 if ((actual = xallocx(p, lower, extra, 0)) >= lower) {
1536 impl_.z_ = impl_.b_ + actual / sizeof(T);
1537 M_construct(impl_.e_, std::forward<Args>(args)...);
1543 // Reallocation failed. Perform a manual relocation.
1544 size_type sz = byte_sz / sizeof(T);
1545 auto newB = M_allocate(sz);
1546 auto newE = newB + size();
1548 if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
1549 // For linear memory access, relocate before construction.
1550 // By the test condition, relocate is noexcept.
1551 // Note that there is no cleanup to do if M_construct throws - that's
1552 // one of the beauties of relocation.
1553 // Benchmarks for this code have high variance, and seem to be close.
1554 relocate_move(newB, impl_.b_, impl_.e_);
1555 M_construct(newE, std::forward<Args>(args)...);
// Non-relocatable path: construct the new element first, then move the old
// elements over (exception-cleanup lines elided between 1558 and 1568).
1558 M_construct(newE, std::forward<Args>(args)...);
1563 M_destroy(newE - 1);
1568 M_deallocate(newB, sz);
// Commit: free the old block and install the new begin/end/capacity.
1571 if (impl_.b_) M_deallocate(impl_.b_, size());
1574 impl_.z_ = newB + sz;
1577 //=============================================================================
1578 //-----------------------------------------------------------------------------
1579 // specialized functions
// ADL-visible non-member swap. Body elided in this excerpt — presumably
// delegates to the member swap (O(1) pointer exchange); confirm upstream.
1581 template <class T, class A>
1582 void swap(fbvector<T, A>& lhs, fbvector<T, A>& rhs) noexcept {
1586 //=============================================================================
1587 //-----------------------------------------------------------------------------
// Opts fbvector into folly's formatting machinery (FormatTraits.h) by
// treating it as an indexable sequence, like std::vector.
1593 template <class T, class A>
1594 struct IndexableTraits<fbvector<T, A>>
1595 : public IndexableTraitsSeq<fbvector<T, A>> {
1598 } // namespace detail
// Resizes *v to sz. Body elided in this excerpt — the name suggests it also
// releases excess capacity; TODO(review): confirm semantics upstream.
1600 template <class T, class A>
1601 void compactResize(fbvector<T, A>* v, size_t sz) {
1608 // relinquish and attach are not member functions specifically so that it is
1609 // awkward to call them. It is very easy to shoot yourself in the foot with
1612 // If you call relinquish, then it is your responsibility to free the data
1613 // and the storage, both of which may have been generated in a non-standard
1614 // way through the fbvector's allocator.
1616 // If you call attach, it is your responsibility to ensure that the fbvector
1617 // is fresh (size and capacity both zero), and that the supplied data is
1618 // capable of being manipulated by the allocator.
1619 // It is acceptable to supply a stack pointer IF:
1620 // (1) The vector's data does not outlive the stack pointer. This includes
1621 // extension of the data's life through a move operation.
1622 // (2) The pointer has enough capacity that the vector will never be
1624 // (3) Insert is not called on the vector; these functions have leeway to
1625 // relocate the vector even if there is enough capacity.
1626 // (4) A stack pointer is compatible with the fbvector's allocator.
// Releases ownership of the vector's buffer to the caller (see the warning
// comment above): the vector is reset to empty/capacity-zero, and the
// caller becomes responsible for destroying the elements and freeing the
// storage via the vector's allocator. The data-pointer capture/return
// lines are elided in this excerpt.
1629 template <class T, class A>
1630 T* relinquish(fbvector<T, A>& v) {
1631 v.impl_.b_ = v.impl_.e_ = v.impl_.z_ = nullptr;
// Adopts an externally supplied buffer into a fresh (empty, zero-capacity)
// vector: data holds sz constructed elements and room for cap. See the
// caveats in the comment block above. NOTE(review): the assignment of
// impl_.b_ (= data) appears to be elided in this excerpt — confirm upstream.
1636 template <class T, class A>
1637 void attach(fbvector<T, A>& v, T* data, size_t sz, size_t cap) {
1638 assert(v.data() == nullptr);
1640 v.impl_.e_ = data + sz;
1641 v.impl_.z_ = data + cap;
1644 } // namespace folly
1646 #endif // FOLLY_FBVECTOR_H