2 * Copyright 2017 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 * Nicholas Ormrod (njormrod)
19 * Andrei Alexandrescu (aalexandre)
21 * FBVector is Facebook's drop-in implementation of std::vector. It has special
22 * optimizations for use with relocatable types and jemalloc.
27 //=============================================================================
#include <cstdlib>
#include <new>
#include <type_traits>

#include <folly/FormatTraits.h>
#include <folly/Likely.h>
#include <folly/Traits.h>
#include <folly/memory/Malloc.h>
#include <folly/portability/BitsFunctexcept.h>
44 //=============================================================================
45 // forward declaration
48 template <class T, class Allocator = std::allocator<T>>
52 //=============================================================================
// Unrolled pointer-range loop: applies OP to each element of [first, last),
// four elements per iteration of the main loop, with a scalar tail loop.
// NOTE(review): the four unrolled OP invocations and the do/while terminator
// of this macro appear to have been lost in extraction — reproduced verbatim.
#define FOLLY_FBV_UNROLL_PTR(first, last, OP) do { \
  for (; (last) - (first) >= 4; (first) += 4) { \
  for (; (first) != (last); ++(first)) OP((first)); \
65 //=============================================================================
66 ///////////////////////////////////////////////////////////////////////////////
70 ///////////////////////////////////////////////////////////////////////////////
// Definition of folly::fbvector, a drop-in std::vector replacement.
// NOTE(review): the `class fbvector {` line itself is not visible in this
// extract; it sits between the template header and the typedef below.
template <class T, class Allocator>
//===========================================================================
//---------------------------------------------------------------------------
// Shorthand for the allocator traits of the user-supplied allocator; all
// allocator operations below funnel through this alias.
typedef std::allocator_traits<Allocator> A;
83 struct Impl : public Allocator {
85 typedef typename A::pointer pointer;
86 typedef typename A::size_type size_type;
92 Impl() : Allocator(), b_(nullptr), e_(nullptr), z_(nullptr) {}
93 /* implicit */ Impl(const Allocator& a)
94 : Allocator(a), b_(nullptr), e_(nullptr), z_(nullptr) {}
95 /* implicit */ Impl(Allocator&& a)
96 : Allocator(std::move(a)), b_(nullptr), e_(nullptr), z_(nullptr) {}
98 /* implicit */ Impl(size_type n, const Allocator& a = Allocator())
102 Impl(Impl&& other) noexcept
103 : Allocator(std::move(other)),
104 b_(other.b_), e_(other.e_), z_(other.z_)
105 { other.b_ = other.e_ = other.z_ = nullptr; }
113 // note that 'allocate' and 'deallocate' are inherited from Allocator
114 T* D_allocate(size_type n) {
115 if (usingStdAllocator::value) {
116 return static_cast<T*>(malloc(n * sizeof(T)));
118 return std::allocator_traits<Allocator>::allocate(*this, n);
122 void D_deallocate(T* p, size_type n) noexcept {
123 if (usingStdAllocator::value) {
126 std::allocator_traits<Allocator>::deallocate(*this, p, n);
131 void swapData(Impl& other) {
132 std::swap(b_, other.b_);
133 std::swap(e_, other.e_);
134 std::swap(z_, other.z_);
138 inline void destroy() noexcept {
140 // THIS DISPATCH CODE IS DUPLICATED IN fbvector::D_destroy_range_a.
141 // It has been inlined here for speed. It calls the static fbvector
142 // methods to perform the actual destruction.
143 if (usingStdAllocator::value) {
144 S_destroy_range(b_, e_);
146 S_destroy_range_a(*this, b_, e_);
149 D_deallocate(b_, size_type(z_ - b_));
153 void init(size_type n) {
154 if (UNLIKELY(n == 0)) {
155 b_ = e_ = z_ = nullptr;
157 size_type sz = folly::goodMallocSize(n * sizeof(T)) / sizeof(T);
164 void set(pointer newB, size_type newSize, size_type newCap) {
170 void reset(size_type newCap) {
179 void reset() { // same as reset(0)
181 b_ = e_ = z_ = nullptr;
185 static void swap(Impl& a, Impl& b) {
187 if (!usingStdAllocator::value) {
188 swap<Allocator>(a, b);
193 //===========================================================================
194 //---------------------------------------------------------------------------
195 // types and constants
typedef T value_type;
typedef value_type& reference;
typedef const value_type& const_reference;
// NOTE(review): the `typedef T* iterator;` line is not visible in this
// extract, but const_iterator / reverse_iterator below depend on it.
typedef const T* const_iterator;
typedef size_t size_type;
typedef typename std::make_signed<size_type>::type difference_type;
typedef Allocator allocator_type;
typedef typename A::pointer pointer;
typedef typename A::const_pointer const_pointer;
typedef std::reverse_iterator<iterator> reverse_iterator;
typedef std::reverse_iterator<const_iterator> const_reverse_iterator;

// Small trivially-copyable types are passed by value (VT = T, MT = T);
// larger types are passed by const& and moved by &&.
typedef std::integral_constant<bool,
  IsTriviallyCopyable<T>::value &&
  sizeof(T) <= 16 // don't force large structures to be passed by value
> should_pass_by_value;
typedef typename std::conditional<
  should_pass_by_value::value, T, const T&>::type VT;
typedef typename std::conditional<
  should_pass_by_value::value, T, T&&>::type MT;

// True iff the allocator is std::allocator<T>; gates the malloc/memcpy
// fast paths throughout this file.
typedef std::integral_constant<bool,
  std::is_same<Allocator, std::allocator<T>>::value> usingStdAllocator;
// Move-assignment may be implemented as a swap when the allocator allows it.
typedef std::integral_constant<bool,
  usingStdAllocator::value ||
  A::propagate_on_container_move_assignment::value> moveIsSwap;
226 //===========================================================================
227 //---------------------------------------------------------------------------
230 //---------------------------------------------------------------------------
233 T* M_allocate(size_type n) {
234 return impl_.D_allocate(n);
237 //---------------------------------------------------------------------------
240 void M_deallocate(T* p, size_type n) noexcept {
241 impl_.D_deallocate(p, n);
244 //---------------------------------------------------------------------------
247 // GCC is very sensitive to the exact way that construct is called. For
248 // that reason there are several different specializations of construct.
250 template <typename U, typename... Args>
251 void M_construct(U* p, Args&&... args) {
252 if (usingStdAllocator::value) {
253 new (p) U(std::forward<Args>(args)...);
255 std::allocator_traits<Allocator>::construct(
256 impl_, p, std::forward<Args>(args)...);
260 template <typename U, typename... Args>
261 static void S_construct(U* p, Args&&... args) {
262 new (p) U(std::forward<Args>(args)...);
265 template <typename U, typename... Args>
266 static void S_construct_a(Allocator& a, U* p, Args&&... args) {
267 std::allocator_traits<Allocator>::construct(
268 a, p, std::forward<Args>(args)...);
271 // scalar optimization
272 // TODO we can expand this optimization to: default copyable and assignable
273 template <typename U, typename Enable = typename
274 std::enable_if<std::is_scalar<U>::value>::type>
275 void M_construct(U* p, U arg) {
276 if (usingStdAllocator::value) {
279 std::allocator_traits<Allocator>::construct(impl_, p, arg);
283 template <typename U, typename Enable = typename
284 std::enable_if<std::is_scalar<U>::value>::type>
285 static void S_construct(U* p, U arg) {
289 template <typename U, typename Enable = typename
290 std::enable_if<std::is_scalar<U>::value>::type>
291 static void S_construct_a(Allocator& a, U* p, U arg) {
292 std::allocator_traits<Allocator>::construct(a, p, arg);
295 // const& optimization
296 template <typename U, typename Enable = typename
297 std::enable_if<!std::is_scalar<U>::value>::type>
298 void M_construct(U* p, const U& value) {
299 if (usingStdAllocator::value) {
302 std::allocator_traits<Allocator>::construct(impl_, p, value);
306 template <typename U, typename Enable = typename
307 std::enable_if<!std::is_scalar<U>::value>::type>
308 static void S_construct(U* p, const U& value) {
312 template <typename U, typename Enable = typename
313 std::enable_if<!std::is_scalar<U>::value>::type>
314 static void S_construct_a(Allocator& a, U* p, const U& value) {
315 std::allocator_traits<Allocator>::construct(a, p, value);
318 //---------------------------------------------------------------------------
321 void M_destroy(T* p) noexcept {
322 if (usingStdAllocator::value) {
323 if (!std::is_trivially_destructible<T>::value) {
327 std::allocator_traits<Allocator>::destroy(impl_, p);
331 //===========================================================================
332 //---------------------------------------------------------------------------
333 // algorithmic helpers
335 //---------------------------------------------------------------------------
// Destroys all elements from pos to the current end of the vector.
// NOTE(review): this extract has dropped interior lines throughout this
// group (else-branch markers, impl_.e_ reset, closing braces); the code
// below is reproduced verbatim.
void M_destroy_range_e(T* pos) noexcept {
  D_destroy_range_a(pos, impl_.e_);

// Dispatch between the fast static destroy and the allocator-aware one.
// THIS DISPATCH CODE IS DUPLICATED IN IMPL. SEE IMPL FOR DETAILS.
void D_destroy_range_a(T* first, T* last) noexcept {
  if (usingStdAllocator::value) {
    S_destroy_range(first, last);
    // allocator-aware branch (else-arm in the original)
    S_destroy_range_a(impl_, first, last);

// Element-by-element destruction through the allocator.
static void S_destroy_range_a(Allocator& a, T* first, T* last) noexcept {
  for (; first != last; ++first) {
    std::allocator_traits<Allocator>::destroy(a, first);

// Direct destruction; skipped entirely for trivially destructible types.
static void S_destroy_range(T* first, T* last) noexcept {
  if (!std::is_trivially_destructible<T>::value) {
    // EXPERIMENTAL DATA on fbvector<vector<int>> (where each vector<int> has
    // The unrolled version seems to work faster for small to medium sized
    // fbvectors. It gets a 10% speedup on fbvectors of size 1024, 64, and
    // The simple loop version seems to work faster for large fbvectors. The
    // unrolled version is about 6% slower on fbvectors on size 16384.
    // The two methods seem tied for very large fbvectors. The unrolled
    // version is about 0.5% slower on size 262144.

    // for (; first != last; ++first) first->~T();
#define FOLLY_FBV_OP(p) (p)->~T()
    FOLLY_FBV_UNROLL_PTR(first, last, FOLLY_FBV_OP)
381 //---------------------------------------------------------------------------
382 // uninitialized_fill_n
// Appends sz value-initialized elements at the end (impl_.e_).
// NOTE(review): interior lines (else markers, loop-variable declarations,
// impl_.e_ bumps, braces) were dropped in extraction; code is verbatim.
void M_uninitialized_fill_n_e(size_type sz) {
  D_uninitialized_fill_n_a(impl_.e_, sz);

// Appends sz copies of value at the end.
void M_uninitialized_fill_n_e(size_type sz, VT value) {
  D_uninitialized_fill_n_a(impl_.e_, sz, value);

// Dispatch: fast static fill for std::allocator, allocator-aware otherwise.
void D_uninitialized_fill_n_a(T* dest, size_type sz) {
  if (usingStdAllocator::value) {
    S_uninitialized_fill_n(dest, sz);
    S_uninitialized_fill_n_a(impl_, dest, sz);

void D_uninitialized_fill_n_a(T* dest, size_type sz, VT value) {
  if (usingStdAllocator::value) {
    S_uninitialized_fill_n(dest, sz, value);
    S_uninitialized_fill_n_a(impl_, dest, sz, value);

// Allocator-aware fill; on exception, destroys what was constructed so far
// and rethrows.
template <typename... Args>
static void S_uninitialized_fill_n_a(Allocator& a, T* dest,
                                     size_type sz, Args&&... args) {
  for (; b != e; ++b) {
    std::allocator_traits<Allocator>::construct(a, b,
      std::forward<Args>(args)...);
  S_destroy_range_a(a, dest, b);

// Value-initializing fill: memset for zero-initializable types, otherwise
// element-wise default construction (with element-wise rollback below).
static void S_uninitialized_fill_n(T* dest, size_type n) {
  if (folly::IsZeroInitializable<T>::value) {
    if (LIKELY(n != 0)) {
      std::memset(dest, 0, sizeof(T) * n);
  for (; b != e; ++b) {
  for (; b >= dest; --b) {

// Copy-fill with rollback on exception.
static void S_uninitialized_fill_n(T* dest, size_type n, const T& value) {
  for (; b != e; ++b) {
    S_construct(b, value);
  S_destroy_range(dest, b);
465 //---------------------------------------------------------------------------
466 // uninitialized_copy
468 // it is possible to add an optimization for the case where
// It = move(T*) and IsRelocatable<T> and IsZeroInitializable<T>
// Appends copies of [first, last) at the end.
// NOTE(review): interior lines (else markers, `static void` return lines,
// try/catch scaffolding, braces) were dropped in extraction; code verbatim.
template <typename It>
void M_uninitialized_copy_e(It first, It last) {
  D_uninitialized_copy_a(impl_.e_, first, last);
  impl_.e_ += std::distance(first, last);

// Appends move-constructed copies of [first, last) at the end.
template <typename It>
void M_uninitialized_move_e(It first, It last) {
  D_uninitialized_move_a(impl_.e_, first, last);
  impl_.e_ += std::distance(first, last);

// Dispatch: memcpy for trivially-copyable T under std::allocator, plain
// construction otherwise, allocator-aware construction for custom allocators.
template <typename It>
void D_uninitialized_copy_a(T* dest, It first, It last) {
  if (usingStdAllocator::value) {
    if (folly::IsTriviallyCopyable<T>::value) {
      S_uninitialized_copy_bits(dest, first, last);
      S_uninitialized_copy(dest, first, last);
    S_uninitialized_copy_a(impl_, dest, first, last);

// Move = copy through move iterators.
template <typename It>
void D_uninitialized_move_a(T* dest, It first, It last) {
  D_uninitialized_copy_a(dest,
    std::make_move_iterator(first), std::make_move_iterator(last));

// Allocator-aware copy with rollback on exception.
template <typename It>
S_uninitialized_copy_a(Allocator& a, T* dest, It first, It last) {
  for (; first != last; ++first, ++b) {
    std::allocator_traits<Allocator>::construct(a, b, *first);
  S_destroy_range_a(a, dest, b);

// Plain copy with rollback on exception.
template <typename It>
static void S_uninitialized_copy(T* dest, It first, It last) {
  for (; first != last; ++first, ++b) {
    S_construct(b, *first);
  S_destroy_range(dest, b);

// Bitwise copy for contiguous trivially-copyable source ranges.
S_uninitialized_copy_bits(T* dest, const T* first, const T* last) {
  std::memcpy((void*)dest, (void*)first, (last - first) * sizeof(T));

// Bitwise copy when the source is wrapped in move iterators.
S_uninitialized_copy_bits(T* dest, std::move_iterator<T*> first,
                          std::move_iterator<T*> last) {
  T* bFirst = first.base();
  T* bLast = last.base();
  if (bLast != bFirst) {
    std::memcpy((void*)dest, (void*)bFirst, (bLast - bFirst) * sizeof(T));

// Generic fallback: element-wise copy.
template <typename It>
S_uninitialized_copy_bits(T* dest, It first, It last) {
  S_uninitialized_copy(dest, first, last);
556 //---------------------------------------------------------------------------
559 // This function is "unsafe": it assumes that the iterator can be advanced at
560 // least n times. However, as a private function, that unsafety is managed
561 // wholly by fbvector itself.
// Copies exactly n elements from first into dest, returning the advanced
// source iterator.  "Unsafe": assumes the iterator can be advanced at least
// n times; as a private function that unsafety is managed by fbvector.
// NOTE(review): loop-variable setup, returns, and else/braces were dropped
// in extraction; code reproduced verbatim.
template <typename It>
static It S_copy_n(T* dest, It first, size_type n) {
  for (; dest != e; ++dest, ++first) {

// memcpy fast path for trivially-copyable T from a raw pointer source.
static const T* S_copy_n(T* dest, const T* first, size_type n) {
  if (folly::IsTriviallyCopyable<T>::value) {
    std::memcpy((void*)dest, (void*)first, n * sizeof(T));
  return S_copy_n<const T*>(dest, first, n);

// memcpy fast path for move iterators over trivially-copyable T.
static std::move_iterator<T*>
S_copy_n(T* dest, std::move_iterator<T*> mIt, size_type n) {
  if (folly::IsTriviallyCopyable<T>::value) {
    T* first = mIt.base();
    std::memcpy((void*)dest, (void*)first, n * sizeof(T));
    return std::make_move_iterator(first + n);
  return S_copy_n<std::move_iterator<T*>>(dest, mIt, n);
592 //===========================================================================
593 //---------------------------------------------------------------------------
594 // relocation helpers
596 // Relocation is divided into three parts:
599 // Performs the actual movement of data from point a to point b.
602 // Destroys the old data.
// Destroys the new data and restores the old data.
607 // The three steps are used because there may be an exception after part 1
608 // has completed. If that is the case, then relocate_undo can nullify the
609 // initial move. Otherwise, relocate_done performs the last bit of tidying
612 // The relocation trio may use either memcpy, move, or copy. It is decided
613 // by the following case statement:
615 // IsRelocatable && usingStdAllocator -> memcpy
616 // has_nothrow_move && usingStdAllocator -> move
617 // cannot copy -> move
620 // If the class is non-copyable then it must be movable. However, if the
621 // move constructor is not noexcept, i.e. an error could be thrown, then
622 // relocate_undo will be unable to restore the old data, for fear of a
623 // second exception being thrown. This is a known and unavoidable
624 // deficiency. In lieu of a strong exception guarantee, relocate_undo does
625 // the next best thing: it provides a weak exception guarantee by
// destroying the new data, but leaving the old data in an indeterminate
627 // state. Note that that indeterminate state will be valid, since the
628 // old data has not been destroyed; it has merely been the source of a
629 // move, which is required to leave the source in a valid state.
// Relocates the entire vector into the fresh buffer newB (move + cleanup).
// NOTE(review): interior lines (else markers, `> relocate_use_move;`
// terminator, braces) were dropped in extraction; code is verbatim.
void M_relocate(T* newB) {
  relocate_move(newB, impl_.b_, impl_.e_);
  relocate_done(newB, impl_.b_, impl_.e_);

// dispatch type trait
// memcpy is usable when T is trivially relocatable and we own the memory.
typedef std::integral_constant<bool,
  folly::IsRelocatable<T>::value && usingStdAllocator::value
> relocate_use_memcpy;

// Move when the move cannot throw, or when copying is impossible.
typedef std::integral_constant<bool,
  (std::is_nothrow_move_constructible<T>::value
   && usingStdAllocator::value)
  || !std::is_copy_constructible<T>::value

// part 1: transfer the data (memcpy / move / copy, chosen by the traits).
void relocate_move(T* dest, T* first, T* last) {
  relocate_move_or_memcpy(dest, first, last, relocate_use_memcpy());

void relocate_move_or_memcpy(T* dest, T* first, T* last, std::true_type) {
  // guard: the source may be null for an empty vector
  if (first != nullptr) {
    std::memcpy((void*)dest, (void*)first, (last - first) * sizeof(T));

void relocate_move_or_memcpy(T* dest, T* first, T* last, std::false_type) {
  relocate_move_or_copy(dest, first, last, relocate_use_move());

void relocate_move_or_copy(T* dest, T* first, T* last, std::true_type) {
  D_uninitialized_move_a(dest, first, last);

void relocate_move_or_copy(T* dest, T* first, T* last, std::false_type) {
  D_uninitialized_copy_a(dest, first, last);

// part 2: tidy up the old range (no-op when memcpy already relocated it).
void relocate_done(T* /*dest*/, T* first, T* last) noexcept {
  if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
    // used memcpy; data has been relocated, do not call destructor
    D_destroy_range_a(first, last);

// part 3 (failure path): destroy the new data and, where safely possible,
// restore the old data.
void relocate_undo(T* dest, T* first, T* last) noexcept {
  if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
    // used memcpy, old data is still valid, nothing to do
  } else if (std::is_nothrow_move_constructible<T>::value &&
             usingStdAllocator::value) {
    // noexcept move everything back, aka relocate_move
    relocate_move(first, dest, dest + (last - first));
  } else if (!std::is_copy_constructible<T>::value) {
    // weak guarantee only: old data left as moved-from sources
    D_destroy_range_a(dest, dest + (last - first));
    // used copy, old data is still valid
    D_destroy_range_a(dest, dest + (last - first));
698 //===========================================================================
699 //---------------------------------------------------------------------------
700 // construct/copy/destroy
fbvector() = default;

explicit fbvector(const Allocator& a) : impl_(a) {}

// n value-initialized elements.
// NOTE(review): the `: impl_(n, a)` mem-initializer lines of the next two
// constructors are not visible in this extract; code is reproduced verbatim.
explicit fbvector(size_type n, const Allocator& a = Allocator())
  { M_uninitialized_fill_n_e(n); }

// n copies of value.
fbvector(size_type n, VT value, const Allocator& a = Allocator())
  { M_uninitialized_fill_n_e(n, value); }

// Range constructor; dispatches on the iterator category.
template <class It, class Category = typename
          std::iterator_traits<It>::iterator_category>
fbvector(It first, It last, const Allocator& a = Allocator())
  : fbvector(first, last, a, Category()) {}

fbvector(const fbvector& other)
  : impl_(other.size(), A::select_on_container_copy_construction(other.impl_))
  { M_uninitialized_copy_e(other.begin(), other.end()); }

fbvector(fbvector&& other) noexcept : impl_(std::move(other.impl_)) {}

fbvector(const fbvector& other, const Allocator& a)
  : fbvector(other.begin(), other.end(), a) {}

// Move with a specific allocator: steal the buffer when the allocators
// compare equal, otherwise element-wise move (which may throw).
/* may throw */ fbvector(fbvector&& other, const Allocator& a) : impl_(a) {
  if (impl_ == other.impl_) {
    impl_.swapData(other.impl_);
    impl_.init(other.size());
    M_uninitialized_move_e(other.begin(), other.end());

fbvector(std::initializer_list<T> il, const Allocator& a = Allocator())
  : fbvector(il.begin(), il.end(), a) {}

~fbvector() = default; // the cleanup occurs in impl_
// Copy assignment.  NOTE(review): early-return bodies, else markers, and
// closing braces were dropped in extraction; code reproduced verbatim.
fbvector& operator=(const fbvector& other) {
  if (UNLIKELY(this == &other)) {
  // Stateful allocators that propagate on copy assignment must release
  // memory obtained through the old allocator before adopting the new one.
  if (!usingStdAllocator::value &&
      A::propagate_on_container_copy_assignment::value) {
    if (impl_ != other.impl_) {
      // can't use other's different allocator to clean up self
    (Allocator&)impl_ = (Allocator&)other.impl_;
  assign(other.begin(), other.end());

// Move assignment; strategy chosen by the moveIsSwap trait.
fbvector& operator=(fbvector&& other) {
  if (UNLIKELY(this == &other)) {
  moveFrom(std::move(other), moveIsSwap());

fbvector& operator=(std::initializer_list<T> il) {
  assign(il.begin(), il.end());

// Range assign; dispatches on the iterator category.
template <class It, class Category = typename
          std::iterator_traits<It>::iterator_category>
void assign(It first, It last) {
  assign(first, last, Category());

void assign(size_type n, VT value) {
  if (n > capacity()) {
    // Not enough space. Do not reserve in place, since we will
    // discard the old values anyways.
    if (dataIsInternalAndNotVT(value)) {
      // value aliases our own storage: copy it out before resetting
      T copy(std::move(value));
      M_uninitialized_fill_n_e(n, copy);
      M_uninitialized_fill_n_e(n, value);
  } else if (n <= size()) {
    auto newE = impl_.b_ + n;
    std::fill(impl_.b_, newE, value);
    M_destroy_range_e(newE);
    std::fill(impl_.b_, impl_.e_, value);
    M_uninitialized_fill_n_e(n - size(), value);

void assign(std::initializer_list<T> il) {
  assign(il.begin(), il.end());
// Returns a copy of the allocator.  NOTE(review): the body and closing
// brace were dropped in extraction; code below is reproduced verbatim.
allocator_type get_allocator() const noexcept {

// contract dispatch for iterator types fbvector(It first, It last)
template <class ForwardIterator>
fbvector(ForwardIterator first, ForwardIterator last,
         const Allocator& a, std::forward_iterator_tag)
  : impl_(size_type(std::distance(first, last)), a)
  { M_uninitialized_copy_e(first, last); }

// Input iterators: size unknown up front, so append one element at a time.
template <class InputIterator>
std::input_iterator_tag)
  for (; first != last; ++first) {
    emplace_back(*first);

// contract dispatch for allocator movement in operator=(fbvector&&)
moveFrom(fbvector&& other, std::true_type) {
  swap(impl_, other.impl_);
void moveFrom(fbvector&& other, std::false_type) {
  if (impl_ == other.impl_) {
    impl_.swapData(other.impl_);
    impl_.reset(other.size());
    M_uninitialized_move_e(other.begin(), other.end());

// contract dispatch for iterator types in assign(It first, It last)
template <class ForwardIterator>
void assign(ForwardIterator first, ForwardIterator last,
            std::forward_iterator_tag) {
  const auto newSize = size_type(std::distance(first, last));
  if (newSize > capacity()) {
    // discard the old buffer entirely and copy into fresh storage
    impl_.reset(newSize);
    M_uninitialized_copy_e(first, last);
  } else if (newSize <= size()) {
    // overwrite a prefix, destroy the surplus tail
    auto newEnd = std::copy(first, last, impl_.b_);
    M_destroy_range_e(newEnd);
    // overwrite every existing element, then append the remainder
    auto mid = S_copy_n(impl_.b_, first, size());
    M_uninitialized_copy_e<decltype(last)>(mid, last);

template <class InputIterator>
void assign(InputIterator first, InputIterator last,
            std::input_iterator_tag) {
  for (; first != last && p != impl_.e_; ++first, ++p) {
  M_destroy_range_e(p);
  for (; first != last; ++first) {
    emplace_back(*first);

// contract dispatch for aliasing under VT optimization
// When T is passed by value (VT == T) the argument cannot alias storage.
bool dataIsInternalAndNotVT(const T& t) {
  if (should_pass_by_value::value) {
  return dataIsInternal(t);
bool dataIsInternal(const T& t) {
  return UNLIKELY(impl_.b_ <= std::addressof(t) &&
                  std::addressof(t) < impl_.e_);
889 //===========================================================================
890 //---------------------------------------------------------------------------
893 iterator begin() noexcept {
896 const_iterator begin() const noexcept {
899 iterator end() noexcept {
902 const_iterator end() const noexcept {
905 reverse_iterator rbegin() noexcept {
906 return reverse_iterator(end());
908 const_reverse_iterator rbegin() const noexcept {
909 return const_reverse_iterator(end());
911 reverse_iterator rend() noexcept {
912 return reverse_iterator(begin());
914 const_reverse_iterator rend() const noexcept {
915 return const_reverse_iterator(begin());
918 const_iterator cbegin() const noexcept {
921 const_iterator cend() const noexcept {
924 const_reverse_iterator crbegin() const noexcept {
925 return const_reverse_iterator(end());
927 const_reverse_iterator crend() const noexcept {
928 return const_reverse_iterator(begin());
931 //===========================================================================
932 //---------------------------------------------------------------------------
// NOTE(review): this group has many lines dropped in extraction (early
// returns, else markers, pointer-capture lines, braces); the code below is
// reproduced verbatim.
size_type size() const noexcept {
  return size_type(impl_.e_ - impl_.b_);

size_type max_size() const noexcept {
  // good luck gettin' there
  return ~size_type(0);

// Shrinks by destroying the tail, or grows by value-initializing new slots.
void resize(size_type n) {
  M_destroy_range_e(impl_.b_ + n);
  M_uninitialized_fill_n_e(n - size());

void resize(size_type n, VT t) {
  M_destroy_range_e(impl_.b_ + n);
} else if (dataIsInternalAndNotVT(t) && n > capacity()) {
  // t aliases our own storage and we must reallocate: fill from a copy
  M_uninitialized_fill_n_e(n - size(), copy);
  M_uninitialized_fill_n_e(n - size(), t);

size_type capacity() const noexcept {
  return size_type(impl_.z_ - impl_.b_);

bool empty() const noexcept {
  return impl_.b_ == impl_.e_;

// Grows capacity to at least n; tries jemalloc in-place expansion before
// falling back to allocate + relocate + free.
void reserve(size_type n) {
  if (n <= capacity()) {
  if (impl_.b_ && reserve_in_place(n)) {
  auto newCap = folly::goodMallocSize(n * sizeof(T)) / sizeof(T);
  auto newB = M_allocate(newCap);
  M_deallocate(newB, newCap);
  M_deallocate(impl_.b_, size_type(impl_.z_ - impl_.b_));
  impl_.z_ = newB + newCap;
  impl_.e_ = newB + (impl_.e_ - impl_.b_);

// Best-effort shrink; failures are swallowed to honor the noexcept contract.
void shrink_to_fit() noexcept {
  auto const newCapacityBytes = folly::goodMallocSize(size() * sizeof(T));
  auto const newCap = newCapacityBytes / sizeof(T);
  auto const oldCap = capacity();

  if (newCap >= oldCap) {

  // xallocx() will shrink to precisely newCapacityBytes (which was generated
  // by goodMallocSize()) if it successfully shrinks in place.
  if ((usingJEMalloc() && usingStdAllocator::value) &&
      newCapacityBytes >= folly::jemallocMinInPlaceExpandable &&
      xallocx(p, newCapacityBytes, 0, 0) == newCapacityBytes) {
    impl_.z_ += newCap - oldCap;
    T* newB; // intentionally uninitialized
    newB = M_allocate(newCap);
    M_deallocate(newB, newCap);
    return; // swallow the error
  M_deallocate(impl_.b_, size_type(impl_.z_ - impl_.b_));
  impl_.z_ = newB + newCap;
  impl_.e_ = newB + (impl_.e_ - impl_.b_);

// Attempts jemalloc in-place expansion to hold at least n elements.
bool reserve_in_place(size_type n) {
  if (!usingStdAllocator::value || !usingJEMalloc()) {

  // jemalloc can never grow in place blocks smaller than 4096 bytes.
  if ((impl_.z_ - impl_.b_) * sizeof(T) <
      folly::jemallocMinInPlaceExpandable) {

  auto const newCapacityBytes = folly::goodMallocSize(n * sizeof(T));
  if (xallocx(p, newCapacityBytes, 0, 0) == newCapacityBytes) {
    impl_.z_ = impl_.b_ + newCapacityBytes / sizeof(T);
1062 //===========================================================================
1063 //---------------------------------------------------------------------------
1066 reference operator[](size_type n) {
1070 const_reference operator[](size_type n) const {
1074 const_reference at(size_type n) const {
1075 if (UNLIKELY(n >= size())) {
1076 std::__throw_out_of_range("fbvector: index is greater than size.");
1080 reference at(size_type n) {
1081 auto const& cThis = *this;
1082 return const_cast<reference>(cThis.at(n));
1088 const_reference front() const {
1094 return impl_.e_[-1];
1096 const_reference back() const {
1098 return impl_.e_[-1];
1101 //===========================================================================
1102 //---------------------------------------------------------------------------
1105 T* data() noexcept {
1108 const T* data() const noexcept {
1112 //===========================================================================
1113 //---------------------------------------------------------------------------
1114 // modifiers (common)
// Constructs a new element at the end; fast path when spare capacity
// exists, otherwise the out-of-line reallocating path emplace_back_aux.
// NOTE(review): interior lines (`void`/`reference` return-type lines,
// impl_.e_ bumps, else markers, braces) were dropped in extraction; code
// below is reproduced verbatim.
template <class... Args>
void emplace_back(Args&&... args) {
  if (impl_.e_ != impl_.z_) {
    M_construct(impl_.e_, std::forward<Args>(args)...);
    emplace_back_aux(std::forward<Args>(args)...);

push_back(const T& value) {
  if (impl_.e_ != impl_.z_) {
    M_construct(impl_.e_, value);
    emplace_back_aux(value);

push_back(T&& value) {
  if (impl_.e_ != impl_.z_) {
    M_construct(impl_.e_, std::move(value));
    emplace_back_aux(std::move(value));

// (pop_back fragment: destroys the last element)
  M_destroy(impl_.e_);

void swap(fbvector& other) noexcept {
  if (!usingStdAllocator::value && A::propagate_on_container_swap::value) {
    swap(impl_, other.impl_);
    impl_.swapData(other.impl_);

void clear() noexcept {
  M_destroy_range_e(impl_.b_);

// std::vector implements a similar function with a different growth
// strategy: empty() ? 1 : capacity() * 2.
// fbvector grows differently on two counts:
// Instead of growing to size 1 from empty, fbvector allocates at least
// 64 bytes. You may still use reserve to reserve a lesser amount of
// For medium-sized vectors, the growth strategy is 1.5x. See the docs
// This does not apply to very small or very large fbvectors. This is a
// A nice addition to fbvector would be the capability of having a user-
// defined growth strategy, probably as part of the allocator.
size_type computePushBackCapacity() const {
  if (capacity() == 0) {
    return std::max(64 / sizeof(T), size_type(1));
  if (capacity() < folly::jemallocMinInPlaceExpandable / sizeof(T)) {
    return capacity() * 2;
  if (capacity() > 4096 * 32 / sizeof(T)) {
    return capacity() * 2;
  return (capacity() * 3 + 1) / 2;

// Out-of-line slow path: reallocate, then construct the new element.
template <class... Args>
void emplace_back_aux(Args&&... args);
1199 //===========================================================================
1200 //---------------------------------------------------------------------------
1201 // modifiers (erase)
iterator erase(const_iterator position) {
  return erase(position, position + 1);

// Erases [first, last); returns an iterator to the element after the gap.
// NOTE(review): else markers and closing braces dropped in extraction.
iterator erase(const_iterator first, const_iterator last) {
  assert(isValid(first) && isValid(last));
  assert(first <= last);
  if (first != last) {
    if (last == end()) {
      // erasing a suffix: just destroy it
      M_destroy_range_e((iterator)first);
      if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
        // relocatable fast path: destroy the gap, slide the tail bitwise
        D_destroy_range_a((iterator)first, (iterator)last);
        if (last - first >= cend() - last) {
          // gap is at least as long as the tail: ranges cannot overlap
          std::memcpy((void*)first, (void*)last, (cend() - last) * sizeof(T));
          std::memmove((iterator)first, last, (cend() - last) * sizeof(T));
        impl_.e_ -= (last - first);
        // general path: move-assign the tail down, destroy the leftovers
        std::copy(std::make_move_iterator((iterator)last),
                  std::make_move_iterator(end()), (iterator)first);
        auto newEnd = impl_.e_ - std::distance(first, last);
        M_destroy_range_e(newEnd);
  return (iterator)first;
1233 //===========================================================================
1234 //---------------------------------------------------------------------------
1235 // modifiers (insert)
private: // we have the private section first because it defines some macros

// True iff `it` points into (or one past the end of) this vector.
bool isValid(const_iterator it) {
  return cbegin() <= it && it <= cend();

// Capacity to allocate when inserting n elements: at least the push_back
// growth target, rounded to an allocator-friendly size.
size_type computeInsertCapacity(size_type n) {
  size_type nc = std::max(computePushBackCapacity(), size() + n);
  size_type ac = folly::goodMallocSize(nc * sizeof(T)) / sizeof(T);
1247 //---------------------------------------------------------------------------
1249 // make_window takes an fbvector, and creates an uninitialized gap (a
1250 // window) at the given position, of the given size. The fbvector must
1251 // have enough capacity.
1253 // Explanation by picture.
1257 // make_window here of size 3
1261 // If something goes wrong and the window must be destroyed, use
1262 // undo_window to provide a weak exception guarantee. It destroys
1267 //---------------------------------------------------------------------------
1269 // wrap_frame takes an inverse window and relocates an fbvector around it.
1270 // The fbvector must have at least as many elements as the left ledge.
1272 // Explanation by picture.
1275 // fbvector: inverse window:
1276 // 123456789______ _____abcde_______
1280 // _______________ 12345abcde6789___
1282 //---------------------------------------------------------------------------
1284 // insert_use_fresh_memory returns true iff the fbvector should use a fresh
1285 // block of memory for the insertion. If the fbvector does not have enough
1286 // spare capacity, then it must return true. Otherwise either true or false
1289 //---------------------------------------------------------------------------
1291 // These three functions, make_window, wrap_frame, and
1292 // insert_use_fresh_memory, can be combined into a uniform interface.
1293 // Since that interface involves a lot of case-work, it is built into
1294 // some macros: FOLLY_FBVECTOR_INSERT_(PRE|START|TRY|END)
1295 // Macros are used in an attempt to let GCC perform better optimizations,
1296 // especially control flow optimization.
1299 //---------------------------------------------------------------------------
// Opens an uninitialized n-element gap (a "window") at `position` by shifting
// the tail [position, e_) right by n slots. Per the commentary above, the
// vector must already have capacity for n more elements. NOTE(review): some
// branches are elided in this listing — confirm against the full source.
1302 void make_window(iterator position, size_type n) {
1303 // The result is guaranteed to be non-negative, so use an unsigned type:
1304 size_type tail = size_type(std::distance(position, impl_.e_));
1307 relocate_move(position + n, position, impl_.e_);
1308 relocate_done(position + n, position, impl_.e_);
// Fast path: trivially-relocatable types under the std allocator can be
// shifted with a single raw memmove (no per-element construct/destroy).
1311 if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
1312 std::memmove(position + n, position, tail * sizeof(T));
// Slow path: move-construct the last n elements into the uninitialized
// area past e_, then shift the remainder backwards with move assignment.
1315 D_uninitialized_move_a(impl_.e_, impl_.e_ - n, impl_.e_);
1317 std::copy_backward(std::make_move_iterator(position),
1318 std::make_move_iterator(impl_.e_ - n), impl_.e_);
// Destroy the moved-from slots so the window is truly uninitialized.
1320 D_destroy_range_a(impl_.e_ - n, impl_.e_ + n);
1325 D_destroy_range_a(position, position + n);
// Cleanup companion to make_window (weak exception guarantee, per the note
// above): destroys everything after the window and truncates the vector at
// `position`. Called when construction into the window throws.
1330 void undo_window(iterator position, size_type n) noexcept {
1331 D_destroy_range_a(position + n, impl_.e_);
1332 impl_.e_ = position;
1335 //---------------------------------------------------------------------------
// Relocates this vector's elements around an "inverse window" in a fresh
// buffer: the prefix [b_, b_+idx) goes to `ledge`, the suffix [b_+idx, e_)
// goes past the n-slot gap at ledge + idx + n (see the picture above).
// NOTE(review): lines are elided here — relocate_undo between the two moves
// presumably rolls the prefix back if the suffix relocation throws; confirm.
1338 void wrap_frame(T* ledge, size_type idx, size_type n) {
1339 assert(size() >= idx);
1342 relocate_move(ledge, impl_.b_, impl_.b_ + idx);
1344 relocate_move(ledge + idx + n, impl_.b_ + idx, impl_.e_);
1346 relocate_undo(ledge, impl_.b_, impl_.b_ + idx);
// Success: commit both relocations (destroys originals where needed).
1349 relocate_done(ledge, impl_.b_, impl_.b_ + idx);
1350 relocate_done(ledge + idx + n, impl_.b_ + idx, impl_.e_);
1353 //---------------------------------------------------------------------------
// Decides whether an insertion of n elements should use a freshly allocated
// block. Per the contract above: must return true when spare capacity is
// insufficient and in-place expansion fails; otherwise either answer is
// allowed. NOTE(review): the return statements are elided in this listing.
1356 bool insert_use_fresh(bool at_end, size_type n) {
1358 if (size() + n <= capacity()) {
// Try expanding the current allocation in place before giving up.
1361 if (reserve_in_place(size() + n)) {
1367 if (size() + n > capacity()) {
1374 //---------------------------------------------------------------------------
// Common driver behind every insert/emplace overload. The four callbacks
// parameterize the variation points:
//   isInternalFunc     — does the inserted value alias this vector's storage?
//   insertInternalFunc — fallback insertion when it does (copies first)
//   constructFunc      — constructs the new element(s) into the window
//   destroyFunc        — destroys them if a later step throws
// Chooses between in-place window insertion (make_window/undo_window) and
// fresh-buffer insertion (wrap_frame), per insert_use_fresh. Returns an
// iterator to the first inserted element. NOTE(review): many lines (n == 0
// early-out condition, try/catch structure) are elided in this listing.
1378 typename IsInternalFunc,
1379 typename InsertInternalFunc,
1380 typename ConstructFunc,
1381 typename DestroyFunc>
1382 iterator do_real_insert(
1383 const_iterator cpos,
1385 IsInternalFunc&& isInternalFunc,
1386 InsertInternalFunc&& insertInternalFunc,
1387 ConstructFunc&& constructFunc,
1388 DestroyFunc&& destroyFunc) {
1390 return iterator(cpos);
1392 bool at_end = cpos == cend();
1393 bool fresh = insert_use_fresh(at_end, n);
1395 if (!fresh && isInternalFunc()) {
1396 // check for internal data (technically not required by the standard)
1397 return insertInternalFunc();
1399 assert(isValid(cpos));
// Safe: cpos is known to point into this (non-const) vector.
1401 T* position = const_cast<T*>(cpos);
1402 size_type idx = size_type(std::distance(impl_.b_, position));
1404 size_type newCap; /* intentionally uninitialized */
// Fresh-buffer path: allocate the new block up front.
1407 newCap = computeInsertCapacity(n);
1408 b = M_allocate(newCap);
// In-place path: open an uninitialized gap at the insertion point.
1411 make_window(position, n);
1420 // construct the inserted elements
1421 constructFunc(start);
// Exception cleanup: release the fresh block / close the window.
1424 M_deallocate(b, newCap);
1427 undo_window(position, n);
// Fresh-buffer path: relocate the old elements around the new ones.
1437 wrap_frame(b, idx, n);
1439 // delete the inserted elements (exception has been thrown)
1441 M_deallocate(b, newCap);
// Success on the fresh-buffer path: free the old storage and adopt the new.
1445 M_deallocate(impl_.b_, capacity());
1447 impl_.set(b, size() + n, newCap);
1448 return impl_.b_ + idx;
// Constructs a single element in place before cpos, forwarding args to T's
// constructor. The self-reference check is disabled (always false), so the
// internal-insert fallback lambda is never invoked.
1455 template <class... Args>
1456 iterator emplace(const_iterator cpos, Args&&... args) {
1457 return do_real_insert(
1460 [&] { return false; },
1461 [&] { return iterator{}; },
1462 [&](iterator start) {
1463 M_construct(start, std::forward<Args>(args)...);
1465 [&](iterator start) { M_destroy(start); });
// Copy-inserts `value` before cpos. If `value` aliases this vector's own
// storage (dataIsInternal), recurse with a detached copy T(value) so the
// element shift cannot invalidate the source.
1468 iterator insert(const_iterator cpos, const T& value) {
1469 return do_real_insert(
1472 [&] { return dataIsInternal(value); },
1473 [&] { return insert(cpos, T(value)); },
1474 [&](iterator start) { M_construct(start, value); },
1475 [&](iterator start) { M_destroy(start); });
// Move-inserts `value` before cpos. Same aliasing defense as the copy
// overload: an internal value is first moved into a temporary T.
1478 iterator insert(const_iterator cpos, T&& value) {
1479 return do_real_insert(
1482 [&] { return dataIsInternal(value); },
1483 [&] { return insert(cpos, T(std::move(value))); },
1484 [&](iterator start) { M_construct(start, std::move(value)); },
1485 [&](iterator start) { M_destroy(start); });
// Inserts n copies of `value` before cpos. dataIsInternalAndNotVT guards the
// aliasing case; construction/destruction operate on the whole n-element
// window via the range helpers.
1488 iterator insert(const_iterator cpos, size_type n, VT value) {
1489 return do_real_insert(
1492 [&] { return dataIsInternalAndNotVT(value); },
1493 [&] { return insert(cpos, n, T(value)); },
1494 [&](iterator start) { D_uninitialized_fill_n_a(start, n, value); },
1495 [&](iterator start) { D_destroy_range_a(start, start + n); });
// Range insert: tag-dispatches on the iterator category to the forward- or
// input-iterator implementation below (forward iterators allow computing n
// up front; input iterators do not).
1498 template <class It, class Category = typename
1499 std::iterator_traits<It>::iterator_category>
1500 iterator insert(const_iterator cpos, It first, It last) {
1501 return insert(cpos, first, last, Category());
// initializer_list insert: delegates to the iterator-pair overload.
1504 iterator insert(const_iterator cpos, std::initializer_list<T> il) {
1505 return insert(cpos, il.begin(), il.end());
1508 //---------------------------------------------------------------------------
1509 // insert dispatch for iterator types
// Forward-iterator insert: the range can be measured (std::distance) without
// consuming it, so this maps directly onto do_real_insert with an n-element
// copy-construct / range-destroy pair. No aliasing check is performed here.
1511 template <class FIt>
1512 iterator insert(const_iterator cpos, FIt first, FIt last,
1513 std::forward_iterator_tag) {
1514 size_type n = size_type(std::distance(first, last));
1515 return do_real_insert(
1518 [&] { return false; },
1519 [&] { return iterator{}; },
1520 [&](iterator start) { D_uninitialized_copy_a(start, first, last); },
1521 [&](iterator start) { D_destroy_range_a(start, start + n); });
// Input-iterator insert: a single-pass range cannot be measured up front, so
// instead (1) move the tail [position, end()) into a temporary fbvector,
// (2) destroy the moved-from tail and append from the input range one
// element at a time, then (3) move the saved tail back via the forward-
// iterator path. Returns an iterator to the first inserted element.
1524 template <class IIt>
1525 iterator insert(const_iterator cpos, IIt first, IIt last,
1526 std::input_iterator_tag) {
1527 T* position = const_cast<T*>(cpos);
1528 assert(isValid(position));
// idx survives any reallocation caused by emplace_back below.
1529 size_type idx = std::distance(begin(), position);
1531 fbvector storage(std::make_move_iterator(position),
1532 std::make_move_iterator(end()),
1533 A::select_on_container_copy_construction(impl_));
1534 M_destroy_range_e(position);
1535 for (; first != last; ++first) {
1536 emplace_back(*first);
1538 insert(cend(), std::make_move_iterator(storage.begin()),
1539 std::make_move_iterator(storage.end()));
1540 return impl_.b_ + idx;
1543 //===========================================================================
1544 //---------------------------------------------------------------------------
1545 // lexicographical functions
// Element-wise equality; the size check short-circuits std::equal.
1547 bool operator==(const fbvector& other) const {
1548 return size() == other.size() && std::equal(begin(), end(), other.begin());
// Defined as the negation of operator==.
1551 bool operator!=(const fbvector& other) const {
1552 return !(*this == other);
// Lexicographical ordering, matching std::vector semantics.
1555 bool operator<(const fbvector& other) const {
1556 return std::lexicographical_compare(
1557 begin(), end(), other.begin(), other.end());
// Derived from operator< with the operands swapped.
1560 bool operator>(const fbvector& other) const {
1561 return other < *this;
// Derived: a <= b  ⇔  !(a > b).
1564 bool operator<=(const fbvector& other) const {
1565 return !(*this > other);
// Derived: a >= b  ⇔  !(a < b).
1568 bool operator>=(const fbvector& other) const {
1569 return !(*this < other);
1572 //===========================================================================
1573 //---------------------------------------------------------------------------
// Grant the free-function buffer-transfer helpers (defined near the end of
// this file) direct access to impl_'s raw pointers.
1576 template <class _T, class _A>
1577 friend _T* relinquish(fbvector<_T, _A>&);
1579 template <class _T, class _A>
1580 friend void attach(fbvector<_T, _A>&, _T* data, size_t sz, size_t cap);
1582 }; // class fbvector
1585 //=============================================================================
1586 //-----------------------------------------------------------------------------
1587 // outlined functions (gcc, you finicky compiler you)
// Slow path of emplace_back, taken when the current capacity is exhausted.
// Strategy: (1) with the std allocator and a jemalloc-sized block, try to
// grow the existing allocation in place via xallocx; (2) otherwise allocate
// a new block, construct the new element, and relocate the old elements.
// NOTE(review): several lines (the xallocx pointer setup, catch blocks, and
// the final b_/e_ updates) are elided in this listing.
1589 template <typename T, typename Allocator>
1590 template <class... Args>
1591 void fbvector<T, Allocator>::emplace_back_aux(Args&&... args) {
// Target capacity in bytes, rounded to a jemalloc-friendly size.
1592 size_type byte_sz = folly::goodMallocSize(
1593 computePushBackCapacity() * sizeof(T));
1594 if (usingStdAllocator::value
1596 && ((impl_.z_ - impl_.b_) * sizeof(T) >=
1597 folly::jemallocMinInPlaceExpandable)) {
1598 // Try to reserve in place.
1599 // Ask xallocx to allocate in place at least size()+1 and at most sz space.
1600 // xallocx will allocate as much as possible within that range, which
1601 // is the best possible outcome: if sz space is available, take it all,
1602 // otherwise take as much as possible. If nothing is available, then fail.
1603 // In this fashion, we never relocate if there is a possibility of
1604 // expanding in place, and we never reallocate by less than the desired
1605 // amount unless we cannot expand further. Hence we will not reallocate
1606 // sub-optimally twice in a row (modulo the blocking memory being freed).
1607 size_type lower = folly::goodMallocSize(sizeof(T) + size() * sizeof(T));
1608 size_type upper = byte_sz;
1609 size_type extra = upper - lower;
1614 if ((actual = xallocx(p, lower, extra, 0)) >= lower) {
// In-place expansion succeeded: bump capacity and construct at the end.
1615 impl_.z_ = impl_.b_ + actual / sizeof(T);
1616 M_construct(impl_.e_, std::forward<Args>(args)...);
1622 // Reallocation failed. Perform a manual relocation.
1623 size_type sz = byte_sz / sizeof(T);
1624 auto newB = M_allocate(sz);
1625 auto newE = newB + size();
1627 if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
1628 // For linear memory access, relocate before construction.
1629 // By the test condition, relocate is noexcept.
1630 // Note that there is no cleanup to do if M_construct throws - that's
1631 // one of the beauties of relocation.
1632 // Benchmarks for this code have high variance, and seem to be close.
1633 relocate_move(newB, impl_.b_, impl_.e_);
1634 M_construct(newE, std::forward<Args>(args)...);
// Non-relocatable path: construct the new element first, then move the old
// elements; if the move throws, destroy the already-constructed element.
1637 M_construct(newE, std::forward<Args>(args)...);
1642 M_destroy(newE - 1);
// Exception cleanup: release the new block before rethrowing.
1647 M_deallocate(newB, sz);
// Success: free the old storage and adopt the new block.
1651 M_deallocate(impl_.b_, size());
1655 impl_.z_ = newB + sz;
1658 //=============================================================================
1659 //-----------------------------------------------------------------------------
1660 // specialized functions
// Non-member swap so `swap(a, b)` is found by ADL, mirroring std::swap for
// std::vector. Body elided in this listing — presumably delegates to the
// member swap; confirm against the full source.
1662 template <class T, class A>
1663 void swap(fbvector<T, A>& lhs, fbvector<T, A>& rhs) noexcept {
1667 //=============================================================================
1668 //-----------------------------------------------------------------------------
// Specialization hooking fbvector into folly's formatting machinery (see the
// <folly/FormatTraits.h> include at the top): index it like any other
// sequence container via the generic IndexableTraitsSeq base.
1674 template <class T, class A>
1675 struct IndexableTraits<fbvector<T, A>>
1676 : public IndexableTraitsSeq<fbvector<T, A>> {
1679 } // namespace detail
// Resizes *v to sz. Body elided in this listing — presumably also shrinks
// the allocation to fit (hence "compact"); confirm against the full source.
1681 template <class T, class A>
1682 void compactResize(fbvector<T, A>* v, size_t sz) {
1689 // relinquish and attach are not a members function specifically so that it is
1690 // awkward to call them. It is very easy to shoot yourself in the foot with
1693 // If you call relinquish, then it is your responsibility to free the data
1694 // and the storage, both of which may have been generated in a non-standard
1695 // way through the fbvector's allocator.
1697 // If you call attach, it is your responsibility to ensure that the fbvector
1698 // is fresh (size and capacity both zero), and that the supplied data is
1699 // capable of being manipulated by the allocator.
1700 // It is acceptable to supply a stack pointer IF:
1701 // (1) The vector's data does not outlive the stack pointer. This includes
1702 // extension of the data's life through a move operation.
1703 // (2) The pointer has enough capacity that the vector will never be
1705 // (3) Insert is not called on the vector; these functions have leeway to
1706 // relocate the vector even if there is enough capacity.
1707 // (4) A stack pointer is compatible with the fbvector's allocator.
// Steals the vector's buffer: nulls all three internal pointers, leaving the
// vector empty with zero capacity. Per the warning above, the caller becomes
// responsible for destroying the elements and freeing the storage (which may
// have come from a non-standard allocator).
1710 template <class T, class A>
1711 T* relinquish(fbvector<T, A>& v) {
1713 v.impl_.b_ = v.impl_.e_ = v.impl_.z_ = nullptr;
// Adopts an external buffer of `sz` live elements and `cap` total slots into
// a fresh fbvector. The vector must currently own nothing (asserted via
// data() == nullptr); per the warning above, the buffer must be manageable
// by the vector's allocator.
1717 template <class T, class A>
1718 void attach(fbvector<T, A>& v, T* data, size_t sz, size_t cap) {
1719 assert(v.data() == nullptr);
1721 v.impl_.e_ = data + sz;
1722 v.impl_.z_ = data + cap;
1725 } // namespace folly