2 * Copyright 2014 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 * For high-level documentation and usage examples see
19 * folly/docs/small_vector.md
21 * @author Jordan DeLong <delong.j@fb.com>
23 #ifndef FOLLY_SMALL_VECTOR_H_
24 #define FOLLY_SMALL_VECTOR_H_
26 #include "Portability.h"
30 #include <type_traits>
35 #include <boost/operators.hpp>
36 #include <boost/type_traits.hpp>
37 #include <boost/mpl/if.hpp>
38 #include <boost/mpl/eval_if.hpp>
39 #include <boost/mpl/vector.hpp>
40 #include <boost/mpl/front.hpp>
41 #include <boost/mpl/filter_view.hpp>
42 #include <boost/mpl/identity.hpp>
43 #include <boost/mpl/placeholders.hpp>
44 #include <boost/mpl/empty.hpp>
45 #include <boost/mpl/size.hpp>
46 #include <boost/mpl/count.hpp>
47 #include <boost/mpl/max.hpp>
49 #include "folly/Malloc.h"
51 #if defined(__GNUC__) && defined(__x86_64__)
52 # include "folly/SmallLocks.h"
53 # define FB_PACKED __attribute__((packed))
58 #if FOLLY_HAVE_MALLOC_SIZE
59 extern "C" std::size_t malloc_size(const void*);
60 # if !FOLLY_HAVE_MALLOC_USABLE_SIZE
61 # define malloc_usable_size malloc_size
63 # ifndef malloc_usable_size
64 # define malloc_usable_size malloc_size
68 // Ignore shadowing warnings within this file, so includers can use -Wshadow.
69 #pragma GCC diagnostic push
70 #pragma GCC diagnostic ignored "-Wshadow"
74 //////////////////////////////////////////////////////////////////////
76 namespace small_vector_policy {
78 //////////////////////////////////////////////////////////////////////
81 * A flag which makes us refuse to use the heap at all. If we
82 * overflow the in situ capacity we throw an exception.
87 * Passing this policy will cause small_vector to provide lock() and
88 * unlock() functions using a 1-bit spin lock in the size value.
90 * Note that this is intended for a fairly specialized (although
91 * strangely common at facebook) use case, where you have billions of
92 * vectors in memory where none of them are "hot" and most of them are
93 * small. This allows you to get fine-grained locks without spending
94 * a lot of memory on mutexes (the alternative of a large hashtable of
95 * locks leads to extra cache misses in the lookup path).
101 //////////////////////////////////////////////////////////////////////
103 } // small_vector_policy
105 //////////////////////////////////////////////////////////////////////
107 template<class T, std::size_t M, class A, class B, class C>
110 //////////////////////////////////////////////////////////////////////
115 * Move a range to a range of uninitialized memory. Assumes the
116 * ranges don't overlap.
// NOTE(review): this file is a partial extraction -- interior lines
// (template headers, try/catch framing, closing braces) are missing
// throughout; comments describe only what is visible.
119 typename std::enable_if<
120 !FOLLY_IS_TRIVIALLY_COPYABLE(T)
122 moveToUninitialized(T* first, T* last, T* out) {
// General (non-trivially-copyable) case: move-construct each source
// element into the raw destination memory via placement new.
123 auto const count = last - first;
126 for (; idx < count; ++first, ++idx) {
127 new (&out[idx]) T(std::move(*first));
130 // Even for callers trying to give the strong guarantee
131 // (e.g. push_back) it's ok to assume here that we don't have to
132 // move things back and that it was a copy constructor that
133 // threw: if someone throws from a move constructor the effects
// Cleanup path: destroys the `idx` elements already constructed in
// `out` before rethrowing (loop body not visible in this extraction),
// giving the basic exception guarantee.
135 for (std::size_t i = 0; i < idx; ++i) {
142 // Specialization for trivially copyable types.
144 typename std::enable_if<
145 FOLLY_IS_TRIVIALLY_COPYABLE(T)
147 moveToUninitialized(T* first, T* last, T* out) {
// For trivially copyable T a raw byte copy is equivalent to
// element-wise move construction, so a single memmove suffices.
148 std::memmove(out, first, (last - first) * sizeof *first);
152 * Move objects in memory to the right into some uninitialized
153 * memory, where the region overlaps. This doesn't just use
154 * std::move_backward because move_backward only works if all the
155 * memory is initialized to type T already.
158 typename std::enable_if<
159 !FOLLY_IS_TRIVIALLY_COPYABLE(T)
161 moveObjectsRight(T* first, T* lastConstructed, T* realLast) {
// No uninitialized gap -> nothing to do (early-out body not visible
// in this extraction).
162 if (lastConstructed == realLast) {
166 T* end = first - 1; // Past the end going backwards.
167 T* out = realLast - 1;
168 T* in = lastConstructed - 1;
// Phase 1: move-construct into the uninitialized tail slots.
170 for (; in != end && out >= lastConstructed; --in, --out) {
171 new (out) T(std::move(*in));
// Phase 2: move-assign over slots that already hold constructed Ts.
173 for (; in != end; --in, --out) {
174 *out = std::move(*in);
// Phase 3: source exhausted but uninitialized slots remain (can happen
// when the moved range is shorter than the gap); body not visible here.
176 for (; out >= lastConstructed; --out) {
180 // We want to make sure the same stuff is uninitialized memory
181 // if we exit via an exception (this is to make sure we provide
182 // the basic exception safety guarantee for insert functions).
183 if (out < lastConstructed) {
184 out = lastConstructed - 1;
// Exception path: destroy everything constructed past `out` so the
// uninitialized region is the same as on entry.
186 for (auto it = out + 1; it != realLast; ++it) {
193 // Specialization for trivially copyable types. The call to
194 // std::move_backward here will just turn into a memmove. (TODO:
195 // change to std::is_trivially_copyable when that works.)
197 typename std::enable_if<
198 FOLLY_IS_TRIVIALLY_COPYABLE(T)
200 moveObjectsRight(T* first, T* lastConstructed, T* realLast) {
201 std::move_backward(first, lastConstructed, realLast);
205 * Populate a region of memory using `op' to construct elements. If
206 * anything throws, undo what we did.
208 template<class T, class Function>
209 void populateMemForward(T* mem, std::size_t n, Function const& op) {
// Invokes `op` once per uninitialized slot, in order; on exception the
// second loop destroys the `idx` elements constructed so far (basic
// guarantee). The try/catch framing is not visible in this extraction.
212 for (size_t i = 0; i < n; ++i) {
217 for (std::size_t i = 0; i < idx; ++i) {
224 template<class SizeType, bool ShouldUseHeap>
225 struct IntegralSizePolicy {
// Plain size policy: the current size lives in an integral member,
// with the most-significant bit of SizeType reserved as the
// "elements live on the heap" flag when ShouldUseHeap is true.
226 typedef SizeType InternalSizeType;
228 IntegralSizePolicy() : size_(0) {}
// Maximum representable size: every bit except the extern-flag bit.
231 std::size_t policyMaxSize() const {
232 return SizeType(~kExternMask);
// Logical size with the extern flag masked off.
235 std::size_t doSize() const {
236 return size_ & ~kExternMask;
// Nonzero iff the heap (extern) flag is set.
239 std::size_t isExtern() const {
240 return kExternMask & size_;
// Sets or clears the extern flag without disturbing the size bits
// (the if/else framing is not visible in this extraction).
243 void setExtern(bool b) {
245 size_ |= kExternMask;
247 size_ &= ~kExternMask;
// Stores a new size while preserving the extern flag.
251 void setSize(std::size_t sz) {
252 assert(sz <= policyMaxSize());
253 size_ = (kExternMask & size_) | SizeType(sz);
256 void swapSizePolicy(IntegralSizePolicy& o) {
257 std::swap(size_, o.size_);
261 static bool const kShouldUseHeap = ShouldUseHeap;
// Extern-flag mask: the top bit of SizeType when heap spill is allowed;
// the `false` arm (presumably 0) is not visible in this extraction.
264 static SizeType const kExternMask =
265 kShouldUseHeap ? SizeType(1) << (sizeof(SizeType) * 8 - 1)
272 template<class SizeType, bool ShouldUseHeap>
273 struct OneBitMutexImpl {
// Size policy used when small_vector_policy::OneBitMutex is requested:
// the size, the extern flag, and a 1-bit spin lock all share a single
// PicoSpinLock-wrapped integer (x86-64 only; see the fallback below).
274 typedef SizeType InternalSizeType;
276 OneBitMutexImpl() { psl_.init(); }
// Lock interface, forwarded to the PicoSpinLock.
278 void lock() const { psl_.lock(); }
279 void unlock() const { psl_.unlock(); }
280 bool try_lock() const { return psl_.try_lock(); }
283 static bool const kShouldUseHeap = ShouldUseHeap;
// Max size excludes both the lock bit and the extern-flag bit.
285 std::size_t policyMaxSize() const {
286 return SizeType(~(SizeType(1) << kLockBit | kExternMask));
289 std::size_t doSize() const {
290 return psl_.getData() & ~kExternMask;
293 std::size_t isExtern() const {
294 return psl_.getData() & kExternMask;
// Sets/clears the extern flag by round-tripping through setSize
// (if/else framing not visible in this extraction).
297 void setExtern(bool b) {
299 setSize(SizeType(doSize()) | kExternMask);
301 setSize(SizeType(doSize()) & ~kExternMask);
305 void setSize(std::size_t sz) {
306 assert(sz < (std::size_t(1) << kLockBit));
307 psl_.setData((kExternMask & psl_.getData()) | SizeType(sz));
310 void swapSizePolicy(OneBitMutexImpl& o) {
311 std::swap(psl_, o.psl_);
// Bit layout: lock bit is the MSB; extern flag is the next bit down.
315 static SizeType const kLockBit = sizeof(SizeType) * 8 - 1;
316 static SizeType const kExternMask =
317 kShouldUseHeap ? SizeType(1) << (sizeof(SizeType) * 8 - 2)
320 PicoSpinLock<SizeType,kLockBit> psl_;
// Fallback definition for non-x86-64 builds (presumably inside the
// #else branch of the platform guard -- the preprocessor lines are not
// visible in this extraction). Instantiating it fails the static_assert
// with a readable message, since SizeType is never `void` in practice.
323 template<class SizeType, bool ShouldUseHeap>
324 struct OneBitMutexImpl {
325 static_assert(std::is_same<SizeType,void>::value,
326 "OneBitMutex only works on x86-64");
331 * If you're just trying to use this class, ignore everything about
332 * this next small_vector_base class thing.
334 * The purpose of this junk is to minimize sizeof(small_vector<>)
335 * and allow specifying the template parameters in whatever order is
336 * convenient for the user. There's a few extra steps here to try
337 * to keep the error messages at least semi-reasonable.
339 * Apologies for all the black magic.
341 namespace mpl = boost::mpl;
342 template<class Value,
343 std::size_t RequestedMaxInline,
347 struct small_vector_base {
348 typedef mpl::vector<InPolicyA,InPolicyB,InPolicyC> PolicyList;
351 * Determine the size type
// Pick the (at most one) integral type out of the policy list;
// default to std::size_t when none was supplied.
353 typedef typename mpl::filter_view<
355 boost::is_integral<mpl::placeholders::_1>
357 typedef typename mpl::eval_if<
358 mpl::empty<Integrals>,
359 mpl::identity<std::size_t>,
360 mpl::front<Integrals>
363 static_assert(std::is_unsigned<SizeType>::value,
364 "Size type should be an unsigned integral type");
365 static_assert(mpl::size<Integrals>::value == 0 ||
366 mpl::size<Integrals>::value == 1,
367 "Multiple size types specified in small_vector<>");
370 * Figure out if we're supposed to supply a one-bit mutex. :)
372 typedef typename mpl::count<
373 PolicyList,small_vector_policy::OneBitMutex
376 static_assert(HasMutex::value == 0 || HasMutex::value == 1,
377 "Multiple copies of small_vector_policy::OneBitMutex "
378 "supplied; this is probably a mistake");
381 * Determine whether we should allow spilling to the heap or not.
383 typedef typename mpl::count<
384 PolicyList,small_vector_policy::NoHeap
387 static_assert(HasNoHeap::value == 0 || HasNoHeap::value == 1,
388 "Multiple copies of small_vector_policy::NoHeap "
389 "supplied; this is probably a mistake");
392 * Make the real policy base classes.
// Choose the mutex-bearing size policy iff OneBitMutex was requested;
// either way, heap spill is disabled when NoHeap was supplied.
394 typedef typename mpl::if_<
396 OneBitMutexImpl<SizeType,!HasNoHeap::value>,
397 IntegralSizePolicy<SizeType,!HasNoHeap::value>
398 >::type ActualSizePolicy;
401 * Now inherit from them all. This is done in such a convoluted
402 * way to make sure we get the empty base optimization on all these
403 * types to keep sizeof(small_vector<>) minimal.
405 typedef boost::totally_ordered1<
406 small_vector<Value,RequestedMaxInline,InPolicyA,InPolicyB,InPolicyC>,
// Tags a pointer by setting its least-significant bit. Only valid when
// the pointee allocation is at least 2-byte aligned.
412 T* pointerFlagSet(T* p) {
413 return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(p) | 1);
// Tests whether the low-bit tag is set.
416 bool pointerFlagGet(T* p) {
417 return reinterpret_cast<uintptr_t>(p) & 1;
// Strips the low-bit tag, recovering the real pointer.
420 T* pointerFlagClear(T* p) {
421 return reinterpret_cast<T*>(
422 reinterpret_cast<uintptr_t>(p) & ~uintptr_t(1));
// Byte-offset pointer arithmetic on a void*.
424 inline void* shiftPointer(void* p, size_t sizeBytes) {
425 return static_cast<char*>(p) + sizeBytes;
429 //////////////////////////////////////////////////////////////////////
// small_vector: vector-like container that stores up to MaxInline
// elements inline (in situ) and spills to the heap beyond that, unless
// the NoHeap policy forbids it. Policies also select the size type and
// the optional one-bit mutex. NOTE(review): many interior lines of this
// class (the `class small_vector` header line, else-branches, closing
// braces) were dropped by the extraction; comments below only describe
// what is visible.
431 template<class Value,
432 std::size_t RequestedMaxInline = 1,
433 class PolicyA = void,
434 class PolicyB = void,
435 class PolicyC = void>
437 : public detail::small_vector_base<
438 Value,RequestedMaxInline,PolicyA,PolicyB,PolicyC
441 typedef typename detail::small_vector_base<
442 Value,RequestedMaxInline,PolicyA,PolicyB,PolicyC
444 typedef typename BaseType::InternalSizeType InternalSizeType;
447 * Figure out the max number of elements we should inline. (If
448 * the user asks for less inlined elements than we can fit unioned
449 * into our value_type*, we will inline more than they asked.)
452 MaxInline = boost::mpl::max<
453 boost::mpl::int_<sizeof(Value*) / sizeof(Value)>,
454 boost::mpl::int_<RequestedMaxInline>
// Standard container typedefs.
459 typedef std::size_t size_type;
460 typedef Value value_type;
461 typedef value_type& reference;
462 typedef value_type const& const_reference;
463 typedef value_type* iterator;
464 typedef value_type const* const_iterator;
465 typedef std::ptrdiff_t difference_type;
467 typedef std::reverse_iterator<iterator> reverse_iterator;
468 typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
470 explicit small_vector() {}
472 small_vector(small_vector const& o) {
473 assign(o.begin(), o.end());
// Move construction delegates to move assignment.
476 small_vector(small_vector&& o) {
477 *this = std::move(o);
480 small_vector(std::initializer_list<value_type> il) {
481 constructImpl(il.begin(), il.end(), std::false_type());
484 explicit small_vector(size_type n, value_type const& t = value_type()) {
489 explicit small_vector(Arg arg1, Arg arg2) {
490 // Forward using std::is_arithmetic to get to the proper
491 // implementation; this disambiguates between the iterators and
492 // (size_t, value_type) meaning for this constructor.
493 constructImpl(arg1, arg2, std::is_arithmetic<Arg>());
// Destructor: destroy all elements, then free the heap block if any
// (freeing code not visible in this extraction).
497 for (auto& t : *this) {
500 if (this->isExtern()) {
505 small_vector& operator=(small_vector const& o) {
506 assign(o.begin(), o.end());
// Move assignment: visible path move-constructs o's elements one by
// one; the self-assignment / heap-steal branches are not visible here.
510 small_vector& operator=(small_vector&& o) {
514 for (std::size_t i = 0; i < o.size(); ++i) {
515 new (data() + i) value_type(std::move(o[i]));
517 this->setSize(o.size());
524 bool operator==(small_vector const& o) const {
525 return size() == o.size() && std::equal(begin(), end(), o.begin());
// operator< plus boost::totally_ordered1 supplies the remaining
// comparison operators.
528 bool operator<(small_vector const& o) const {
529 return std::lexicographical_compare(begin(), end(), o.begin(), o.end());
532 size_type max_size() const {
533 return !BaseType::kShouldUseHeap ? MaxInline
534 : this->policyMaxSize();
537 size_type size() const { return this->doSize(); }
538 bool empty() const { return !size(); }
540 iterator begin() { return data(); }
541 iterator end() { return data() + size(); }
542 const_iterator begin() const { return data(); }
543 const_iterator end() const { return data() + size(); }
544 const_iterator cbegin() const { return begin(); }
545 const_iterator cend() const { return end(); }
547 reverse_iterator rbegin() { return reverse_iterator(end()); }
548 reverse_iterator rend() { return reverse_iterator(begin()); }
550 const_reverse_iterator rbegin() const {
551 return const_reverse_iterator(end());
554 const_reverse_iterator rend() const {
555 return const_reverse_iterator(begin());
558 const_reverse_iterator crbegin() const { return rbegin(); }
559 const_reverse_iterator crend() const { return rend(); }
562 * Usually one of the simplest functions in a Container-like class
563 * but a bit more complex here. We have to handle all combinations
564 * of in-place vs. heap between this and o.
566 * Basic guarantee only. Provides the nothrow guarantee iff our
567 * value_type has a nothrow move or copy constructor.
569 void swap(small_vector& o) {
570 using std::swap; // Allow ADL on swap for our value_type.
// Case 1: both on the heap -- swap sizes, heap pointers, capacities.
572 if (this->isExtern() && o.isExtern()) {
573 this->swapSizePolicy(o);
575 auto thisCapacity = this->capacity();
576 auto oCapacity = o.capacity();
578 std::swap(unpackHack(&u.pdata_.heap_), unpackHack(&o.u.pdata_.heap_));
580 this->setCapacity(oCapacity);
581 o.setCapacity(thisCapacity);
// Case 2: both inline -- element-wise swap of the common prefix, then
// move the larger one's tail into the smaller.
586 if (!this->isExtern() && !o.isExtern()) {
587 auto& oldSmall = size() < o.size() ? *this : o;
588 auto& oldLarge = size() < o.size() ? o : *this;
590 for (size_type i = 0; i < oldSmall.size(); ++i) {
591 swap(oldSmall[i], oldLarge[i]);
594 size_type i = oldSmall.size();
596 for (; i < oldLarge.size(); ++i) {
597 new (&oldSmall[i]) value_type(std::move(oldLarge[i]));
598 oldLarge[i].~value_type();
// Exception cleanup: destroy the remaining tail so sizes stay sane.
601 for (; i < oldLarge.size(); ++i) {
602 oldLarge[i].~value_type();
604 oldLarge.setSize(oldSmall.size());
607 this->swapSizePolicy(o);
611 // isExtern != o.isExtern()
// Case 3: one heap, one inline -- move inline elements into the
// heap-owner's now-unused inline buffer, then hand the heap block over.
612 auto& oldExtern = o.isExtern() ? o : *this;
613 auto& oldIntern = o.isExtern() ? *this : o;
615 auto oldExternCapacity = oldExtern.capacity();
616 auto oldExternHeap = oldExtern.u.pdata_.heap_;
618 auto buff = oldExtern.u.buffer();
621 for (; i < oldIntern.size(); ++i) {
622 new (&buff[i]) value_type(std::move(oldIntern[i]));
623 oldIntern[i].~value_type();
// Exception path: undo the partial move and restore the heap pointer.
626 for (size_type kill = 0; kill < i; ++kill) {
627 buff[kill].~value_type();
629 for (; i < oldIntern.size(); ++i) {
630 oldIntern[i].~value_type();
632 oldIntern.setSize(0);
633 oldExtern.u.pdata_.heap_ = oldExternHeap;
634 oldExtern.setCapacity(oldExternCapacity);
637 oldIntern.u.pdata_.heap_ = oldExternHeap;
638 this->swapSizePolicy(o);
639 oldIntern.setCapacity(oldExternCapacity);
// resize: shrink destroys the tail via erase; grow default-constructs
// (the branch framing and reserve call are not visible here).
642 void resize(size_type sz) {
644 erase(begin() + sz, end());
648 detail::populateMemForward(begin() + size(), sz - size(),
649 [&] (void* p) { new (p) value_type(); }
654 void resize(size_type sz, value_type const& v) {
656 erase(begin() + sz, end());
660 detail::populateMemForward(begin() + size(), sz - size(),
661 [&] (void* p) { new (p) value_type(v); }
// data() dispatches between the heap block and the inline buffer.
666 value_type* data() noexcept {
667 return this->isExtern() ? u.heap() : u.buffer();
670 value_type const* data() const noexcept {
671 return this->isExtern() ? u.heap() : u.buffer();
674 template<class ...Args>
675 iterator emplace(const_iterator p, Args&&... args) {
677 emplace_back(std::forward<Args>(args)...);
682 * We implement emplace at places other than at the back with a
683 * temporary for exception safety reasons. It is possible to
684 * avoid having to do this, but it becomes hard to maintain the
685 * basic exception safety guarantee (unless you respond to a copy
686 * constructor throwing by clearing the whole vector).
688 * The reason for this is that otherwise you have to destruct an
689 * element before constructing this one in its place---if the
690 * constructor throws, you either need a nothrow default
691 * constructor or a nothrow copy/move to get something back in the
692 * "gap", and the vector requirements don't guarantee we have any
693 * of these. Clearing the whole vector is a legal response in
694 * this situation, but it seems like this implementation is easy
695 * enough and probably better.
697 return insert(p, value_type(std::forward<Args>(args)...));
700 void reserve(size_type sz) {
// capacity(): stored explicitly when available, otherwise derived from
// the usable size of the heap allocation; inline branch not visible.
704 size_type capacity() const {
705 if (this->isExtern()) {
706 if (u.hasCapacity()) {
707 return *u.getCapacity();
709 return malloc_usable_size(u.pdata_.heap_) / sizeof(value_type);
// shrink_to_fit: copy into a right-sized temporary and swap.
714 void shrink_to_fit() {
715 if (!this->isExtern()) {
719 small_vector tmp(begin(), end());
723 template<class ...Args>
724 void emplace_back(Args&&... args) {
725 // call helper function for static dispatch of special cases
726 emplaceBack(std::forward<Args>(args)...);
// push_back: grow by 1.5x (min 2) when full, inserting the new element
// during reallocation so it survives the move of existing elements.
729 void push_back(value_type&& t) {
730 if (capacity() == size()) {
731 makeSize(std::max(size_type(2), 3 * size() / 2), &t, size());
733 new (end()) value_type(std::move(t));
735 this->setSize(size() + 1);
738 void push_back(value_type const& t) {
739 // Make a copy and forward to the rvalue value_type&& overload
741 push_back(value_type(t));
748 iterator insert(const_iterator constp, value_type&& t) {
749 iterator p = unconst(constp);
// Insert-at-end degenerates to push_back (guard framing not visible).
752 push_back(std::move(t));
756 auto offset = p - begin();
// If reallocating anyway, makeSize places the new element directly;
// otherwise shift the tail right and assign into the gap.
758 if (capacity() == size()) {
759 makeSize(size() + 1, &t, offset);
760 this->setSize(this->size() + 1);
762 makeSize(size() + 1);
763 detail::moveObjectsRight(data() + offset,
765 data() + size() + 1);
766 this->setSize(size() + 1);
767 data()[offset] = std::move(t);
769 return begin() + offset;
773 iterator insert(const_iterator p, value_type const& t) {
774 // Make a copy and forward to the rvalue value_type&& overload
776 return insert(p, value_type(t));
779 iterator insert(const_iterator pos, size_type n, value_type const& val) {
780 auto offset = pos - begin();
781 makeSize(size() + n);
782 detail::moveObjectsRight(data() + offset,
784 data() + size() + n);
785 this->setSize(size() + n);
786 std::generate_n(begin() + offset, n, [&] { return val; });
787 return begin() + offset;
791 iterator insert(const_iterator p, Arg arg1, Arg arg2) {
792 // Forward using std::is_arithmetic to get to the proper
793 // implementation; this disambiguates between the iterators and
794 // (size_t, value_type) meaning for this function.
795 return insertImpl(unconst(p), arg1, arg2, std::is_arithmetic<Arg>());
798 iterator insert(const_iterator p, std::initializer_list<value_type> il) {
799 return insert(p, il.begin(), il.end());
// Single-element erase: shift left, destroy the last slot.
802 iterator erase(const_iterator q) {
803 std::move(unconst(q) + 1, end(), unconst(q));
804 (data() + size() - 1)->~value_type();
805 this->setSize(size() - 1);
809 iterator erase(const_iterator q1, const_iterator q2) {
810 std::move(unconst(q2), end(), unconst(q1));
811 for (auto it = q1; it != end(); ++it) {
814 this->setSize(size() - (q2 - q1));
819 erase(begin(), end());
823 void assign(Arg first, Arg last) {
825 insert(end(), first, last);
828 void assign(std::initializer_list<value_type> il) {
829 assign(il.begin(), il.end());
832 void assign(size_type n, const value_type& t) {
837 reference front() { assert(!empty()); return *begin(); }
838 reference back() { assert(!empty()); return *(end() - 1); }
839 const_reference front() const { assert(!empty()); return *begin(); }
840 const_reference back() const { assert(!empty()); return *(end() - 1); }
842 reference operator[](size_type i) {
844 return *(begin() + i);
847 const_reference operator[](size_type i) const {
849 return *(begin() + i);
// at(): bounds-checked access; throws std::out_of_range.
852 reference at(size_type i) {
854 throw std::out_of_range("index out of range");
859 const_reference at(size_type i) const {
861 throw std::out_of_range("index out of range");
869 * Does the same as emplace_back; this helper exists so the rvalue
870 * special case can be caught by the next overload.
872 template<class ...Args>
873 void emplaceBack(Args&&... args) {
874 makeSize(size() + 1);
875 new (end()) value_type(std::forward<Args>(args)...);
876 this->setSize(size() + 1);
880 * Special case of emplaceBack for rvalue
882 void emplaceBack(value_type&& t) {
883 push_back(std::move(t));
886 static iterator unconst(const_iterator it) {
887 return const_cast<iterator>(it);
891 * g++ doesn't allow you to bind a non-const reference to a member
892 * of a packed structure, presumably because it would make it too
893 * easy to accidentally make an unaligned memory access?
895 template<class T> static T& unpackHack(T* p) {
899 // The std::false_type argument is part of disambiguating the
900 // iterator insert functions from integral types (see insert().)
902 iterator insertImpl(iterator pos, It first, It last, std::false_type) {
903 typedef typename std::iterator_traits<It>::iterator_category categ;
// Single-pass iterators: insert one at a time (may shift repeatedly).
904 if (std::is_same<categ,std::input_iterator_tag>::value) {
905 auto offset = pos - begin();
906 while (first != last) {
907 pos = insert(pos, *first++);
910 return begin() + offset;
// Multi-pass iterators: one resize + one shift + one bulk copy.
913 auto distance = std::distance(first, last);
914 auto offset = pos - begin();
915 makeSize(size() + distance);
916 detail::moveObjectsRight(data() + offset,
918 data() + size() + distance);
919 this->setSize(size() + distance);
920 std::copy_n(first, distance, begin() + offset);
921 return begin() + offset;
924 iterator insertImpl(iterator pos, size_type n, const value_type& val,
926 // The true_type means this should call the size_t,value_type
927 // overload. (See insert().)
928 return insert(pos, n, val);
931 // The std::false_type argument came from std::is_arithmetic as part
932 // of disambiguating an overload (see the comment in the
935 void constructImpl(It first, It last, std::false_type) {
936 typedef typename std::iterator_traits<It>::iterator_category categ;
937 if (std::is_same<categ,std::input_iterator_tag>::value) {
938 // With iterators that only allow a single pass, we can't really
939 // do anything sane here.
940 while (first != last) {
946 auto distance = std::distance(first, last);
948 this->setSize(distance);
950 detail::populateMemForward(data(), distance,
951 [&] (void* p) { new (p) value_type(*first++); }
955 void doConstruct(size_type n, value_type const& val) {
958 detail::populateMemForward(data(), n,
959 [&] (void* p) { new (p) value_type(val); }
963 // The true_type means we should forward to the size_t,value_type
965 void constructImpl(size_type n, value_type const& val, std::true_type) {
// Convenience overload: when inserting during growth the new element's
// position defaults to the back.
969 void makeSize(size_type size, value_type* v = NULL) {
970 makeSize(size, v, size - 1);
974 * Ensure we have a large enough memory region to be size `size'.
975 * Will move/copy elements if we are spilling to heap_ or needed to
976 * allocate a new region, but if resized in place doesn't initialize
977 * anything in the new region. In any case doesn't change size().
978 * Supports insertion of new element during reallocation by given
979 * pointer to new element and position of new element.
980 * NOTE: If reallocation is not needed, and new element should be
981 * inserted in the middle of vector (not at the end), do the move
982 * objects and insertion outside the function, otherwise exception is thrown.
984 void makeSize(size_type size, value_type* v, size_type pos) {
985 if (size > this->max_size()) {
986 throw std::length_error("max_size exceeded in small_vector");
988 if (size <= this->capacity()) {
992 auto needBytes = size * sizeof(value_type);
993 // If the capacity isn't explicitly stored inline, but the heap
994 // allocation is grown to over some threshold, we should store
995 // a capacity at the front of the heap allocation.
996 bool heapifyCapacity =
997 !kHasInlineCapacity && needBytes > kHeapifyCapacityThreshold;
998 if (heapifyCapacity) {
999 needBytes += kHeapifyCapacitySize;
1001 auto const sizeBytes = goodMallocSize(needBytes);
1002 void* newh = checkedMalloc(sizeBytes);
1003 // We expect newh to be at least 2-aligned, because we want to
1004 // use its least significant bit as a flag.
1005 assert(!detail::pointerFlagGet(newh));
// Element storage starts after the stored capacity word, if any.
1007 value_type* newp = static_cast<value_type*>(
1009 detail::shiftPointer(newh, kHeapifyCapacitySize) :
// Construct the inserted element first, then migrate the old elements
// around it; cleanup paths destroy what was built if a move throws.
1015 new (&newp[pos]) value_type(std::move(*v));
1021 // move old elements to the left of the new one
1023 detail::moveToUninitialized(begin(), begin() + pos, newp);
1025 newp[pos].~value_type();
1030 // move old elements to the right of the new one
1033 detail::moveToUninitialized(begin() + pos, end(), newp + pos + 1);
1036 for (size_type i = 0; i <= pos; ++i) {
1037 newp[i].~value_type();
1043 // move without inserting new element
1045 detail::moveToUninitialized(begin(), end(), newp);
// Destroy old elements and release the old heap block, then install
// the new block (flag-tagged when the capacity is stored in-block).
1051 for (auto& val : *this) {
1055 if (this->isExtern()) {
1058 auto availableSizeBytes = sizeBytes;
1059 if (heapifyCapacity) {
1060 u.pdata_.heap_ = detail::pointerFlagSet(newh);
1061 availableSizeBytes -= kHeapifyCapacitySize;
1063 u.pdata_.heap_ = newh;
1065 this->setExtern(true);
1066 this->setCapacity(availableSizeBytes / sizeof(value_type));
1070 * This will set the capacity field, stored inline in the storage_ field
1071 * if there is sufficient room to store it.
1073 void setCapacity(size_type newCapacity) {
1074 assert(this->isExtern());
1075 if (u.hasCapacity()) {
1076 assert(newCapacity < std::numeric_limits<InternalSizeType>::max());
1077 *u.getCapacity() = InternalSizeType(newCapacity);
// Heap pointer plus an explicit capacity field, used when it fits in
// no more space than the inline element buffer would occupy.
1082 struct HeapPtrWithCapacity {
1084 InternalSizeType capacity_;
1086 InternalSizeType* getCapacity() {
1092 // Lower order bit of heap_ is used as flag to indicate whether capacity is
1093 // stored at the front of the heap allocation.
1096 InternalSizeType* getCapacity() {
1097 assert(detail::pointerFlagGet(heap_));
1098 return static_cast<InternalSizeType*>(
1099 detail::pointerFlagClear(heap_));
// NOTE(review): "__x86_64_" here looks like a typo for "__x86_64__" --
// as written this branch is never taken and the aligned_storage
// fallback below is always used; confirm against upstream.
1103 #if defined(__x86_64_)
1104 typedef unsigned char InlineStorageType[sizeof(value_type) * MaxInline];
1106 typedef typename std::aligned_storage<
1107 sizeof(value_type) * MaxInline,
1109 >::type InlineStorageType;
1112 static bool const kHasInlineCapacity =
1113 sizeof(HeapPtrWithCapacity) < sizeof(InlineStorageType);
1115 // This value should be a multiple of the word size.
1116 static size_t const kHeapifyCapacitySize = sizeof(
1117 typename std::aligned_storage<
1118 sizeof(InternalSizeType),
1121 // Threshold to control capacity heapifying.
1122 static size_t const kHeapifyCapacityThreshold =
1123 100 * kHeapifyCapacitySize;
1125 typedef typename std::conditional<
1127 HeapPtrWithCapacity,
1129 >::type PointerType;
// Data: union of the inline buffer and the heap pointer/capacity rep.
1132 explicit Data() { pdata_.heap_ = 0; }
1135 InlineStorageType storage_;
1137 value_type* buffer() noexcept {
1138 void* vp = &storage_;
1139 return static_cast<value_type*>(vp);
1141 value_type const* buffer() const noexcept {
1142 return const_cast<Data*>(this)->buffer();
// heap(): skip past the stored capacity word when the tag bit says
// the capacity lives at the front of the allocation.
1144 value_type* heap() noexcept {
1145 if (kHasInlineCapacity || !detail::pointerFlagGet(pdata_.heap_)) {
1146 return static_cast<value_type*>(pdata_.heap_);
1148 return static_cast<value_type*>(
1149 detail::shiftPointer(
1150 detail::pointerFlagClear(pdata_.heap_), kHeapifyCapacitySize));
1152 value_type const* heap() const noexcept {
1153 return const_cast<Data*>(this)->heap();
1156 bool hasCapacity() const {
1157 return kHasInlineCapacity || detail::pointerFlagGet(pdata_.heap_);
1159 InternalSizeType* getCapacity() {
1160 return pdata_.getCapacity();
1162 InternalSizeType* getCapacity() const {
1163 return const_cast<Data*>(this)->getCapacity();
// Frees the heap block; clears the tag bit first so the original
// malloc'd pointer is what gets freed.
1167 auto vp = detail::pointerFlagClear(pdata_.heap_);
1173 //////////////////////////////////////////////////////////////////////
1175 // Basic guarantee only, or provides the nothrow guarantee iff T has a
1176 // nothrow move or copy constructor.
// Free-function swap for ADL; forwards to the member swap (the call
// itself is not visible in this extraction).
1177 template<class T, std::size_t MaxInline, class A, class B, class C>
1178 void swap(small_vector<T,MaxInline,A,B,C>& a,
1179 small_vector<T,MaxInline,A,B,C>& b) {
1183 //////////////////////////////////////////////////////////////////////
1187 #pragma GCC diagnostic pop