2 * Copyright 2013 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 * For high-level documentation and usage examples see folly/doc/small_vector.md
20 * @author Jordan DeLong <delong.j@fb.com>
22 #ifndef FOLLY_SMALL_VECTOR_H_
23 #define FOLLY_SMALL_VECTOR_H_
#include "Portability.h"

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <initializer_list>
#include <iterator>
#include <limits>
#include <new>
#include <stdexcept>
#include <type_traits>
#include <utility>

#include <boost/operators.hpp>
#include <boost/type_traits.hpp>
#include <boost/mpl/if.hpp>
#include <boost/mpl/eval_if.hpp>
#include <boost/mpl/vector.hpp>
#include <boost/mpl/front.hpp>
#include <boost/mpl/filter_view.hpp>
#include <boost/mpl/identity.hpp>
#include <boost/mpl/placeholders.hpp>
#include <boost/mpl/empty.hpp>
#include <boost/mpl/size.hpp>
#include <boost/mpl/count.hpp>
#include <boost/mpl/max.hpp>

#include "folly/Malloc.h"
50 #if defined(__GNUC__) && defined(__x86_64__)
51 # include "folly/SmallLocks.h"
52 # define FB_PACKED __attribute__((packed))
57 #if FOLLY_HAVE_MALLOC_SIZE
58 extern "C" std::size_t malloc_size(const void*);
59 # if !FOLLY_HAVE_MALLOC_USABLE_SIZE
60 # define malloc_usable_size malloc_size
62 # ifndef malloc_usable_size
63 # define malloc_usable_size malloc_size
67 // Ignore shadowing warnings within this file, so includers can use -Wshadow.
68 #pragma GCC diagnostic push
69 #pragma GCC diagnostic ignored "-Wshadow"
73 //////////////////////////////////////////////////////////////////////
namespace small_vector_policy {

//////////////////////////////////////////////////////////////////////

/*
 * A flag which makes us refuse to use the heap at all.  If we
 * overflow the in situ capacity we throw an exception.
 */
struct NoHeap;

/*
 * Passing this policy will cause small_vector to provide lock() and
 * unlock() functions using a 1-bit spin lock in the size value.
 *
 * Note that this is intended for a fairly specialized (although
 * strangely common at facebook) use case, where you have billions of
 * vectors in memory where none of them are "hot" and most of them are
 * small.  This allows you to get fine-grained locks without spending
 * a lot of memory on mutexes (the alternative of a large hashtable of
 * locks leads to extra cache misses in the lookup path).
 *
 * __x86_64__ only.
 */
struct OneBitMutex;

//////////////////////////////////////////////////////////////////////

} // namespace small_vector_policy
104 //////////////////////////////////////////////////////////////////////
// Forward declaration so the policy machinery below can name the
// final class; the definition (with defaulted parameters) follows.
template<class T, std::size_t M, class A, class B, class C>
class small_vector;

//////////////////////////////////////////////////////////////////////
114 * Move a range to a range of uninitialized memory. Assumes the
115 * ranges don't overlap.
118 typename std::enable_if<
119 !FOLLY_IS_TRIVIALLY_COPYABLE(T)
121 moveToUninitialized(T* first, T* last, T* out) {
122 auto const count = last - first;
125 for (; idx < count; ++first, ++idx) {
126 new (&out[idx]) T(std::move(*first));
129 // Even for callers trying to give the strong guarantee
130 // (e.g. push_back) it's ok to assume here that we don't have to
131 // move things back and that it was a copy constructor that
132 // threw: if someone throws from a move constructor the effects
134 for (std::size_t i = 0; i < idx; ++i) {
141 // Specialization for trivially copyable types.
143 typename std::enable_if<
144 FOLLY_IS_TRIVIALLY_COPYABLE(T)
146 moveToUninitialized(T* first, T* last, T* out) {
147 std::memmove(out, first, (last - first) * sizeof *first);
151 * Move objects in memory to the right into some uninitialized
152 * memory, where the region overlaps. This doesn't just use
153 * std::move_backward because move_backward only works if all the
154 * memory is initialized to type T already.
157 typename std::enable_if<
158 !FOLLY_IS_TRIVIALLY_COPYABLE(T)
160 moveObjectsRight(T* first, T* lastConstructed, T* realLast) {
161 if (lastConstructed == realLast) {
165 T* end = first - 1; // Past the end going backwards.
166 T* out = realLast - 1;
167 T* in = lastConstructed - 1;
169 for (; in != end && out >= lastConstructed; --in, --out) {
170 new (out) T(std::move(*in));
172 for (; in != end; --in, --out) {
173 *out = std::move(*in);
175 for (; out >= lastConstructed; --out) {
179 // We want to make sure the same stuff is uninitialized memory
180 // if we exit via an exception (this is to make sure we provide
181 // the basic exception safety guarantee for insert functions).
182 if (out < lastConstructed) {
183 out = lastConstructed - 1;
185 for (auto it = out + 1; it != realLast; ++it) {
192 // Specialization for trivially copyable types. The call to
193 // std::move_backward here will just turn into a memmove. (TODO:
194 // change to std::is_trivially_copyable when that works.)
196 typename std::enable_if<
197 FOLLY_IS_TRIVIALLY_COPYABLE(T)
199 moveObjectsRight(T* first, T* lastConstructed, T* realLast) {
200 std::move_backward(first, lastConstructed, realLast);
/*
 * Populate a region of memory using `op' to construct elements.  If
 * anything throws, undo what we did.
 */
template<class T, class Function>
void populateMemForward(T* mem, std::size_t n, Function const& op) {
  std::size_t idx = 0;
  try {
    for (std::size_t i = 0; i < n; ++i) {
      op(&mem[idx]);
      ++idx;
    }
  } catch (...) {
    // Destroy everything constructed so far before rethrowing, so the
    // region is uninitialized again (basic guarantee).
    for (std::size_t i = 0; i < idx; ++i) {
      mem[i].~T();
    }
    throw;
  }
}
/*
 * Size-tracking policy: stores the element count in an integer of
 * type SizeType, reserving the top bit (when heap use is allowed) as
 * the "data is on the heap" flag.
 */
template<class SizeType, bool ShouldUseHeap>
struct IntegralSizePolicy {
  typedef SizeType InternalSizeType;

  IntegralSizePolicy() : size_(0) {}

protected:
  // Largest representable size; the extern flag bit is not available.
  std::size_t policyMaxSize() const {
    return SizeType(~kExternMask);
  }

  std::size_t doSize() const {
    return size_ & ~kExternMask;
  }

  std::size_t isExtern() const {
    return kExternMask & size_;
  }

  void setExtern(bool b) {
    if (b) {
      size_ |= kExternMask;
    } else {
      size_ &= ~kExternMask;
    }
  }

  void setSize(std::size_t sz) {
    assert(sz <= policyMaxSize());
    // Preserve the extern flag while replacing the size bits.
    size_ = (kExternMask & size_) | SizeType(sz);
  }

  void swapSizePolicy(IntegralSizePolicy& o) {
    std::swap(size_, o.size_);
  }

protected:
  static bool const kShouldUseHeap = ShouldUseHeap;

private:
  // High bit marks "data lives on the heap"; zero when the heap is
  // disallowed so the full range of SizeType is usable for sizes.
  static SizeType const kExternMask =
    kShouldUseHeap ? SizeType(1) << (sizeof(SizeType) * 8 - 1)
                   : 0;

  SizeType size_;
};
271 template<class SizeType, bool ShouldUseHeap>
272 struct OneBitMutexImpl {
273 typedef SizeType InternalSizeType;
275 OneBitMutexImpl() { psl_.init(); }
277 void lock() const { psl_.lock(); }
278 void unlock() const { psl_.unlock(); }
279 bool try_lock() const { return psl_.try_lock(); }
282 static bool const kShouldUseHeap = ShouldUseHeap;
284 std::size_t policyMaxSize() const {
285 return SizeType(~(SizeType(1) << kLockBit | kExternMask));
288 std::size_t doSize() const {
289 return psl_.getData() & ~kExternMask;
292 std::size_t isExtern() const {
293 return psl_.getData() & kExternMask;
296 void setExtern(bool b) {
298 setSize(SizeType(doSize()) | kExternMask);
300 setSize(SizeType(doSize()) & ~kExternMask);
304 void setSize(std::size_t sz) {
305 assert(sz < (std::size_t(1) << kLockBit));
306 psl_.setData((kExternMask & psl_.getData()) | SizeType(sz));
309 void swapSizePolicy(OneBitMutexImpl& o) {
310 std::swap(psl_, o.psl_);
314 static SizeType const kLockBit = sizeof(SizeType) * 8 - 1;
315 static SizeType const kExternMask =
316 kShouldUseHeap ? SizeType(1) << (sizeof(SizeType) * 8 - 2)
319 PicoSpinLock<SizeType,kLockBit> psl_;
322 template<class SizeType, bool ShouldUseHeap>
323 struct OneBitMutexImpl {
324 static_assert(std::is_same<SizeType,void>::value,
325 "OneBitMutex only works on x86-64");
330 * If you're just trying to use this class, ignore everything about
331 * this next small_vector_base class thing.
333 * The purpose of this junk is to minimize sizeof(small_vector<>)
334 * and allow specifying the template parameters in whatever order is
335 * convenient for the user. There's a few extra steps here to try
336 * to keep the error messages at least semi-reasonable.
338 * Apologies for all the black magic.
340 namespace mpl = boost::mpl;
341 template<class Value,
342 std::size_t RequestedMaxInline,
346 struct small_vector_base {
347 typedef mpl::vector<InPolicyA,InPolicyB,InPolicyC> PolicyList;
350 * Determine the size type
352 typedef typename mpl::filter_view<
354 boost::is_integral<mpl::placeholders::_1>
356 typedef typename mpl::eval_if<
357 mpl::empty<Integrals>,
358 mpl::identity<std::size_t>,
359 mpl::front<Integrals>
362 static_assert(std::is_unsigned<SizeType>::value,
363 "Size type should be an unsigned integral type");
364 static_assert(mpl::size<Integrals>::value == 0 ||
365 mpl::size<Integrals>::value == 1,
366 "Multiple size types specified in small_vector<>");
369 * Figure out if we're supposed to supply a one-bit mutex. :)
371 typedef typename mpl::count<
372 PolicyList,small_vector_policy::OneBitMutex
375 static_assert(HasMutex::value == 0 || HasMutex::value == 1,
376 "Multiple copies of small_vector_policy::OneBitMutex "
377 "supplied; this is probably a mistake");
380 * Determine whether we should allow spilling to the heap or not.
382 typedef typename mpl::count<
383 PolicyList,small_vector_policy::NoHeap
386 static_assert(HasNoHeap::value == 0 || HasNoHeap::value == 1,
387 "Multiple copies of small_vector_policy::NoHeap "
388 "supplied; this is probably a mistake");
391 * Make the real policy base classes.
393 typedef typename mpl::if_<
395 OneBitMutexImpl<SizeType,!HasNoHeap::value>,
396 IntegralSizePolicy<SizeType,!HasNoHeap::value>
397 >::type ActualSizePolicy;
400 * Now inherit from them all. This is done in such a convoluted
401 * way to make sure we get the empty base optimizaton on all these
402 * types to keep sizeof(small_vector<>) minimal.
404 typedef boost::totally_ordered1<
405 small_vector<Value,RequestedMaxInline,InPolicyA,InPolicyB,InPolicyC>,
/*
 * Helpers for stashing a one-bit flag in the least significant bit of
 * a (at least 2-aligned) pointer, and for offsetting a void*.
 */
template<class T>
T* pointerFlagSet(T* p) {
  return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(p) | 1);
}
template<class T>
bool pointerFlagGet(T* p) {
  return reinterpret_cast<uintptr_t>(p) & 1;
}
template<class T>
T* pointerFlagClear(T* p) {
  return reinterpret_cast<T*>(
    reinterpret_cast<uintptr_t>(p) & ~uintptr_t(1));
}
inline void* shiftPointer(void* p, size_t sizeBytes) {
  return static_cast<char*>(p) + sizeBytes;
}
428 //////////////////////////////////////////////////////////////////////
430 template<class Value,
431 std::size_t RequestedMaxInline = 1,
432 class PolicyA = void,
433 class PolicyB = void,
434 class PolicyC = void>
436 : public detail::small_vector_base<
437 Value,RequestedMaxInline,PolicyA,PolicyB,PolicyC
440 typedef typename detail::small_vector_base<
441 Value,RequestedMaxInline,PolicyA,PolicyB,PolicyC
443 typedef typename BaseType::InternalSizeType InternalSizeType;
446 * Figure out the max number of elements we should inline. (If
447 * the user asks for less inlined elements than we can fit unioned
448 * into our value_type*, we will inline more than they asked.)
451 MaxInline = boost::mpl::max<
452 boost::mpl::int_<sizeof(Value*) / sizeof(Value)>,
453 boost::mpl::int_<RequestedMaxInline>
458 typedef std::size_t size_type;
459 typedef Value value_type;
460 typedef value_type& reference;
461 typedef value_type const& const_reference;
462 typedef value_type* iterator;
463 typedef value_type const* const_iterator;
464 typedef std::ptrdiff_t difference_type;
466 typedef std::reverse_iterator<iterator> reverse_iterator;
467 typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
469 explicit small_vector() {}
471 small_vector(small_vector const& o) {
472 assign(o.begin(), o.end());
475 small_vector(small_vector&& o) {
476 *this = std::move(o);
479 small_vector(std::initializer_list<value_type> il) {
480 constructImpl(il.begin(), il.end(), std::false_type());
483 explicit small_vector(size_type n, value_type const& t = value_type()) {
488 explicit small_vector(Arg arg1, Arg arg2) {
489 // Forward using std::is_arithmetic to get to the proper
490 // implementation; this disambiguates between the iterators and
491 // (size_t, value_type) meaning for this constructor.
492 constructImpl(arg1, arg2, std::is_arithmetic<Arg>());
496 for (auto& t : *this) {
499 if (this->isExtern()) {
504 small_vector& operator=(small_vector const& o) {
505 assign(o.begin(), o.end());
509 small_vector& operator=(small_vector&& o) {
513 for (std::size_t i = 0; i < o.size(); ++i) {
514 new (data() + i) value_type(std::move(o[i]));
516 this->setSize(o.size());
523 bool operator==(small_vector const& o) const {
524 return size() == o.size() && std::equal(begin(), end(), o.begin());
527 bool operator<(small_vector const& o) const {
528 return std::lexicographical_compare(begin(), end(), o.begin(), o.end());
531 size_type max_size() const {
532 return !BaseType::kShouldUseHeap ? MaxInline
533 : this->policyMaxSize();
536 size_type size() const { return this->doSize(); }
537 bool empty() const { return !size(); }
539 iterator begin() { return data(); }
540 iterator end() { return data() + size(); }
541 const_iterator begin() const { return data(); }
542 const_iterator end() const { return data() + size(); }
543 const_iterator cbegin() const { return begin(); }
544 const_iterator cend() const { return end(); }
546 reverse_iterator rbegin() { return reverse_iterator(end()); }
547 reverse_iterator rend() { return reverse_iterator(begin()); }
549 const_reverse_iterator rbegin() const {
550 return const_reverse_iterator(end());
553 const_reverse_iterator rend() const {
554 return const_reverse_iterator(begin());
557 const_reverse_iterator crbegin() const { return rbegin(); }
558 const_reverse_iterator crend() const { return rend(); }
561 * Usually one of the simplest functions in a Container-like class
562 * but a bit more complex here. We have to handle all combinations
563 * of in-place vs. heap between this and o.
565 * Basic guarantee only. Provides the nothrow guarantee iff our
566 * value_type has a nothrow move or copy constructor.
568 void swap(small_vector& o) {
569 using std::swap; // Allow ADL on swap for our value_type.
571 if (this->isExtern() && o.isExtern()) {
572 this->swapSizePolicy(o);
574 auto thisCapacity = this->capacity();
575 auto oCapacity = o.capacity();
577 std::swap(unpackHack(&u.pdata_.heap_), unpackHack(&o.u.pdata_.heap_));
579 this->setCapacity(oCapacity);
580 o.setCapacity(thisCapacity);
585 if (!this->isExtern() && !o.isExtern()) {
586 auto& oldSmall = size() < o.size() ? *this : o;
587 auto& oldLarge = size() < o.size() ? o : *this;
589 for (size_type i = 0; i < oldSmall.size(); ++i) {
590 swap(oldSmall[i], oldLarge[i]);
593 size_type i = oldSmall.size();
595 for (; i < oldLarge.size(); ++i) {
596 new (&oldSmall[i]) value_type(std::move(oldLarge[i]));
597 oldLarge[i].~value_type();
600 for (; i < oldLarge.size(); ++i) {
601 oldLarge[i].~value_type();
603 oldLarge.setSize(oldSmall.size());
606 this->swapSizePolicy(o);
610 // isExtern != o.isExtern()
611 auto& oldExtern = o.isExtern() ? o : *this;
612 auto& oldIntern = o.isExtern() ? *this : o;
614 auto oldExternCapacity = oldExtern.capacity();
615 auto oldExternHeap = oldExtern.u.pdata_.heap_;
617 auto buff = oldExtern.u.buffer();
620 for (; i < oldIntern.size(); ++i) {
621 new (&buff[i]) value_type(std::move(oldIntern[i]));
622 oldIntern[i].~value_type();
625 for (size_type kill = 0; kill < i; ++kill) {
626 buff[kill].~value_type();
628 for (; i < oldIntern.size(); ++i) {
629 oldIntern[i].~value_type();
631 oldIntern.setSize(0);
632 oldExtern.u.pdata_.heap_ = oldExternHeap;
633 oldExtern.setCapacity(oldExternCapacity);
636 oldIntern.u.pdata_.heap_ = oldExternHeap;
637 this->swapSizePolicy(o);
638 oldIntern.setCapacity(oldExternCapacity);
641 void resize(size_type sz) {
643 erase(begin() + sz, end());
647 detail::populateMemForward(begin() + size(), sz - size(),
648 [&] (void* p) { new (p) value_type(); }
653 void resize(size_type sz, value_type const& v) {
655 erase(begin() + sz, end());
659 detail::populateMemForward(begin() + size(), sz - size(),
660 [&] (void* p) { new (p) value_type(v); }
665 value_type* data() noexcept {
666 return this->isExtern() ? u.heap() : u.buffer();
669 value_type const* data() const noexcept {
670 return this->isExtern() ? u.heap() : u.buffer();
673 template<class ...Args>
674 iterator emplace(const_iterator p, Args&&... args) {
676 emplace_back(std::forward<Args>(args)...);
681 * We implement emplace at places other than at the back with a
682 * temporary for exception safety reasons. It is possible to
683 * avoid having to do this, but it becomes hard to maintain the
684 * basic exception safety guarantee (unless you respond to a copy
685 * constructor throwing by clearing the whole vector).
687 * The reason for this is that otherwise you have to destruct an
688 * element before constructing this one in its place---if the
689 * constructor throws, you either need a nothrow default
690 * constructor or a nothrow copy/move to get something back in the
691 * "gap", and the vector requirements don't guarantee we have any
692 * of these. Clearing the whole vector is a legal response in
693 * this situation, but it seems like this implementation is easy
694 * enough and probably better.
696 return insert(p, value_type(std::forward<Args>(args)...));
699 void reserve(size_type sz) {
703 size_type capacity() const {
704 if (this->isExtern()) {
705 if (u.hasCapacity()) {
706 return *u.getCapacity();
708 return malloc_usable_size(u.pdata_.heap_) / sizeof(value_type);
713 void shrink_to_fit() {
714 if (!this->isExtern()) {
718 small_vector tmp(begin(), end());
722 template<class ...Args>
723 void emplace_back(Args&&... args) {
724 // call helper function for static dispatch of special cases
725 emplaceBack(std::forward<Args>(args)...);
728 void push_back(value_type&& t) {
729 if (capacity() == size()) {
730 makeSize(std::max(size_type(2), 3 * size() / 2), &t, size());
732 new (end()) value_type(std::move(t));
734 this->setSize(size() + 1);
737 void push_back(value_type const& t) {
738 // Make a copy and forward to the rvalue value_type&& overload
740 push_back(value_type(t));
747 iterator insert(const_iterator constp, value_type&& t) {
748 iterator p = unconst(constp);
751 push_back(std::move(t));
755 auto offset = p - begin();
757 if (capacity() == size()) {
758 makeSize(size() + 1, &t, offset);
759 this->setSize(this->size() + 1);
761 makeSize(size() + 1);
762 detail::moveObjectsRight(data() + offset,
764 data() + size() + 1);
765 this->setSize(size() + 1);
766 data()[offset] = std::move(t);
768 return begin() + offset;
772 iterator insert(const_iterator p, value_type const& t) {
773 // Make a copy and forward to the rvalue value_type&& overload
775 return insert(p, value_type(t));
778 iterator insert(const_iterator pos, size_type n, value_type const& val) {
779 auto offset = pos - begin();
780 makeSize(size() + n);
781 detail::moveObjectsRight(data() + offset,
783 data() + size() + n);
784 this->setSize(size() + n);
785 std::generate_n(begin() + offset, n, [&] { return val; });
786 return begin() + offset;
790 iterator insert(const_iterator p, Arg arg1, Arg arg2) {
791 // Forward using std::is_arithmetic to get to the proper
792 // implementation; this disambiguates between the iterators and
793 // (size_t, value_type) meaning for this function.
794 return insertImpl(unconst(p), arg1, arg2, std::is_arithmetic<Arg>());
797 iterator insert(const_iterator p, std::initializer_list<value_type> il) {
798 return insert(p, il.begin(), il.end());
801 iterator erase(const_iterator q) {
802 std::move(unconst(q) + 1, end(), unconst(q));
803 (data() + size() - 1)->~value_type();
804 this->setSize(size() - 1);
808 iterator erase(const_iterator q1, const_iterator q2) {
809 std::move(unconst(q2), end(), unconst(q1));
810 for (auto it = q1; it != end(); ++it) {
813 this->setSize(size() - (q2 - q1));
818 erase(begin(), end());
822 void assign(Arg first, Arg last) {
824 insert(end(), first, last);
827 void assign(std::initializer_list<value_type> il) {
828 assign(il.begin(), il.end());
831 void assign(size_type n, const value_type& t) {
836 reference front() { assert(!empty()); return *begin(); }
837 reference back() { assert(!empty()); return *(end() - 1); }
838 const_reference front() const { assert(!empty()); return *begin(); }
839 const_reference back() const { assert(!empty()); return *(end() - 1); }
841 reference operator[](size_type i) {
843 return *(begin() + i);
846 const_reference operator[](size_type i) const {
848 return *(begin() + i);
851 reference at(size_type i) {
853 throw std::out_of_range("index out of range");
858 const_reference at(size_type i) const {
860 throw std::out_of_range("index out of range");
868 * This is doing the same like emplace_back, but we need this helper
869 * to catch the special case - see the next overload function..
871 template<class ...Args>
872 void emplaceBack(Args&&... args) {
873 makeSize(size() + 1);
874 new (end()) value_type(std::forward<Args>(args)...);
875 this->setSize(size() + 1);
879 * Special case of emplaceBack for rvalue
881 void emplaceBack(value_type&& t) {
882 push_back(std::move(t));
885 static iterator unconst(const_iterator it) {
886 return const_cast<iterator>(it);
890 * g++ doesn't allow you to bind a non-const reference to a member
891 * of a packed structure, presumably because it would make it too
892 * easy to accidentally make an unaligned memory access?
894 template<class T> static T& unpackHack(T* p) {
898 // The std::false_type argument is part of disambiguating the
899 // iterator insert functions from integral types (see insert().)
901 iterator insertImpl(iterator pos, It first, It last, std::false_type) {
902 typedef typename std::iterator_traits<It>::iterator_category categ;
903 if (std::is_same<categ,std::input_iterator_tag>::value) {
904 auto offset = pos - begin();
905 while (first != last) {
906 pos = insert(pos, *first++);
909 return begin() + offset;
912 auto distance = std::distance(first, last);
913 auto offset = pos - begin();
914 makeSize(size() + distance);
915 detail::moveObjectsRight(data() + offset,
917 data() + size() + distance);
918 this->setSize(size() + distance);
919 std::copy_n(first, distance, begin() + offset);
920 return begin() + offset;
923 iterator insertImpl(iterator pos, size_type n, const value_type& val,
925 // The true_type means this should call the size_t,value_type
926 // overload. (See insert().)
927 return insert(pos, n, val);
930 // The std::false_type argument came from std::is_arithmetic as part
931 // of disambiguating an overload (see the comment in the
934 void constructImpl(It first, It last, std::false_type) {
935 typedef typename std::iterator_traits<It>::iterator_category categ;
936 if (std::is_same<categ,std::input_iterator_tag>::value) {
937 // With iterators that only allow a single pass, we can't really
938 // do anything sane here.
939 while (first != last) {
945 auto distance = std::distance(first, last);
947 this->setSize(distance);
949 detail::populateMemForward(data(), distance,
950 [&] (void* p) { new (p) value_type(*first++); }
954 void doConstruct(size_type n, value_type const& val) {
957 detail::populateMemForward(data(), n,
958 [&] (void* p) { new (p) value_type(val); }
962 // The true_type means we should forward to the size_t,value_type
964 void constructImpl(size_type n, value_type const& val, std::true_type) {
968 void makeSize(size_type size, value_type* v = NULL) {
969 makeSize(size, v, size - 1);
973 * Ensure we have a large enough memory region to be size `size'.
974 * Will move/copy elements if we are spilling to heap_ or needed to
975 * allocate a new region, but if resized in place doesn't initialize
976 * anything in the new region. In any case doesn't change size().
977 * Supports insertion of new element during reallocation by given
978 * pointer to new element and position of new element.
979 * NOTE: If reallocation is not needed, and new element should be
980 * inserted in the middle of vector (not at the end), do the move
981 * objects and insertion outside the function, otherwise exception is thrown.
983 void makeSize(size_type size, value_type* v, size_type pos) {
984 if (size > this->max_size()) {
985 throw std::length_error("max_size exceeded in small_vector");
987 if (size <= this->capacity()) {
991 auto needBytes = size * sizeof(value_type);
992 // If the capacity isn't explicitly stored inline, but the heap
993 // allocation is grown to over some threshold, we should store
994 // a capacity at the front of the heap allocation.
995 bool heapifyCapacity =
996 !kHasInlineCapacity && needBytes > kHeapifyCapacityThreshold;
997 if (heapifyCapacity) {
998 needBytes += kHeapifyCapacitySize;
1000 auto const sizeBytes = goodMallocSize(needBytes);
1001 void* newh = checkedMalloc(sizeBytes);
1002 // We expect newh to be at least 2-aligned, because we want to
1003 // use its least significant bit as a flag.
1004 assert(!detail::pointerFlagGet(newh));
1006 value_type* newp = static_cast<value_type*>(
1008 detail::shiftPointer(newh, kHeapifyCapacitySize) :
1014 new (&newp[pos]) value_type(std::move(*v));
1020 // move old elements to the left of the new one
1022 detail::moveToUninitialized(begin(), begin() + pos, newp);
1024 newp[pos].~value_type();
1029 // move old elements to the right of the new one
1032 detail::moveToUninitialized(begin() + pos, end(), newp + pos + 1);
1035 for (size_type i = 0; i <= pos; ++i) {
1036 newp[i].~value_type();
1042 // move without inserting new element
1044 detail::moveToUninitialized(begin(), end(), newp);
1050 for (auto& val : *this) {
1054 if (this->isExtern()) {
1057 auto availableSizeBytes = sizeBytes;
1058 if (heapifyCapacity) {
1059 u.pdata_.heap_ = detail::pointerFlagSet(newh);
1060 availableSizeBytes -= kHeapifyCapacitySize;
1062 u.pdata_.heap_ = newh;
1064 this->setExtern(true);
1065 this->setCapacity(availableSizeBytes / sizeof(value_type));
1069 * This will set the capacity field, stored inline in the storage_ field
1070 * if there is sufficient room to store it.
1072 void setCapacity(size_type newCapacity) {
1073 assert(this->isExtern());
1074 if (u.hasCapacity()) {
1075 assert(newCapacity < std::numeric_limits<InternalSizeType>::max());
1076 *u.getCapacity() = InternalSizeType(newCapacity);
1081 struct HeapPtrWithCapacity {
1083 InternalSizeType capacity_;
1085 InternalSizeType* getCapacity() {
1091 // Lower order bit of heap_ is used as flag to indicate whether capacity is
1092 // stored at the front of the heap allocation.
1095 InternalSizeType* getCapacity() {
1096 assert(detail::pointerFlagGet(heap_));
1097 return static_cast<InternalSizeType*>(
1098 detail::pointerFlagClear(heap_));
1102 #if defined(__x86_64_)
1103 typedef unsigned char InlineStorageType[sizeof(value_type) * MaxInline];
1105 typedef typename std::aligned_storage<
1106 sizeof(value_type) * MaxInline,
1108 >::type InlineStorageType;
1111 static bool const kHasInlineCapacity =
1112 sizeof(HeapPtrWithCapacity) < sizeof(InlineStorageType);
1114 // This value should we multiple of word size.
1115 static size_t const kHeapifyCapacitySize = sizeof(
1116 typename std::aligned_storage<
1117 sizeof(InternalSizeType),
1120 // Threshold to control capacity heapifying.
1121 static size_t const kHeapifyCapacityThreshold =
1122 100 * kHeapifyCapacitySize;
1124 typedef typename std::conditional<
1126 HeapPtrWithCapacity,
1128 >::type PointerType;
1131 explicit Data() { pdata_.heap_ = 0; }
1134 InlineStorageType storage_;
1136 value_type* buffer() noexcept {
1137 void* vp = &storage_;
1138 return static_cast<value_type*>(vp);
1140 value_type const* buffer() const noexcept {
1141 return const_cast<Data*>(this)->buffer();
1143 value_type* heap() noexcept {
1144 if (kHasInlineCapacity || !detail::pointerFlagGet(pdata_.heap_)) {
1145 return static_cast<value_type*>(pdata_.heap_);
1147 return static_cast<value_type*>(
1148 detail::shiftPointer(
1149 detail::pointerFlagClear(pdata_.heap_), kHeapifyCapacitySize));
1151 value_type const* heap() const noexcept {
1152 return const_cast<Data*>(this)->heap();
1155 bool hasCapacity() const {
1156 return kHasInlineCapacity || detail::pointerFlagGet(pdata_.heap_);
1158 InternalSizeType* getCapacity() {
1159 return pdata_.getCapacity();
1161 InternalSizeType* getCapacity() const {
1162 return const_cast<Data*>(this)->getCapacity();
1166 auto vp = detail::pointerFlagClear(pdata_.heap_);
1172 //////////////////////////////////////////////////////////////////////
1174 // Basic guarantee only, or provides the nothrow guarantee iff T has a
1175 // nothrow move or copy constructor.
1176 template<class T, std::size_t MaxInline, class A, class B, class C>
1177 void swap(small_vector<T,MaxInline,A,B,C>& a,
1178 small_vector<T,MaxInline,A,B,C>& b) {
1182 //////////////////////////////////////////////////////////////////////
1186 #pragma GCC diagnostic pop