* limitations under the License.
*/
-// Andrei Alexandrescu (aalexandre)
-
-/**
- * Vector type. Drop-in replacement for std::vector featuring
- * significantly faster primitives, see e.g. benchmark results at
- * https:*phabricator.fb.com/D235852.
- *
- * In order for a type to be used with fbvector, it must be
- * relocatable, see Traits.h.
- *
- * For user-defined types you must specialize templates
- * appropriately. Consult Traits.h for ways to do so and for a handy
- * family of macros FOLLY_ASSUME_FBVECTOR_COMPATIBLE*.
+/*
+ * Nicholas Ormrod (njormrod)
+ * Andrei Alexandrescu (aalexandre)
*
- * For more information and documentation see folly/docs/FBVector.md
+ * FBVector is Facebook's drop-in implementation of std::vector. It has special
+ * optimizations for use with relocatable types and jemalloc.
*/
-#ifndef FOLLY_FBVECTOR_H_
-#define FOLLY_FBVECTOR_H_
+#ifndef FOLLY_FBVECTOR_H
+#define FOLLY_FBVECTOR_H
+
+//=============================================================================
+// headers
-#include "folly/Foreach.h"
-#include "folly/Malloc.h"
-#include "folly/Traits.h"
-#include <iterator>
#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <memory>
#include <stdexcept>
+#include <type_traits>
+#include <utility>
+
+#include "folly/Likely.h"
+#include "folly/Malloc.h"
+#include "folly/Traits.h"
+
+#include <boost/operators.hpp>
+
+// some files expected these from FBVector
#include <limits>
-#include <cassert>
+#include "folly/Foreach.h"
#include <boost/type_traits.hpp>
-#include <boost/operators.hpp>
#include <boost/utility/enable_if.hpp>
-#include <type_traits>
+//=============================================================================
+// forward declaration
+
+// Forward declaration of fbvector.
+// When FOLLY_BENCHMARK_USE_NS_IFOLLY is defined the container is emitted
+// into namespace Ifolly instead of folly -- presumably so a benchmark
+// binary can link two builds side by side without ODR clashes (TODO
+// confirm against the benchmark harness).
+#ifdef FOLLY_BENCHMARK_USE_NS_IFOLLY
+namespace Ifolly {
+#else
namespace folly {
-/**
- * Forward declaration for use by FOLLY_ASSUME_FBVECTOR_COMPATIBLE_2,
- * see folly/Traits.h.
- */
-template <typename T, class Allocator = std::allocator<T> >
-class fbvector;
+#endif
+  template <class T, class Allocator = std::allocator<T>>
+  class fbvector;
}
-// You can define an fbvector of fbvectors.
-FOLLY_ASSUME_FBVECTOR_COMPATIBLE_2(folly::fbvector);
+//=============================================================================
+// compatibility
+
+#if __GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ < 7
+// PLEASE UPGRADE TO GCC 4.7 or above
+#define FOLLY_FBV_COMPATIBILITY_MODE
+#endif
+
+#ifndef FOLLY_FBV_COMPATIBILITY_MODE
namespace folly {
-namespace fbvector_detail {
-/**
- * isForwardIterator<T>::value yields true if T is a forward iterator
- * or better, and false otherwise.
- */
-template <class It> struct isForwardIterator {
- enum { value = boost::is_convertible<
- typename std::iterator_traits<It>::iterator_category,
- std::forward_iterator_tag>::value
- };
+// Normal (gcc >= 4.7) build: the fbv_* traits are thin aliases for the
+// standard allocator/type traits.  The indirection exists so the
+// compatibility branch below (FOLLY_FBV_COMPATIBILITY_MODE) can substitute
+// conservative fallbacks on toolchains lacking std::allocator_traits or
+// the C++11 <type_traits> queries.
+template <typename A>
+struct fbv_allocator_traits
+  : std::allocator_traits<A> {};
+
+template <typename T>
+struct fbv_is_nothrow_move_constructible
+  : std::is_nothrow_move_constructible<T> {};
+
+template <typename T, typename... Args>
+struct fbv_is_nothrow_constructible
+  : std::is_nothrow_constructible<T, Args...> {};
+
+template <typename T>
+struct fbv_is_copy_constructible
+  : std::is_copy_constructible<T> {};
+
+}
+
+#else
+
+namespace folly {
+
+template <typename A>
+struct fbv_allocator_traits {
+ static_assert(sizeof(A) == 0,
+ "If you want to use a custom allocator, then you must upgrade to gcc 4.7");
+ // for some old code that deals with this case, see D566719, diff number 10.
};
-/**
- * Destroys all elements in the range [b, e). If the type referred to
- * by the iterators has a trivial destructor, does nothing.
- */
-template <class It>
-void destroyRange(It b, It e) {
- typedef typename boost::remove_reference<decltype(*b)>::type T;
- if (boost::has_trivial_destructor<T>::value) return;
- for (; b != e; ++b) {
- (*b).~T();
+template <typename T>
+struct fbv_allocator_traits<std::allocator<T>> {
+ typedef std::allocator<T> A;
+
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef size_t size_type;
+
+ typedef std::false_type propagate_on_container_copy_assignment;
+ typedef std::false_type propagate_on_container_move_assignment;
+ typedef std::false_type propagate_on_container_swap;
+
+ static pointer allocate(A& a, size_type n) {
+ return static_cast<pointer>(::operator new(n * sizeof(T)));
+ }
+ static void deallocate(A& a, pointer p, size_type n) {
+ ::operator delete(p);
}
-}
-/**
- * Moves the "interesting" part of value to the uninitialized memory
- * at address addr, and leaves value in a destroyable state.
- */
+ template <typename R, typename... Args>
+ static void construct(A& a, R* p, Args&&... args) {
+ new (p) R(std::forward<Args>(args)...);
+ }
+ template <typename R>
+ static void destroy(A& a, R* p) {
+ p->~R();
+ }
-template <class T>
-typename boost::enable_if_c<
- boost::has_trivial_assign<T>::value
->::type
-uninitialized_destructive_move(T& value, T* addr) {
- // Just assign the thing; this is most efficient
- *addr = value;
-}
+ static A select_on_container_copy_construction(const A& a) {
+ return a;
+ }
+};
-template <class T>
-typename boost::enable_if_c<
- !boost::has_trivial_assign<T>::value &&
- boost::has_nothrow_constructor<T>::value
->::type
-uninitialized_destructive_move(T& value, T* addr) {
- // Cheap default constructor - move and reinitialize
- memcpy(addr, &value, sizeof(T));
- new(&value) T;
-}
+// Compatibility fallbacks for the fbv_* traits (gcc < 4.7).  Deliberately
+// conservative:
+//  - "nothrow move/construct" answers false, steering callers onto the
+//    copy-based (strong exception guarantee) code paths; and
+//  - "copy constructible" answers true, matching pre-C++11 expectations.
+//    A genuinely non-copyable type would then fail to compile in the
+//    copying paths rather than silently selecting the move paths.
+template <typename T>
+struct fbv_is_nothrow_move_constructible
+  : std::false_type {};
+
+template <typename T, typename... Args>
+struct fbv_is_nothrow_constructible
+  : std::false_type {};
+
+template <typename T>
+struct fbv_is_copy_constructible
+  : std::true_type {};
-template <class T>
-typename std::enable_if<
- !boost::has_trivial_assign<T>::value &&
- !boost::has_nothrow_constructor<T>::value
->::type
-uninitialized_destructive_move(T& value, T* addr) {
- // User defined move construction.
-
- // TODO: we should probably prefer this over the above memcpy()
- // version when the type has a user-defined move constructor. We
- // don't right now because 4.6 doesn't implement
- // std::is_move_constructible<> yet.
- new (addr) T(std::move(value));
}
-/**
- * Fills n objects of type T starting at address b with T's default
- * value. If the operation throws, destroys all objects constructed so
- * far and calls free(b).
- */
-template <class T>
-void uninitializedFillDefaultOrFree(T * b, size_t n) {
- if (boost::is_arithmetic<T>::value || boost::is_pointer<T>::value) {
- if (n <= 16384 / sizeof(T)) {
- memset(b, 0, n * sizeof(T));
+#endif
+
+//=============================================================================
+// unrolling
+
+// Manually 4x-unrolled pointer loop: applies OP to every pointer in
+// [first, last).  Both 'first' and 'last' must be mutable lvalues; 'first'
+// is advanced by the macro.
+// NOTE(review): the expansion ends in "} while(0);" -- the trailing
+// semicolon is part of the macro, so call sites invoke it WITHOUT a
+// trailing semicolon (see its use in S_destroy_range).
+#define FOLLY_FBV_UNROLL_PTR(first, last, OP) do {  \
+  for (; (last) - (first) >= 4; (first) += 4) {     \
+    OP(((first) + 0));                              \
+    OP(((first) + 1));                              \
+    OP(((first) + 2));                              \
+    OP(((first) + 3));                              \
+  }                                                 \
+  for (; (first) != (last); ++(first)) OP((first)); \
+} while(0);
+
+//=============================================================================
+///////////////////////////////////////////////////////////////////////////////
+// //
+// fbvector class //
+// //
+///////////////////////////////////////////////////////////////////////////////
+
+#ifdef FOLLY_BENCHMARK_USE_NS_IFOLLY
+namespace Ifolly {
+#else
+namespace folly {
+#endif
+
+template <class T, class Allocator>
+class fbvector : private boost::totally_ordered<fbvector<T, Allocator>> {
+
+ //===========================================================================
+ //---------------------------------------------------------------------------
+ // implementation
+private:
+
+ typedef folly::fbv_allocator_traits<Allocator> A;
+
+ struct Impl : public Allocator {
+ // typedefs
+ typedef typename A::pointer pointer;
+ typedef typename A::size_type size_type;
+
+ // data
+ pointer b_, e_, z_;
+
+ // constructors
+ Impl() : Allocator(), b_(nullptr), e_(nullptr), z_(nullptr) {}
+ Impl(const Allocator& a)
+ : Allocator(a), b_(nullptr), e_(nullptr), z_(nullptr) {}
+ Impl(Allocator&& a)
+ : Allocator(std::move(a)), b_(nullptr), e_(nullptr), z_(nullptr) {}
+
+ Impl(size_type n, const Allocator& a = Allocator())
+ : Allocator(a)
+ { init(n); }
+
+ Impl(Impl&& other)
+ : Allocator(std::move(other)),
+ b_(other.b_), e_(other.e_), z_(other.z_)
+ { other.b_ = other.e_ = other.z_ = nullptr; }
+
+ // destructor
+ ~Impl() {
+ destroy();
+ }
+
+ // allocation
+ // note that 'allocate' and 'deallocate' are inherited from Allocator
+    // Allocate raw (unconstructed) storage for n objects of T.
+    // With std::allocator this goes straight to malloc, which keeps the
+    // buffer compatible with the jemalloc-aware sizing helpers; otherwise
+    // it defers to the user allocator via fbv_allocator_traits.
+    // NOTE(review): the malloc result is not checked for nullptr here --
+    // confirm callers handle or cannot hit allocation failure.
+    T* D_allocate(size_type n) {
+      if (usingStdAllocator::value) {
+        return static_cast<T*>(malloc(n * sizeof(T)));
+      } else {
+        return folly::fbv_allocator_traits<Allocator>::allocate(*this, n);
+      }
+    }
+
+    // Release raw storage previously obtained from D_allocate.  Performs
+    // no destruction -- callers must destroy the elements first.  Mirrors
+    // D_allocate's dispatch: free() for std::allocator, otherwise the
+    // user allocator's deallocate.
+    void D_deallocate(T* p, size_type n) noexcept {
+      if (usingStdAllocator::value) {
+        free(p);
+      } else {
+        folly::fbv_allocator_traits<Allocator>::deallocate(*this, p, n);
+      }
+    }
+
+ // helpers
+ void swapData(Impl& other) {
+ std::swap(b_, other.b_);
+ std::swap(e_, other.e_);
+ std::swap(z_, other.z_);
+ }
+
+ // data ops
+ inline void destroy() noexcept {
+ if (b_) {
+ // THIS DISPATCH CODE IS DUPLICATED IN fbvector::D_destroy_range_a.
+ // It has been inlined here for speed. It calls the static fbvector
+ // methods to perform the actual destruction.
+ if (usingStdAllocator::value) {
+ S_destroy_range(b_, e_);
+ } else {
+ S_destroy_range_a(*this, b_, e_);
+ }
+
+ D_deallocate(b_, z_ - b_);
+ }
+ }
+
+    // Allocate an empty buffer with capacity for at least n elements
+    // (size stays zero: e_ == b_).  goodMallocSize rounds the byte
+    // request up to the allocator's actual bucket size, so the slack
+    // becomes usable capacity instead of waste.  n == 0 yields the
+    // null (no-buffer) state.
+    void init(size_type n) {
+      if (UNLIKELY(n == 0)) {
+        b_ = e_ = z_ = nullptr;
+      } else {
+        size_type sz = folly::goodMallocSize(n * sizeof(T)) / sizeof(T);
+        b_ = D_allocate(sz);
+        e_ = b_;
+        z_ = b_ + sz;
+      }
+    }
+
+ void
+ set(pointer newB, size_type newSize, size_type newCap) {
+ z_ = newB + newCap;
+ e_ = newB + newSize;
+ b_ = newB;
+ }
+
+    // Destroy all elements and the buffer, then allocate a fresh empty
+    // buffer of capacity newCap.  If that allocation throws, fall back
+    // to the null state before rethrowing so the Impl remains valid
+    // (basic exception guarantee).
+    void reset(size_type newCap) {
+      destroy();
+      try {
+        init(newCap);
+      } catch (...) {
+        init(0);
+        throw;
+      }
+    }
+    // Non-throwing variant: no allocation is attempted.
+    void reset() { // same as reset(0)
+      destroy();
+      b_ = e_ = z_ = nullptr;
+    }
+ } impl_;
+
+  // Swap two Impls: first exchange the allocator state (skipped for the
+  // stateless std::allocator), then the three data pointers.
+  // swap<Allocator>(a, b) names the Allocator specialization explicitly
+  // so that only the Allocator base subobjects of the two Impls are
+  // exchanged (Impl derives from Allocator), not the whole Impl.
+  static void swap(Impl& a, Impl& b) {
+    using std::swap;
+    if (!usingStdAllocator::value) swap<Allocator>(a, b);
+    a.swapData(b);
+  }
+
+ //===========================================================================
+ //---------------------------------------------------------------------------
+ // types and constants
+public:
+
+ typedef T value_type;
+ typedef value_type& reference;
+ typedef const value_type& const_reference;
+ typedef T* iterator;
+ typedef const T* const_iterator;
+ typedef size_t size_type;
+ typedef typename std::make_signed<size_type>::type difference_type;
+ typedef Allocator allocator_type;
+ typedef typename A::pointer pointer;
+ typedef typename A::const_pointer const_pointer;
+ typedef std::reverse_iterator<iterator> reverse_iterator;
+ typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+
+private:
+
+  // VT ("value type") and MT ("move type") are the argument types used by
+  // the fill/insert helpers: small trivially-copyable Ts are passed by
+  // value, everything else by const T& / T&& respectively.
+  // NOTE(review): std::has_trivial_copy_constructor is the pre-standard
+  // (gcc) spelling of is_trivially_copy_constructible -- confirm all
+  // supported toolchains provide it.
+  typedef std::integral_constant<bool,
+    std::has_trivial_copy_constructor<T>::value &&
+    sizeof(T) <= 16 // don't force large structures to be passed by value
+  > should_pass_by_value;
+  typedef typename std::conditional<
+    should_pass_by_value::value, T, const T&>::type VT;
+  typedef typename std::conditional<
+    should_pass_by_value::value, T, T&&>::type MT;
+
+  // usingStdAllocator gates every malloc/memcpy fast path in the class.
+  // moveIsSwap: move-assignment may steal the buffer when allocators are
+  // interchangeable (std::allocator) or propagate on move assignment.
+  typedef std::integral_constant<bool,
+    std::is_same<Allocator, std::allocator<T>>::value> usingStdAllocator;
+  typedef std::integral_constant<bool,
+    usingStdAllocator::value ||
+    A::propagate_on_container_move_assignment::value> moveIsSwap;
+
+ //===========================================================================
+ //---------------------------------------------------------------------------
+ // allocator helpers
+private:
+
+ //---------------------------------------------------------------------------
+ // allocate
+
+ T* M_allocate(size_type n) {
+ return impl_.D_allocate(n);
+ }
+
+ //---------------------------------------------------------------------------
+ // deallocate
+
+ void M_deallocate(T* p, size_type n) noexcept {
+ impl_.D_deallocate(p, n);
+ }
+
+ //---------------------------------------------------------------------------
+ // construct
+
+ // GCC is very sensitive to the exact way that construct is called. For
+ // that reason there are several different specializations of construct.
+
+ template <typename U, typename... Args>
+ void M_construct(U* p, Args&&... args) {
+ if (usingStdAllocator::value) {
+ new (p) U(std::forward<Args>(args)...);
+ } else {
+ folly::fbv_allocator_traits<Allocator>::construct(
+ impl_, p, std::forward<Args>(args)...);
+ }
+ }
+
+ template <typename U, typename... Args>
+ static void S_construct(U* p, Args&&... args) {
+ new (p) U(std::forward<Args>(args)...);
+ }
+
+ template <typename U, typename... Args>
+ static void S_construct_a(Allocator& a, U* p, Args&&... args) {
+ folly::fbv_allocator_traits<Allocator>::construct(
+ a, p, std::forward<Args>(args)...);
+ }
+
+ // scalar optimization
+ // TODO we can expand this optimization to: default copyable and assignable
+ template <typename U, typename Enable = typename
+ std::enable_if<std::is_scalar<U>::value>::type>
+ void M_construct(U* p, U arg) {
+ if (usingStdAllocator::value) {
+ *p = arg;
} else {
- goto duff_fill;
- }
- } else if (boost::has_nothrow_constructor<T>::value) {
- duff_fill:
- auto i = b;
- auto const e1 = b + (n & ~size_t(7));
- for (; i != e1; i += 8) {
- new(i) T();
- new(i + 1) T();
- new(i + 2) T();
- new(i + 3) T();
- new(i + 4) T();
- new(i + 5) T();
- new(i + 6) T();
- new(i + 7) T();
- }
- for (auto const e = b + n; i != e; ++i) {
- new(i) T();
- }
- } else {
- // Conservative approach
- auto i = b;
+ folly::fbv_allocator_traits<Allocator>::construct(impl_, p, arg);
+ }
+ }
+
+ template <typename U, typename Enable = typename
+ std::enable_if<std::is_scalar<U>::value>::type>
+ static void S_construct(U* p, U arg) {
+ *p = arg;
+ }
+
+ template <typename U, typename Enable = typename
+ std::enable_if<std::is_scalar<U>::value>::type>
+ static void S_construct_a(Allocator& a, U* p, U arg) {
+ folly::fbv_allocator_traits<Allocator>::construct(a, p, arg);
+ }
+
+ // const& optimization
+ template <typename U, typename Enable = typename
+ std::enable_if<!std::is_scalar<U>::value>::type>
+ void M_construct(U* p, const U& value) {
+ if (usingStdAllocator::value) {
+ new (p) U(value);
+ } else {
+ folly::fbv_allocator_traits<Allocator>::construct(impl_, p, value);
+ }
+ }
+
+ template <typename U, typename Enable = typename
+ std::enable_if<!std::is_scalar<U>::value>::type>
+ static void S_construct(U* p, const U& value) {
+ new (p) U(value);
+ }
+
+ template <typename U, typename Enable = typename
+ std::enable_if<!std::is_scalar<U>::value>::type>
+ static void S_construct_a(Allocator& a, U* p, const U& value) {
+ folly::fbv_allocator_traits<Allocator>::construct(a, p, value);
+ }
+
+ //---------------------------------------------------------------------------
+ // destroy
+
+ void M_destroy(T* p) noexcept {
+ if (usingStdAllocator::value) {
+ if (!std::has_trivial_destructor<T>::value) p->~T();
+ } else {
+ folly::fbv_allocator_traits<Allocator>::destroy(impl_, p);
+ }
+ }
+
+ //===========================================================================
+ //---------------------------------------------------------------------------
+ // algorithmic helpers
+private:
+
+ //---------------------------------------------------------------------------
+ // destroy_range
+
+ // wrappers
+ void M_destroy_range_e(T* pos) noexcept {
+ D_destroy_range_a(pos, impl_.e_);
+ impl_.e_ = pos;
+ }
+
+ // dispatch
+ // THIS DISPATCH CODE IS DUPLICATED IN IMPL. SEE IMPL FOR DETAILS.
+ void D_destroy_range_a(T* first, T* last) noexcept {
+ if (usingStdAllocator::value) {
+ S_destroy_range(first, last);
+ } else {
+ S_destroy_range_a(impl_, first, last);
+ }
+ }
+
+ // allocator
+ static void S_destroy_range_a(Allocator& a, T* first, T* last) noexcept {
+ for (; first != last; ++first)
+ folly::fbv_allocator_traits<Allocator>::destroy(a, first);
+ }
+
+ // optimized
+ static void S_destroy_range(T* first, T* last) noexcept {
+ if (!std::has_trivial_destructor<T>::value) {
+ // EXPERIMENTAL DATA on fbvector<vector<int>> (where each vector<int> has
+ // size 0).
+ // The unrolled version seems to work faster for small to medium sized
+ // fbvectors. It gets a 10% speedup on fbvectors of size 1024, 64, and
+ // 16.
+ // The simple loop version seems to work faster for large fbvectors. The
+ // unrolled version is about 6% slower on fbvectors on size 16384.
+ // The two methods seem tied for very large fbvectors. The unrolled
+ // version is about 0.5% slower on size 262144.
+
+ // for (; first != last; ++first) first->~T();
+ #define FOLLY_FBV_OP(p) (p)->~T()
+ FOLLY_FBV_UNROLL_PTR(first, last, FOLLY_FBV_OP)
+ #undef FOLLY_FBV_OP
+ }
+ }
+
+ //---------------------------------------------------------------------------
+ // uninitialized_fill_n
+
+ // wrappers
+ void M_uninitialized_fill_n_e(size_type sz) {
+ D_uninitialized_fill_n_a(impl_.e_, sz);
+ impl_.e_ += sz;
+ }
+
+ void M_uninitialized_fill_n_e(size_type sz, VT value) {
+ D_uninitialized_fill_n_a(impl_.e_, sz, value);
+ impl_.e_ += sz;
+ }
+
+ // dispatch
+ void D_uninitialized_fill_n_a(T* dest, size_type sz) {
+ if (usingStdAllocator::value) {
+ S_uninitialized_fill_n(dest, sz);
+ } else {
+ S_uninitialized_fill_n_a(impl_, dest, sz);
+ }
+ }
+
+ void D_uninitialized_fill_n_a(T* dest, size_type sz, VT value) {
+ if (usingStdAllocator::value) {
+ S_uninitialized_fill_n(dest, sz, value);
+ } else {
+ S_uninitialized_fill_n_a(impl_, dest, sz, value);
+ }
+ }
+
+ // allocator
+ template <typename... Args>
+ static void S_uninitialized_fill_n_a(Allocator& a, T* dest,
+ size_type sz, Args&&... args) {
+ auto b = dest;
+ auto e = dest + sz;
try {
- for (auto const e = b + n; i != e; ++i) {
- new(i) T;
- }
+ for (; b != e; ++b)
+ folly::fbv_allocator_traits<Allocator>::construct(a, b,
+ std::forward<Args>(args)...);
} catch (...) {
- destroyRange(b, i);
- free(b);
+ S_destroy_range_a(a, dest, b);
throw;
}
}
-}
-/**
- * Fills n objects of type T starting at address b with value. If the
- * operation throws, destroys all objects constructed so far and calls
- * free(b).
- */
-template <class T>
-void uninitializedFillOrFree(T * b, size_t n, const T& value) {
- auto const e = b + n;
- if (boost::has_trivial_copy<T>::value) {
- auto i = b;
- auto const e1 = b + (n & ~size_t(7));
- for (; i != e1; i += 8) {
- new(i) T(value);
- new(i + 1) T(value);
- new(i + 2) T(value);
- new(i + 3) T(value);
- new(i + 4) T(value);
- new(i + 5) T(value);
- new(i + 6) T(value);
- new(i + 7) T(value);
- }
- for (; i != e; ++i) {
- new(i) T(value);
- }
- } else {
- // Conservative approach
- auto i = b;
- try {
- for (; i != e; ++i) {
- new(i) T(value);
+ // optimized
+  // Default-construct n Ts at dest.  Zero-initializable types are filled
+  // with a single memset; otherwise elements are constructed one by one,
+  // and if a constructor throws, the already-constructed prefix
+  // [dest, b) is destroyed in reverse order before rethrowing (b points
+  // at the element whose construction failed, so --b steps back to the
+  // last fully-constructed one).
+  static void S_uninitialized_fill_n(T* dest, size_type n) {
+    if (folly::IsZeroInitializable<T>::value) {
+      std::memset(dest, 0, sizeof(T) * n);
+    } else {
+      auto b = dest;
+      auto e = dest + n;
+      try {
+        for (; b != e; ++b) S_construct(b);
+      } catch (...) {
+        --b;
+        for (; b >= dest; --b) b->~T();
+        throw;
+      }
+    }
+  }
+
+ static void S_uninitialized_fill_n(T* dest, size_type n, const T& value) {
+ auto b = dest;
+ auto e = dest + n;
+ try {
+ for (; b != e; ++b) S_construct(b, value);
} catch (...) {
- destroyRange(b, i);
- free(b);
+ S_destroy_range(dest, b);
throw;
}
}
-}
-} // namespace fbvector_detail
-
-/**
- * This is the std::vector replacement. For conformity, fbvector takes
- * the same template parameters, but it doesn't use the
- * allocator. Instead, it uses malloc, and when present, jemalloc's
- * extensions.
- */
-template <class T, class Allocator>
-class fbvector : private boost::totally_ordered<fbvector<T,Allocator> > {
- bool isSane() const {
- return
- begin() <= end() &&
- empty() == (size() == 0) &&
- empty() == (begin() == end()) &&
- size() <= max_size() &&
- capacity() <= max_size() &&
- size() <= capacity() &&
-
- // Either we have no capacity or our pointers should make sense:
- ((!b_ && !e_ && !z_) || (b_ != z_ && e_ <= z_));
- }
-
- struct Invariant {
-#ifndef NDEBUG
- explicit Invariant(const fbvector& s) : s_(s) {
- assert(s_.isSane());
- }
- ~Invariant() {
- assert(s_.isSane());
- }
- private:
- const fbvector& s_;
-#else
- explicit Invariant(const fbvector&) {}
-#endif
- Invariant& operator=(const Invariant&);
- };
-public:
+ //---------------------------------------------------------------------------
+ // uninitialized_copy
-// types:
- typedef T value_type;
- typedef value_type& reference;
- typedef const value_type& const_reference;
- typedef T* iterator;
- typedef const T* const_iterator;
- typedef size_t size_type;
- typedef ssize_t difference_type;
- // typedef typename allocator_traits<Allocator>::pointer pointer;
- // typedef typename allocator_traits<Allocator>::const_pointer const_pointer;
- typedef Allocator allocator_type;
- typedef typename Allocator::pointer pointer;
- typedef typename Allocator::const_pointer const_pointer;
- typedef std::reverse_iterator<iterator> reverse_iterator;
- typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+ // it is possible to add an optimization for the case where
+  // It = move(T*) and IsRelocatable<T> and IsZeroInitializable<T>
-// 23.3.6.1 construct/copy/destroy:
- fbvector() : b_(NULL), e_(NULL), z_(NULL) {}
+ // wrappers
+ template <typename It>
+ void M_uninitialized_copy_e(It first, It last) {
+ D_uninitialized_copy_a(impl_.e_, first, last);
+ impl_.e_ += std::distance(first, last);
+ }
- explicit fbvector(const Allocator&) {
- new(this) fbvector;
+ template <typename It>
+ void M_uninitialized_move_e(It first, It last) {
+ D_uninitialized_move_a(impl_.e_, first, last);
+ impl_.e_ += std::distance(first, last);
}
- explicit fbvector(const size_type n) {
- if (n == 0) {
- b_ = e_ = z_ = 0;
- return;
+ // dispatch
+ template <typename It>
+ void D_uninitialized_copy_a(T* dest, It first, It last) {
+ if (usingStdAllocator::value) {
+ if (folly::IsTriviallyCopyable<T>::value) {
+ S_uninitialized_copy_bits(dest, first, last);
+ } else {
+ S_uninitialized_copy(dest, first, last);
+ }
+ } else {
+ S_uninitialized_copy_a(impl_, dest, first, last);
}
+ }
- auto const nBytes = goodMallocSize(n * sizeof(T));
- b_ = static_cast<T*>(checkedMalloc(nBytes));
- fbvector_detail::uninitializedFillDefaultOrFree(b_, n);
- e_ = b_ + n;
- z_ = b_ + nBytes / sizeof(T);
+ template <typename It>
+ void D_uninitialized_move_a(T* dest, It first, It last) {
+ D_uninitialized_copy_a(dest,
+ std::make_move_iterator(first), std::make_move_iterator(last));
}
- fbvector(const size_type n, const T& value) {
- if (!n) {
- b_ = e_ = z_ = 0;
- return;
+ // allocator
+ template <typename It>
+ static void
+ S_uninitialized_copy_a(Allocator& a, T* dest, It first, It last) {
+ auto b = dest;
+ try {
+ for (; first != last; ++first, ++b)
+ folly::fbv_allocator_traits<Allocator>::construct(a, b, *first);
+ } catch (...) {
+ S_destroy_range_a(a, dest, b);
+ throw;
}
+ }
- auto const nBytes = goodMallocSize(n * sizeof(T));
- b_ = static_cast<T*>(checkedMalloc(nBytes));
- fbvector_detail::uninitializedFillOrFree(b_, n, value);
- e_ = b_ + n;
- z_ = b_ + nBytes / sizeof(T);
+ // optimized
+ template <typename It>
+ static void S_uninitialized_copy(T* dest, It first, It last) {
+ auto b = dest;
+ try {
+ for (; first != last; ++first, ++b)
+ S_construct(b, *first);
+ } catch (...) {
+ S_destroy_range(dest, b);
+ throw;
+ }
}
- fbvector(const size_type n, const T& value, const Allocator&) {
- new(this) fbvector(n, value);
+ static void
+ S_uninitialized_copy_bits(T* dest, const T* first, const T* last) {
+ std::memcpy(dest, first, (last - first) * sizeof(T));
}
- template <class InputIteratorOrNum>
- fbvector(InputIteratorOrNum first, InputIteratorOrNum last) {
- new(this) fbvector;
- assign(first, last);
+ static void
+ S_uninitialized_copy_bits(T* dest, std::move_iterator<T*> first,
+ std::move_iterator<T*> last) {
+ T* bFirst = first.base();
+ T* bLast = last.base();
+ std::memcpy(dest, bFirst, (bLast - bFirst) * sizeof(T));
}
- template <class InputIterator>
- fbvector(InputIterator first, InputIterator last,
- const Allocator&) {
- new(this) fbvector(first, last);
+ template <typename It>
+ static void
+ S_uninitialized_copy_bits(T* dest, It first, It last) {
+ S_uninitialized_copy(dest, first, last);
}
- fbvector(const fbvector& rhs) {
- new(this) fbvector(rhs.begin(), rhs.end());
+ //---------------------------------------------------------------------------
+ // copy_n
+
+ // This function is "unsafe": it assumes that the iterator can be advanced at
+ // least n times. However, as a private function, that unsafety is managed
+ // wholly by fbvector itself.
+
+ template <typename It>
+ static It S_copy_n(T* dest, It first, size_type n) {
+ auto e = dest + n;
+ for (; dest != e; ++dest, ++first) *dest = *first;
+ return first;
}
- fbvector(const fbvector& rhs, const Allocator&) {
- new(this) fbvector(rhs);
+
+ static const T* S_copy_n(T* dest, const T* first, size_type n) {
+ if (folly::IsTriviallyCopyable<T>::value) {
+ std::memcpy(dest, first, n * sizeof(T));
+ return first + n;
+ } else {
+ return S_copy_n<const T*>(dest, first, n);
+ }
}
- fbvector(fbvector&& o, const Allocator& = Allocator())
- : b_(o.b_)
- , e_(o.e_)
- , z_(o.z_)
- {
- o.b_ = o.e_ = o.z_ = 0;
+ static std::move_iterator<T*>
+ S_copy_n(T* dest, std::move_iterator<T*> mIt, size_type n) {
+ if (folly::IsTriviallyCopyable<T>::value) {
+ T* first = mIt.base();
+ std::memcpy(dest, first, n * sizeof(T));
+ return std::make_move_iterator(first + n);
+ } else {
+ return S_copy_n<std::move_iterator<T*>>(dest, mIt, n);
+ }
+ }
+
+ //===========================================================================
+ //---------------------------------------------------------------------------
+ // relocation helpers
+private:
+
+ // Relocation is divided into three parts:
+ //
+ // 1: relocate_move
+ // Performs the actual movement of data from point a to point b.
+ //
+ // 2: relocate_done
+ // Destroys the old data.
+ //
+ // 3: relocate_undo
+  //    Destroys the new data and restores the old data.
+ //
+ // The three steps are used because there may be an exception after part 1
+ // has completed. If that is the case, then relocate_undo can nullify the
+ // initial move. Otherwise, relocate_done performs the last bit of tidying
+ // up.
+ //
+ // The relocation trio may use either memcpy, move, or copy. It is decided
+ // by the following case statement:
+ //
+ // IsRelocatable && usingStdAllocator -> memcpy
+ // has_nothrow_move && usingStdAllocator -> move
+ // cannot copy -> move
+ // default -> copy
+ //
+ // If the class is non-copyable then it must be movable. However, if the
+ // move constructor is not noexcept, i.e. an error could be thrown, then
+ // relocate_undo will be unable to restore the old data, for fear of a
+ // second exception being thrown. This is a known and unavoidable
+ // deficiency. In lieu of a strong exception guarantee, relocate_undo does
+ // the next best thing: it provides a weak exception guarantee by
+  // destroying the new data, but leaving the old data in an indeterminate
+ // state. Note that that indeterminate state will be valid, since the
+ // old data has not been destroyed; it has merely been the source of a
+ // move, which is required to leave the source in a valid state.
+
+ // wrappers
+ void M_relocate(T* newB) {
+ relocate_move(newB, impl_.b_, impl_.e_);
+ relocate_done(newB, impl_.b_, impl_.e_);
+ }
+
+ // dispatch type trait
+ typedef std::integral_constant<bool,
+ folly::IsRelocatable<T>::value && usingStdAllocator::value
+ > relocate_use_memcpy;
+
+ typedef std::integral_constant<bool,
+ (folly::fbv_is_nothrow_move_constructible<T>::value
+ && usingStdAllocator::value)
+ || !folly::fbv_is_copy_constructible<T>::value
+ > relocate_use_move;
+
+ // move
+ void relocate_move(T* dest, T* first, T* last) {
+ relocate_move_or_memcpy(dest, first, last, relocate_use_memcpy());
+ }
+
+ void relocate_move_or_memcpy(T* dest, T* first, T* last, std::true_type) {
+ std::memcpy(dest, first, (last - first) * sizeof(T));
+ }
+
+ void relocate_move_or_memcpy(T* dest, T* first, T* last, std::false_type) {
+ relocate_move_or_copy(dest, first, last, relocate_use_move());
+ }
+
+ void relocate_move_or_copy(T* dest, T* first, T* last, std::true_type) {
+ D_uninitialized_move_a(dest, first, last);
+ }
+
+ void relocate_move_or_copy(T* dest, T* first, T* last, std::false_type) {
+ D_uninitialized_copy_a(dest, first, last);
+ }
+
+ // done
+ void relocate_done(T* dest, T* first, T* last) noexcept {
+ if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
+ // used memcpy; data has been relocated, do not call destructor
+ } else {
+ D_destroy_range_a(first, last);
+ }
}
- fbvector(std::initializer_list<T> il, const Allocator& = Allocator()) {
- new(this) fbvector(il.begin(), il.end());
+ // undo
+ void relocate_undo(T* dest, T* first, T* last) noexcept {
+ if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
+ // used memcpy, old data is still valid, nothing to do
+ } else if (folly::fbv_is_nothrow_move_constructible<T>::value &&
+ usingStdAllocator::value) {
+ // noexcept move everything back, aka relocate_move
+ relocate_move(first, dest, dest + (last - first));
+ } else if (!folly::fbv_is_copy_constructible<T>::value) {
+ // weak guarantee
+ D_destroy_range_a(dest, dest + (last - first));
+ } else {
+ // used copy, old data is still valid
+ D_destroy_range_a(dest, dest + (last - first));
+ }
}
- ~fbvector() {
- // fbvector only works with relocatable objects. We insert this
- // static check inside the destructor because pretty much any
- // instantiation of fbvector<T> will generate the destructor (and
- // therefore refuse compilation if the assertion fails). To see
- // how you can enable IsRelocatable for your type, refer to the
- // definition of IsRelocatable in Traits.h.
- BOOST_STATIC_ASSERT(IsRelocatable<T>::value);
- if (!b_) return;
- fbvector_detail::destroyRange(b_, e_);
- free(b_);
+
+ //===========================================================================
+ //---------------------------------------------------------------------------
+ // construct/copy/destroy
+public:
+
+ fbvector() = default;
+
+ explicit fbvector(const Allocator& a) : impl_(a) {}
+
+ explicit fbvector(size_type n, const Allocator& a = Allocator())
+ : impl_(n, a)
+ { M_uninitialized_fill_n_e(n); }
+
+ fbvector(size_type n, VT value, const Allocator& a = Allocator())
+ : impl_(n, a)
+ { M_uninitialized_fill_n_e(n, value); }
+
+ template <class It, class Category = typename
+ std::iterator_traits<It>::iterator_category>
+ fbvector(It first, It last, const Allocator& a = Allocator())
+ #ifndef FOLLY_FBV_COMPATIBILITY_MODE
+ : fbvector(first, last, a, Category()) {}
+ #else
+ : impl_(std::distance(first, last), a)
+ { fbvector_init(first, last, Category()); }
+ #endif
+
+ fbvector(const fbvector& other)
+ : impl_(other.size(), A::select_on_container_copy_construction(other.impl_))
+ { M_uninitialized_copy_e(other.begin(), other.end()); }
+
+ fbvector(fbvector&& other) noexcept : impl_(std::move(other.impl_)) {}
+
+ fbvector(const fbvector& other, const Allocator& a)
+ #ifndef FOLLY_FBV_COMPATIBILITY_MODE
+ : fbvector(other.begin(), other.end(), a) {}
+ #else
+ : impl_(other.size(), a)
+ { fbvector_init(other.begin(), other.end(), std::forward_iterator_tag()); }
+ #endif
+
+ fbvector(fbvector&& other, const Allocator& a) : impl_(a) {
+ if (impl_ == other.impl_) {
+ impl_.swapData(other.impl_);
+ } else {
+ impl_.init(other.size());
+ M_uninitialized_move_e(other.begin(), other.end());
+ }
}
- fbvector& operator=(const fbvector& rhs) {
- assign(rhs.begin(), rhs.end());
+
+ fbvector(std::initializer_list<T> il, const Allocator& a = Allocator())
+ #ifndef FOLLY_FBV_COMPATIBILITY_MODE
+ : fbvector(il.begin(), il.end(), a) {}
+ #else
+ : impl_(std::distance(il.begin(), il.end()), a)
+ { fbvector_init(il.begin(), il.end(), std::forward_iterator_tag()); }
+ #endif
+
+ ~fbvector() = default; // the cleanup occurs in impl_
+
+ fbvector& operator=(const fbvector& other) {
+ if (UNLIKELY(this == &other)) return *this;
+
+ if (!usingStdAllocator::value &&
+ A::propagate_on_container_copy_assignment::value) {
+ if (impl_ != other.impl_) {
+ // can't use other's different allocator to clean up self
+ impl_.reset();
+ }
+ (Allocator&)impl_ = (Allocator&)other.impl_;
+ }
+
+ assign(other.begin(), other.end());
return *this;
}
- fbvector& operator=(fbvector&& v) {
- clear();
- swap(v);
+ fbvector& operator=(fbvector&& other) {
+ if (UNLIKELY(this == &other)) return *this;
+ moveFrom(std::move(other), moveIsSwap());
return *this;
}
return *this;
}
- bool operator==(const fbvector& rhs) const {
- return size() == rhs.size() && std::equal(begin(), end(), rhs.begin());
+ template <class It, class Category = typename
+ std::iterator_traits<It>::iterator_category>
+ void assign(It first, It last) {
+ assign(first, last, Category());
}
- bool operator<(const fbvector& rhs) const {
- return std::lexicographical_compare(begin(), end(),
- rhs.begin(), rhs.end());
+ void assign(size_type n, VT value) {
+ if (n > capacity()) {
+ // Not enough space. Do not reserve in place, since we will
+ // discard the old values anyways.
+ if (dataIsInternalAndNotVT(value)) {
+ T copy(std::move(value));
+ impl_.reset(n);
+ M_uninitialized_fill_n_e(n, copy);
+ } else {
+ impl_.reset(n);
+ M_uninitialized_fill_n_e(n, value);
+ }
+ } else if (n <= size()) {
+ auto newE = impl_.b_ + n;
+ std::fill(impl_.b_, newE, value);
+ M_destroy_range_e(newE);
+ } else {
+ std::fill(impl_.b_, impl_.e_, value);
+ M_uninitialized_fill_n_e(n - size(), value);
+ }
+ }
+
+ void assign(std::initializer_list<T> il) {
+ assign(il.begin(), il.end());
+ }
+
+ allocator_type get_allocator() const noexcept {
+ return impl_;
}
private:
+
+ #ifndef FOLLY_FBV_COMPATIBILITY_MODE
+ // contract dispatch for iterator types fbvector(It first, It last)
+ template <class ForwardIterator>
+ fbvector(ForwardIterator first, ForwardIterator last,
+ const Allocator& a, std::forward_iterator_tag)
+ : impl_(std::distance(first, last), a)
+ { M_uninitialized_copy_e(first, last); }
+
template <class InputIterator>
- void assignImpl(InputIterator first, InputIterator last, boost::false_type) {
- // Pair of iterators
- if (fbvector_detail::isForwardIterator<InputIterator>::value) {
- auto const oldSize = size();
- auto const newSize = std::distance(first, last);
-
- if (static_cast<difference_type>(oldSize) >= newSize) {
- // No reallocation, nice
- auto const newEnd = std::copy(first, last, b_);
- fbvector_detail::destroyRange(newEnd, e_);
- e_ = newEnd;
- return;
- }
+ fbvector(InputIterator first, InputIterator last,
+ const Allocator& a, std::input_iterator_tag)
+ : impl_(a)
+ { for (; first != last; ++first) emplace_back(*first); }
+
+ #else
+ // contract dispatch for iterator types without constructor forwarding
+ template <class ForwardIterator>
+ void
+ fbvector_init(ForwardIterator first, ForwardIterator last,
+ std::forward_iterator_tag)
+ { M_uninitialized_copy_e(first, last); }
- // Must reallocate - just do it on the side
- auto const nBytes = goodMallocSize(newSize * sizeof(T));
- auto const b = static_cast<T*>(checkedMalloc(nBytes));
- std::uninitialized_copy(first, last, b);
- this->fbvector::~fbvector();
- b_ = b;
- e_ = b + newSize;
- z_ = b_ + nBytes / sizeof(T);
+ template <class InputIterator>
+ void
+ fbvector_init(InputIterator first, InputIterator last,
+ std::input_iterator_tag)
+ { for (; first != last; ++first) emplace_back(*first); }
+ #endif
+
+ // contract dispatch for allocator movement in operator=(fbvector&&)
+ void
+ moveFrom(fbvector&& other, std::true_type) {
+ swap(impl_, other.impl_);
+ }
+ void moveFrom(fbvector&& other, std::false_type) {
+ if (impl_ == other.impl_) {
+ impl_.swapData(other.impl_);
} else {
- // Input iterator sucks
- FOR_EACH (i, *this) {
- if (first == last) {
- fbvector_detail::destroyRange(i, e_);
- e_ = i;
- return;
- }
- *i = *first;
- ++first;
- }
- FOR_EACH_RANGE (i, first, last) {
- push_back(*i);
- }
+ impl_.reset(other.size());
+ M_uninitialized_move_e(other.begin(), other.end());
}
}
- void assignImpl(const size_type newSize, const T value, boost::true_type) {
- // Arithmetic type, forward back to unambiguous definition
- assign(newSize, value);
+ // contract dispatch for iterator types in assign(It first, It last)
+ template <class ForwardIterator>
+ void assign(ForwardIterator first, ForwardIterator last,
+ std::forward_iterator_tag) {
+ auto const newSize = std::distance(first, last);
+ if (newSize > capacity()) {
+ impl_.reset(newSize);
+ M_uninitialized_copy_e(first, last);
+ } else if (newSize <= size()) {
+ auto newEnd = std::copy(first, last, impl_.b_);
+ M_destroy_range_e(newEnd);
+ } else {
+ auto mid = S_copy_n(impl_.b_, first, size());
+ M_uninitialized_copy_e<decltype(last)>(mid, last);
+ }
}
-public:
- // Classic ambiguity (and a lot of unnecessary complexity) in
- // std::vector: assign(10, 20) for vector<int> means "assign 10
- // elements all having the value 20" but is intercepted by the
- // two-iterators overload assign(first, last). So we need to
- // disambiguate here. There is no pretty solution. We use here
- // overloading based on is_arithmetic. Method insert has the same
- // issue (and the same solution in this implementation).
- template <class InputIteratorOrNum>
- void assign(InputIteratorOrNum first, InputIteratorOrNum last) {
- assignImpl(first, last, boost::is_arithmetic<InputIteratorOrNum>());
- }
-
- void assign(const size_type newSize, const T& value) {
- if (b_ <= &value && &value < e_) {
- // Need to check for aliased assign, sigh
- return assign(newSize, T(value));
- }
-
- auto const oldSize = size();
- if (oldSize >= newSize) {
- // No reallocation, nice
- auto const newEnd = b_ + newSize;
- fbvector_detail::destroyRange(newEnd, e_);
- e_ = newEnd;
- return;
+ template <class InputIterator>
+ void assign(InputIterator first, InputIterator last,
+ std::input_iterator_tag) {
+ auto p = impl_.b_;
+ for (; first != last && p != impl_.e_; ++first, ++p) {
+ *p = *first;
}
-
- // Need to reallocate
- if (reserve_in_place(newSize)) {
- // Careful here, fill and uninitialized_fill may throw. The
- // latter is transactional, so no need to worry about a
- // buffer partially filled in case of exception.
- std::fill(b_, e_, value);
- auto const newEnd = b_ + newSize;
- std::uninitialized_fill(e_, newEnd, value);
- e_ = newEnd;
- return;
+ if (p != impl_.e_) {
+ M_destroy_range_e(p);
+ } else {
+ for (; first != last; ++first) emplace_back(*first);
}
-
- // Cannot expand or jemalloc not present at all; must just
- // allocate a new chunk and discard the old one. This is
- // tantamount with creating a new fbvector altogether. This won't
- // recurse infinitely; the constructor implements its own.
- fbvector temp(newSize, value);
- temp.swap(*this);
}
- void assign(std::initializer_list<T> il) {
- assign(il.begin(), il.end());
+ // contract dispatch for aliasing under VT optimization
+ bool dataIsInternalAndNotVT(const T& t) {
+ if (should_pass_by_value::value) return false;
+ return dataIsInternal(t);
}
-
- allocator_type get_allocator() const {
- // whatevs
- return allocator_type();
+ bool dataIsInternal(const T& t) {
+ return UNLIKELY(impl_.b_ <= std::addressof(t) &&
+ std::addressof(t) < impl_.e_);
}
-// iterators:
- iterator begin() {
- return b_;
+
+ //===========================================================================
+ //---------------------------------------------------------------------------
+ // iterators
+public:
+
+ iterator begin() noexcept {
+ return impl_.b_;
}
- const_iterator begin() const {
- return b_;
+ const_iterator begin() const noexcept {
+ return impl_.b_;
}
- iterator end() {
- return e_;
+ iterator end() noexcept {
+ return impl_.e_;
}
- const_iterator end() const {
- return e_;
+ const_iterator end() const noexcept {
+ return impl_.e_;
}
- reverse_iterator rbegin() {
+ reverse_iterator rbegin() noexcept {
return reverse_iterator(end());
}
- const_reverse_iterator rbegin() const {
+ const_reverse_iterator rbegin() const noexcept {
return const_reverse_iterator(end());
}
- reverse_iterator rend() {
+ reverse_iterator rend() noexcept {
return reverse_iterator(begin());
}
- const_reverse_iterator rend() const {
+ const_reverse_iterator rend() const noexcept {
return const_reverse_iterator(begin());
}
- const_iterator cbegin() const {
- return b_;
+
+ const_iterator cbegin() const noexcept {
+ return impl_.b_;
+ }
+ const_iterator cend() const noexcept {
+ return impl_.e_;
}
- const_iterator cend() const {
- return e_;
+ const_reverse_iterator crbegin() const noexcept {
+ return const_reverse_iterator(end());
+ }
+ const_reverse_iterator crend() const noexcept {
+ return const_reverse_iterator(begin());
}
-// 23.3.6.2 capacity:
- size_type size() const {
- return e_ - b_;
+ //===========================================================================
+ //---------------------------------------------------------------------------
+ // capacity
+public:
+
+ size_type size() const noexcept {
+ return impl_.e_ - impl_.b_;
}
- size_type max_size() {
+ size_type max_size() const noexcept {
// good luck gettin' there
return ~size_type(0);
}
- void resize(const size_type sz) {
- auto const oldSize = size();
- if (sz <= oldSize) {
- auto const newEnd = b_ + sz;
- fbvector_detail::destroyRange(newEnd, e_);
- e_ = newEnd;
+ void resize(size_type n) {
+ if (n <= size()) {
+ M_destroy_range_e(impl_.b_ + n);
} else {
- // Must expand
- reserve(sz);
- auto newEnd = b_ + sz;
- std::uninitialized_fill(e_, newEnd, T());
- e_ = newEnd;
+ reserve(n);
+ M_uninitialized_fill_n_e(n - size());
}
}
- void resize(const size_type sz, const T& c) {
- auto const oldSize = size();
- if (sz <= oldSize) {
- auto const newEnd = b_ + sz;
- fbvector_detail::destroyRange(newEnd, e_);
- e_ = newEnd;
+ void resize(size_type n, VT t) {
+ if (n <= size()) {
+ M_destroy_range_e(impl_.b_ + n);
+ } else if (dataIsInternalAndNotVT(t) && n > capacity()) {
+ T copy(t);
+ reserve(n);
+ M_uninitialized_fill_n_e(n - size(), copy);
} else {
- // Must expand
- reserve(sz);
- auto newEnd = b_ + sz;
- std::uninitialized_fill(e_, newEnd, c);
- e_ = newEnd;
+ reserve(n);
+ M_uninitialized_fill_n_e(n - size(), t);
}
}
- size_type capacity() const {
- return z_ - b_;
+ size_type capacity() const noexcept {
+ return impl_.z_ - impl_.b_;
}
- bool empty() const {
- return b_ == e_;
+
+ bool empty() const noexcept {
+ return impl_.b_ == impl_.e_;
}
-private:
- bool reserve_in_place(const size_type n) {
- auto const crtCapacity = capacity();
- if (n <= crtCapacity) return true;
- if (!rallocm) return false;
-
- // using jemalloc's API. Don't forget that jemalloc can never grow
- // in place blocks smaller than 4096 bytes.
- auto const crtCapacityBytes = crtCapacity * sizeof(T);
- if (crtCapacityBytes < jemallocMinInPlaceExpandable) return false;
-
- auto const newCapacityBytes = goodMallocSize(n * sizeof(T));
- void* p = b_;
- if (rallocm(&p, NULL, newCapacityBytes, 0, ALLOCM_NO_MOVE)
- != ALLOCM_SUCCESS) {
- return false;
- }
+ void reserve(size_type n) {
+ if (n <= capacity()) return;
+ if (impl_.b_ && reserve_in_place(n)) return;
- // Managed to expand in place, reflect that in z_
- assert(b_ == p);
- z_ = b_ + newCapacityBytes / sizeof(T);
- return true;
+ auto newCap = folly::goodMallocSize(n * sizeof(T)) / sizeof(T);
+ auto newB = M_allocate(newCap);
+ try {
+ M_relocate(newB);
+ } catch (...) {
+ M_deallocate(newB, newCap);
+ throw;
+ }
+ if (impl_.b_)
+ M_deallocate(impl_.b_, impl_.z_ - impl_.b_);
+ impl_.z_ = newB + newCap;
+ impl_.e_ += newB - impl_.b_; // speed hax
+ impl_.b_ = newB;
}
- void reserve_with_move(const size_type n) {
- // Here we can be sure we'll need to do a full reallocation
- auto const crtCapacity = capacity();
- assert(crtCapacity < n); // reserve_in_place should have taken
- // care of this
- auto const newCapacityBytes = goodMallocSize(n * sizeof(T));
- auto b = static_cast<T*>(checkedMalloc(newCapacityBytes));
- auto const oldSize = size();
- memcpy(b, b_, oldSize * sizeof(T));
- // Done with the old chunk. Free but don't call destructors!
- free(b_);
- b_ = b;
- e_ = b_ + oldSize;
- z_ = b_ + newCapacityBytes / sizeof(T);
- // done with the old chunk
- }
+ void shrink_to_fit() noexcept {
+ auto const newCapacityBytes = folly::goodMallocSize(size() * sizeof(T));
+ auto const newCap = newCapacityBytes / sizeof(T);
+ auto const oldCap = capacity();
-public:
- void reserve(const size_type n) {
- if (reserve_in_place(n)) return;
- reserve_with_move(n);
+ if (newCap >= oldCap) return;
+
+ void* p = impl_.b_;
+ if ((rallocm && usingStdAllocator::value) &&
+ newCapacityBytes >= folly::jemallocMinInPlaceExpandable &&
+ rallocm(&p, NULL, newCapacityBytes, 0, ALLOCM_NO_MOVE)
+ == ALLOCM_SUCCESS) {
+ impl_.z_ += newCap - oldCap;
+ } else {
+ T* newB; // intentionally uninitialized
+ try {
+ newB = M_allocate(newCap);
+ try {
+ M_relocate(newB);
+ } catch (...) {
+ M_deallocate(newB, newCap);
+ return; // swallow the error
+ }
+ } catch (...) {
+ return;
+ }
+ if (impl_.b_)
+ M_deallocate(impl_.b_, impl_.z_ - impl_.b_);
+ impl_.z_ = newB + newCap;
+ impl_.e_ += newB - impl_.b_; // speed hax
+ impl_.b_ = newB;
+ }
}
- void shrink_to_fit() {
- if (!rallocm) return;
+private:
+
+ bool reserve_in_place(size_type n) {
+ if (!usingStdAllocator::value || !rallocm) return false;
- // using jemalloc's API. Don't forget that jemalloc can never
- // shrink in place blocks smaller than 4096 bytes.
- void* p = b_;
- auto const crtCapacityBytes = capacity() * sizeof(T);
- auto const newCapacityBytes = goodMallocSize(size() * sizeof(T));
- if (crtCapacityBytes >= jemallocMinInPlaceExpandable &&
- rallocm(&p, NULL, newCapacityBytes, 0, ALLOCM_NO_MOVE)
+ // jemalloc can never grow in place blocks smaller than 4096 bytes.
+ if ((impl_.z_ - impl_.b_) * sizeof(T) <
+ folly::jemallocMinInPlaceExpandable) return false;
+
+ auto const newCapacityBytes = folly::goodMallocSize(n * sizeof(T));
+ void* p = impl_.b_;
+ if (rallocm(&p, NULL, newCapacityBytes, 0, ALLOCM_NO_MOVE)
== ALLOCM_SUCCESS) {
- // Celebrate
- z_ = b_ + newCapacityBytes / sizeof(T);
+ impl_.z_ = impl_.b_ + newCapacityBytes / sizeof(T);
+ return true;
}
+ return false;
}
-// element access
+ //===========================================================================
+ //---------------------------------------------------------------------------
+ // element access
+public:
+
reference operator[](size_type n) {
assert(n < size());
- return b_[n];
+ return impl_.b_[n];
}
const_reference operator[](size_type n) const {
assert(n < size());
- return b_[n];
+ return impl_.b_[n];
}
const_reference at(size_type n) const {
- if (n > size()) {
+ if (UNLIKELY(n >= size())) {
throw std::out_of_range("fbvector: index is greater than size.");
}
return (*this)[n];
}
reference front() {
assert(!empty());
- return *b_;
+ return *impl_.b_;
}
const_reference front() const {
assert(!empty());
- return *b_;
+ return *impl_.b_;
}
reference back() {
assert(!empty());
- return e_[-1];
+ return impl_.e_[-1];
}
const_reference back() const {
assert(!empty());
- return e_[-1];
+ return impl_.e_[-1];
}
-// 23.3.6.3 data access
- T* data() {
- return b_;
- }
- const T* data() const {
- return b_;
- }
+ //===========================================================================
+ //---------------------------------------------------------------------------
+ // data access
+public:
-private:
- size_t computePushBackCapacity() const {
- return empty() ? std::max(64 / sizeof(T), size_t(1))
- : capacity() < jemallocMinInPlaceExpandable ? capacity() * 2
- : (capacity() * 3) / 2;
+ T* data() noexcept {
+ return impl_.b_;
+ }
+ const T* data() const noexcept {
+ return impl_.b_;
}
+ //===========================================================================
+ //---------------------------------------------------------------------------
+ // modifiers (common)
public:
-// 23.3.6.4 modifiers:
+
template <class... Args>
- void emplace_back(Args&&... args) {
- if (e_ == z_) {
- if (!reserve_in_place(size() + 1)) {
- reserve_with_move(computePushBackCapacity());
- }
+ void emplace_back(Args&&... args) {
+ if (impl_.e_ != impl_.z_) {
+ M_construct(impl_.e_, std::forward<Args>(args)...);
+ ++impl_.e_;
+ } else {
+ emplace_back_aux(std::forward<Args>(args)...);
}
- new (e_) T(std::forward<Args>(args)...);
- ++e_;
}
- void push_back(T x) {
- if (e_ == z_) {
- if (!reserve_in_place(size() + 1)) {
- reserve_with_move(computePushBackCapacity());
- }
+ void
+ push_back(const T& value) {
+ if (impl_.e_ != impl_.z_) {
+ M_construct(impl_.e_, value);
+ ++impl_.e_;
+ } else {
+ emplace_back_aux(value);
}
- fbvector_detail::uninitialized_destructive_move(x, e_);
- ++e_;
}
-private:
- bool expand() {
- if (!rallocm) return false;
- auto const capBytes = capacity() * sizeof(T);
- if (capBytes < jemallocMinInPlaceExpandable) return false;
- auto const newCapBytes = goodMallocSize(capBytes + sizeof(T));
- void * bv = b_;
- if (rallocm(&bv, NULL, newCapBytes, 0, ALLOCM_NO_MOVE) != ALLOCM_SUCCESS) {
- return false;
- }
- // Managed to expand in place
- assert(bv == b_); // nothing moved
- z_ = b_ + newCapBytes / sizeof(T);
- assert(capacity() > capBytes / sizeof(T));
- return true;
+ void
+ push_back(T&& value) {
+ if (impl_.e_ != impl_.z_) {
+ M_construct(impl_.e_, std::move(value));
+ ++impl_.e_;
+ } else {
+ emplace_back_aux(std::move(value));
+ }
}
-public:
void pop_back() {
assert(!empty());
- --e_;
- if (!boost::has_trivial_destructor<T>::value) {
- e_->T::~T();
- }
- }
- // template <class... Args>
- // iterator emplace(const_iterator position, Args&&... args);
-
- iterator insert(const_iterator position, T x) {
- size_t newSize; // intentionally uninitialized
- if (e_ == z_ && !reserve_in_place(newSize = size() + 1)) {
- // Can't reserve in place, make a copy
- auto const offset = position - cbegin();
- fbvector tmp;
- tmp.reserve(newSize);
- memcpy(tmp.b_, b_, offset * sizeof(T));
- fbvector_detail::uninitialized_destructive_move(
- x,
- tmp.b_ + offset);
- memcpy(tmp.b_ + offset + 1, b_ + offset, (size() - offset) * sizeof(T));
- // Brutally reassign this to refer to tmp's guts
- free(b_);
- b_ = tmp.b_;
- e_ = b_ + newSize;
- z_ = tmp.z_;
- // get rid of tmp's guts
- new(&tmp) fbvector;
- return begin() + offset;
- }
- // Here we have enough room
- memmove(const_cast<T*>(&*position) + 1,
- const_cast<T*>(&*position),
- sizeof(T) * (e_ - position));
- fbvector_detail::uninitialized_destructive_move(
- x,
- const_cast<T*>(&*position));
- ++e_;
- return const_cast<iterator>(position);
- }
-
- iterator insert(const_iterator position, const size_type n, const T& x) {
- if (e_ + n >= z_) {
- if (b_ <= &x && &x < e_) {
- // Ew, aliased insert
- auto copy = x;
- return insert(position, n, copy);
- }
- auto const m = position - b_;
- reserve(size() + n);
- position = b_ + m;
- }
- memmove(const_cast<T*>(position) + n,
- position,
- sizeof(T) * (e_ - position));
- if (boost::has_trivial_copy<T>::value) {
- std::uninitialized_fill(const_cast<T*>(position),
- const_cast<T*>(position) + n,
- x);
- } else {
- try {
- std::uninitialized_fill(const_cast<T*>(position),
- const_cast<T*>(position) + n,
- x);
- } catch (...) {
- // Oops, put things back where they were
- memmove(const_cast<T*>(position),
- position + n,
- sizeof(T) * (e_ - position));
- throw;
- }
- }
- e_ += n;
- return const_cast<iterator>(position);
+ --impl_.e_;
+ M_destroy(impl_.e_);
+ }
+
+ void swap(fbvector& other) noexcept {
+ if (!usingStdAllocator::value &&
+ A::propagate_on_container_swap::value)
+ swap(impl_, other.impl_);
+ else impl_.swapData(other.impl_);
+ }
+
+ void clear() noexcept {
+ M_destroy_range_e(impl_.b_);
}
private:
- template <class InputIterator>
- iterator insertImpl(const_iterator position,
- InputIterator first, InputIterator last,
- boost::false_type) {
- // Pair of iterators
- if (fbvector_detail::isForwardIterator<InputIterator>::value) {
- // Can compute distance
- auto const n = std::distance(first, last);
- if (e_ + n >= z_) {
- auto const m = position - b_;
- reserve(size() + n);
- position = b_ + m;
- }
- memmove(const_cast<T*>(position) + n,
- position,
- sizeof(T) * (e_ - position));
- try {
- std::uninitialized_copy(first, last,
- const_cast<T*>(position));
- } catch (...) {
- // Oops, put things back where they were
- memmove(const_cast<T*>(position),
- position + n,
- sizeof(T) * (e_ - position));
- throw;
+
+ // std::vector implements a similar function with a different growth
+ // strategy: empty() ? 1 : capacity() * 2.
+ //
+ // fbvector grows differently on two counts:
+ //
+ // (1) initial size
+ // Instead of growing to size 1 from empty, fbvector allocates at
+ // least 64 bytes. You may still use reserve to reserve a lesser amount
+ // of memory.
+ // (2) 1.5x
+ // For medium-sized vectors, the growth strategy is 1.5x. See the docs
+ // for details.
+ // This does not apply to very small or very large fbvectors. This is a
+ // heuristic.
+ // A nice addition to fbvector would be the capability of having a user-
+ // defined growth strategy, probably as part of the allocator.
+ //
+
+ size_type computePushBackCapacity() const {
+ return empty() ? std::max(64 / sizeof(T), size_type(1))
+ : capacity() < folly::jemallocMinInPlaceExpandable / sizeof(T)
+ ? capacity() * 2
+ : sizeof(T) > folly::jemallocMinInPlaceExpandable / 2 && capacity() == 1
+ ? 2
+ : capacity() > 4096 * 32 / sizeof(T)
+ ? capacity() * 2
+ : (capacity() * 3 + 1) / 2;
+ }
+
+ template <class... Args>
+ void emplace_back_aux(Args&&... args);
+
+ //===========================================================================
+ //---------------------------------------------------------------------------
+ // modifiers (erase)
+public:
+
+ iterator erase(const_iterator position) {
+ return erase(position, position + 1);
+ }
+
+ iterator erase(const_iterator first, const_iterator last) {
+ assert(isValid(first) && isValid(last));
+ assert(first <= last);
+ if (first != last) {
+ if (last == end()) {
+ M_destroy_range_e((iterator)first);
+ } else {
+ if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
+ D_destroy_range_a((iterator)first, (iterator)last);
+ if (last - first >= cend() - last) {
+ std::memcpy((iterator)first, last, (cend() - last) * sizeof(T));
+ } else {
+ std::memmove((iterator)first, last, (cend() - last) * sizeof(T));
+ }
+ impl_.e_ -= (last - first);
+ } else {
+ std::copy(std::make_move_iterator((iterator)last),
+ std::make_move_iterator(end()), (iterator)first);
+ auto newEnd = impl_.e_ - std::distance(first, last);
+ M_destroy_range_e(newEnd);
+ }
}
- e_ += n;
- return const_cast<iterator>(position);
+ }
+ return (iterator)first;
+ }
+
+ //===========================================================================
+ //---------------------------------------------------------------------------
+ // modifiers (insert)
+private: // we have the private section first because it defines some macros
+
+ bool isValid(const_iterator it) {
+ return cbegin() <= it && it <= cend();
+ }
+
+ size_type computeInsertCapacity(size_type n) {
+ size_type nc = std::max(computePushBackCapacity(), size() + n);
+ size_type ac = folly::goodMallocSize(nc * sizeof(T)) / sizeof(T);
+ return ac;
+ }
+
+ //---------------------------------------------------------------------------
+ //
+ // make_window takes an fbvector, and creates an uninitialized gap (a
+ // window) at the given position, of the given size. The fbvector must
+ // have enough capacity.
+ //
+ // Explanation by picture.
+ //
+ // 123456789______
+ // ^
+ // make_window here of size 3
+ //
+ // 1234___56789___
+ //
+ // If something goes wrong and the window must be destroyed, use
+ // undo_window to provide a weak exception guarantee. It destroys
+ // the right ledge.
+ //
+ // 1234___________
+ //
+ //---------------------------------------------------------------------------
+ //
+ // wrap_frame takes an inverse window and relocates an fbvector around it.
+ // The fbvector must have at least as many elements as the left ledge.
+ //
+ // Explanation by picture.
+ //
+ // START
+ // fbvector: inverse window:
+ // 123456789______ _____abcde_______
+ // [idx][ n ]
+ //
+ // RESULT
+ // _______________ 12345abcde6789___
+ //
+ //---------------------------------------------------------------------------
+ //
+ // insert_use_fresh_memory returns true iff the fbvector should use a fresh
+ // block of memory for the insertion. If the fbvector does not have enough
+ // spare capacity, then it must return true. Otherwise either true or false
+ // may be returned.
+ //
+ //---------------------------------------------------------------------------
+ //
+ // These three functions, make_window, wrap_frame, and
+ // insert_use_fresh_memory, can be combined into a uniform interface.
+ // Since that interface involves a lot of case-work, it is built into
+ // some macros: FOLLY_FBVECTOR_INSERT_(START|TRY|END)
+ // Macros are used in an attempt to let GCC perform better optimizations,
+ // especially control flow optimization.
+ //
+
+ //---------------------------------------------------------------------------
+ // window
+
+ void make_window(iterator position, size_type n) {
+ assert(isValid(position));
+ assert(size() + n <= capacity());
+ assert(n != 0);
+
+ auto tail = std::distance(position, impl_.e_);
+
+ if (tail <= n) {
+ relocate_move(position + n, position, impl_.e_);
+ relocate_done(position + n, position, impl_.e_);
+ impl_.e_ += n;
} else {
- // Cannot compute distance, crappy approach
- // TODO: OPTIMIZE
- fbvector result(cbegin(), position);
- auto const offset = result.size();
- FOR_EACH_RANGE (i, first, last) {
- result.push_back(*i);
+ if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
+ std::memmove(position + n, position, tail * sizeof(T));
+ impl_.e_ += n;
+ } else {
+ D_uninitialized_move_a(impl_.e_, impl_.e_ - n, impl_.e_);
+ impl_.e_ += n;
+ std::copy_backward(std::make_move_iterator(position),
+ std::make_move_iterator(impl_.e_ - n), impl_.e_);
+ D_destroy_range_a(position, position + n);
}
- result.insert(result.end(), position, cend());
- result.swap(*this);
- return begin() + offset;
}
}
- iterator insertImpl(const_iterator position,
- const size_type count, const T value, boost::true_type) {
- // Forward back to unambiguous function
- return insert(position, count, value);
+ void undo_window(iterator position, size_type n) noexcept {
+ D_destroy_range_a(position + n, impl_.e_);
+ impl_.e_ = position;
}
+ //---------------------------------------------------------------------------
+ // frame
+
+ void wrap_frame(T* ledge, size_type idx, size_type n) {
+ assert(size() >= idx);
+ assert(n != 0);
+
+ relocate_move(ledge, impl_.b_, impl_.b_ + idx);
+ try {
+ relocate_move(ledge + idx + n, impl_.b_ + idx, impl_.e_);
+ } catch (...) {
+ relocate_undo(ledge, impl_.b_, impl_.b_ + idx);
+ throw;
+ }
+ relocate_done(ledge, impl_.b_, impl_.b_ + idx);
+ relocate_done(ledge + idx + n, impl_.b_ + idx, impl_.e_);
+ }
+
+ //---------------------------------------------------------------------------
+ // use fresh?
+
+ bool insert_use_fresh(const_iterator cposition, size_type n) {
+ if (cposition == cend()) {
+ if (size() + n <= capacity()) return false;
+ if (reserve_in_place(size() + n)) return false;
+ return true;
+ }
+
+ if (size() + n > capacity()) return true;
+
+ return false;
+ }
+
+ //---------------------------------------------------------------------------
+ // interface
+
+ #define FOLLY_FBVECTOR_INSERT_START(cpos, n) \
+ assert(isValid(cpos)); \
+ T* position = const_cast<T*>(cpos); \
+ size_type idx = std::distance(impl_.b_, position); \
+ bool fresh = insert_use_fresh(position, n); \
+ T* b; \
+ size_type newCap = 0; \
+ \
+ if (fresh) { \
+ newCap = computeInsertCapacity(n); \
+ b = M_allocate(newCap); \
+ } else { \
+ make_window(position, n); \
+ b = impl_.b_; \
+ } \
+ \
+ T* start = b + idx; \
+ \
+ try { \
+
+ // construct the inserted elements
+
+ #define FOLLY_FBVECTOR_INSERT_TRY(cpos, n) \
+ } catch (...) { \
+ if (fresh) { \
+ M_deallocate(b, newCap); \
+ } else { \
+ undo_window(position, n); \
+ } \
+ throw; \
+ } \
+ \
+ if (fresh) { \
+ try { \
+ wrap_frame(b, idx, n); \
+ } catch (...) { \
+
+
+ // delete the inserted elements (exception has been thrown)
+
+ #define FOLLY_FBVECTOR_INSERT_END(cpos, n) \
+ M_deallocate(b, newCap); \
+ throw; \
+ } \
+ if (impl_.b_) M_deallocate(impl_.b_, capacity()); \
+ impl_.set(b, size() + n, newCap); \
+ return impl_.b_ + idx; \
+ } else { \
+ return position; \
+ } \
+
+ //---------------------------------------------------------------------------
+ // insert functions
public:
- template <class InputIteratorOrNum>
- iterator insert(const_iterator position, InputIteratorOrNum first,
- InputIteratorOrNum last) {
- return insertImpl(position, first, last,
- boost::is_arithmetic<InputIteratorOrNum>());
+
+ template <class... Args>
+ iterator emplace(const_iterator cpos, Args&&... args) {
+ FOLLY_FBVECTOR_INSERT_START(cpos, 1)
+ M_construct(start, std::forward<Args>(args)...);
+ FOLLY_FBVECTOR_INSERT_TRY(cpos, 1)
+ M_destroy(start);
+ FOLLY_FBVECTOR_INSERT_END(cpos, 1)
}
- iterator insert(const_iterator position, std::initializer_list<T> il) {
- return insert(position, il.begin(), il.end());
+ iterator insert(const_iterator cpos, const T& value) {
+ if (dataIsInternal(value)) return insert(cpos, T(value));
+
+ FOLLY_FBVECTOR_INSERT_START(cpos, 1)
+ M_construct(start, value);
+ FOLLY_FBVECTOR_INSERT_TRY(cpos, 1)
+ M_destroy(start);
+ FOLLY_FBVECTOR_INSERT_END(cpos, 1)
}
- iterator erase(const_iterator position) {
- if (position == e_) return e_;
- auto p = const_cast<T*>(position);
- (*p).T::~T();
- memmove(p, p + 1, sizeof(T) * (e_ - p - 1));
- --e_;
- return p;
+ iterator insert(const_iterator cpos, T&& value) {
+ if (dataIsInternal(value)) return insert(cpos, T(std::move(value)));
+
+ FOLLY_FBVECTOR_INSERT_START(cpos, 1)
+ M_construct(start, std::move(value));
+ FOLLY_FBVECTOR_INSERT_TRY(cpos, 1)
+ M_destroy(start);
+ FOLLY_FBVECTOR_INSERT_END(cpos, 1)
}
- iterator erase(const_iterator first, const_iterator last) {
- assert(first <= last);
- auto p1 = const_cast<T*>(first);
- auto p2 = const_cast<T*>(last);
- fbvector_detail::destroyRange(p1, p2);
- memmove(p1, last, sizeof(T) * (e_ - last));
- e_ -= last - first;
- return p1;
+ iterator insert(const_iterator cpos, size_type n, VT value) {
+ if (n == 0) return (iterator)cpos;
+ if (dataIsInternalAndNotVT(value)) return insert(cpos, n, T(value));
+
+ FOLLY_FBVECTOR_INSERT_START(cpos, n)
+ D_uninitialized_fill_n_a(start, n, value);
+ FOLLY_FBVECTOR_INSERT_TRY(cpos, n)
+ D_destroy_range_a(start, start + n);
+ FOLLY_FBVECTOR_INSERT_END(cpos, n)
}
- void swap(fbvector& rhs) {
- std::swap(b_, rhs.b_);
- std::swap(e_, rhs.e_);
- std::swap(z_, rhs.z_);
+ template <class It, class Category = typename
+ std::iterator_traits<It>::iterator_category>
+ iterator insert(const_iterator cpos, It first, It last) {
+ return insert(cpos, first, last, Category());
}
- void clear() {
- fbvector_detail::destroyRange(b_, e_);
- e_ = b_;
+ iterator insert(const_iterator cpos, std::initializer_list<T> il) {
+ return insert(cpos, il.begin(), il.end());
}
+ //---------------------------------------------------------------------------
+ // insert dispatch for iterator types
private:
- // Data
- T *b_, *e_, *z_;
-};
-template <class T, class A>
-bool operator!=(const fbvector<T, A>& lhs,
- const fbvector<T, A>& rhs) {
- return !(lhs == rhs);
+ template <class FIt>
+ iterator insert(const_iterator cpos, FIt first, FIt last,
+ std::forward_iterator_tag) {
+ size_type n = std::distance(first, last);
+ if (n == 0) return (iterator)cpos;
+
+ FOLLY_FBVECTOR_INSERT_START(cpos, n)
+ D_uninitialized_copy_a(start, first, last);
+ FOLLY_FBVECTOR_INSERT_TRY(cpos, n)
+ D_destroy_range_a(start, start + n);
+ FOLLY_FBVECTOR_INSERT_END(cpos, n)
+ }
+
+ template <class IIt>
+ iterator insert(const_iterator cpos, IIt first, IIt last,
+ std::input_iterator_tag) {
+ T* position = const_cast<T*>(cpos);
+ assert(isValid(position));
+ size_type idx = std::distance(begin(), position);
+
+ fbvector storage(std::make_move_iterator(position),
+ std::make_move_iterator(end()),
+ A::select_on_container_copy_construction(impl_));
+ M_destroy_range_e(position);
+ for (; first != last; ++first) emplace_back(*first);
+ insert(cend(), std::make_move_iterator(storage.begin()),
+ std::make_move_iterator(storage.end()));
+ return impl_.b_ + idx;
+ }
+
+ //===========================================================================
+ //---------------------------------------------------------------------------
+ // lexicographical functions (others from boost::totally_ordered superclass)
+public:
+
+ bool operator==(const fbvector& other) const {
+ return size() == other.size() && std::equal(begin(), end(), other.begin());
+ }
+
+ bool operator<(const fbvector& other) const {
+ return std::lexicographical_compare(
+ begin(), end(), other.begin(), other.end());
+ }
+
+ //===========================================================================
+ //---------------------------------------------------------------------------
+ // friends
+private:
+
+ template <class _T, class _A>
+ friend _T* relinquish(fbvector<_T, _A>&);
+
+ template <class _T, class _A>
+ friend void attach(fbvector<_T, _A>&, _T* data, size_t sz, size_t cap);
+
+}; // class fbvector
+
+
+//=============================================================================
+//-----------------------------------------------------------------------------
+// outlined functions (gcc, you finicky compiler you)
+
+template <typename T, typename Allocator>
+template <class... Args>
+void fbvector<T, Allocator>::emplace_back_aux(Args&&... args) {
+ size_type byte_sz = folly::goodMallocSize(
+ computePushBackCapacity() * sizeof(T));
+ if (usingStdAllocator::value
+ && rallocm
+ && ((impl_.z_ - impl_.b_) * sizeof(T) >=
+ folly::jemallocMinInPlaceExpandable)) {
+ // Try to reserve in place.
+ // Ask rallocm to allocate in place at least size()+1 and at most sz space.
+ // rallocm will allocate as much as possible within that range, which
+ // is the best possible outcome: if sz space is available, take it all,
+ // otherwise take as much as possible. If nothing is available, then fail.
+ // In this fashion, we never relocate if there is a possibility of
+ // expanding in place, and we never relocate by less than the desired
+ // amount unless we cannot expand further. Hence we will not relocate
+ // sub-optimally twice in a row (modulo the blocking memory being freed).
+ size_type lower = folly::goodMallocSize(sizeof(T) + size() * sizeof(T));
+ size_type upper = byte_sz;
+ size_type extra = upper - lower;
+ assert(extra >= 0);
+
+ void* p = impl_.b_;
+ size_t actual;
+
+ if (rallocm(&p, &actual, lower, extra, ALLOCM_NO_MOVE)
+ == ALLOCM_SUCCESS) {
+ impl_.z_ = impl_.b_ + actual / sizeof(T);
+ M_construct(impl_.e_, std::forward<Args>(args)...);
+ ++impl_.e_;
+ return;
+ }
+ }
+
+ // Reallocation failed. Perform a manual relocation.
+ size_type sz = byte_sz / sizeof(T);
+ auto newB = M_allocate(sz);
+ auto newE = newB + size();
+ try {
+ if (folly::IsRelocatable<T>::value && usingStdAllocator::value) {
+ // For linear memory access, relocate before construction.
+ // By the test condition, relocate is noexcept.
+ // Note that there is no cleanup to do if M_construct throws - that's
+ // one of the beauties of relocation.
+ // Benchmarks for this code have high variance, and seem to be close.
+ relocate_move(newB, impl_.b_, impl_.e_);
+ M_construct(newE, std::forward<Args>(args)...);
+ ++newE;
+ } else {
+ M_construct(newE, std::forward<Args>(args)...);
+ ++newE;
+ try {
+ M_relocate(newB);
+ } catch (...) {
+ M_destroy(newE - 1);
+ throw;
+ }
+ }
+ } catch (...) {
+ M_deallocate(newB, sz);
+ throw;
+ }
+ if (impl_.b_) M_deallocate(impl_.b_, size());
+ impl_.b_ = newB;
+ impl_.e_ = newE;
+ impl_.z_ = newB + sz;
}
+//=============================================================================
+//-----------------------------------------------------------------------------
+// specialized functions
+
template <class T, class A>
-void swap(fbvector<T, A>& lhs, fbvector<T, A>& rhs) {
+void swap(fbvector<T, A>& lhs, fbvector<T, A>& rhs) noexcept {
lhs.swap(rhs);
}
-/**
- * Resizes *v to exactly n elements. May reallocate the vector to a
- * smaller buffer if too much space will be left unused.
- */
-template <class T>
-static void compactResize(folly::fbvector<T> * v, size_t size) {
- auto const oldCap = v->capacity();
- if (oldCap > size + 1024 && size < oldCap * 0.3) {
- // Too much slack memory, reallocate a smaller buffer
- auto const oldSize = v->size();
- if (size <= oldSize) {
- // Shrink
- folly::fbvector<T>(v->begin(), v->begin() + size).swap(*v);
- } else {
- // Expand
- folly::fbvector<T> temp;
- temp.reserve(size);
- copy(v->begin(), v->end(), back_inserter(temp));
- temp.resize(size);
- temp.swap(*v);
- }
- } else {
- // Nolo contendere
- v->resize(size);
- }
+//=============================================================================
+//-----------------------------------------------------------------------------
+// other
+
+template <class T, class A>
+void compactResize(fbvector<T, A>* v, size_t sz) {
+ v->resize(sz);
+ v->shrink_to_fit();
+}
+
+// DANGER
+//
+// relinquish and attach are deliberately not member functions, so that it is
+// awkward to call them. It is very easy to shoot yourself in the foot with
+// these functions.
+//
+// If you call relinquish, then it is your responsibility to free the data
+// and the storage, both of which may have been generated in a non-standard
+// way through the fbvector's allocator.
+//
+// If you call attach, it is your responsibility to ensure that the fbvector
+// is fresh (size and capacity both zero), and that the supplied data is
+// capable of being manipulated by the allocator.
+// It is acceptable to supply a stack pointer IF:
+// (1) The vector's data does not outlive the stack pointer. This includes
+// extension of the data's life through a move operation.
+// (2) The pointer has enough capacity that the vector will never be
+// relocated.
+// (3) Insert is not called on the vector; these functions have leeway to
+// relocate the vector even if there is enough capacity.
+// (4) A stack pointer is compatible with the fbvector's allocator.
+//
+
+template <class T, class A>
+T* relinquish(fbvector<T, A>& v) {
+ T* ret = v.data();
+ v.impl_.b_ = v.impl_.e_ = v.impl_.z_ = nullptr;
+ return ret;
+}
+
+template <class T, class A>
+void attach(fbvector<T, A>& v, T* data, size_t sz, size_t cap) {
+ assert(v.data() == nullptr);
+ v.impl_.b_ = data;
+ v.impl_.e_ = data + sz;
+ v.impl_.z_ = data + cap;
}
} // namespace folly
-#endif // FOLLY_FBVECTOR_H_
+#endif // FOLLY_FBVECTOR_H
+
--- /dev/null
+/*
+ * Copyright 2012 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// @author Nicholas Ormrod <njormrod@fb.com>
+
+/******************************************************************************
+ *
+ * This file is not perfect - benchmarking is a finicky process.
+ *
+ * TAKE THE NUMBERS YOU SEE TO MIND, NOT TO HEART
+ *
+ *****************************************************************************/
+
+#include <vector>
+#include "OFBVector.h"
+#define FOLLY_BENCHMARK_USE_NS_IFOLLY
+#include "folly/FBVector.h"
+
+#include <chrono>
+#include <deque>
+#include <iomanip>
+#include <iostream>
+#include <locale>
+#include <memory>
+#include <string>
+
+#include <boost/preprocessor.hpp>
+
+using namespace std;
+
+static const bool enableColors = true;
+
+//=============================================================================
+// use the timestamp counter for time measurements
+
+static inline
+void clear_icache() {} // placeholder
+
+// return the CPU timestamp counter
+static uint64_t readTSC() {
+ unsigned reslo, reshi;
+
+ __asm__ __volatile__ (
+ "xorl %%eax,%%eax \n cpuid \n"
+ ::: "%eax", "%ebx", "%ecx", "%edx");
+ __asm__ __volatile__ (
+ "rdtsc\n"
+ : "=a" (reslo), "=d" (reshi) );
+ __asm__ __volatile__ (
+ "xorl %%eax,%%eax \n cpuid \n"
+ ::: "%eax", "%ebx", "%ecx", "%edx");
+
+ return ((uint64_t)reshi << 32) | reslo;
+}
+
+//=============================================================================
+// Timer
+
+// The TIME* macros expand to a sequence of functions and classes whose aim
+// is to run a benchmark function several times with different types and
+// sizes.
+//
+// The first and last thing that TIME* expands to is a function declaration,
+// through the DECL macro. The declared function is templated on the full
+// vector type, its value_type, its allocator_type, and a number N.
+// The first DECL is a forward declaration, and is followed by a
+// semicolon. The second DECL ends the macro - a function body is to be
+// supplied after the macro invocation.
+// The declared function returns a uint64_t, which is assumed to be a time.
+//
+// The GETTER macro calls the DECL function repeatedly. It returns the
+// minimum time taken to execute DECL. GETTER runs DECL between 2 and 100
+// times (it will not run the full 100 if the tests take a long time).
+//
+// The EVALUATOR macro calls the GETTER macro with each of std::vector,
+// the original fbvector (folly::fbvector), and the new fbvector
+// (Ifolly::fbvector). It runs all three twice, and then calls the
+// pretty printer to display the results. Before calling the pretty
+// printer, the EVALUATOR outputs the three message strings.
+//
+// The EXECUTOR macro calls the EVALUATOR with different values of N.
+// It also defines the string message for N.
+//
+// The RUNNER macro defines a struct. That struct defines the testname
+// string. The constructor calls the EXECUTOR with each test type, and
+// also defines the test type string.
+// The RUNNER class is also instantiated, so the constructor will be run
+// before entering main().
+//
+
+#define TIME(str, types) TIME_I(str, types, (0))
+#define TIME_N(str, types) TIME_I(str, types, (0)(16)(64)(1024)(16384)(262144))
+
+#define TIME_I(str, types, values) \
+ TIME_II(str, BOOST_PP_CAT(t_, __LINE__), types, values)
+
+#define TIME_II(str, name, types, values) \
+ DECL(name); \
+ GETTER(name) \
+ EVALUATOR(name) \
+ EXECUTOR(name, values) \
+ RUNNER(str, name, types) \
+ DECL(name)
+
+#define DECL(name) \
+ template <class Vector, typename T, typename Allocator, int N> \
+ static inline uint64_t BOOST_PP_CAT(run_, name) ()
+
+#define GETTER(name) \
+ template <class Vector, int N> \
+ static uint64_t BOOST_PP_CAT(get_, name) () { \
+ auto s = chrono::high_resolution_clock::now(); \
+ uint64_t minticks = ~uint64_t(0); \
+ int burst = 0; \
+ for (; burst < 100; ++burst) { \
+ auto t = BOOST_PP_CAT(run_, name) <Vector, \
+ typename Vector::value_type, typename Vector::allocator_type, N> (); \
+ minticks = min(minticks, t); \
+ if (minticks * burst > 10000000) break; \
+ } \
+ auto e = chrono::high_resolution_clock::now(); \
+ chrono::nanoseconds d(e - s); \
+ return minticks; \
+ return d.count() / burst; \
+ }
+
+#define EVALUATOR(name) \
+ template <typename T, typename Allocator, int N> \
+ void BOOST_PP_CAT(evaluate_, name) \
+ ( string& part1, string& part2, string& part3 ) { \
+ cout << setw(25) << left << part1 \
+ << setw(4) << left << part2 \
+ << setw(6) << right << part3; \
+ part1.clear(); part2.clear(); part3.clear(); \
+ auto v1 = BOOST_PP_CAT(get_, name) \
+ <Ifolly::fbvector<T, Allocator>, N> (); \
+ auto v2 = BOOST_PP_CAT(get_, name) \
+ < folly::fbvector<T, Allocator>, N> (); \
+ auto v3 = BOOST_PP_CAT(get_, name) \
+ < std:: vector<T, Allocator>, N> (); \
+ auto v1b = BOOST_PP_CAT(get_, name) \
+ <Ifolly::fbvector<T, Allocator>, N> (); \
+ auto v2b = BOOST_PP_CAT(get_, name) \
+ < folly::fbvector<T, Allocator>, N> (); \
+ auto v3b = BOOST_PP_CAT(get_, name) \
+ < std:: vector<T, Allocator>, N> (); \
+ prettyPrint(min(v1, v1b), min(v2, v2b), min(v3, v3b)); \
+ cout << endl; \
+ }
+
+#define EXECUTOR(name, values) \
+ template <typename T, typename Allocator> \
+ void BOOST_PP_CAT(execute_, name) ( string& part1, string& part2 ) { \
+ BOOST_PP_SEQ_FOR_EACH(EVALUATE, name, values) \
+ }
+
+#define EVALUATE(r, name, value) \
+ { string part3(BOOST_PP_STRINGIZE(value)); \
+ BOOST_PP_CAT(evaluate_, name) <T, Allocator, value> \
+ ( part1, part2, part3 ); }
+
+#define RUNNER(str, name, types) \
+ struct BOOST_PP_CAT(Runner_, name) { \
+ BOOST_PP_CAT(Runner_, name) () { \
+ string part1(str); \
+ BOOST_PP_SEQ_FOR_EACH(EXECUTE, (part1, name), types) \
+ } \
+ } BOOST_PP_CAT(runner_, name);
+
+#define EXECUTE(r, pn, type) \
+ { string part2(BOOST_PP_STRINGIZE(type)); \
+ BOOST_PP_CAT(execute_, BOOST_PP_TUPLE_ELEM(2, 1, pn)) \
+ <typename type::first_type, typename type::second_type> \
+ ( BOOST_PP_TUPLE_ELEM(2, 0, pn), part2 ); }
+
+//=============================================================================
+// pretty printer
+
+// The pretty printer displays the times for each of the three vectors.
+// The fastest time (or times, if there is a tie) is highlighted in green.
+// Additionally, if the new fbvector (time v1) is not the fastest, then
+// it is highlighted with red or blue. It is highlighted with blue only
+// if it lost by a small margin (5 clock cycles or 2%, whichever is
+// greater).
+
+// Pretty-print one row of benchmark timings (new fbvector, old fbvector,
+// std::vector). The fastest time(s) are highlighted green; if the new
+// fbvector (v1) is not fastest it is highlighted blue when it lost by a
+// small margin (5 cycles or 2%, whichever is greater) and red otherwise.
+// A time of 0 means "not measured" and is printed as "-".
+void prettyPrint(uint64_t v1, uint64_t v2, uint64_t v3) {
+  // rdtsc takes some time to run; about 157 clock cycles
+  // if we see a smaller positive number, adjust readtsc
+  uint64_t readtsc_time = 157;
+  if (v1 != 0 && v1 < readtsc_time) readtsc_time = v1;
+  if (v2 != 0 && v2 < readtsc_time) readtsc_time = v2;
+  if (v3 != 0 && v3 < readtsc_time) readtsc_time = v3;
+
+  // subtract the measurement overhead; 0 marks "no data" and sorts last
+  if (v1 == 0) v1 = ~uint64_t(0); else v1 -= readtsc_time;
+  if (v2 == 0) v2 = ~uint64_t(0); else v2 -= readtsc_time;
+  if (v3 == 0) v3 = ~uint64_t(0); else v3 -= readtsc_time;
+
+  auto least = min({ v1, v2, v3 });
+  // a good time is less than 2% or 5 clock cycles slower
+  auto good = max(least + 5, (uint64_t)(least * 1.02));
+
+  string w("\x1b[1;;42m"); // green
+  string g("\x1b[1;;44m"); // blue
+  string b("\x1b[1;;41m"); // red
+  string e("\x1b[0m");     // reset
+
+  if (!enableColors) {
+    // all four escape sequences must be cleared, otherwise the blue code
+    // (g) would still leak into the output when colors are disabled
+    w = g = b = e = "";
+  }
+
+  cout << " ";
+
+  if (v1 == least) cout << w;
+  else if (v1 <= good) cout << g;
+  else cout << b;
+  cout << setw(13) << right;
+  if (v1 == ~uint64_t(0)) cout << "-"; else cout << v1;
+  cout << " " << e << " ";
+
+  if (v2 == least) cout << w;
+  cout << setw(13) << right;
+  if (v2 == ~uint64_t(0)) cout << "-"; else cout << v2;
+  cout << " " << e << " ";
+
+  if (v3 == least) cout << w;
+  cout << setw(13) << right;
+  if (v3 == ~uint64_t(0)) cout << "-"; else cout << v3;
+  cout << " " << e << " ";
+}
+
+//=============================================================================
+// table formatting
+
+// Much like the TIME macros, the Leader and Line struct/macros
+// instantiate a class before main, and work is done inside the
+// constructors. The Leader and Line struct print out pretty
+// table boundaries and titles.
+
+uint64_t leader_elapsed() {
+ static auto t = chrono::high_resolution_clock::now();
+ chrono::nanoseconds d(chrono::high_resolution_clock::now() - t);
+ return d.count() / 1000000000;
+}
+
+struct Leader {
+ Leader() {
+ leader_elapsed();
+ std::cout.imbue(std::locale(""));
+
+ cout << endl;
+ cout << "========================================"
+ << "========================================" << endl;
+ cout << setw(35) << left << "Test";
+ cout << setw(15) << right << "new fbvector ";
+ cout << setw(15) << right << "old fbvector ";
+ cout << setw(15) << right << "std::vector ";
+ cout << endl;
+ cout << "========================================"
+ << "========================================" << endl;
+ }
+ ~Leader() {
+ cout << endl;
+ cout << "========================================"
+ << "========================================" << endl;
+ cout << setw(78) << right << leader_elapsed() << " s" << endl;
+ }
+} leader;
+
+struct Line {
+ explicit Line(string text) {
+ cout << "\n--- " << text << " ---" << endl;
+ }
+};
+#define SECTION(str) Line BOOST_PP_CAT(l_, __LINE__) ( str )
+
+//=============================================================================
+// Test types
+
+typedef pair<int, std::allocator<int>> T1;
+typedef pair<vector<int>, std::allocator<vector<int>>> T2;
+
+uint64_t v1_T1 = 0, v2_T1 = 0, v3_T1 = 0;
+uint64_t v1_T2 = 0, v2_T2 = 0, v3_T2 = 0;
+
+#define BASICS (T1)(T2)
+
+//=============================================================================
+// prevent optimizing
+
+std::vector<int> O_vi(10000000);
+
+void O(int i) {
+ O_vi.push_back(i);
+}
+template <class V>
+void O(const V& v) {
+ int s = v.size();
+ for (int i = 0; i < s; ++i) O(v[i]);
+}
+
+//=============================================================================
+// Benchmarks
+
+// #if 0
+//-----------------------------------------------------------------------------
+//SECTION("Dev");
+//#undef BASICS
+//#define BASICS (T1)
+
+
+// #else
+
+//-----------------------------------------------------------------------------
+SECTION("Essentials");
+
+TIME_N("~Vector()", BASICS) {
+ Vector a(N);
+ O(a);
+ clear_icache(); auto b = readTSC();
+ a.~Vector();
+ auto e = readTSC();
+ new (&a) Vector();
+ O(a);
+ return e - b;
+}
+
+TIME_N("a.clear()", BASICS) {
+ Vector a(N);
+ O(a);
+ clear_icache(); auto b = readTSC();
+ a.clear();
+ auto e = readTSC();
+ O(a);
+ return e - b;
+}
+
+TIME("Vector u", BASICS) {
+ clear_icache(); auto b = readTSC();
+ Vector u;
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME_N("Vector u(a)", BASICS) {
+ static const Vector a(N);
+ clear_icache(); auto b = readTSC();
+ Vector u(a);
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME_N("Vector u(move(a))", BASICS) {
+ Vector a(N);
+ clear_icache(); auto b = readTSC();
+ Vector u(move(a));
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+//-----------------------------------------------------------------------------
+SECTION("Constructors");
+
+TIME_N("Vector u(n)", BASICS) {
+ clear_icache(); auto b = readTSC();
+ Vector u(N);
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME_N("Vector u(n, t)", BASICS) {
+ static const T t(1);
+ clear_icache(); auto b = readTSC();
+ Vector u(N, t);
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME_N("Vector u(first, last)", BASICS) {
+ static const deque<T> d(N);
+ clear_icache(); auto b = readTSC();
+ Vector u(d.begin(), d.end());
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+//-----------------------------------------------------------------------------
+SECTION("Assignment");
+
+TIME_N("a = b", BASICS) {
+ Vector a(N);
+ static const Vector c(N/2 + 10);
+ O(a);
+ clear_icache(); auto b = readTSC();
+ a = c;
+ auto e = readTSC();
+ O(a);
+ return e - b;
+}
+
+TIME_N("a = move(b)", BASICS) {
+ Vector a(N);
+ Vector c(N/2 + 10);
+ O(a);
+ O(c);
+ clear_icache(); auto b = readTSC();
+ a = move(c);
+ auto e = readTSC();
+ O(a);
+ O(c);
+ return e - b;
+}
+
+TIME_N("a = destructive_move(b)", BASICS) {
+ Vector a(N);
+ Vector c(N/2 + 10);
+ O(a);
+ O(c);
+ clear_icache(); auto b = readTSC();
+ a = move(c);
+ c.clear();
+ auto e = readTSC();
+ O(a);
+ O(c);
+ return e - b;
+}
+
+TIME_N("a.assign(N, t)", BASICS) {
+ Vector a(N/2 + 10);
+ const T t(1);
+ O(a);
+ clear_icache(); auto b = readTSC();
+ a.assign(N, t);
+ auto e = readTSC();
+ O(a);
+ return e - b;
+}
+
+TIME_N("a.assign(first, last)", BASICS) {
+ static const deque<T> d(N);
+ Vector a(N/2 + 10);
+ clear_icache(); auto b = readTSC();
+ a.assign(d.begin(), d.end());
+ auto e = readTSC();
+ O(a);
+ return e - b;
+}
+
+TIME_N("a.swap(b)", BASICS) {
+ Vector a(N/2 + 10);
+ Vector c(N);
+ O(a);
+ O(c);
+ clear_icache(); auto b = readTSC();
+ a.swap(c);
+ auto e = readTSC();
+ O(a);
+ O(c);
+ return e - b;
+}
+
+//-----------------------------------------------------------------------------
+SECTION("Iterators");
+
+TIME("a.begin()", BASICS) {
+ static Vector a(1);
+ clear_icache(); auto b = readTSC();
+ auto r = a.begin();
+ auto e = readTSC();
+ O(*r);
+ return e - b;
+}
+
+TIME("a.cbegin()", BASICS) {
+ static Vector a(1);
+ clear_icache(); auto b = readTSC();
+ auto r = a.cbegin();
+ auto e = readTSC();
+ O(*r);
+ return e - b;
+}
+
+TIME("a.rbegin()", BASICS) {
+ static Vector a(1);
+ clear_icache(); auto b = readTSC();
+ auto r = a.rbegin();
+ auto e = readTSC();
+ O(*r);
+ return e - b;
+}
+
+//-----------------------------------------------------------------------------
+SECTION("Capacity");
+
+TIME_N("a.size()", BASICS) {
+ static const Vector a(N);
+ clear_icache(); auto b = readTSC();
+ int n = a.size();
+ auto e = readTSC();
+ O(n);
+ return e - b;
+}
+
+TIME("a.max_size()", BASICS) {
+ static Vector a;
+ clear_icache(); auto b = readTSC();
+ int n = a.max_size();
+ auto e = readTSC();
+ O(n);
+ return e - b;
+}
+
+TIME_N("a.capacity()", BASICS) {
+ static const Vector a(N);
+ clear_icache(); auto b = readTSC();
+ int n = a.capacity();
+ auto e = readTSC();
+ O(n);
+ return e - b;
+}
+
+TIME_N("a.empty()", BASICS) {
+ static const Vector a(N);
+ clear_icache(); auto b = readTSC();
+ int n = a.empty();
+ auto e = readTSC();
+ O(n);
+ return e - b;
+}
+
+TIME_N("reserve(n)", BASICS) {
+ Vector u;
+ O(u);
+ clear_icache(); auto b = readTSC();
+ u.reserve(N);
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME_N("resize(n)", BASICS) {
+ Vector u;
+ O(u);
+ clear_icache(); auto b = readTSC();
+ u.resize(N);
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME_N("resize(n, t)", BASICS) {
+ static const T t(1);
+ Vector u;
+ O(u);
+ clear_icache(); auto b = readTSC();
+ u.resize(N, t);
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME("staged reserve", BASICS) {
+ Vector u;
+ O(u);
+ clear_icache(); auto b = readTSC();
+ u.reserve(500);
+ u.reserve(1000);
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME("staged resize", BASICS) {
+ Vector u;
+ O(u);
+ clear_icache(); auto b = readTSC();
+ u.resize(500);
+ u.resize(1000);
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME("resize then reserve", BASICS) {
+ Vector u;
+ O(u);
+ clear_icache(); auto b = readTSC();
+ u.resize(500);
+ u.reserve(1000);
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME("shrink", BASICS) {
+ Vector u;
+ O(u);
+ u.resize(500);
+ u.reserve(1000);
+ clear_icache(); auto b = readTSC();
+ u.shrink_to_fit();
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+//-----------------------------------------------------------------------------
+SECTION("Access");
+
+TIME("operator[]", BASICS) {
+ static const Vector a(10);
+ clear_icache(); auto b = readTSC();
+ const auto& v = a[8];
+ auto e = readTSC();
+ O(v);
+ return e - b;
+}
+
+TIME("at()", BASICS) {
+ static const Vector a(10);
+ clear_icache(); auto b = readTSC();
+ const auto& v = a.at(8);
+ auto e = readTSC();
+ O(v);
+ return e - b;
+}
+
+TIME("front()", BASICS) {
+ static const Vector a(10);
+ clear_icache(); auto b = readTSC();
+ const auto& v = a.front();
+ auto e = readTSC();
+ O(v);
+ return e - b;
+}
+
+TIME("back()", BASICS) {
+ static const Vector a(10);
+ clear_icache(); auto b = readTSC();
+ const auto& v = a.back();
+ auto e = readTSC();
+ O(v);
+ return e - b;
+}
+
+TIME("data()", BASICS) {
+ static const Vector a(10);
+ clear_icache(); auto b = readTSC();
+ const auto& v = a.data();
+ auto e = readTSC();
+ O(*v);
+ return e - b;
+}
+
+//-----------------------------------------------------------------------------
+SECTION("Append");
+
+TIME("reserved emplace", BASICS) {
+ Vector u;
+ u.reserve(1);
+ O(u);
+ clear_icache(); auto b = readTSC();
+ u.emplace_back(0);
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME("full emplace", BASICS) {
+ Vector u;
+ O(u);
+ clear_icache(); auto b = readTSC();
+ u.emplace_back(0);
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME("reserved push_back", BASICS) {
+ static T t(0);
+ Vector u;
+ u.reserve(1);
+ O(u);
+ clear_icache(); auto b = readTSC();
+ u.push_back(t);
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME("full push_back", BASICS) {
+ static T t(0);
+ Vector u;
+ O(u);
+ clear_icache(); auto b = readTSC();
+ u.push_back(t);
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME("reserved push_back&&", BASICS) {
+ T t(0);
+ Vector u;
+ u.reserve(1);
+ O(u);
+ clear_icache(); auto b = readTSC();
+ u.push_back(std::move(t));
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME("full push_back&&", BASICS) {
+ T t(0);
+ Vector u;
+ O(u);
+ clear_icache(); auto b = readTSC();
+ u.push_back(std::move(t));
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME("reserved push emplace", BASICS) {
+ Vector u;
+ u.reserve(1);
+ O(u);
+ clear_icache(); auto b = readTSC();
+ u.push_back(T(0));
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME("full push emplace", BASICS) {
+ Vector u;
+ O(u);
+ clear_icache(); auto b = readTSC();
+ u.push_back(T(0));
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME_N("bulk append", BASICS) {
+ static deque<T> d(N);
+ Vector u(N/2 + 10);
+ O(u);
+ clear_icache(); auto b = readTSC();
+ u.insert(u.end(), d.begin(), d.end());
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME_N("erase end", BASICS) {
+ Vector u(N);
+ O(u);
+ auto it = u.begin();
+ it += u.size() / 2;
+ if (it != u.end()) O(*it);
+ clear_icache(); auto b = readTSC();
+ u.erase(it, u.end());
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME("pop_back", BASICS) {
+ Vector u(1);
+ O(u);
+ clear_icache(); auto b = readTSC();
+ u.pop_back();
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+//-----------------------------------------------------------------------------
+SECTION("Insert/Erase - Bad Ops");
+
+TIME("insert", BASICS) {
+ Vector u(100);
+ T t(1);
+ auto it = u.begin();
+ it += 50;
+ O(u);
+ O(*it);
+ O(t);
+ clear_icache(); auto b = readTSC();
+ u.insert(it, t);
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME("insert&&", BASICS) {
+ Vector u(100);
+ T t(1);
+ auto it = u.begin();
+ it += 50;
+ O(u);
+ O(*it);
+ O(t);
+ clear_icache(); auto b = readTSC();
+ u.insert(it, std::move(t));
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME("insert n few", BASICS) {
+ Vector u(100);
+ T t(1);
+ auto it = u.begin();
+ it += 50;
+ O(u);
+ O(*it);
+ O(t);
+ clear_icache(); auto b = readTSC();
+ u.insert(it, 10, t);
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME("insert n many", BASICS) {
+ Vector u(100);
+ T t(1);
+ auto it = u.begin();
+ it += 50;
+ O(u);
+ O(*it);
+ O(t);
+ clear_icache(); auto b = readTSC();
+ u.insert(it, 200, t);
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME("iterator insert few", BASICS) {
+ static deque<T> d(10);
+ Vector u(100);
+ T t(1);
+ auto it = u.begin();
+ it += 50;
+ O(u);
+ O(*it);
+ O(t);
+ clear_icache(); auto b = readTSC();
+ u.insert(it, d.begin(), d.end());
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME("iterator insert many", BASICS) {
+ static deque<T> d(200);
+ Vector u(100);
+ T t(1);
+ auto it = u.begin();
+ it += 50;
+ O(u);
+ O(*it);
+ O(t);
+ clear_icache(); auto b = readTSC();
+ u.insert(it, d.begin(), d.end());
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME("erase", BASICS) {
+ Vector u(100);
+ O(u);
+ auto it = u.begin();
+ it += 50;
+ O(*it);
+ clear_icache(); auto b = readTSC();
+ u.erase(it);
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME("erase many", BASICS) {
+ Vector u(100);
+ O(u);
+ auto it1 = u.begin();
+ it1 += 33;
+ O(*it1);
+ auto it2 = u.begin();
+ it2 += 66;
+ O(*it2);
+ clear_icache(); auto b = readTSC();
+ u.erase(it1, it2);
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+//-----------------------------------------------------------------------------
+SECTION("Large Tests");
+
+TIME_N("reserved bulk push_back", BASICS) {
+ Vector u;
+ u.reserve(N);
+ T t(0);
+ O(u);
+ clear_icache(); auto b = readTSC();
+ for (int i = 0; i < N; ++i) u.emplace_back(t);
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME_N("reserved bulk emplace", BASICS) {
+ Vector u;
+ u.reserve(N);
+ O(u);
+ clear_icache(); auto b = readTSC();
+ for (int i = 0; i < N; ++i) u.emplace_back(0);
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME_N("populate", BASICS) {
+ static T t(0);
+ Vector u;
+ O(u);
+ clear_icache(); auto b = readTSC();
+ for (int i = 0; i < N; ++i) u.push_back(t);
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+// Repeatedly resize the vector up and down through a jagged sequence of
+// target sizes, mixing emplace_back/insert for growth and pop_back/erase
+// for shrinkage. The original had the branches inverted (`s < u.size()`
+// guarded the *grow* path), so toRm was never positive and the vector
+// only ever grew; the comparison is fixed to match the toAdd/toRm intent.
+TIME("jigsaw growth", BASICS) {
+  int sizes[] =
+    { 1, 5, 2, 80, 17, 8, 9, 8, 140, 130, 1000, 130, 10000, 0, 8000, 2000 };
+  clear_icache(); auto b = readTSC();
+  Vector u;
+  for (auto s : sizes) {
+    if (s > (int)u.size()) {
+      // grow to s: half via emplace_back, the rest via bulk insert
+      int toAdd = s - (int)u.size();
+      for (int i = 0; i < toAdd / 2; ++i) u.emplace_back(0);
+      u.insert(u.end(), (toAdd + 1) / 2, T(1));
+    } else {
+      // shrink to s: half via pop_back, the remainder via range erase
+      int toRm = (int)u.size() - s;
+      for (int i = 0; i < toRm / 2; ++i) u.pop_back();
+      auto it = u.begin();
+      std::advance(it, s);
+      if (it < u.end()) u.erase(it, u.end());
+    }
+  }
+  auto e = readTSC();
+  O(u);
+  return e - b;
+}
+
+TIME("random access and modify", (T1)) {
+ static const int n = 1024 * 1024 * 16;
+ Vector u(n);
+ O(u);
+ clear_icache(); auto b = readTSC();
+ int j = 7;
+ for (int i = 0; i < 100000; ++i) {
+ j = (j * 2 + j) ^ 0xdeadbeef;
+ j = j & (n - 1);
+ u[j] = i;
+ u.at(n - j) = -i;
+ }
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+TIME_N("iterate", (T1)) {
+ static Vector u(N);
+ O(u);
+ clear_icache(); auto b = readTSC();
+ int acc = 0;
+ for (auto& e : u) {
+ acc += e;
+ e++;
+ }
+ auto e = readTSC();
+ O(acc);
+ O(u);
+ return e - b;
+}
+
+TIME("emplace massive", BASICS) {
+ clear_icache(); auto b = readTSC();
+ Vector u;
+ for (int i = 0; i < 10000000; ++i) {
+ u.emplace_back(0);
+ }
+ auto e = readTSC();
+ O(u);
+ return e - b;
+}
+
+// #endif
+
+//=============================================================================
+
+int main() {
+ return 0;
+}
+
--- /dev/null
+/*
+ * Copyright 2012 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Andrei Alexandrescu (aalexandre)
+
+/**
+ * Vector type. Drop-in replacement for std::vector featuring
+ * significantly faster primitives, see e.g. benchmark results at
+ * https://phabricator.fb.com/D235852.
+ *
+ * In order for a type to be used with fbvector, it must be
+ * relocatable, see Traits.h.
+ *
+ * For user-defined types you must specialize templates
+ * appropriately. Consult Traits.h for ways to do so and for a handy
+ * family of macros FOLLY_ASSUME_FBVECTOR_COMPATIBLE*.
+ *
+ * For more information and documentation see folly/docs/FBVector.md
+ */
+
+#ifndef FOLLY_FBVECTOR_H_
+#define FOLLY_FBVECTOR_H_
+
+#include "folly/Foreach.h"
+#include "folly/Malloc.h"
+#include "folly/Traits.h"
+#include <iterator>
+#include <algorithm>
+#include <stdexcept>
+#include <limits>
+#include <cassert>
+#include <boost/type_traits.hpp>
+#include <boost/operators.hpp>
+#include <boost/utility/enable_if.hpp>
+#include <type_traits>
+
+namespace folly {
+/**
+ * Forward declaration for use by FOLLY_ASSUME_FBVECTOR_COMPATIBLE_2,
+ * see folly/Traits.h.
+ */
+template <typename T, class Allocator = std::allocator<T> >
+class fbvector;
+}
+
+// You can define an fbvector of fbvectors.
+FOLLY_ASSUME_FBVECTOR_COMPATIBLE_2(folly::fbvector);
+
+namespace folly {
+namespace fbvector_detail {
+
+/**
+ * isForwardIterator<T>::value yields true if T is a forward iterator
+ * or better, and false otherwise.
+ */
+template <class It> struct isForwardIterator {
+ enum { value = boost::is_convertible<
+ typename std::iterator_traits<It>::iterator_category,
+ std::forward_iterator_tag>::value
+ };
+};
+
+/**
+ * Destroys all elements in the range [b, e). If the type referred to
+ * by the iterators has a trivial destructor, does nothing.
+ */
+template <class It>
+void destroyRange(It b, It e) {
+ typedef typename boost::remove_reference<decltype(*b)>::type T;
+ if (boost::has_trivial_destructor<T>::value) return;
+ for (; b != e; ++b) {
+ (*b).~T();
+ }
+}
+
+/**
+ * Moves the "interesting" part of value to the uninitialized memory
+ * at address addr, and leaves value in a destroyable state.
+ */
+
+template <class T>
+typename boost::enable_if_c<
+ boost::has_trivial_assign<T>::value
+>::type
+uninitialized_destructive_move(T& value, T* addr) {
+ // Just assign the thing; this is most efficient
+ *addr = value;
+}
+
+template <class T>
+typename boost::enable_if_c<
+ !boost::has_trivial_assign<T>::value &&
+ boost::has_nothrow_constructor<T>::value
+>::type
+uninitialized_destructive_move(T& value, T* addr) {
+ // Cheap default constructor - move and reinitialize
+ memcpy(addr, &value, sizeof(T));
+ new(&value) T;
+}
+
+template <class T>
+typename std::enable_if<
+ !boost::has_trivial_assign<T>::value &&
+ !boost::has_nothrow_constructor<T>::value
+>::type
+uninitialized_destructive_move(T& value, T* addr) {
+ // User defined move construction.
+
+ // TODO: we should probably prefer this over the above memcpy()
+ // version when the type has a user-defined move constructor. We
+ // don't right now because 4.6 doesn't implement
+ // std::is_move_constructible<> yet.
+ new (addr) T(std::move(value));
+}
+
+/**
+ * Fills n objects of type T starting at address b with T's default
+ * value. If the operation throws, destroys all objects constructed so
+ * far and calls free(b).
+ */
+template <class T>
+void uninitializedFillDefaultOrFree(T * b, size_t n) {
+ if (boost::is_arithmetic<T>::value || boost::is_pointer<T>::value) {
+ if (n <= 16384 / sizeof(T)) {
+ memset(b, 0, n * sizeof(T));
+ } else {
+ goto duff_fill;
+ }
+ } else if (boost::has_nothrow_constructor<T>::value) {
+ duff_fill:
+ auto i = b;
+ auto const e1 = b + (n & ~size_t(7));
+ for (; i != e1; i += 8) {
+ new(i) T();
+ new(i + 1) T();
+ new(i + 2) T();
+ new(i + 3) T();
+ new(i + 4) T();
+ new(i + 5) T();
+ new(i + 6) T();
+ new(i + 7) T();
+ }
+ for (auto const e = b + n; i != e; ++i) {
+ new(i) T();
+ }
+ } else {
+ // Conservative approach
+ auto i = b;
+ try {
+ for (auto const e = b + n; i != e; ++i) {
+ new(i) T;
+ }
+ } catch (...) {
+ destroyRange(b, i);
+ free(b);
+ throw;
+ }
+ }
+}
+
+/**
+ * Fills n objects of type T starting at address b with value. If the
+ * operation throws, destroys all objects constructed so far and calls
+ * free(b).
+ */
+template <class T>
+void uninitializedFillOrFree(T * b, size_t n, const T& value) {
+ auto const e = b + n;
+ if (boost::has_trivial_copy<T>::value) {
+ auto i = b;
+ auto const e1 = b + (n & ~size_t(7));
+ for (; i != e1; i += 8) {
+ new(i) T(value);
+ new(i + 1) T(value);
+ new(i + 2) T(value);
+ new(i + 3) T(value);
+ new(i + 4) T(value);
+ new(i + 5) T(value);
+ new(i + 6) T(value);
+ new(i + 7) T(value);
+ }
+ for (; i != e; ++i) {
+ new(i) T(value);
+ }
+ } else {
+ // Conservative approach
+ auto i = b;
+ try {
+ for (; i != e; ++i) {
+ new(i) T(value);
+ }
+ } catch (...) {
+ destroyRange(b, i);
+ free(b);
+ throw;
+ }
+ }
+}
+} // namespace fbvector_detail
+
+/**
+ * This is the std::vector replacement. For conformity, fbvector takes
+ * the same template parameters, but it doesn't use the
+ * allocator. Instead, it uses malloc, and when present, jemalloc's
+ * extensions.
+ */
+template <class T, class Allocator>
+class fbvector : private boost::totally_ordered<fbvector<T,Allocator> > {
  // Debug-only consistency check over the three pointers:
  // [b_, e_) holds live elements, [b_, z_) is the allocated buffer.
  bool isSane() const {
    return
      begin() <= end() &&
      empty() == (size() == 0) &&
      empty() == (begin() == end()) &&
      size() <= max_size() &&
      capacity() <= max_size() &&
      size() <= capacity() &&

      // Either we have no capacity or our pointers should make sense:
      ((!b_ && !e_ && !z_) || (b_ != z_ && e_ <= z_));
  }

  // RAII guard: asserts isSane() on construction and destruction, so a
  // scope wrapped in an Invariant verifies the vector's invariant on
  // entry and exit (debug builds only).
  struct Invariant {
#ifndef NDEBUG
    explicit Invariant(const fbvector& s) : s_(s) {
      assert(s_.isSane());
    }
    ~Invariant() {
      assert(s_.isSane());
    }
  private:
    const fbvector& s_;
#else
    explicit Invariant(const fbvector&) {}
#endif
    // Non-assignable (declared, intentionally never defined).
    Invariant& operator=(const Invariant&);
  };
+
public:

// types:
  typedef T value_type;
  typedef value_type& reference;
  typedef const value_type& const_reference;
  // Storage is contiguous, so iterators are plain pointers.
  typedef T* iterator;
  typedef const T* const_iterator;
  typedef size_t size_type;
  typedef ssize_t difference_type;
  // typedef typename allocator_traits<Allocator>::pointer pointer;
  // typedef typename allocator_traits<Allocator>::const_pointer const_pointer;
  typedef Allocator allocator_type;
  typedef typename Allocator::pointer pointer;
  typedef typename Allocator::const_pointer const_pointer;
  typedef std::reverse_iterator<iterator> reverse_iterator;
  typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+
// 23.3.6.1 construct/copy/destroy:
  fbvector() : b_(NULL), e_(NULL), z_(NULL) {}

  // The allocator parameter exists only for interface compatibility;
  // fbvector always allocates via malloc/jemalloc.
  explicit fbvector(const Allocator&) {
    // Pre-C++11-delegation idiom: forward to the default constructor
    // by placement-new'ing over ourselves.
    new(this) fbvector;
  }

  explicit fbvector(const size_type n) {
    if (n == 0) {
      b_ = e_ = z_ = 0;
      return;
    }

    // Round the request up to a size jemalloc would hand out anyway
    // and keep the surplus as usable capacity.
    auto const nBytes = goodMallocSize(n * sizeof(T));
    b_ = static_cast<T*>(checkedMalloc(nBytes));
    // Transactional: frees the buffer if a constructor throws.
    fbvector_detail::uninitializedFillDefaultOrFree(b_, n);
    e_ = b_ + n;
    z_ = b_ + nBytes / sizeof(T);
  }

  fbvector(const size_type n, const T& value) {
    if (!n) {
      b_ = e_ = z_ = 0;
      return;
    }

    auto const nBytes = goodMallocSize(n * sizeof(T));
    b_ = static_cast<T*>(checkedMalloc(nBytes));
    // Transactional: frees the buffer if a copy constructor throws.
    fbvector_detail::uninitializedFillOrFree(b_, n, value);
    e_ = b_ + n;
    z_ = b_ + nBytes / sizeof(T);
  }

  fbvector(const size_type n, const T& value, const Allocator&) {
    new(this) fbvector(n, value);
  }

  // InputIteratorOrNum may be a pair of iterators or (count, value);
  // assign() performs the disambiguation.
  template <class InputIteratorOrNum>
  fbvector(InputIteratorOrNum first, InputIteratorOrNum last) {
    new(this) fbvector;
    assign(first, last);
  }

  template <class InputIterator>
  fbvector(InputIterator first, InputIterator last,
           const Allocator&) {
    new(this) fbvector(first, last);
  }

  fbvector(const fbvector& rhs) {
    new(this) fbvector(rhs.begin(), rhs.end());
  }
  fbvector(const fbvector& rhs, const Allocator&) {
    new(this) fbvector(rhs);
  }

  // Move construction: steal the buffer, leave the source empty.
  fbvector(fbvector&& o, const Allocator& = Allocator())
    : b_(o.b_)
    , e_(o.e_)
    , z_(o.z_)
  {
    o.b_ = o.e_ = o.z_ = 0;
  }

  fbvector(std::initializer_list<T> il, const Allocator& = Allocator()) {
    new(this) fbvector(il.begin(), il.end());
  }
+
  ~fbvector() {
    // fbvector only works with relocatable objects. We insert this
    // static check inside the destructor because pretty much any
    // instantiation of fbvector<T> will generate the destructor (and
    // therefore refuse compilation if the assertion fails). To see
    // how you can enable IsRelocatable for your type, refer to the
    // definition of IsRelocatable in Traits.h.
    BOOST_STATIC_ASSERT(IsRelocatable<T>::value);
    if (!b_) return;
    // Destroy the live elements, then release the raw buffer (free()
    // matches the malloc-family allocation used throughout).
    fbvector_detail::destroyRange(b_, e_);
    free(b_);
  }
  fbvector& operator=(const fbvector& rhs) {
    // Self-assignment is benign: assign() takes the no-reallocation
    // path when the old size covers the new one.
    assign(rhs.begin(), rhs.end());
    return *this;
  }

  fbvector& operator=(fbvector&& v) {
    // Destroy our elements, then trade buffers; v walks away with our
    // (now empty) buffer and frees it on destruction.
    clear();
    swap(v);
    return *this;
  }

  fbvector& operator=(std::initializer_list<T> il) {
    assign(il.begin(), il.end());
    return *this;
  }

  // boost::totally_ordered synthesizes the remaining relational
  // operators from these two.
  bool operator==(const fbvector& rhs) const {
    return size() == rhs.size() && std::equal(begin(), end(), rhs.begin());
  }

  bool operator<(const fbvector& rhs) const {
    return std::lexicographical_compare(begin(), end(),
                                        rhs.begin(), rhs.end());
  }
+
+private:
+ template <class InputIterator>
+ void assignImpl(InputIterator first, InputIterator last, boost::false_type) {
+ // Pair of iterators
+ if (fbvector_detail::isForwardIterator<InputIterator>::value) {
+ auto const oldSize = size();
+ auto const newSize = std::distance(first, last);
+
+ if (static_cast<difference_type>(oldSize) >= newSize) {
+ // No reallocation, nice
+ auto const newEnd = std::copy(first, last, b_);
+ fbvector_detail::destroyRange(newEnd, e_);
+ e_ = newEnd;
+ return;
+ }
+
+ // Must reallocate - just do it on the side
+ auto const nBytes = goodMallocSize(newSize * sizeof(T));
+ auto const b = static_cast<T*>(checkedMalloc(nBytes));
+ std::uninitialized_copy(first, last, b);
+ this->fbvector::~fbvector();
+ b_ = b;
+ e_ = b + newSize;
+ z_ = b_ + nBytes / sizeof(T);
+ } else {
+ // Input iterator sucks
+ FOR_EACH (i, *this) {
+ if (first == last) {
+ fbvector_detail::destroyRange(i, e_);
+ e_ = i;
+ return;
+ }
+ *i = *first;
+ ++first;
+ }
+ FOR_EACH_RANGE (i, first, last) {
+ push_back(*i);
+ }
+ }
+ }
+
  // Disambiguated case: both "iterators" were arithmetic, so this is
  // really assign(count, value).
  void assignImpl(const size_type newSize, const T value, boost::true_type) {
    // Arithmetic type, forward back to unambiguous definition
    assign(newSize, value);
  }
+
public:
  // Classic ambiguity (and a lot of unnecessary complexity) in
  // std::vector: assign(10, 20) for vector<int> means "assign 10
  // elements all having the value 20" but is intercepted by the
  // two-iterators overload assign(first, last). So we need to
  // disambiguate here. There is no pretty solution. We use here
  // overloading based on is_arithmetic. Method insert has the same
  // issue (and the same solution in this implementation).
  template <class InputIteratorOrNum>
  void assign(InputIteratorOrNum first, InputIteratorOrNum last) {
    assignImpl(first, last, boost::is_arithmetic<InputIteratorOrNum>());
  }
+
  void assign(const size_type newSize, const T& value) {
    if (b_ <= &value && &value < e_) {
      // Need to check for aliased assign, sigh: value points into this
      // vector, and the fills/reallocation below could invalidate it,
      // so work with a detached copy.
      return assign(newSize, T(value));
    }

    auto const oldSize = size();
    if (oldSize >= newSize) {
      // No reallocation, nice
      auto const newEnd = b_ + newSize;
      fbvector_detail::destroyRange(newEnd, e_);
      e_ = newEnd;
      return;
    }

    // Need to reallocate
    if (reserve_in_place(newSize)) {
      // Careful here, fill and uninitialized_fill may throw. The
      // latter is transactional, so no need to worry about a
      // buffer partially filled in case of exception.
      std::fill(b_, e_, value);
      auto const newEnd = b_ + newSize;
      std::uninitialized_fill(e_, newEnd, value);
      e_ = newEnd;
      return;
    }

    // Cannot expand or jemalloc not present at all; must just
    // allocate a new chunk and discard the old one. This is
    // tantamount with creating a new fbvector altogether. This won't
    // recurse infinitely; the constructor implements its own.
    fbvector temp(newSize, value);
    temp.swap(*this);
  }
+
  void assign(std::initializer_list<T> il) {
    assign(il.begin(), il.end());
  }

  // fbvector does not actually store an allocator (it always allocates
  // with malloc), so hand back a default-constructed one.
  allocator_type get_allocator() const {
    return allocator_type();
  }
+
// iterators:
// Storage is contiguous, so plain pointers serve as iterators.
  iterator begin() {
    return b_;
  }
  const_iterator begin() const {
    return b_;
  }
  iterator end() {
    return e_;
  }
  const_iterator end() const {
    return e_;
  }
  reverse_iterator rbegin() {
    return reverse_iterator(end());
  }
  const_reverse_iterator rbegin() const {
    return const_reverse_iterator(end());
  }
  reverse_iterator rend() {
    return reverse_iterator(begin());
  }
  const_reverse_iterator rend() const {
    return const_reverse_iterator(begin());
  }
  const_iterator cbegin() const {
    return b_;
  }
  const_iterator cend() const {
    return e_;
  }
+
+// 23.3.6.2 capacity:
+ size_type size() const {
+ return e_ - b_;
+ }
+
+ size_type max_size() {
+ // good luck gettin' there
+ return ~size_type(0);
+ }
+
  void resize(const size_type sz) {
    auto const oldSize = size();
    if (sz <= oldSize) {
      // Shrinking: destroy the tail, keep the buffer.
      auto const newEnd = b_ + sz;
      fbvector_detail::destroyRange(newEnd, e_);
      e_ = newEnd;
    } else {
      // Must expand
      reserve(sz);
      auto newEnd = b_ + sz;
      // NOTE(review): fills by copying a default-constructed T, so T
      // must be copy-constructible here (C++11 resize only requires
      // default-insertable) -- confirm this is acceptable for
      // fbvector's contract. uninitialized_fill cleans up on throw.
      std::uninitialized_fill(e_, newEnd, T());
      e_ = newEnd;
    }
  }

  void resize(const size_type sz, const T& c) {
    auto const oldSize = size();
    if (sz <= oldSize) {
      auto const newEnd = b_ + sz;
      fbvector_detail::destroyRange(newEnd, e_);
      e_ = newEnd;
    } else {
      // Must expand
      reserve(sz);
      auto newEnd = b_ + sz;
      // Transactional: uninitialized_fill destroys partial work on throw.
      std::uninitialized_fill(e_, newEnd, c);
      e_ = newEnd;
    }
  }

  // Total number of element slots in the current buffer.
  size_type capacity() const {
    return z_ - b_;
  }
  bool empty() const {
    return b_ == e_;
  }
+
private:
  // Tries to grow the current allocation to hold n elements WITHOUT
  // moving it, via jemalloc's rallocm. Returns true if the capacity
  // already suffices or the in-place expansion succeeded; false means
  // the caller must reallocate-and-move.
  bool reserve_in_place(const size_type n) {
    auto const crtCapacity = capacity();
    if (n <= crtCapacity) return true;
    // rallocm is null when jemalloc is not loaded.
    if (!rallocm) return false;

    // using jemalloc's API. Don't forget that jemalloc can never grow
    // in place blocks smaller than 4096 bytes.
    auto const crtCapacityBytes = crtCapacity * sizeof(T);
    if (crtCapacityBytes < jemallocMinInPlaceExpandable) return false;

    auto const newCapacityBytes = goodMallocSize(n * sizeof(T));
    void* p = b_;
    if (rallocm(&p, NULL, newCapacityBytes, 0, ALLOCM_NO_MOVE)
        != ALLOCM_SUCCESS) {
      return false;
    }

    // Managed to expand in place, reflect that in z_
    assert(b_ == p);
    z_ = b_ + newCapacityBytes / sizeof(T);
    return true;
  }

  // Allocates a new buffer for (at least) n elements and relocates the
  // contents with a raw memcpy -- legal because T is IsRelocatable
  // (statically asserted in the destructor). The old buffer is freed
  // WITHOUT running destructors: the objects now live in the new buffer.
  void reserve_with_move(const size_type n) {
    // Here we can be sure we'll need to do a full reallocation
    auto const crtCapacity = capacity();
    assert(crtCapacity < n); // reserve_in_place should have taken
                             // care of this
    auto const newCapacityBytes = goodMallocSize(n * sizeof(T));
    auto b = static_cast<T*>(checkedMalloc(newCapacityBytes));
    auto const oldSize = size();
    memcpy(b, b_, oldSize * sizeof(T));
    // Done with the old chunk. Free but don't call destructors!
    free(b_);
    b_ = b;
    e_ = b_ + oldSize;
    z_ = b_ + newCapacityBytes / sizeof(T);
    // done with the old chunk
  }
+
public:
  void reserve(const size_type n) {
    // Prefer stretching the current block in place; fall back to
    // allocate-and-relocate.
    if (reserve_in_place(n)) return;
    reserve_with_move(n);
  }

  // Best effort only: a no-op unless jemalloc is present and the block
  // is large enough to be resized in place.
  void shrink_to_fit() {
    if (!rallocm) return;

    // using jemalloc's API. Don't forget that jemalloc can never
    // shrink in place blocks smaller than 4096 bytes.
    void* p = b_;
    auto const crtCapacityBytes = capacity() * sizeof(T);
    auto const newCapacityBytes = goodMallocSize(size() * sizeof(T));
    if (crtCapacityBytes >= jemallocMinInPlaceExpandable &&
        rallocm(&p, NULL, newCapacityBytes, 0, ALLOCM_NO_MOVE)
        == ALLOCM_SUCCESS) {
      // Celebrate
      z_ = b_ + newCapacityBytes / sizeof(T);
    }
  }
+
// element access
  // Unchecked (assert-only) element access; use at() for a throwing check.
  reference operator[](size_type n) {
    assert(n < size());
    return b_[n];
  }
  const_reference operator[](size_type n) const {
    assert(n < size());
    return b_[n];
  }
+ const_reference at(size_type n) const {
+ if (n > size()) {
+ throw std::out_of_range("fbvector: index is greater than size.");
+ }
+ return (*this)[n];
+ }
+ reference at(size_type n) {
+ auto const& cThis = *this;
+ return const_cast<reference>(cThis.at(n));
+ }
  reference front() {
    assert(!empty());
    return *b_;
  }
  const_reference front() const {
    assert(!empty());
    return *b_;
  }
  reference back() {
    assert(!empty());
    return e_[-1];
  }
  const_reference back() const {
    assert(!empty());
    return e_[-1];
  }

// 23.3.6.3 data access
  // Direct access to the contiguous buffer; null for a never-allocated
  // empty vector.
  T* data() {
    return b_;
  }
  const T* data() const {
    return b_;
  }
+
+private:
+ size_t computePushBackCapacity() const {
+ return empty() ? std::max(64 / sizeof(T), size_t(1))
+ : capacity() < jemallocMinInPlaceExpandable ? capacity() * 2
+ : (capacity() * 3) / 2;
+ }
+
public:
// 23.3.6.4 modifiers:
  template <class... Args>
  void emplace_back(Args&&... args) {
    if (e_ == z_) {
      // Buffer full: try growing in place first, otherwise relocate
      // into a larger block.
      if (!reserve_in_place(size() + 1)) {
        reserve_with_move(computePushBackCapacity());
      }
    }
    // Construct directly in the buffer's next slot.
    new (e_) T(std::forward<Args>(args)...);
    ++e_;
  }

  // Takes x by value: the up-front copy/move makes the destructive
  // move below safe even if the argument aliases an element of this
  // vector (which a reallocation would invalidate).
  void push_back(T x) {
    if (e_ == z_) {
      if (!reserve_in_place(size() + 1)) {
        reserve_with_move(computePushBackCapacity());
      }
    }
    // Relocate x's contents into the buffer slot -- see
    // fbvector_detail::uninitialized_destructive_move.
    fbvector_detail::uninitialized_destructive_move(x, e_);
    ++e_;
  }
+
private:
  // Tries to grow the buffer in place by (at least) one element via
  // jemalloc. Returns true on success. (NOTE(review): appears unused
  // within this file -- candidate for removal; verify no other callers.)
  bool expand() {
    if (!rallocm) return false;
    auto const capBytes = capacity() * sizeof(T);
    // jemalloc cannot resize blocks below this threshold in place.
    if (capBytes < jemallocMinInPlaceExpandable) return false;
    auto const newCapBytes = goodMallocSize(capBytes + sizeof(T));
    void * bv = b_;
    if (rallocm(&bv, NULL, newCapBytes, 0, ALLOCM_NO_MOVE) != ALLOCM_SUCCESS) {
      return false;
    }
    // Managed to expand in place
    assert(bv == b_); // nothing moved
    z_ = b_ + newCapBytes / sizeof(T);
    assert(capacity() > capBytes / sizeof(T));
    return true;
  }
+
public:
  void pop_back() {
    assert(!empty());
    --e_;
    // Skip the destructor call entirely for trivially destructible types.
    if (!boost::has_trivial_destructor<T>::value) {
      e_->T::~T();
    }
  }
+ // template <class... Args>
+ // iterator emplace(const_iterator position, Args&&... args);
+
  // Takes x by value so that the copy/move happens before any buffer
  // surgery; the destructive move below is then safe even if the
  // argument aliased this vector.
  iterator insert(const_iterator position, T x) {
    size_t newSize; // intentionally uninitialized: assigned (and read)
                    // only on the reallocation path below
    if (e_ == z_ && !reserve_in_place(newSize = size() + 1)) {
      // Can't reserve in place, make a copy. Elements are bitwise-
      // relocated (memcpy) around the new slot -- valid because T is
      // IsRelocatable.
      auto const offset = position - cbegin();
      fbvector tmp;
      tmp.reserve(newSize);
      memcpy(tmp.b_, b_, offset * sizeof(T));
      fbvector_detail::uninitialized_destructive_move(
        x,
        tmp.b_ + offset);
      memcpy(tmp.b_ + offset + 1, b_ + offset, (size() - offset) * sizeof(T));
      // Brutally reassign this to refer to tmp's guts. Old elements
      // were relocated, so free without destructors.
      free(b_);
      b_ = tmp.b_;
      e_ = b_ + newSize;
      z_ = tmp.z_;
      // get rid of tmp's guts so its destructor doesn't double-free
      new(&tmp) fbvector;
      return begin() + offset;
    }
    // Here we have enough room: shift the tail up one slot bitwise and
    // relocate x into the gap.
    memmove(const_cast<T*>(&*position) + 1,
            const_cast<T*>(&*position),
            sizeof(T) * (e_ - position));
    fbvector_detail::uninitialized_destructive_move(
      x,
      const_cast<T*>(&*position));
    ++e_;
    return const_cast<iterator>(position);
  }
+
+ iterator insert(const_iterator position, const size_type n, const T& x) {
+ if (e_ + n >= z_) {
+ if (b_ <= &x && &x < e_) {
+ // Ew, aliased insert
+ auto copy = x;
+ return insert(position, n, copy);
+ }
+ auto const m = position - b_;
+ reserve(size() + n);
+ position = b_ + m;
+ }
+ memmove(const_cast<T*>(position) + n,
+ position,
+ sizeof(T) * (e_ - position));
+ if (boost::has_trivial_copy<T>::value) {
+ std::uninitialized_fill(const_cast<T*>(position),
+ const_cast<T*>(position) + n,
+ x);
+ } else {
+ try {
+ std::uninitialized_fill(const_cast<T*>(position),
+ const_cast<T*>(position) + n,
+ x);
+ } catch (...) {
+ // Oops, put things back where they were
+ memmove(const_cast<T*>(position),
+ position + n,
+ sizeof(T) * (e_ - position));
+ throw;
+ }
+ }
+ e_ += n;
+ return const_cast<iterator>(position);
+ }
+
+private:
+ template <class InputIterator>
+ iterator insertImpl(const_iterator position,
+ InputIterator first, InputIterator last,
+ boost::false_type) {
+ // Pair of iterators
+ if (fbvector_detail::isForwardIterator<InputIterator>::value) {
+ // Can compute distance
+ auto const n = std::distance(first, last);
+ if (e_ + n >= z_) {
+ auto const m = position - b_;
+ reserve(size() + n);
+ position = b_ + m;
+ }
+ memmove(const_cast<T*>(position) + n,
+ position,
+ sizeof(T) * (e_ - position));
+ try {
+ std::uninitialized_copy(first, last,
+ const_cast<T*>(position));
+ } catch (...) {
+ // Oops, put things back where they were
+ memmove(const_cast<T*>(position),
+ position + n,
+ sizeof(T) * (e_ - position));
+ throw;
+ }
+ e_ += n;
+ return const_cast<iterator>(position);
+ } else {
+ // Cannot compute distance, crappy approach
+ // TODO: OPTIMIZE
+ fbvector result(cbegin(), position);
+ auto const offset = result.size();
+ FOR_EACH_RANGE (i, first, last) {
+ result.push_back(*i);
+ }
+ result.insert(result.end(), position, cend());
+ result.swap(*this);
+ return begin() + offset;
+ }
+ }
+
  // Disambiguated (count, value) case; defer to the unambiguous overload.
  iterator insertImpl(const_iterator position,
      const size_type count, const T value, boost::true_type) {
    // Forward back to unambiguous function
    return insert(position, count, value);
  }

public:
  // Same iterators-vs-(count, value) ambiguity as assign(); dispatch
  // on is_arithmetic.
  template <class InputIteratorOrNum>
  iterator insert(const_iterator position, InputIteratorOrNum first,
                  InputIteratorOrNum last) {
    return insertImpl(position, first, last,
                      boost::is_arithmetic<InputIteratorOrNum>());
  }

  iterator insert(const_iterator position, std::initializer_list<T> il) {
    return insert(position, il.begin(), il.end());
  }
+
+ iterator erase(const_iterator position) {
+ if (position == e_) return e_;
+ auto p = const_cast<T*>(position);
+ (*p).T::~T();
+ memmove(p, p + 1, sizeof(T) * (e_ - p - 1));
+ --e_;
+ return p;
+ }
+
+ iterator erase(const_iterator first, const_iterator last) {
+ assert(first <= last);
+ auto p1 = const_cast<T*>(first);
+ auto p2 = const_cast<T*>(last);
+ fbvector_detail::destroyRange(p1, p2);
+ memmove(p1, last, sizeof(T) * (e_ - last));
+ e_ -= last - first;
+ return p1;
+ }
+
+ void swap(fbvector& rhs) {
+ std::swap(b_, rhs.b_);
+ std::swap(e_, rhs.e_);
+ std::swap(z_, rhs.z_);
+ }
+
+ void clear() {
+ fbvector_detail::destroyRange(b_, e_);
+ e_ = b_;
+ }
+
private:
  // Data. Invariant: b_ <= e_ <= z_; [b_, e_) holds live elements,
  // [e_, z_) is raw spare capacity.
  T *b_, *e_, *z_;
};
+
// Free inequality in terms of operator==.
template <class T, class A>
bool operator!=(const fbvector<T, A>& lhs,
                const fbvector<T, A>& rhs) {
  return !(lhs == rhs);
}

// ADL-findable swap so generic code ("using std::swap; swap(a, b);")
// picks the O(1) member version.
template <class T, class A>
void swap(fbvector<T, A>& lhs, fbvector<T, A>& rhs) {
  lhs.swap(rhs);
}
+
+/**
+ * Resizes *v to exactly n elements. May reallocate the vector to a
+ * smaller buffer if too much space will be left unused.
+ */
+template <class T>
+static void compactResize(folly::fbvector<T> * v, size_t size) {
+ auto const oldCap = v->capacity();
+ if (oldCap > size + 1024 && size < oldCap * 0.3) {
+ // Too much slack memory, reallocate a smaller buffer
+ auto const oldSize = v->size();
+ if (size <= oldSize) {
+ // Shrink
+ folly::fbvector<T>(v->begin(), v->begin() + size).swap(*v);
+ } else {
+ // Expand
+ folly::fbvector<T> temp;
+ temp.reserve(size);
+ copy(v->begin(), v->end(), back_inserter(temp));
+ temp.resize(size);
+ temp.swap(*v);
+ }
+ } else {
+ // Nolo contendere
+ v->resize(size);
+ }
+}
+
+} // namespace folly
+
+#endif // FOLLY_FBVECTOR_H
--- /dev/null
+/*
+ * Copyright 2012 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// @author Nicholas Ormrod <njormrod@fb.com>
+
+/*
+
+This file contains an extensive STL compliance test suite for an STL vector
+implementation (such as FBVector).
+
+GCC 4.7 is required.
+
+*/
+
+// only compile if GCC is at least 4.7
+#if __GNUC__ > 4 || __GNUC__ == 4 && __GNUC_MINOR__ >= 7
+
+#if 0
+#define USING_STD_VECTOR
+#endif
+
+/*
+
+The insanity of this file deserves a superficial explanation.
+
+This file tests an implementation of STL vector. It is extremely comprehensive.
+If it compiles (more on that later) it generates a binary which, when run,
+exhaustively tests its vector for standard compliance.
+
+Limitations:
+-If it doesn't compile, the compiler errors are mind-boggling.
+-Not everything is testable. There are a few comments in the code where
+ the implementation must be inspected, as opposed to tested. These are very
+ simple inspections. Search for 'whitebox'.
+-It does not test boolean specialization.
+
+==========================
+How this file is organized
+
+--------------
+Data and Alloc
+
+Data is a class designed to provide diagnostics when stored in a vector. It
+counts the number of operations performed on it, can have any function
+disabled or labeled as noexcept, throws errors from anywhere that is not
+noexcept, tracks its supposed location in memory (optional), tracks
+aggregate information, and can print a trace of its action.
+
+Alloc, like Data, is a full-blown diagnostic allocator. It keeps track of
+all space it has allocated, keeps counters, throws exceptions, and can easily
+compare equal or not equal with other Allocs.
+
+These two classes have a few useful helper functions:
+isSane - checks that all the tracked variables make sense
+softReset - simplifies the variables before a test
+hardReset - brutally resets all variables to the default state
+
+--------
+STL_TEST
+
+Google test is not quite good enough for this test file, because we need to
+run tests across different input values and different types.
+
+The STL_TEST macro takes a few arguments:
+string - what is being tested
+id - unique id, passed to TEST
+restriction - requirements for test types
+parameters - which variables to range over
+
+Eg: STL_TEST("23.2.3", isCopyable, is_copy_constructible, a) { ... }
+
+The restriction is used to select which types get tested. Copy construction,
+for example, requires a data type which is copy constructible, whereas to test
+the clear operation, the data only needs to be destructible. If the type does
+not pass the restriction, then the test is not instantiated with that type (if
+it were, then there would be a compiler error).
+
+The variable names in the standard have very specific meaning. For example,
+a and b are always vectors, i and j are always external iterators, etc. These
+bindings are used in the STL_TEST - if you need a vector and an int, have
+parameters a and n.
+
+There is a list (BOOST_PP_SEQ) of test types and interface types. If the
+type passes the restriction, then the test body is instantiated with that
+type as its template parameter. Instantiation ensures that the contractual
+elements of the standard are satisfied. Only the test types, however, and
+not the interface types, are actually tested.
+
+If a test type passes the restriction, then it is run with a variety of
+arguments. Each variable (e.g. a and b) have a generator, which generates
+a range of values for that variable before each test. Generated values are not
+reused - they are remade for every run. This causes long runtimes, but ensures
+that corner cases are not missed.
+
+There are two implicit extra parameters, z and ticks. Ignore z. Ticks, on the
+other hand, is very important. Each test is run multiple times with the
+same arguments; the first time with no ticks (and hence no Data or Alloc
+exceptions), and then once again for each and every location that an
+exception can be thrown. This ensures that exception corner cases are also
+thoroughly tested.
+
+At the end of each test, a set of verification functions is run to ensure
+that nothing was corrupted.
+
+---------
+The tests
+
+All specifications from N3337 Chapter 23 (Containers) that pertain to
+vector are tested (if possible). Each aspect has a dedicated STL_TEST, so that
+there are no compounding errors. The tests are organized as they appear in
+N3337.
+
+The backbone of the testing framework is based on a small set of vector
+operations:
+-empty construction
+-copy construction (a little bit)
+-size
+-capacity
+-data
+-emplace_back
+
+These functions are used to generate and verify the tests. If they fail, then
+the cascade of errors will be enormous. They are, therefore, tested first.
+
+*/
+/*
+
+THOUGHTS:
+
+-Not all complexity checks are verified. These will be relentlessly hunted down
+ in the benchmarking phase.
+
+-It seems that initializer lists with implicit arguments are constructed before
+ being passed into the vector. When one of the constructors fails, it fails in
+ the initializer list, before it even gets to the vector. The IL, however,
+ doesn't clean up properly, and already-constructed elements are not
+ destroyed. This causes a memory leak, and the tests break, but it is not the
+ fault of the vector itself. Further, since the implementation for the
+ initializer lists is specified in the standard as calling an associated
+ function with (il.begin(), il.end()), we really just have to check the throws
+ cases for the associated functions (which all work fine). Initializer lists
+ also do not work with explicit constructors.
+
+-The implementation of std::copy from iterators prevents Data(int) from being
+ explicit. Explicitness is, perhaps, a desirable quality, but with fundamental
+ std library code like copy not supporting it, it seems impractical.
+
+*/
+
+// include the vector first, to ensure its header is self-sufficient
+#ifdef USING_STD_VECTOR
+#include <vector>
+#define VECTOR_ std::vector
+#else
+#define FOLLY_BENCHMARK_USE_NS_IFOLLY
+#include "folly/FBVector.h"
+#define VECTOR_ Ifolly::fbvector
+#endif
+
+//#define USING_STD_VECTOR
+
+#include <iostream>
+#include <sstream>
+#include <typeinfo>
+#include <type_traits>
+#include <map>
+#include <set>
+#include <string>
+#include <stdexcept>
+#include <exception>
+#include <climits>
+#include <cstddef>
+#include <iomanip>
+
+#include "folly/ScopeGuard.h"
+#include "folly/Conv.h"
+#include "boost/preprocessor.hpp"
+#include "boost/iterator/iterator_adaptor.hpp"
+#include <gflags/gflags.h>
+#include <gtest/gtest.h>
+
+using namespace std;
+using namespace folly;
+namespace Ifolly {}
+using namespace Ifolly;
+
+//=============================================================================
+//=============================================================================
+// Data type
+
+//-----------------------------------------------------------------------------
+// Flags
+
typedef uint32_t Flags;

// each method has four options: normal (the default), noexcept, throw,
// and deleted; throw is mutually exclusive with noexcept
//
// DC - default constructor
// CC - copy constructor
// MC - move constructor
// OC - other constructor
// CA - copy assignment
// MA - move assignment
enum FlagVals : Flags {
  DC_NOEXCEPT = 0x1,
  DC_THROW    = 0x2,
  DC_DELETE   = 0x8000,
  CC_NOEXCEPT = 0x4,
  CC_THROW    = 0x8,
  CC_DELETE   = 0x10000,
  MC_NOEXCEPT = 0x10,
  MC_THROW    = 0x20,
  MC_DELETE   = 0x20000,
  OC_NOEXCEPT = 0x40,
  OC_THROW    = 0x80,
  // OC_DELETE - DNE

  CA_NOEXCEPT = 0x100,
  CA_THROW    = 0x200,
  CA_DELETE   = 0x40000,
  MA_NOEXCEPT = 0x400,
  MA_THROW    = 0x800,
  MA_DELETE   = 0x80000,

  ALL_DELETE  = DC_DELETE | CC_DELETE | MC_DELETE
              | CA_DELETE | MA_DELETE,

  IS_RELOCATABLE
              = 0x2000,

  // for the allocator
  PROP_COPY = 0x100000,
  PROP_MOVE = 0x200000,
  PROP_SWAP = 0x400000,
};
+
+//-----------------------------------------------------------------------------
+// Deletors
+
// D0..D4 each delete exactly one special member when their bool
// parameter is true (D0: default ctor, D1: copy ctor, D2: move ctor,
// D3: copy assignment, D4: move assignment). The explicit nullptr_t
// constructor gives Delete<f> a way to construct each base even when
// that base's default constructor is deleted.
template <bool b> struct D0 {
  D0() = default;
  D0(const D0&) = default;
  D0(D0&&) = default;
  explicit D0(std::nullptr_t) {}
  D0& operator=(const D0&) = default;
  D0& operator=(D0&&) = default;
};
template <> struct D0<true> {
  D0() = delete;
  D0(const D0&) = default;
  D0(D0&&) = default;
  explicit D0(std::nullptr_t) {}
  D0& operator=(const D0&) = default;
  D0& operator=(D0&&) = default;
};

template <bool b> struct D1 {
  D1() = default;
  D1(const D1&) = default;
  D1(D1&&) = default;
  explicit D1(std::nullptr_t) {}
  D1& operator=(const D1&) = default;
  D1& operator=(D1&&) = default;
};
template <> struct D1<true> {
  D1() = default;
  D1(const D1&) = delete;
  D1(D1&&) = default;
  explicit D1(std::nullptr_t) {}
  D1& operator=(const D1&) = default;
  D1& operator=(D1&&) = default;
};

template <bool b> struct D2 {
  D2() = default;
  D2(const D2&) = default;
  D2(D2&&) = default;
  explicit D2(std::nullptr_t) {}
  D2& operator=(const D2&) = default;
  D2& operator=(D2&&) = default;
};
template <> struct D2<true> {
  D2() = default;
  D2(const D2&) = default;
  D2(D2&&) = delete;
  explicit D2(std::nullptr_t) {}
  D2& operator=(const D2&) = default;
  D2& operator=(D2&&) = default;
};

template <bool b> struct D3 {
  D3() = default;
  D3(const D3&) = default;
  D3(D3&&) = default;
  explicit D3(std::nullptr_t) {}
  D3& operator=(const D3&) = default;
  D3& operator=(D3&&) = default;
};
template <> struct D3<true> {
  D3() = default;
  D3(const D3&) = default;
  D3(D3&&) = default;
  explicit D3(std::nullptr_t) {}
  D3& operator=(const D3&) = delete;
  D3& operator=(D3&&) = default;
};

template <bool b> struct D4 {
  D4() = default;
  D4(const D4&) = default;
  D4(D4&&) = default;
  explicit D4(std::nullptr_t) {}
  D4& operator=(const D4&) = default;
  D4& operator=(D4&&) = default;
};
template <> struct D4<true> {
  D4() = default;
  D4(const D4&) = default;
  D4(D4&&) = default;
  explicit D4(std::nullptr_t) {}
  D4& operator=(const D4&) = default;
  D4& operator=(D4&&) = delete;
};

// Aggregates the five deletors; the f flags select which special
// members end up deleted in the combined type.
template <Flags f>
struct Delete : D0<f & DC_DELETE>
              , D1<f & CC_DELETE>
              , D2<f & MC_DELETE>
              , D3<f & CA_DELETE>
              , D4<f & MA_DELETE> {
  Delete() = default;
  Delete(const Delete&) = default;
  Delete(Delete&&) = default;
  Delete& operator=(const Delete&) = default;
  Delete& operator=(Delete&&) = default;

  // Escape hatch: constructs all bases via their nullptr_t ctors,
  // usable even when the default constructor is deleted.
  explicit Delete(std::nullptr_t)
      : D0<f & DC_DELETE>(nullptr)
      , D1<f & CC_DELETE>(nullptr)
      , D2<f & MC_DELETE>(nullptr)
      , D3<f & CA_DELETE>(nullptr)
      , D4<f & MA_DELETE>(nullptr)
      {}
};
+
+//-----------------------------------------------------------------------------
+// Ticker
+
// Exception thrown by Ticker when its budget runs out.
struct TickException : std::runtime_error {
  explicit TickException(const std::string& s)
    : std::runtime_error("tick: " + s) {}
};

// Global tick budget used to inject exceptions at arbitrary points.
// Tick() throws exactly when TicksLeft hits zero; a negative budget
// (the default -1) never throws, it just keeps counting down.
struct Ticker {
  static int CountTicks;  // ticks consumed so far
  static int TicksLeft;   // remaining budget; 0 => next Tick throws
  static void Tick(const std::string& s) {
    if (TicksLeft == 0) throw TickException(s);
    ++CountTicks;
    --TicksLeft;
  }
};

int Ticker::CountTicks = 0;
int Ticker::TicksLeft = -1;
+
// Data mixin that calls Ticker::Tick from every special member that is
// not flagged noexcept, so tests can schedule an exception at any
// construction/assignment site. The f flags also drive the noexcept
// specifications themselves.
template <Flags f>
struct DataTicker : Ticker {
  DataTicker() noexcept(f & DC_NOEXCEPT) {
    if (!(f & DC_NOEXCEPT)) Tick("Data()");
  }
  DataTicker(const DataTicker&) noexcept(f & CC_NOEXCEPT) {
    if (!(f & CC_NOEXCEPT)) Tick("Data(const Data&)");
  }
  DataTicker(DataTicker&&) noexcept(f & MC_NOEXCEPT) {
    if (!(f & MC_NOEXCEPT)) Tick("Data(Data&&)");
  }
  // "Other constructor" (the int-valued one in Data forwards here).
  explicit DataTicker(std::nullptr_t) noexcept(f & OC_NOEXCEPT) {
    if (!(f & OC_NOEXCEPT)) Tick("Data(int)");
  }
  // Destructors never throw.
  ~DataTicker() noexcept {}
  void operator=(const DataTicker&) noexcept(f & CA_NOEXCEPT) {
    if (!(f & CA_NOEXCEPT)) Tick("op=(const Data&)");
  }
  void operator=(DataTicker&&) noexcept(f & MA_NOEXCEPT) {
    if (!(f & MA_NOEXCEPT)) Tick("op=(Data&&)");
  }
};
+
+//-----------------------------------------------------------------------------
+// Operation counter
+
// Tallies every special-member invocation in global counters so tests
// can assert exactly how many constructions/assignments/destructions
// an operation performed.
struct Counter {
  static int CountDC, CountCC, CountMC, CountOC, CountCA, CountMA;
  static int CountDestroy, CountTotalOps, CountLoggedConstruction;

  Counter() noexcept                        { ++CountTotalOps; ++CountDC; }
  Counter(const Counter&) noexcept          { ++CountTotalOps; ++CountCC; }
  Counter(Counter&&) noexcept               { ++CountTotalOps; ++CountMC; }
  explicit Counter(std::nullptr_t) noexcept { ++CountTotalOps; ++CountOC; }
  void operator=(const Counter&) noexcept   { ++CountTotalOps; ++CountCA; }
  void operator=(Counter&&) noexcept        { ++CountTotalOps; ++CountMA; }
  ~Counter() noexcept                       { ++CountTotalOps; ++CountDestroy; }
};

int Counter::CountDC = 0;
int Counter::CountCC = 0;
int Counter::CountMC = 0;
int Counter::CountOC = 0;
int Counter::CountCA = 0;
int Counter::CountMA = 0;
int Counter::CountDestroy = 0;
int Counter::CountTotalOps = 0;
int Counter::CountLoggedConstruction = 0;
+
+//-----------------------------------------------------------------------------
+// Tracker
+
// Bookkeeping shared by all DataTracker instantiations: a UID source,
// live-object counts per UID, a running total, and (for types that are
// NOT marked relocatable) a map from object address to UID used to
// detect unexpected bitwise relocation.
struct Tracker {
  static int UID;
  static std::map<int, int> UIDCount;
  static int UIDTotal;
  static std::map<const Tracker*, int> Locations;
  static bool Print;  // when true, DataTracker traces every operation

  Tracker* self;  // the address this object believes it lives at
  int uid;

  Tracker(Tracker* self, int uid) : self(self), uid(uid) {}
};
+
+// Registers every construction/destruction/assignment in the Tracker maps.
+// When isRelocatable is false, Locations additionally maps each object's
+// address to its uid, so a byte-wise relocation (which bypasses the copy
+// constructor) would leave a stale entry that isSane() flags.
+template <bool isRelocatable>
+struct DataTracker : Tracker {
+  DataTracker() noexcept : Tracker(this, UID++) {
+    UIDCount[uid]++;
+    UIDTotal++;
+    if (!isRelocatable) Locations[self] = uid;
+    print("Data()");
+  }
+  // Copies and moves preserve the source's uid (identity follows the value).
+  DataTracker(const DataTracker& o) noexcept : Tracker(this, o.uid) {
+    UIDCount[uid]++;
+    UIDTotal++;
+    if (!isRelocatable) Locations[self] = uid;
+    print("Data(const Data&)");
+  }
+  DataTracker(DataTracker&& o) noexcept : Tracker(this, o.uid) {
+    UIDCount[uid]++;
+    UIDTotal++;
+    if (!isRelocatable) Locations[self] = uid;
+    print("Data(Data&&)");
+  }
+
+  explicit DataTracker(int uid) noexcept : Tracker(this, uid) {
+    UIDCount[uid]++;
+    UIDTotal++;
+    if (!isRelocatable) Locations[self] = uid;
+    print("Data(int)");
+  }
+
+  ~DataTracker() noexcept {
+    UIDCount[uid]--;
+    UIDTotal--;
+    if (!isRelocatable) Locations.erase(self);
+    print("~Data()");
+    // Poison the fields so use-after-destroy is obvious in a debugger.
+    uid = 0xdeadbeef;
+    self = (DataTracker*)0xfeebdaed;
+  }
+
+  DataTracker& operator=(const DataTracker& o) noexcept {
+    UIDCount[uid]--;
+    uid = o.uid;
+    UIDCount[uid]++;
+    if (!isRelocatable) Locations[self] = uid;
+    print("op=(const Data&)");
+    return *this;
+  }
+  DataTracker& operator=(DataTracker&& o) noexcept {
+    UIDCount[uid]--;
+    uid = o.uid;
+    UIDCount[uid]++;
+    if (!isRelocatable) Locations[self] = uid;
+    print("op=(Data&&)");
+    return *this;
+  }
+
+  // Verbose trace of each operation; enabled via Tracker::Print.
+  void print(const std::string& fun) {
+    if (Print) {
+      std::cerr << std::setw(20) << fun << ": uid = " << std::setw(3) << uid;
+      if (!isRelocatable) std::cerr << ", self = " << self;
+      std::cerr << std::endl;
+    }
+  }
+};
+
+// Tracker static storage.  UID starts at an arbitrary non-zero value so
+// that uids read from uninitialized memory are unlikely to look valid.
+int Tracker::UID = 1234;
+std::map<int, int> Tracker::UIDCount;
+int Tracker::UIDTotal = 0;
+std::map<const Tracker*, int> Tracker::Locations;
+bool Tracker::Print = false;
+
+//-----------------------------------------------------------------------------
+//-----------------------------------------------------------------------------
+// Data
+
+// The element type used by the tests.  The Flags parameter selects which
+// special members are deleted (Delete<f>), which are noexcept and which
+// tick (DataTicker<f>), and whether the type advertises relocatability
+// (DataTracker, plus the IsRelocatable specialization below).  pad inflates
+// the object's size to exercise large-element code paths.
+template <Flags f = 0, size_t pad = 0>
+struct Data : DataTracker<f & IS_RELOCATABLE>,
+              Counter, DataTicker<f>, Delete<f> {
+  static const Flags flags = f;
+  char spacehog[pad ? pad : 1];
+
+  Data() = default;
+  Data(const Data&) = default;
+  Data(Data&&) = default;
+  /* implicit */ Data(int i)
+    : DataTracker<f & IS_RELOCATABLE>(i), Counter()
+    , DataTicker<f>(nullptr)
+    , Delete<f>(nullptr)
+    {}
+  ~Data() = default;
+  Data& operator=(const Data&) = default;
+  Data& operator=(Data&&) = default;
+
+private:
+  // operator& is declared private (and never defined) so that code under
+  // test is forced to use std::addressof rather than the address-of operator.
+  int operator&() const;
+};
+
+namespace folly {
+// Hook Data into folly's relocatability trait: Data<f, pad> is relocatable
+// exactly when its flags request it via IS_RELOCATABLE.
+template <Flags f, size_t pad>
+struct IsRelocatable<Data<f, pad>>
+  : std::integral_constant<bool,
+      f & IS_RELOCATABLE
+    > {};
+} // namespace folly
+
+//-----------------------------------------------------------------------------
+//-----------------------------------------------------------------------------
+// Allocator
+
+// Allocator-propagation selectors consumed by Alloc below: for Data<f, pad>
+// the PROP_* flags decide whether the allocator propagates on container
+// copy-assignment, move-assignment, and swap; every other element type
+// defaults to propagating.
+template <typename T>
+struct isPropCopy : true_type {};
+template <Flags f, size_t pad>
+struct isPropCopy<Data<f, pad>> :
+  std::integral_constant<bool, f & PROP_COPY> {};
+
+template <typename T>
+struct isPropMove : true_type {};
+template <Flags f, size_t pad>
+struct isPropMove<Data<f, pad>> :
+  std::integral_constant<bool, f & PROP_MOVE> {};
+
+template <typename T>
+struct isPropSwap : true_type {};
+template <Flags f, size_t pad>
+struct isPropSwap<Data<f, pad>> :
+  std::integral_constant<bool, f & PROP_SWAP> {};
+
+
+// Global audit trail for Alloc: construction/destruction balance, the size
+// of each outstanding allocation, and which allocator id owns each pointer.
+struct AllocTracker {
+  static int Constructed;
+  static int Destroyed;
+  static map<void*, size_t> Allocated;
+  static map<void*, int> Owner;
+};
+int AllocTracker::Constructed = 0;
+int AllocTracker::Destroyed = 0;
+map<void*, size_t> AllocTracker::Allocated;
+map<void*, int> AllocTracker::Owner;
+
+// A std::allocator wrapper that (a) ticks on every allocation so exception
+// paths can be exercised (Ticker), and (b) audits each allocate/deallocate
+// pair via AllocTracker, failing the test on a size mismatch, a double
+// free, or a free issued through an allocator that does not own the block.
+// The id field distinguishes allocator instances for propagation tests.
+template <class T>
+struct Alloc : AllocTracker, Ticker {
+  typedef typename std::allocator<T>::pointer pointer;
+  typedef typename std::allocator<T>::const_pointer const_pointer;
+  typedef typename std::allocator<T>::size_type size_type;
+  typedef typename std::allocator<T>::value_type value_type;
+
+  //-----
+  // impl
+
+  std::allocator<T> a;
+  int id;
+  // BUGFIX: this previously read "a(a)", initializing the member from its
+  // own indeterminate value; default-construct the wrapped allocator.
+  explicit Alloc(int i = 8) : a(), id(i) {}
+  Alloc(const Alloc& o) : a(o.a), id(o.id) {}
+  Alloc(Alloc&& o) : a(move(o.a)), id(o.id) {}
+  Alloc& operator=(const Alloc&) = default;
+  Alloc& operator=(Alloc&&) = default;
+  bool operator==(const Alloc& o) const { return a == o.a && id == o.id; }
+  bool operator!=(const Alloc& o) const { return !(*this == o); }
+
+  //---------
+  // tracking
+
+  pointer allocate(size_type n) {
+    if (n == 0) {
+      cerr << "called allocate(0)" << endl;
+      throw runtime_error("allocate fail");
+    }
+    Tick("allocate");
+    auto p = a.allocate(n);
+    Allocated[p] = n;
+    Owner[p] = id;
+    return p;
+  }
+
+  void deallocate(pointer p, size_type n) {
+    if (p == nullptr) {
+      cerr << "deallocate(nullptr, " << n << ")" << endl;
+      FAIL() << "deallocate failed";
+    }
+    if (Allocated[p] != n) {
+      cerr << "deallocate(" << p << ", " << n << ") invalid: ";
+      // Allocated holds size_t; -1 (i.e. SIZE_MAX) marks an already-freed
+      // block, 0 a pointer we never handed out.
+      if (Allocated[p] == 0) cerr << "never allocated";
+      else if (Allocated[p] == -1) cerr << "already deallocated";
+      else cerr << "wrong number (want " << Allocated[p] << ")";
+      cerr << endl;
+      FAIL() << "deallocate failed";
+    }
+    if (Owner[p] != id) {
+      cerr << "deallocate(" << p << "), where pointer is owned by "
+           << Owner[p] << ", instead of self - " << id << endl;
+      FAIL() << "deallocate failed";
+    }
+    Allocated[p] = -1;
+    a.deallocate(p, n);
+  }
+
+  template <class U, class... Args>
+  void construct(U* p, Args&&... args) {
+    Tick("construct");
+    a.construct(p, std::forward<Args>(args)...);
+    Constructed++;
+  }
+
+  template <class U>
+  void destroy(U* p) {
+    Destroyed++;
+    a.destroy(p);
+  }
+
+  //--------------
+  // container ops
+
+  // Copies of a container get a distinguishable allocator (id + 1), so the
+  // tests can tell whether select_on_container_copy_construction was used.
+  Alloc select_on_container_copy_construction() const {
+    Tick("select allocator for copy");
+    return Alloc(id + 1);
+  }
+
+  typedef isPropCopy<T> propagate_on_container_copy_assignment;
+  typedef isPropMove<T> propagate_on_container_move_assignment;
+  typedef isPropSwap<T> propagate_on_container_swap;
+};
+
+//=============================================================================
+//=============================================================================
+// Verification and resetting
+
+// Zero the per-run operation counters, folding objects that are still alive
+// into CountLoggedConstruction so getTotal() stays balanced, and re-arm the
+// Ticker: after `ticks` ticks the next one throws (ticks == -1 disables
+// throwing).
+void softReset(int ticks = -1) {
+  Counter::CountLoggedConstruction +=
+    Counter::CountDC + Counter::CountCC + Counter::CountMC
+    + Counter::CountOC - Counter::CountDestroy;
+  Counter::CountDC = Counter::CountCC = Counter::CountMC
+    = Counter::CountOC = Counter::CountCA = Counter::CountMA = 0;
+  Counter::CountDestroy = Counter::CountTotalOps = 0;
+  Ticker::CountTicks = 0;
+  Ticker::TicksLeft = ticks;
+}
+
+// Wipe every piece of global test state: identity tracking, operation
+// counters (including the live-object carryover), and the allocator audit
+// records.  Invoked by SETUP at the start of each test iteration.
+void hardReset() {
+  // Identity tracking.
+  Tracker::UIDCount.clear();
+  Tracker::Locations.clear();
+  Tracker::UIDTotal = 0;
+
+  // Operation counters.  softReset() folds live objects into
+  // CountLoggedConstruction, so it must run before that field is cleared.
+  softReset();
+  Counter::CountLoggedConstruction = 0;
+
+  // Allocator audit records.
+  AllocTracker::Allocated.clear();
+  AllocTracker::Owner.clear();
+  AllocTracker::Constructed = 0;
+  AllocTracker::Destroyed = 0;
+}
+
+// Number of Data objects currently alive: all constructions (including
+// those logged before the last softReset) minus all destructions.
+int getTotal() {
+  const int constructed = Counter::CountLoggedConstruction
+      + Counter::CountDC + Counter::CountCC
+      + Counter::CountMC + Counter::CountOC;
+  return constructed - Counter::CountDestroy;
+}
+
+// Consistency check across all three tracking systems: the Counter balance,
+// the per-uid counts, and (for non-relocatable types) the address map must
+// all agree on how many Data objects are alive.
+void isSane() {
+  int tot = getTotal();
+  ASSERT_GE(tot, 0) << "more objects deleted than constructed";
+
+  ASSERT_EQ(tot, Tracker::UIDTotal)
+    << "UIDTotal has incorrect number of objects";
+
+  int altTot = 0;
+  for (const auto& kv : Tracker::UIDCount) {
+    ASSERT_TRUE(kv.second >= 0) << "there exists " << kv.second << " Data "
+      "with uid " << kv.first;
+    altTot += kv.second;
+  }
+  ASSERT_EQ(tot, altTot) << "UIDCount corrupted";
+
+  // Locations is only populated for non-relocatable types; an empty map
+  // means the element type is relocatable and there is nothing to check.
+  if (!Tracker::Locations.empty()) { // implied by IsRelocatable
+    ASSERT_EQ(tot, Tracker::Locations.size())
+      << "Locations has incorrect number of objects";
+    for (const auto& du : Tracker::Locations) {
+      ASSERT_EQ(du.second, du.first->uid) << "Locations contains wrong uid";
+      ASSERT_EQ(du.first, du.first->self) << "Data.self is corrupted";
+    }
+  }
+}
+
+//-----------------------------------------------------------------------------
+// Traits
+
+// Restriction predicates used as the third argument of STL_TEST to decide
+// whether a given element type qualifies for a test.
+template <typename T>
+struct is_copy_constructibleAndAssignable
+  : std::integral_constant<bool,
+      std::is_copy_constructible<T>::value &&
+      std::is_copy_assignable<T>::value
+    > {};
+
+template <typename T>
+struct is_move_constructibleAndAssignable
+  : std::integral_constant<bool,
+      std::is_move_constructible<T>::value &&
+      std::is_move_assignable<T>::value
+    > {};
+
+// True when the vector under test uses Alloc rather than std::allocator.
+template <class Vector>
+struct customAllocator
+  : std::integral_constant<bool,
+      !is_same<
+        typename Vector::allocator_type,
+        std::allocator<typename Vector::value_type>
+      >::value
+    > {};
+
+// Move-assignment of the container is also possible for unassignable
+// element types if the allocator propagates on move (no elements need to
+// be moved individually).
+template <typename T>
+struct special_move_assignable
+  : is_move_constructibleAndAssignable<T> {};
+template <Flags f, size_t pad>
+struct special_move_assignable<Data<f, pad>>
+  : std::integral_constant<bool,
+      is_move_constructibleAndAssignable<Data<f, pad>>::value ||
+      f & PROP_MOVE
+    > {};
+
+//=============================================================================
+//=============================================================================
+// Framework
+
+//-----------------------------------------------------------------------------
+// Timing
+
+// Read the x86 time-stamp counter.  The cpuid instructions before and after
+// rdtsc act as serializing barriers so out-of-order execution cannot move
+// measured work across the read.  x86/x86-64 only.
+uint64_t ReadTSC() {
+  unsigned reslo, reshi;
+
+  __asm__ __volatile__  (
+  "xorl %%eax,%%eax \n cpuid \n"
+   ::: "%eax", "%ebx", "%ecx", "%edx");
+  __asm__ __volatile__  (
+  "rdtsc\n"
+   : "=a" (reslo), "=d" (reshi) );
+  __asm__ __volatile__  (
+  "xorl %%eax,%%eax \n cpuid \n"
+   ::: "%eax", "%ebx", "%ecx", "%edx");
+
+  return ((uint64_t)reshi << 32) | reslo;
+}
+
+//-----------------------------------------------------------------------------
+// New Boost
+
+// Local backport of BOOST_PP_VARIADIC_SIZE / VARIADIC_TO_SEQ (from newer
+// Boost.Preprocessor): counts up to 64 variadic arguments by pasting them
+// in front of a descending number list and selecting the 65th element, then
+// converts the argument pack into a Boost.PP sequence.
+#define IBOOST_PP_VARIADIC_SIZE(...) IBOOST_PP_VARIADIC_SIZE_I(__VA_ARGS__, \
+  64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, \
+  45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, \
+  26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, \
+  7, 6, 5, 4, 3, 2, 1,)
+#define IBOOST_PP_VARIADIC_SIZE_I(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, \
+  e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, \
+  e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, \
+  e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, \
+  e55, e56, e57, e58, e59, e60, e61, e62, e63, size, ...) size
+#define IBOOST_PP_VARIADIC_TO_SEQ(args...) \
+  BOOST_PP_TUPLE_TO_SEQ(IBOOST_PP_VARIADIC_SIZE(args), (args))
+
+//-----------------------------------------------------------------------------
+// STL_TEST
+
+// Instantiate and run one generated test for one container type, tagging
+// the gtest output with the allocator and element type names.
+#define GEN_TEST(r, name, type) \
+  { \
+    string atype = PrettyType<typename type::allocator_type>()(); \
+    string ptype = PrettyType<typename type::value_type>()(); \
+    SCOPED_TRACE("allocator: " + atype); { \
+    SCOPED_TRACE("datatype: " + ptype); { \
+    test_ ## name ## 3 <type> (); \
+    if (::testing::Test::HasFatalFailure()) return; \
+  }}}
+// Interface-only check: never executed (if (0)), but must compile.
+#define GEN_TYPE_TEST(r, name, type) \
+  if (0) test_I_ ## name ## 3 <type> ();
+#define GEN_RUNNABLE_TEST(r, name, type) \
+  one = test_I_ ## name ## 3 <type> () || one;
+
+// Each test argument contributes four expansion hooks: LOOPER_* opens the
+// value loop, VMAKER_* constructs the value, UMAKER_* verifies/tears it
+// down, CLOSER_* closes the loop (applied in reverse order).
+#define GEN_LOOPER(r, d, arg) BOOST_PP_CAT(LOOPER_, arg)
+#define GEN_VMAKER(r, d, arg) { BOOST_PP_CAT(VMAKER_, arg) {
+#define GEN_UMAKER(r, d, arg) } BOOST_PP_CAT(UMAKER_, arg) }
+#define GEN_CLOSER(r, d, arg) BOOST_PP_CAT(CLOSER_, arg)
+
+// Map an argument tag to its declared type (TYPIFY_*) or to a full
+// "type name" parameter declaration.
+#define TYPIFY(r, d, name) BOOST_PP_CAT(TYPIFY_, name)
+#define ARGIFY(r, d, name) TYPIFY(r, d, name) name
+
+// Expands to: a disabled stub (false_type) and the real driver (true_type)
+// that loops over all argument combinations; a test_I_* variant that only
+// checks the test instantiates (note the early `return true;` keeps the
+// named-function reference from ever executing); and the gtest TEST that
+// fans out over the container-type list.
+#define MAKE_TEST(ref, name, types, restriction, argseq, rawargs...) \
+  template <class Vector> void test_ ## name ## 2 (std::false_type) {} \
+  template <class Vector> void test_ ## name ## 2 (std::true_type) { \
+    BOOST_PP_SEQ_FOR_EACH(GEN_LOOPER, _, argseq) \
+    { SETUP { \
+    BOOST_PP_SEQ_FOR_EACH(GEN_VMAKER, _, argseq) \
+    { \
+      test_ ## name <Vector, typename Vector::value_type, \
+        typename Vector::allocator_type> ( rawargs ); \
+      if (::testing::Test::HasFatalFailure()) return; \
+    } \
+    BOOST_PP_SEQ_FOR_EACH(GEN_UMAKER, _, BOOST_PP_SEQ_REVERSE(argseq)) \
+    } TEARDOWN } \
+    BOOST_PP_SEQ_FOR_EACH(GEN_CLOSER, _, BOOST_PP_SEQ_REVERSE(argseq)) \
+  } \
+  template <class Vector> void test_ ## name ## 3 () { \
+    test_ ## name ## 2 <Vector> (std::integral_constant<bool, \
+      restriction<typename Vector::value_type>::value && \
+      is_copy_constructible<typename Vector::value_type>::value \
+    >()); \
+  } \
+  \
+  template <class Vector> bool test_I_ ## name ## 2 (std::false_type) \
+    { return false; } \
+  template <class Vector> bool test_I_ ## name ## 2 (std::true_type) { \
+    return true; \
+    auto f = test_ ## name <Vector, \
+      typename Vector::value_type, typename Vector::allocator_type>; \
+    return true; \
+  } \
+  template <class Vector> bool test_I_ ## name ## 3 () { \
+    return test_I_ ## name ## 2 <Vector> (std::integral_constant<bool, \
+      restriction<typename Vector::value_type>::value>()); \
+    return false; \
+  } \
+  \
+  TEST(FBVector, name) { \
+    SCOPED_TRACE("N3337 reference: " ref); \
+    BOOST_PP_SEQ_FOR_EACH(GEN_TEST, name, types) \
+    BOOST_PP_SEQ_FOR_EACH(GEN_TYPE_TEST, name, INTERFACE_TYPES) \
+    bool one = false; \
+    BOOST_PP_SEQ_FOR_EACH(GEN_RUNNABLE_TEST, name, types) \
+    if (!one) FAIL() << "No tests qualified to run"; \
+  }
+
+// Declare the user-written test body with one parameter per argument tag.
+#define DECL(name, args...) \
+  template <class Vector, typename T, typename Allocator> \
+  void test_ ## name (BOOST_PP_SEQ_ENUM(BOOST_PP_SEQ_TRANSFORM( \
+    ARGIFY, _, IBOOST_PP_VARIADIC_TO_SEQ(args))))
+
+#define STL_TEST_I(ref, name, restriction, args...) \
+  DECL(name, args); \
+  MAKE_TEST(ref, name, TEST_TYPES, restriction, \
+    IBOOST_PP_VARIADIC_TO_SEQ(args), args) \
+  DECL(name, args)
+
+// Public entry point: every test implicitly receives the dummy `z` argument
+// (so args may be empty) and the exception-injection `ticks` counter.
+#define STL_TEST(ref, name, restriction, args...) \
+  STL_TEST_I(ref, name, restriction, z, ## args, ticks)
+
+//-----------------------------------------------------------------------------
+// Test Types
+
+// Element types exercising the main flag combinations: plain, padded,
+// noexcept-move, deleted-copy, and relocatable.
+typedef Data<> ED1;
+typedef Data<0, 4080> ED2;
+typedef Data<MC_NOEXCEPT> ED3;
+typedef Data<MC_NOEXCEPT | CC_DELETE> ED4;
+typedef Data<IS_RELOCATABLE> ED5;
+
+// Container instantiations; VECTOR_ is the vector template under test.
+typedef VECTOR_<int, std::allocator<int>> _TVIS;
+typedef VECTOR_<int, Alloc<int>> _TVI;
+typedef VECTOR_<ED1, std::allocator<ED1>> _TV1;
+typedef VECTOR_<ED2, std::allocator<ED2>> _TV2;
+typedef VECTOR_<ED3, std::allocator<ED3>> _TV3;
+typedef VECTOR_<ED4, std::allocator<ED4>> _TV4;
+typedef VECTOR_<ED5, std::allocator<ED5>> _TV5v1;
+typedef VECTOR_<ED5, Alloc<ED5>> _TV5;
+
+// Allocator-propagation variants (only meaningful with the custom Alloc).
+typedef Data<PROP_COPY> EP1;
+typedef Data<PROP_MOVE> EP2;
+typedef Data<PROP_SWAP> EP3;
+
+typedef VECTOR_<EP1, Alloc<EP1>> _TP1;
+typedef VECTOR_<EP2, Alloc<EP2>> _TP2;
+typedef VECTOR_<EP3, Alloc<EP3>> _TP3;
+
+#define TEST_TYPES (_TVIS)(_TVI)(_TV1)(_TV2)(_TV3)(_TV4)(_TV5v1)(_TV5) \
+  (_TP1)(_TP2)(_TP3)
+
+// Restricted element types used only for interface (compile-only) checks.
+typedef Data<ALL_DELETE> DD1; // unoperable
+typedef Data<DC_DELETE | CC_DELETE | MC_DELETE> DD2; // unconstructible
+typedef Data<CA_DELETE | MA_DELETE> DD3; // unassignable
+typedef Data<CC_DELETE | MC_DELETE> DD4; // uncopyable
+typedef Data<ALL_DELETE & ~DC_DELETE> DD5; // only default constructible
+typedef Data<CC_DELETE> DD6; // move-only copy construction
+typedef Data<CA_DELETE> DD7; // move-only assignment
+
+typedef Data<ALL_DELETE | PROP_MOVE> DDSMA;
+typedef VECTOR_<DDSMA, Alloc<DDSMA>> _TSpecialMA;
+
+#define INTERFACE_TYPES \
+  (_TVI)(VECTOR_<DD1>)(VECTOR_<DD2>)(VECTOR_<DD3>) \
+  (VECTOR_<DD4>)(VECTOR_<DD5>)(VECTOR_<DD6>) \
+  (VECTOR_<DD7>)(_TSpecialMA)
+
+//-----------------------------------------------------------------------------
+// Pretty printers
+
+// Human-readable type names for SCOPED_TRACE output.  The generic template
+// special-cases the arithmetic types used by the tests and otherwise falls
+// back to the (mangled) typeid name.
+template <typename T>
+struct PrettyType {
+  string operator()() {
+    if (is_same<T, int>::value) return "int";
+    if (is_same<T, char>::value) return "char";
+    if (is_same<T, uint64_t>::value) return "uint64_t";
+    return typeid(T).name();
+  }
+};
+
+// For Data: spell out which special members are deleted ([^ ...]), which
+// are noexcept ([safe ...]), relocatability, and any padding.
+template <Flags f, size_t pad>
+struct PrettyType<Data<f, pad>> {
+  string operator()() {
+    stringstream tpe;
+    tpe << "Data";
+
+    if ((f & DC_DELETE) ||
+        (f & CC_DELETE) ||
+        (f & MC_DELETE) ||
+        (f & CA_DELETE) ||
+        (f & MA_DELETE)) {
+      tpe << "[^";
+      if (f & DC_DELETE) tpe << " DC,";
+      if (f & CC_DELETE) tpe << " CC,";
+      if (f & MC_DELETE) tpe << " MC,";
+      if (f & CA_DELETE) tpe << " CA,";
+      if (f & MA_DELETE) tpe << " MA,";
+      tpe << "]";
+    }
+
+    if ((f & DC_NOEXCEPT) ||
+        (f & CC_NOEXCEPT) ||
+        (f & MC_NOEXCEPT) ||
+        (f & CA_NOEXCEPT) ||
+        (f & MA_NOEXCEPT)) {
+      tpe << "[safe";
+      if (f & DC_NOEXCEPT) tpe << " DC,";
+      if (f & CC_NOEXCEPT) tpe << " CC,";
+      if (f & MC_NOEXCEPT) tpe << " MC,";
+      if (f & CA_NOEXCEPT) tpe << " CA,";
+      if (f & MA_NOEXCEPT) tpe << " MA,";
+      tpe << "]";
+    }
+
+    if (f & IS_RELOCATABLE) {
+      tpe << "(relocatable)";
+    }
+
+    if (pad != 0) {
+      tpe << "{pad " << pad << "}";
+    }
+
+    return tpe.str();
+  }
+};
+
+template <typename T>
+struct PrettyType<std::allocator<T>> {
+  string operator()() {
+    return "std::allocator<" + PrettyType<T>()() + ">";
+  }
+};
+
+template <typename T>
+struct PrettyType<Alloc<T>> {
+  string operator()() {
+    return "Alloc<" + PrettyType<T>()() + ">";
+  }
+};
+
+//-----------------------------------------------------------------------------
+// Setup, teardown, runup, rundown
+
+// These four macros are run once per test. Setup and runup occur before the
+// test, teardown and rundown after. Setup and runup straddle the
+// initialization sequence, whereas rundown and teardown straddle the
+// cleanup.
+
+// SETUP wipes all global tracking state before each argument combination.
+#define SETUP hardReset();
+// No global teardown needed; verification happens in the UMAKER_* hooks.
+#define TEARDOWN
+
+//-----------------------------------------------------------------------------
+// Types and typegens
+
+//------
+// dummy
+
+// `z` is a dummy argument present in every test; its LOOPER declares the
+// shared a_p/b_p/t_p pointers that VERIFICATION inspects, and its UMAKER
+// runs the no-vector sanity check after the test body.
+#define TYPIFY_z std::nullptr_t
+#define LOOPER_z                                     \
+  Vector* a_p = nullptr; Vector* b_p = nullptr;      \
+  typename Vector::value_type* t_p = nullptr;
+#define VMAKER_z std::nullptr_t z = nullptr;
+#define UMAKER_z                                                      \
+  verify<Vector>(0);                                                  \
+  if (::testing::Test::HasFatalFailure()) return;
+#define CLOSER_z
+
+//------
+// ticks
+
+// Dispatch to the right verify() overload depending on which of a/b/t were
+// created by earlier argument makers.
+#define VERIFICATION                                                  \
+  if (b_p != nullptr) verify(t_p != nullptr ,*a_p, *b_p);             \
+  else if (a_p != nullptr) verify(t_p != nullptr, *a_p);              \
+  else verify<Vector>(t_p != nullptr);                                \
+  if (::testing::Test::HasFatalFailure()) return;
+
+// `ticks` drives exception-safety testing: the test body runs first with
+// throwing disabled (ticks == -1) to learn how many ticks it consumes, then
+// once per tick with an exception injected at that point.  State is
+// verified both before the run and after the (possibly aborted) run.
+#define TYPIFY_ticks int
+#define LOOPER_ticks          \
+  int _maxTicks_ = 0;         \
+  bool ticks_thrown = false;  \
+  for (int ticks = -1; ticks < _maxTicks_; ++ticks) {
+#define VMAKER_ticks                                        \
+  string ticks_st = folly::to<string>("ticks = ", ticks);   \
+  SCOPED_TRACE(ticks_st);                                   \
+  { SCOPED_TRACE("pre-run verification");                   \
+    VERIFICATION }                                          \
+  try {                                                     \
+    softReset(ticks);
+#define UMAKER_ticks _maxTicks_ = Ticker::CountTicks; }                 \
+  catch (const TickException&) { ticks_thrown = true; }                 \
+  catch (const std::exception& e)                                       \
+    { FAIL() << "EXCEPTION: " << e.what(); }                            \
+  catch (...)                                                           \
+    { FAIL() << "UNKNOWN EXCEPTION"; }                                  \
+  if (ticks >= 0 && Ticker::CountTicks > ticks && !ticks_thrown)        \
+    FAIL() << "CountTicks = " << Ticker::CountTicks << " > "            \
+           << ticks << " = ticks"                                       \
+           << ", but no tick error was observed";                       \
+  VERIFICATION
+#define CLOSER_ticks }
+
+
+//--------------------------------------------------
+// vectors (second could be .equal, ==, or distinct)
+
+// (size, spare-capacity) pairs used to build test vectors; -1 means "leave
+// capacity wherever growth happened to put it".  The commented rows are a
+// larger grid kept for occasional exhaustive runs.
+static const vector<pair<int, int>> VectorSizes = {
+  {  0, -1},
+  {  1, -1},
+  {  2, -1},
+  { 10, -1}, { 10, 1}, { 10, 0},
+  {100, -1}, {100, 1},
+
+  //{ 10, -1}, { 10, 0}, { 10, 1}, { 10, 2}, { 10, 10},
+  //{ 100, -1}, { 100, 0}, { 100, 1}, { 100, 2}, { 100, 10}, { 100, 100},
+  //{ 1000, -1}, { 1000, 0}, { 1000, 1}, { 1000, 2}, { 1000, 10}, { 1000, 100},
+  //  { 1000, 1000},
+};
+
+// Monotonically increasing seed so every populated element gets a distinct
+// value (and hence a distinct uid for Data element types).
+int populateIndex = 1426;
+
+// Fill v according to ss: emplace ss.first elements, then, if ss.second is
+// non-negative, keep emplacing until exactly ss.second spare capacity slots
+// remain (capacity growth makes the spare count sweep down to the target).
+template <class Vector>
+void populate(Vector& v, const pair<int, int>& ss) {
+  for (int k = 0; k < ss.first; ++k) {
+    v.emplace_back(populateIndex++);
+  }
+  if (ss.second >= 0) {
+    while (v.capacity() - v.size() != ss.second) {
+      v.emplace_back(populateIndex++);
+    }
+  }
+}
+
+// Allocator factory: default allocators are stateless, but each generated
+// Alloc gets a fresh id (advanced by an arbitrary stride) so distinct
+// allocator instances are distinguishable in propagation tests.
+template <typename A>
+struct allocGen {
+  static A get() { return A(); }
+};
+template <typename T>
+struct allocGen<Alloc<T>> {
+  static Alloc<T> get() {
+    static int c = 0;
+    c += 854;
+    return Alloc<T>(c);
+  }
+};
+
+// `a`: the primary vector, looped over every entry of VectorSizes.
+#define TYPIFY_a Vector&
+#define LOOPER_a for (const auto& a_ss : VectorSizes) {
+#define VMAKER_a                                                            \
+  Vector a(allocGen<typename Vector::allocator_type>::get());               \
+  a_p = &a;                                                                 \
+  populate(*a_p, a_ss);                                                     \
+  string a_st = folly::to<string>("a (", a.size(), "/", a.capacity(), ")"); \
+  SCOPED_TRACE(a_st);
+#define UMAKER_a verify(0, a); if (::testing::Test::HasFatalFailure()) return;
+#define CLOSER_a }
+
+// `b`: the secondary vector; b_i == -2 aliases a, -1 is a deep copy of a
+// (constructed in place over the destroyed b_s), and >= 0 is an independent
+// vector of the given size.
+#define TYPIFY_b Vector&
+#define LOOPER_b for (int b_i = -2; b_i < (int)VectorSizes.size(); ++b_i) {
+#define VMAKER_b                                                             \
+  Vector b_s(allocGen<typename Vector::allocator_type>::get());              \
+  b_p = &b_s; string b_st;                                                   \
+  if (b_i == -2) {                                                           \
+    b_p = &a;                                                                \
+    b_st = "b is an alias of a";                                             \
+  }                                                                          \
+  else if (b_i == -1) {                                                      \
+    b_s.~Vector();                                                           \
+    new (&b_s) Vector(a);                                                    \
+    b_st = "b is a deep copy of a";                                          \
+  }                                                                          \
+  else {                                                                     \
+    populate(b_s, VectorSizes[b_i]);                                         \
+    b_st = folly::to<string>("b (", b_s.size(), "/", b_s.capacity(), ")");   \
+  }                                                                          \
+  Vector& b = *b_p;                                                          \
+  SCOPED_TRACE(b_st);
+#define UMAKER_b \
+  verify(0, a, b); if (::testing::Test::HasFatalFailure()) return;
+#define CLOSER_b }
+
+//----
+// int
+
+// `n`: a small count argument, swept over sizes around typical growth
+// boundaries.
+static const vector<int> nSizes = { 0, 1, 2, 9, 10, 11 };
+
+#define TYPIFY_n int
+#define LOOPER_n for (int n : nSizes) {
+#define VMAKER_n \
+  string n_st = folly::to<string>("n = ", n); SCOPED_TRACE(n_st);
+#define UMAKER_n
+#define CLOSER_n }
+
+//-----------------------
+// non-internal iterators
+
+// `i`/`j`: pointers into a fixed Fibonacci array, acting as external
+// iterators.  ijarC is an untouched duplicate; UMAKER_j asserts the test
+// did not modify the source range.
+static int ijarr[12] = { 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89 };
+static int ijarC[12] = { 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89 };
+
+#define TYPIFY_i int*
+#define LOOPER_i
+#define VMAKER_i int* i = ijarr; SCOPED_TRACE("i = fib[0]");
+#define UMAKER_i
+#define CLOSER_i
+
+#define TYPIFY_j int*
+#define LOOPER_j for (int j_i = 0; j_i < 12; ++j_i) {
+#define VMAKER_j                                          \
+  int* j = ijarr + j_i;                                   \
+  string j_st = folly::to<string>("j = fib[", j_i, "]");  \
+  SCOPED_TRACE(j_st);
+#define UMAKER_j \
+  for (int j_c = 0; j_c < 12; ++j_c) ASSERT_EQ(ijarC[j_c], ijarr[j_c]);
+#define CLOSER_j }
+
+//-------------------
+// internal iterators
+
+// Map a small index to an interesting iterator position in v, with a label
+// for trace output: 0 = begin, 1 = second element, 2 = last element,
+// 3 = end.  For an empty vector, cases 1 and 2 deliberately fall through to
+// begin/end respectively (the empty `if` bodies make that explicit).
+template <class Vector>
+std::pair<typename Vector::iterator, string>
+iterSpotter(Vector& v, int i) {
+  typename Vector::iterator it;
+  string msg;
+
+  switch(i) {
+  case 1:
+    if (v.empty()) ; // fall through
+    else {
+      it = v.begin();
+      ++it;
+      msg = "a[1]";
+      break;
+    }
+  case 0:
+    it = v.begin();
+    msg = "a.begin";
+    break;
+
+  case 2:
+    if (v.empty()) ; // fall through
+    else {
+      it = v.end();
+      --it;
+      msg = "a[-1]";
+      break;
+    }
+  case 3:
+    it = v.end();
+    msg = "a.end";
+    break;
+
+  default:
+    cerr << "internal error" << endl;
+    exit(1);
+  }
+
+  return make_pair(it, msg);
+}
+
+// `p`: an iterator into a, swept over the four iterSpotter positions.
+#define TYPIFY_p typename Vector::iterator
+#define LOOPER_p for (int p_i = 0; p_i < 4; ++p_i) {
+#define VMAKER_p            \
+  auto p_im = iterSpotter(a, p_i);  \
+  auto& p = p_im.first;             \
+  auto& p_m = p_im.second;          \
+  SCOPED_TRACE("p = " + p_m);
+#define UMAKER_p
+#define CLOSER_p }
+
+// `q`: a second iterator, starting at p's position so [p, q) is always a
+// valid (possibly empty) range.
+#define TYPIFY_q typename Vector::iterator
+#define LOOPER_q for (int q_i = p_i; q_i < 4; ++q_i) {
+#define VMAKER_q            \
+  auto q_im = iterSpotter(a, q_i);  \
+  auto& q = q_im.first;             \
+  auto& q_m = q_im.second;          \
+  SCOPED_TRACE("q = " + q_m);
+#define UMAKER_q
+#define CLOSER_q }
+
+//---------
+// datatype
+
+// `t`: a single element value.  For t_v < 4 (when a exists), t aliases an
+// element inside a itself - via iterSpotter - to exercise self-referential
+// calls like v.insert(p, v[0]); larger values use a fresh local object.
+static const vector<int> tVals = { 0, 1, 2, 3, 17, 66, 521 };
+
+#define TYPIFY_t typename Vector::value_type&
+#define LOOPER_t for (int t_v : tVals) {
+#define VMAKER_t                                            \
+  typename Vector::value_type t_s(t_v);                     \
+  t_p = addressof(t_s);                                     \
+  string t_st = folly::to<string>("t(", t_v, ")");          \
+  if (t_v < 4 && a_p != nullptr) {                          \
+    auto t_im = iterSpotter(*a_p, t_v);                     \
+    if (t_im.first != a_p->end()) {                         \
+      t_p = addressof(*t_im.first);                         \
+      t_st = "t is " + t_im.second;                         \
+    }                                                       \
+  }                                                         \
+  typename Vector::value_type& t = *t_p;                    \
+  SCOPED_TRACE(t_st);
+#define UMAKER_t
+#define CLOSER_t }
+
+//----------
+// allocator
+
+// `m`: an allocator argument - either a default-constructed one or, when a
+// exists, a copy of a's allocator (to test equal-allocator paths).
+#define TYPIFY_m typename Vector::allocator_type
+#define LOOPER_m                          \
+  int m_max = 1 + (a_p != nullptr);       \
+  for (int m_i = 0; m_i < m_max; ++m_i) {
+#define VMAKER_m                                            \
+  typename Vector::allocator_type m = m_i == 0              \
+    ? typename Vector::allocator_type()                     \
+    : a_p->get_allocator();
+#define UMAKER_m
+#define CLOSER_m }
+
+//-----------------------------------------------------------------------------
+// Verifiers
+
+// verify a vector
+// Structural invariants any vector must satisfy: ordered begin/end,
+// empty()/size() consistency, size <= capacity <= max_size, and a non-null
+// data() whenever elements exist.
+template <class Vector>
+void verifyVector(const Vector& v) {
+  ASSERT_TRUE(v.begin() <= v.end()) << "end is before begin";
+  ASSERT_TRUE(v.empty() == (v.begin() == v.end())) << "empty != (begin == end)";
+  ASSERT_TRUE(v.size() == distance(v.begin(), v.end()))
+    << "size != end - begin";
+  ASSERT_TRUE(v.size() <= v.capacity()) << "size > capacity";
+  ASSERT_TRUE(v.capacity() <= v.max_size()) << "capacity > max_size";
+  ASSERT_TRUE(v.data() || true); // message won't print - it will just crash
+  ASSERT_TRUE(v.size() == 0 || v.data() != nullptr)
+    << "nullptr data points to at least one element";
+}
+
+// Check the allocator's books: `ele` constructed-but-not-destroyed elements
+// and `cap` total allocated (not yet freed) space, summed over all live
+// allocations (value -1 marks a freed block and is skipped).
+void verifyAllocator(int ele, int cap) {
+  ASSERT_EQ(ele, AllocTracker::Constructed - AllocTracker::Destroyed);
+
+  int tot = 0;
+  for (auto kv : AllocTracker::Allocated)
+    if (kv.second != -1) tot += kv.second;
+  ASSERT_EQ(cap, tot) << "the allocator counts " << tot << " space, "
+    "but the vector(s) have (combined) capacity " << cap;
+}
+
+// Master verifier
+// Master verifier, overloaded for zero, one, or two vectors: every live
+// Data object must be accounted for (vector contents plus `extras`
+// stragglers such as a standalone t), the trackers must be internally
+// consistent (isSane), and - for custom allocators - the books must balance.
+template <class Vector>
+void verify(int extras) {
+  if (!is_arithmetic<typename Vector::value_type>::value)
+    ASSERT_EQ(0 + extras, getTotal()) << "there exist Data but no vectors";
+  isSane();
+  if (::testing::Test::HasFatalFailure()) return;
+  if (customAllocator<Vector>::value) verifyAllocator(0, 0);
+}
+template <class Vector>
+void verify(int extras, const Vector& v) {
+  verifyVector(v);
+  if (!is_arithmetic<typename Vector::value_type>::value)
+    ASSERT_EQ(v.size() + extras, getTotal())
+      << "not all Data are in the vector";
+  isSane();
+  if (::testing::Test::HasFatalFailure()) return;
+  if (customAllocator<Vector>::value) verifyAllocator(v.size(), v.capacity());
+}
+template <class Vector>
+void verify(int extras, const Vector& v1, const Vector& v2) {
+  verifyVector(v1);
+  verifyVector(v2);
+  auto size = v1.size();
+  auto cap = v1.capacity();
+  // When v1 and v2 alias the same vector, count it only once.
+  if (&v1 != &v2) {
+    size += v2.size();
+    cap += v2.capacity();
+  }
+  if (!is_arithmetic<typename Vector::value_type>::value)
+    ASSERT_EQ(size + extras, getTotal()) << "not all Data are in the vector(s)";
+  isSane();
+  if (::testing::Test::HasFatalFailure()) return;
+  if (customAllocator<Vector>::value) verifyAllocator(size, cap);
+}
+
+//=============================================================================
+// Helpers
+
+// save the state of a vector
+// Project any supported element or allocator to an int for snapshotting:
+// ints map to themselves, Data to its uid, Alloc to its id, and the
+// stateless std::allocator to the sentinel -1.
+int convertToInt(int t) {
+  return t;
+}
+template <Flags f, size_t pad>
+int convertToInt(const Data<f, pad>& t) {
+  return t.uid;
+}
+template <typename T>
+int convertToInt(const std::allocator<T>&) {
+  return -1;
+}
+template <typename T>
+int convertToInt(const Alloc<T>& a) {
+  return a.id;
+}
+
+// Snapshot of a vector's contents reduced to ints (uids for Data), used to
+// check that an operation preserved or correctly transformed the elements.
+template <class Vector>
+class DataState {
+  typedef typename Vector::size_type size_type;
+  size_type size_;
+  int* data_;
+public:
+  /* implicit */ DataState(const Vector& v) {
+    size_ = v.size();
+    if (size_ != 0) {
+      data_ = new int[size_];
+      for (size_type i = 0; i < size_; ++i) {
+        data_[i] = convertToInt(v.data()[i]);
+      }
+    } else {
+      data_ = nullptr;
+    }
+  }
+  // Rule of Three: data_ is an owning pointer, so the implicitly-generated
+  // copy operations would alias it and double-delete.  Deep-copy instead.
+  DataState(const DataState& o) : size_(o.size_) {
+    if (size_ != 0) {
+      data_ = new int[size_];
+      std::copy(o.data_, o.data_ + size_, data_);
+    } else {
+      data_ = nullptr;
+    }
+  }
+  DataState& operator=(const DataState& o) {
+    if (this != &o) {
+      // Copy-and-swap keeps assignment exception-safe.
+      DataState tmp(o);
+      std::swap(size_, tmp.size_);
+      std::swap(data_, tmp.data_);
+    }
+    return *this;
+  }
+  ~DataState() {
+    delete[] data_;
+  }
+
+  bool operator==(const DataState& o) const {
+    if (size_ != o.size_) return false;
+    for (size_type i = 0; i < size_; ++i) {
+      if (data_[i] != o.data_[i]) return false;
+    }
+    return true;
+  }
+
+  int operator[](size_type i) {
+    if (i >= size_) {
+      cerr << "trying to access DataState out of bounds" << endl;
+      exit(1);
+    }
+    return data_[i];
+  }
+
+  size_type size() { return size_; }
+};
+
+// downgrade iterators
+// Downgrade a random-access iterator to a weaker category (`tag`) so tests
+// can feed forward/input iterators to range overloads.  The shared set of
+// already-dereferenced positions aborts the test if any position is read
+// twice - the defining restriction of a single-pass input iterator.
+template <typename It, class tag>
+class Transformer : public boost::iterator_adaptor<
+                            Transformer<It, tag>,
+                            It,
+                            typename iterator_traits<It>::value_type,
+                            tag
+                          > {
+  friend class boost::iterator_core_access;
+  shared_ptr<set<It>> dereferenced;
+
+public:
+  explicit Transformer(const It& it)
+    : Transformer::iterator_adaptor_(it)
+    , dereferenced(new set<It>()) {}
+
+  typename iterator_traits<It>::value_type& dereference() const {
+    if (dereferenced->find(this->base_reference()) != dereferenced->end()) {
+      cerr << "iterator dereferenced more than once" << endl;
+      exit(1);
+    }
+    dereferenced->insert(this->base_reference());
+    return *this->base_reference();
+  }
+};
+
+// Convenience factories for the two downgraded iterator categories.
+template <typename It>
+Transformer<It, forward_iterator_tag> makeForwardIterator(const It& it) {
+  return Transformer<It, forward_iterator_tag>(it);
+}
+template <typename It>
+Transformer<It, input_iterator_tag> makeInputIterator(const It& it) {
+  return Transformer<It, input_iterator_tag>(it);
+}
+
+// mutate a value (in contract only)
+// "Mutate" a value in contract only: the assignment is never executed, but
+// requiring a non-const reference verifies at compile time that the tested
+// API hands out mutable access.
+void mutate(int& i) {
+  if (false) i = 0;
+}
+void mutate(uint64_t& i) {
+  if (false) i = 0;
+}
+template <Flags f, size_t pad>
+void mutate(Data<f, pad>& ds) {
+  if (false) ds.uid = 0;
+}
+
+//=============================================================================
+// Tests
+
+// #if 0
+
+
+
+// #else
+
+//-----------------------------------------------------------------------------
+// Container
+
+// Purely compile-time checks of the member typedefs required by the
+// container requirements table (N3337 23.2.1).
+STL_TEST("23.2.1 Table 96.1-7", containerTypedefs, is_destructible) {
+  static_assert(is_same<T, typename Vector::value_type>::value,
+    "T != Vector::value_type");
+  static_assert(is_same<T&, typename Vector::reference>::value,
+    "T& != Vector::reference");
+  static_assert(is_same<const T&, typename Vector::const_reference>::value,
+    "const T& != Vector::const_reference");
+  static_assert(is_convertible<
+      typename iterator_traits<typename Vector::iterator>::iterator_category,
+      forward_iterator_tag>::value,
+    "Vector::iterator is not a forward iterator");
+  static_assert(is_same<T,
+      typename iterator_traits<typename Vector::iterator>::value_type>::value,
+    "Vector::iterator does not iterate over type T");
+  static_assert(is_convertible<
+      typename iterator_traits<typename Vector::const_iterator>
+        ::iterator_category,
+      forward_iterator_tag>::value,
+    "Vector::const_iterator is not a forward iterator");
+  static_assert(is_same<T,
+      typename iterator_traits<typename Vector::const_iterator>
+        ::value_type>::value,
+    "Vector::const_iterator does not iterate over type T");
+  static_assert(is_convertible<
+      typename Vector::iterator, typename Vector::const_iterator>::value,
+    "Vector::iterator is not convertible to Vector::const_iterator");
+  static_assert(is_signed<typename Vector::difference_type>::value,
+    "Vector::difference_type is not signed");
+  static_assert(is_same<typename Vector::difference_type,
+      typename iterator_traits<typename Vector::iterator>
+        ::difference_type>::value,
+    "Vector::difference_type != Vector::iterator::difference_type");
+  static_assert(is_same<typename Vector::difference_type,
+      typename iterator_traits<typename Vector::const_iterator>
+        ::difference_type>::value,
+    "Vector::difference_type != Vector::const_iterator::difference_type");
+  static_assert(is_unsigned<typename Vector::size_type>::value,
+    "Vector::size_type is not unsigned");
+  static_assert(sizeof(typename Vector::size_type) >=
+      sizeof(typename Vector::difference_type),
+    "Vector::size_type is smaller than Vector::difference_type");
+}
+
+// A default-constructed vector is empty, has no capacity, uses a default
+// allocator, and touches no elements (CountTotalOps == 0).  The dead
+// `Vector();` merely checks the expression compiles.
+STL_TEST("23.2.1 Table 96.8-9", emptyConstruction, is_destructible) {
+  Vector u;
+
+  ASSERT_TRUE(u.get_allocator() == Allocator());
+  ASSERT_EQ(0, Counter::CountTotalOps);
+
+  ASSERT_TRUE(u.empty()) << u.size();
+  ASSERT_EQ(0, u.capacity());
+
+  if (false) {
+    Vector();
+  }
+}
+
+// Self-test of the harness's own building blocks (emplace_back, size, data,
+// capacity), since every other test relies on populate() using them.
+STL_TEST("framework", populate, is_copy_constructible) {
+  // We use emplace_back to construct vectors for testing, as well as size,
+  // data, and capacity. We make sure these work before proceeding with tests.
+
+  Vector u;
+  ASSERT_EQ(0, u.size());
+  ASSERT_EQ(nullptr, u.data());
+
+  u.emplace_back(17);
+  ASSERT_EQ(1, u.size());
+  ASSERT_LT(u.capacity(), 100)
+    << "single push_back increased capacity to " << u.capacity();
+  ASSERT_NE(nullptr, u.data());
+  ASSERT_EQ(17, convertToInt(u.data()[0]))
+    << "first object did not get emplaced correctly";
+
+  // Fill to capacity three times over, checking that growth happens exactly
+  // at the overflow point and values land where expected.
+  for (int i = 0; i < 3; ++i) {
+    auto cap = u.capacity();
+    while (u.size() < cap) {
+      u.emplace_back(22);
+      ASSERT_EQ(cap, u.capacity()) << "Vector grew when it did not need to";
+      ASSERT_EQ(22, convertToInt(u.data()[u.size() - 1]))
+        << "push_back with excess capacity failed";
+    }
+
+    ASSERT_EQ(cap, u.size());
+
+    u.emplace_back(4);
+    ASSERT_GT(u.capacity(), cap) << "capacity did not grow on overflow";
+    ASSERT_EQ(cap + 1, u.size());
+    ASSERT_EQ(4, convertToInt(u.data()[u.size() - 1]))
+      << "grow object did not get emplaced correctly";
+  }
+}
+
+// Table 96.10-11: copy construction deep-copies the data and obtains its
+// allocator via select_on_container_copy_construction.
+STL_TEST("23.2.1 Table 96.10-11", copyConstruction,
+          is_copy_constructible, a) {
+  const auto& ca = a;
+  DataState<Vector> dsa(ca);
+  auto am = a.get_allocator();
+
+  Vector u(ca);
+
+  ASSERT_TRUE(std::allocator_traits<Allocator>::
+    select_on_container_copy_construction(am) == u.get_allocator());
+  ASSERT_TRUE(dsa == u);
+  // empty vectors may both be nullptr; otherwise storage must be distinct
+  ASSERT_TRUE(
+     (ca.data() == nullptr && u.data() == nullptr) ||
+     (ca.data() != u.data())
+                ) << "only a shallow copy was made";
+
+  // compile-only: temporary and copy-initialization forms
+  if (false) {
+    Vector(ca);
+    Vector u = ca;
+  }
+}
+
+// Table 96.12: move construction steals the guts, propagates the allocator,
+// and performs no element operations.
+STL_TEST("23.2.1 Table 96.12", moveConstruction, is_destructible, a) {
+  DataState<Vector> dsa(a);
+  auto m = a.get_allocator();
+
+  Vector u(move(a));
+
+  ASSERT_TRUE(m == u.get_allocator());
+  ASSERT_EQ(0, Counter::CountTotalOps);
+
+  ASSERT_TRUE(dsa == u);
+
+  // compile-only: move-initialization form
+  if (false) {
+    Vector u = move(a);
+  }
+}
+
+// Table 96.13: move assignment transfers the contents and honors
+// propagate_on_container_move_assignment for the allocator.
+STL_TEST("23.2.1 Table 96.13", moveAssignment, special_move_assignable, a, b) {
+  DataState<Vector> dsb(b);
+  auto am = a.get_allocator();
+  auto bm = b.get_allocator();
+
+  Vector& ret = a = std::move(b);
+
+  if (std::allocator_traits<Allocator>::
+      propagate_on_container_move_assignment::value) {
+    ASSERT_TRUE(bm == a.get_allocator());
+  } else {
+    ASSERT_TRUE(am == a.get_allocator());
+  }
+  ASSERT_TRUE(&ret == &a);
+  // self-move is permitted to leave the object unchanged
+  ASSERT_TRUE(&a == &b || dsb == a) << "move assignment did not create a copy";
+  // The source of the move may be left in any (albeit valid) state.
+}
+
+// Table 96.14: destructibility is exercised implicitly by every generator.
+STL_TEST("23.2.1 Table 96.14", destructible, is_destructible) {
+  // The test generators check this clause already.
+}
+
+// Table 96.15-18: begin/end/cbegin/cend are consistent, non-mutating, and
+// span exactly size() elements.
+STL_TEST("23.2.1 Table 96.15-18", iterators, is_destructible, a) {
+  DataState<Vector> dsa(a);
+  const auto& ca = a;
+
+  auto  itb = a.begin();
+  auto citb = ca.begin();
+  auto Citb = a.cbegin();
+  auto  ite = a.end();
+  auto cite = ca.end();
+  auto Cite = a.cend();
+
+  // iterator retrieval must not touch elements
+  ASSERT_EQ(0, Counter::CountTotalOps);
+
+  ASSERT_TRUE(dsa == a) << "call to begin or end modified internal data";
+
+  ASSERT_TRUE(citb == Citb) << "cv.begin != v.cbegin";
+  ASSERT_TRUE(cite == Cite) << "cv.end != v.cend";
+
+  if (ca.size() == 0) {
+    ASSERT_TRUE( itb ==  ite) << "begin != end when empty";
+    ASSERT_TRUE(Citb == Cite) << "cbegin != cend when empty";
+  } else {
+    ASSERT_TRUE( itb !=  ite) << "begin == end when non-empty";
+    ASSERT_TRUE(Citb != Cite) << "cbegin == cend when non-empty";
+  }
+
+  auto  dist = std::distance( itb,  ite);
+  auto Cdist = std::distance(Citb, Cite);
+  ASSERT_TRUE( dist == ca.size()) << "distance(begin, end) != size";
+  ASSERT_TRUE(Cdist == ca.size()) << "distance(cbegin, cend) != size";
+}
+
+// Table 96.19-20: operator== reflects element-wise equality and != is its
+// exact negation.
+STL_TEST("23.2.1 Table 96.19-20", equitable, is_arithmetic, a, b) {
+  const auto& ca = a;
+  const auto& cb = b;
+  DataState<Vector> dsa(a);
+  DataState<Vector> dsb(b);
+
+  ASSERT_TRUE((bool)(ca == cb) == (bool)(dsa == dsb))
+    << "== does not return equality";
+  ASSERT_TRUE((bool)(ca == cb) != (bool)(ca != cb))
+    << "!= is not the opposite of ==";
+
+  // Data is uncomparable, by design; therefore this test's restriction
+  // is 'is_arithmetic'
+}
+
+// Table 96.21: member swap exchanges contents and storage without element
+// operations, honoring propagate_on_container_swap for the allocators.
+STL_TEST("23.2.1 Table 96.21", memberSwappable, is_destructible, a, b) {
+  if (!std::allocator_traits<Allocator>::
+      propagate_on_container_swap::value &&
+      convertToInt(a.get_allocator()) != convertToInt(b.get_allocator())) {
+    // undefined behaviour
+    return;
+  }
+
+  DataState<Vector> dsa(a);
+  DataState<Vector> dsb(b);
+  auto adata = a.data();
+  auto bdata = b.data();
+  auto am = a.get_allocator();
+  auto bm = b.get_allocator();
+
+  try {
+    a.swap(b);
+  } catch (...) {
+    FAIL() << "swap is noexcept";
+  }
+
+  if (std::allocator_traits<Allocator>::
+      propagate_on_container_swap::value) {
+    ASSERT_TRUE(bm == a.get_allocator());
+    ASSERT_TRUE(am == b.get_allocator());
+  } else {
+    ASSERT_TRUE(am == a.get_allocator());
+    ASSERT_TRUE(bm == b.get_allocator());
+  }
+  // swap must be a pure pointer exchange: no element ops allowed
+  ASSERT_EQ(0, Counter::CountTotalOps);
+
+  ASSERT_TRUE(adata == b.data() && bdata == a.data());
+  ASSERT_TRUE(dsa == b && dsb == a) << "swap did not swap";
+}
+
+// Table 96.22: the free swap(a, b) behaves identically to a.swap(b).
+STL_TEST("23.2.1 Table 96.22", nonmemberSwappable,
+          is_destructible, a, b) {
+  if (!std::allocator_traits<Allocator>::
+      propagate_on_container_swap::value &&
+      convertToInt(a.get_allocator()) != convertToInt(b.get_allocator())) {
+    // undefined behaviour
+    return;
+  }
+
+  DataState<Vector> dsa(a);
+  DataState<Vector> dsb(b);
+  auto adata = a.data();
+  auto bdata = b.data();
+  auto am = a.get_allocator();
+  auto bm = b.get_allocator();
+
+  try {
+    swap(a, b);
+  } catch (...) {
+    FAIL() << "swap is noexcept";
+  }
+
+  if (std::allocator_traits<Allocator>::
+      propagate_on_container_swap::value) {
+    ASSERT_TRUE(bm == a.get_allocator());
+    ASSERT_TRUE(am == b.get_allocator());
+  } else {
+    ASSERT_TRUE(am == a.get_allocator());
+    ASSERT_TRUE(bm == b.get_allocator());
+  }
+  // swap must be a pure pointer exchange: no element ops allowed
+  ASSERT_EQ(0, Counter::CountTotalOps);
+
+  ASSERT_TRUE(adata == b.data() && bdata == a.data());
+  ASSERT_TRUE(dsa == b && dsb == a) << "swap did not swap";
+}
+
+// Table 96.23: copy assignment copies the contents and honors
+// propagate_on_container_copy_assignment for the allocator.
+STL_TEST("23.2.1 Table 96.23", copyAssign,
+          is_copy_constructibleAndAssignable, a, b) {
+  // it is possible to make use of just the copy constructor.
+
+  #ifdef USING_STD_VECTOR
+  if (std::allocator_traits<Allocator>::
+      propagate_on_container_copy_assignment::value &&
+      convertToInt(a.get_allocator()) != convertToInt(b.get_allocator())) {
+    // Bug. By the looks of things, in the above case, their bez is being
+    // cleared and deallocated, but then the garbage pointers are being used.
+    // NOTE(review): "bez" appears to mean the internal buffer; confirm
+    // whether this stdlib bug still reproduces before removing the guard.
+    return;
+  }
+  #endif
+
+  const auto& cb = b;
+  DataState<Vector> dsb(cb);
+  auto am = a.get_allocator();
+  auto bm = b.get_allocator();
+
+  Vector& ret = a = cb;
+
+  if (std::allocator_traits<Allocator>::
+      propagate_on_container_copy_assignment::value) {
+    ASSERT_TRUE(bm == a.get_allocator());
+  } else {
+    ASSERT_TRUE(am == a.get_allocator());
+  }
+  ASSERT_TRUE(&ret == &a);
+  ASSERT_TRUE(dsb == a) << "copy-assign not equal to original";
+}
+
+// Table 96.24-26: size/max_size/empty are exercised by the generators.
+STL_TEST("23.2.1 Table 96.24-26", sizeops, is_destructible) {
+  // The test generators check this clause already.
+}
+
+//-----------------------------------------------------------------------------
+// Reversible container
+
+// Table 97.1-2: the reversible-container typedefs must be the
+// std::reverse_iterator adaptors over the container's own iterator types.
+// (Fixed the garbled static_assert diagnostics: the old messages read
+// "reverse_iterator<Vector:iterator" and named const_reverse_iterator on
+// the wrong side.)
+STL_TEST("23.2.1 Table 97.1-2", reversibleContainerTypedefs,
+          is_destructible) {
+  static_assert(is_same<typename Vector::reverse_iterator,
+      std::reverse_iterator<typename Vector::iterator>>::value,
+    "Vector::reverse_iterator != std::reverse_iterator<Vector::iterator>");
+  static_assert(is_same<typename Vector::const_reverse_iterator,
+      std::reverse_iterator<typename Vector::const_iterator>>::value,
+    "Vector::const_reverse_iterator != "
+    "std::reverse_iterator<Vector::const_iterator>");
+}
+
+// Table 97.3-5: rbegin/rend/crbegin/crend are consistent, non-mutating, and
+// span exactly size() elements.
+STL_TEST("23.2.1 Table 97.3-5", reversibleIterators, is_destructible, a) {
+  const auto& ca = a;
+  DataState<Vector> ds(a);
+
+  auto  ritb = a.rbegin();
+  auto critb = ca.rbegin();
+  auto Critb = a.crbegin();
+  auto  rite = a.rend();
+  auto crite = ca.rend();
+  auto Crite = a.crend();
+
+  // iterator retrieval must not touch elements
+  ASSERT_EQ(0, Counter::CountTotalOps);
+
+  ASSERT_TRUE(ds == a) << "call to rbegin or rend modified internal data";
+
+  ASSERT_TRUE(critb == Critb) << "cv.rbegin != v.crbegin";
+  ASSERT_TRUE(crite == Crite) << "cv.rend != v.crend";
+
+  if (ca.size() == 0) {
+    ASSERT_TRUE( ritb ==  rite) << "rbegin != rend when empty";
+    ASSERT_TRUE(Critb == Crite) << "crbegin != crend when empty";
+  } else {
+    ASSERT_TRUE( ritb !=  rite) << "rbegin == rend when non-empty";
+    ASSERT_TRUE(Critb != Crite) << "crbegin == crend when non-empty";
+  }
+
+  auto  dist = std::distance( ritb,  rite);
+  auto Cdist = std::distance(Critb, Crite);
+  ASSERT_TRUE( dist == ca.size()) << "distance(rbegin, rend) != size";
+  ASSERT_TRUE(Cdist == ca.size()) << "distance(crbegin, crend) != size";
+}
+
+//-----------------------------------------------------------------------------
+// Lexicographical functions
+
+// Table 98: relational operators implement lexicographical comparison.
+STL_TEST("23.2.1 Table 98", comparable, is_arithmetic) {
+  const Vector v1 = { 1, 2, 3, 4 };
+  const Vector v2 = { 1, 2, 3, 4, 5 };  // longer, same prefix
+  const Vector v3 = { 1, 2, 2 };        // smaller element wins over length
+  const Vector v4 = { 1, 2, 2, 4, 5 };
+  const Vector v5 = { };                // empty compares least
+  const Vector v6 = { 1, 2, 3, 4 };     // equal to v1
+
+  ASSERT_TRUE(v1 < v2);
+  ASSERT_TRUE(v1 > v3);
+  ASSERT_TRUE(v1 > v4);
+  ASSERT_TRUE(v1 > v5);
+  ASSERT_TRUE(v1 <= v6);
+  ASSERT_TRUE(v1 >= v6);
+}
+
+//-----------------------------------------------------------------------------
+// Allocator-aware requirements (AA)
+
+// Table 99.1: the allocator's value_type must match the element type.
+STL_TEST("23.2.1 Table 99.1", allocatorTypedefs, is_destructible) {
+  static_assert(is_same<T, typename Vector::allocator_type::value_type>::value,
+    "Vector and vector's allocator value_type mismatch");
+}
+
+// Table 99.2: get_allocator returns a copy of the container's allocator.
+STL_TEST("23.2.1 Table 99.2", getAllocator, is_destructible) {
+  // whitebox: ensure that a.get_allocator() returns a copy of its allocator
+}
+
+// Table 99.3: default-allocator construction; covered elsewhere.
+STL_TEST("23.2.1 Table 99.3", defaultAllocator, is_destructible) {
+  // there is nothing new to test here
+}
+
+// Table 99.4: construction with a supplied allocator stores that allocator.
+STL_TEST("23.2.1 Table 99.4", customAllocator, is_destructible, m) {
+  const auto& cm = m;
+
+  Vector u(cm);
+
+  ASSERT_TRUE(u.get_allocator() == m);
+
+  // compile-only: temporary form
+  if (false) {
+    Vector(m);
+  }
+}
+
+// Table 99.5: copy construction with an explicit allocator deep-copies the
+// data and uses exactly the supplied allocator.
+STL_TEST("23.2.1 Table 99.5", copyWithAllocator, is_copy_constructible, a, m) {
+  DataState<Vector> dsa(a);
+  const auto& ca = a;
+  const auto& cm = m;
+
+  Vector u(ca, cm);
+
+  ASSERT_TRUE(u.get_allocator() == m);
+  ASSERT_TRUE(dsa == u);
+  // empty vectors may both be nullptr; otherwise storage must be distinct
+  ASSERT_TRUE(
+     (ca.data() == nullptr && u.data() == nullptr) ||
+     (ca.data() != u.data())
+                ) << "only a shallow copy was made";
+}
+
+// Table 99.6 (same-allocator case): covered by plain move construction.
+STL_TEST("23.2.1 Table 99.6", moveConstructionWithAllocator,
+          is_destructible, a) {
+  // there is nothing new to test here
+}
+
+// Table 99.6: move construction with a supplied allocator. If the allocators
+// differ the move degrades to an element-wise (deep) move; if they match it
+// must be a free pointer steal.
+STL_TEST("23.2.1 Table 99.6", moveConstructionWithAllocatorSupplied,
+          is_move_constructible, a, m) {
+  bool deep = m != a.get_allocator();
+  auto osize = a.size();
+  auto oalloc = AllocTracker::Constructed;
+  const auto& cm = m;
+
+  Vector u(std::move(a), cm);
+
+  ASSERT_TRUE(u.get_allocator() == m);
+
+  if (deep) {
+    // a deep move constructs one element per source element
+    if (!AllocTracker::Allocated.empty()) {
+      ASSERT_EQ(osize, AllocTracker::Constructed - oalloc);
+    }
+  } else {
+    ASSERT_EQ(0, Counter::CountTotalOps);
+  }
+}
+
+// Table 99.7-9: allocator-aware assignment; covered by the assign tests.
+STL_TEST("23.2.1 Table 99.7-9", allocAssign, is_destructible) {
+  // there is nothing new to test here
+}
+
+// 23.2.1-7: Vector(n, allocator) overload (not provided by std::vector
+// pre-C++14, hence the guard).
+STL_TEST("23.2.1-7", nAllocConstruction, is_copy_constructible, n, m) {
+  #ifndef USING_STD_VECTOR
+  const auto& cm = m;
+
+  Vector u(n, cm);
+
+  ASSERT_TRUE(m == u.get_allocator());
+  #endif
+}
+
+// 23.2.1-7: Vector(n, t, allocator) stores the supplied allocator.
+STL_TEST("23.2.1-7", nCopyAllocConstruction, is_copy_constructible, n, t, m) {
+  const auto& cm = m;
+  const auto& ct = t;
+
+  Vector u(n, ct, cm);
+
+  ASSERT_TRUE(m == u.get_allocator());
+}
+
+// 23.2.1-7: range construction (forward iterators) with an allocator.
+STL_TEST("23.2.1-7", forwardIteratorAllocConstruction,
+          is_destructible, i, j, m) {
+  auto fi = makeForwardIterator(i);
+  auto fj = makeForwardIterator(j);
+  const auto& cfi = fi;
+  const auto& cfj = fj;
+  const auto& cm = m;
+
+  Vector u(cfi, cfj, cm);
+
+  ASSERT_TRUE(m == u.get_allocator());
+}
+
+// 23.2.1-7: range construction (single-pass input iterators) with an
+// allocator.
+STL_TEST("23.2.1-7", inputIteratorAllocConstruction,
+          is_move_constructible, i, j, m) {
+  #ifdef USING_STD_VECTOR
+  if (Ticker::TicksLeft >= 0) return;
+  #endif
+
+  auto ii = makeInputIterator(i);
+  auto ij = makeInputIterator(j);
+  const auto& cii = ii;
+  const auto& cij = ij;
+  const auto& cm = m;
+
+  Vector u(cii, cij, cm);
+
+  ASSERT_TRUE(m == u.get_allocator());
+}
+
+// 23.2.1-7: initializer-list construction with an allocator.
+STL_TEST("23.2.1-7", ilAllocConstruction, is_arithmetic, m) {
+  // gcc fail
+  if (Ticker::TicksLeft >= 0) return;
+
+  const auto& cm = m;
+
+  Vector u({ 1, 4, 7 }, cm);
+
+  ASSERT_TRUE(m == u.get_allocator());
+}
+
+//-----------------------------------------------------------------------------
+// Data races
+
+// 23.2.2: compile-only check that all read-only accessors are callable on a
+// const container (a prerequisite for the data-race guarantees).
+STL_TEST("23.2.2", dataRaces, is_destructible) {
+  if (false) {
+    const Vector* cv = nullptr;
+    typename Vector::size_type* s = nullptr;
+
+    cv->begin();
+    cv->end();
+    cv->rbegin();
+    cv->rend();
+    cv->front();
+    cv->back();
+    cv->data();
+
+    (*cv).at(*s);
+    (*cv)[*s];
+  }
+
+  // White-box: check that the non-const versions of each of the above
+  // functions is implemented in terms of (or the same as) the const version
+}
+
+//-----------------------------------------------------------------------------
+// Sequence container
+
+// Table 100.1 (alt): Vector(n) value-constructs exactly n elements using
+// only default constructions.
+STL_TEST("23.2.3 Table 100.1, alt", nConstruction, is_constructible, n) {
+  Vector u(n);
+
+  ASSERT_TRUE(Allocator() == u.get_allocator());
+  ASSERT_EQ(n, u.size());
+  // every element op must have been a default construction
+  ASSERT_EQ(Counter::CountTotalOps, Counter::CountDC);
+}
+
+// Table 100.1: Vector(n, t) contains n copies of t.
+STL_TEST("23.2.3 Table 100.1", nCopyConstruction,
+          is_copy_constructible, n, t) {
+  const auto& ct = t;
+
+  Vector u(n, ct);
+
+  ASSERT_TRUE(Allocator() == u.get_allocator());
+  ASSERT_EQ(n, u.size()) << "Vector(n, t).size() != n" << endl;
+  for (const auto& val : u) ASSERT_EQ(convertToInt(t), convertToInt(val))
+    << "not all elements of Vector(n, t) are equal to t";
+}
+
+// Table 100.2: construction from a forward-iterator range copies the range
+// in order, with at most one element op per source element.
+STL_TEST("23.2.3 Table 100.2", forwardIteratorConstruction,
+          is_destructible, i, j) {
+  // All data is emplace-constructible from int, so we restrict to
+  // is_destructible
+
+  auto fi = makeForwardIterator(i);
+  auto fj = makeForwardIterator(j);
+  const auto& cfi = fi;
+  const auto& cfj = fj;
+
+  Vector u(cfi, cfj);
+
+  ASSERT_TRUE(Allocator() == u.get_allocator());
+  // a forward range's length is knowable up front: one construction each
+  ASSERT_LE(Counter::CountTotalOps, j-i);
+
+  ASSERT_EQ(j - i, u.size()) << "u(i,j).size() != j-i";
+  for (auto it = u.begin(); it != u.end(); ++it, ++i)
+    ASSERT_EQ(*i, convertToInt(*it)) << "u(i,j) constructed incorrectly";
+}
+
+// Table 100.2: construction from a single-pass input-iterator range.
+STL_TEST("23.2.3 Table 100.2", inputIteratorConstruction,
+          is_move_constructible, i, j) {
+  #ifdef USING_STD_VECTOR
+  if (Ticker::TicksLeft >= 0) return;
+  #endif
+
+  auto ii = makeInputIterator(i);
+  auto ij = makeInputIterator(j);
+  const auto& cii = ii;
+  const auto& cij = ij;
+
+  Vector u(cii, cij);
+
+  ASSERT_TRUE(Allocator() == u.get_allocator());
+  ASSERT_EQ(j - i, u.size()) << "u(i,j).size() != j-i";
+  for (auto it = u.begin(); it != u.end(); ++it, ++i)
+    ASSERT_EQ(*i, convertToInt(*it)) << "u(i,j) constructed incorrectly";
+}
+
+// Table 100.3: initializer-list construction yields the list's elements.
+STL_TEST("23.2.3 Table 100.3", ilConstruction, is_arithmetic) {
+  // whitebox: ensure that Vector(il) is implemented in terms of
+  // Vector(il.begin(), il.end())
+
+  // gcc fail
+  if (Ticker::TicksLeft >= 0) return;
+
+  Vector u = { 1, 4, 7 };
+
+  ASSERT_TRUE(Allocator() == u.get_allocator());
+  ASSERT_EQ(3, u.size()) << "u(il).size() fail";
+  int i = 1;
+  auto it = u.begin();
+  for (; it != u.end(); ++it, i += 3)
+    ASSERT_EQ(i, convertToInt(*it)) << "u(il) constructed incorrectly";
+}
+
+// Table 100.4: initializer-list assignment replaces the contents, keeps the
+// allocator, and returns *this.
+STL_TEST("23.2.3 Table 100.4", ilAssignment,
+          is_arithmetic, a) {
+  // whitebox: ensure that assign(il) is implemented in terms of
+  // assign(il.begin(), il.end())
+
+  // gcc fail
+  if (Ticker::TicksLeft >= 0) return;
+
+  auto am = a.get_allocator();
+
+  Vector& b = a = { 1, 4, 7 };
+
+  ASSERT_TRUE(am == a.get_allocator());
+  ASSERT_TRUE(&b == &a) << "'a = ...' did not return *this";
+
+  ASSERT_EQ(3, a.size()) << "u(il).size() fail";
+  int i = 1;
+  auto it = a.begin();
+  for (; it != a.end(); ++it, i += 3)
+    ASSERT_EQ(i, convertToInt(*it)) << "u(il) constructed incorrectly";
+}
+
+//----------------------------
+// insert-and-erase subsection
+
+// Verify that inserting n copies of val at index idx into the vector whose
+// prior contents are recorded in dsa produced the expected layout in a:
+// [old prefix][n x val][old suffix].
+template <class Vector>
+void insertNTCheck(const Vector& a, DataState<Vector>& dsa,
+                   int idx, int n, int val) {
+  ASSERT_EQ(dsa.size() + n, a.size());
+  const auto* p = a.data();
+  int i = 0;
+  while (i < idx) {
+    ASSERT_EQ(dsa[i], convertToInt(p[i])) << i;
+    ++i;
+  }
+  while (i < idx + n) {
+    ASSERT_EQ(val, convertToInt(p[i])) << i;
+    ++i;
+  }
+  while (i < a.size()) {
+    ASSERT_EQ(dsa[i-n], convertToInt(p[i])) << i;
+    ++i;
+  }
+}
+
+// Table 100.5: emplace at an iterator constructs in place and returns an
+// iterator to the new element.
+STL_TEST("23.2.3 Table 100.5", iteratorEmplacement,
+          is_move_constructibleAndAssignable, a, p) {
+  DataState<Vector> dsa(a);
+  int idx = distance(a.begin(), p);
+  auto am = a.get_allocator();
+
+  auto q = a.emplace(p, 44);
+
+  ASSERT_TRUE(am == a.get_allocator());
+  ASSERT_EQ(idx, distance(a.begin(), q)) << "incorrect iterator returned";
+  insertNTCheck(a, dsa, idx, 1, 44);
+}
+
+// Table 100.6: single-element copy insert at an iterator.
+STL_TEST("23.2.3 Table 100.6", iteratorInsertion,
+          is_copy_constructibleAndAssignable, a, p, t) {
+  DataState<Vector> dsa(a);
+  int idx = distance(a.begin(), p);
+  int tval = convertToInt(t);
+  auto am = a.get_allocator();
+  const auto& ct = t;
+
+  auto q = a.insert(p, ct);
+
+  ASSERT_TRUE(am == a.get_allocator());
+  ASSERT_EQ(idx, distance(a.begin(), q)) << "incorrect iterator returned";
+  insertNTCheck(a, dsa, idx, 1, tval);
+}
+
+// Table 100.7: single-element move insert at an iterator.
+STL_TEST("23.2.3 Table 100.7", iteratorInsertionRV,
+          is_move_constructibleAndAssignable, a, p, t) {
+  // rvalue-references cannot have their address checked for aliased inserts
+  if (a.data() <= addressof(t) && addressof(t) < a.data() + a.size()) return;
+
+  DataState<Vector> dsa(a);
+  int idx = distance(a.begin(), p);
+  int tval = convertToInt(t);
+  auto am = a.get_allocator();
+
+  auto q = a.insert(p, std::move(t));
+
+  ASSERT_TRUE(am == a.get_allocator());
+  ASSERT_EQ(idx, distance(a.begin(), q)) << "incorrect iterator returned";
+  insertNTCheck(a, dsa, idx, 1, tval);
+}
+
+// Table 100.8: n-copy insert at an iterator. (Pre-C++11 std::vector returns
+// void here, hence the return-value guards.)
+STL_TEST("23.2.3 Table 100.8", iteratorInsertionN,
+          is_copy_constructibleAndAssignable, a, p, n, t) {
+  DataState<Vector> dsa(a);
+  int idx = distance(a.begin(), p);
+  int tval = convertToInt(t);
+  auto am = a.get_allocator();
+  const auto& ct = t;
+
+  #ifndef USING_STD_VECTOR
+  auto q =
+  #endif
+
+  a.insert(p, n, ct);
+
+  ASSERT_TRUE(am == a.get_allocator());
+  #ifndef USING_STD_VECTOR
+  ASSERT_EQ(idx, distance(a.begin(), q)) << "incorrect iterator returned";
+  #endif
+
+  insertNTCheck(a, dsa, idx, n, tval);
+}
+
+// Verify that inserting the range [b, e) at index idx into the vector whose
+// prior contents are recorded in dsa produced the expected layout in a:
+// [old prefix][range][old suffix].
+template <class Vector>
+void insertItCheck(const Vector& a, DataState<Vector>& dsa,
+                   int idx, int* b, int* e) {
+  const int len = e - b;
+  ASSERT_EQ(dsa.size() + len, a.size());
+  const auto* p = a.data();
+  int i = 0;
+  for (; i < idx; ++i)
+    ASSERT_EQ(dsa[i], convertToInt(p[i]));
+  for (; i < idx + len; ++i)
+    ASSERT_EQ(*(b + i - idx), convertToInt(p[i]));
+  for (; i < a.size(); ++i)
+    ASSERT_EQ(dsa[i - len], convertToInt(p[i]));
+}
+
+// Table 100.9: range insert from forward iterators. (Pre-C++11 std::vector
+// returns void here, hence the return-value guards.)
+STL_TEST("23.2.3 Table 100.9", iteratorInsertionIterator,
+          is_move_constructibleAndAssignable, a, p, i, j) {
+  DataState<Vector> dsa(a);
+  int idx = distance(a.begin(), p);
+
+  auto fi = makeForwardIterator(i);
+  auto fj = makeForwardIterator(j);
+  auto am = a.get_allocator();
+  const auto& cfi = fi;
+  const auto& cfj = fj;
+
+  #ifndef USING_STD_VECTOR
+  auto q =
+  #endif
+
+  a.insert(p, cfi, cfj);
+
+  ASSERT_TRUE(am == a.get_allocator());
+  #ifndef USING_STD_VECTOR
+  ASSERT_EQ(idx, distance(a.begin(), q)) << "incorrect iterator returned";
+  #endif
+
+  insertItCheck(a, dsa, idx, i, j);
+}
+
+// Table 100.9: range insert from single-pass input iterators.
+STL_TEST("23.2.3 Table 100.9", iteratorInsertionInputIterator,
+          is_move_constructibleAndAssignable, a, p, i, j) {
+  DataState<Vector> dsa(a);
+  int idx = distance(a.begin(), p);
+
+  auto ii = makeInputIterator(i);
+  auto ij = makeInputIterator(j);
+  auto am = a.get_allocator();
+  const auto& cii = ii;
+  const auto& cij = ij;
+
+  #ifndef USING_STD_VECTOR
+  auto q =
+  #endif
+
+  a.insert(p, cii, cij);
+
+  ASSERT_TRUE(am == a.get_allocator());
+  #ifndef USING_STD_VECTOR
+  ASSERT_EQ(idx, distance(a.begin(), q)) << "incorrect iterator returned";
+  #endif
+
+  insertItCheck(a, dsa, idx, i, j);
+}
+
+// Table 100.10: initializer-list insert at an iterator.
+STL_TEST("23.2.3 Table 100.10", iteratorInsertIL,
+          is_arithmetic, a, p) {
+  // gcc fail
+  if (Ticker::TicksLeft >= 0) return;
+
+  // whitebox: ensure that insert(p, il) is implemented in terms of
+  // insert(p, il.begin(), il.end())
+
+  DataState<Vector> dsa(a);
+  int idx = distance(a.begin(), p);
+  auto am = a.get_allocator();
+
+  #ifndef USING_STD_VECTOR
+  auto q =
+  #endif
+
+  a.insert(p, {1, 4, 7});
+
+  ASSERT_TRUE(am == a.get_allocator());
+  #ifndef USING_STD_VECTOR
+  ASSERT_EQ(idx, distance(a.begin(), q)) << "incorrect iterator returned";
+  #endif
+
+  // compare against an equivalent plain-array range
+  int ila[] = { 1, 4, 7 };
+  int* i = ila;
+  int* j = ila + 3;
+  insertItCheck(a, dsa, idx, i, j);
+}
+
+// Verify that erasing n elements starting at index idx from the vector
+// recorded in dsa left exactly the expected remaining sequence in a.
+template <class Vector>
+void eraseCheck(Vector& a, DataState<Vector>& dsa, int idx, int n) {
+  ASSERT_EQ(dsa.size() - n, a.size());
+  size_t pos = 0;
+  for (auto it = a.begin(); it != a.end(); ++it) {
+    if (pos == idx) pos += n;  // skip the erased run in the snapshot
+    ASSERT_EQ(dsa[pos], convertToInt(*it));
+    ++pos;
+  }
+}
+
+// Table 100.11: single-element erase keeps the allocator and returns an
+// iterator to the element after the erased one.
+STL_TEST("23.2.3 Table 100.11", iteratorErase, is_move_assignable, a, p) {
+  if (p == a.end()) return;
+
+  DataState<Vector> dsa(a);
+  int idx = distance(a.begin(), p);
+  auto am = a.get_allocator();
+
+  auto rit = a.erase(p);
+
+  ASSERT_TRUE(am == a.get_allocator());
+  ASSERT_EQ(idx, distance(a.begin(), rit)) << "wrong iterator returned";
+  eraseCheck(a, dsa, idx, 1);
+}
+
+// Table 100.12: range erase [p, q).
+STL_TEST("23.2.3 Table 100.12", iteratorEraseRange,
+          is_move_assignable, a, p, q) {
+  if (p == a.end()) return;
+
+  DataState<Vector> dsa(a);
+  int idx = distance(a.begin(), p);
+  auto am = a.get_allocator();
+
+  auto rit = a.erase(p, q);
+
+  ASSERT_TRUE(am == a.get_allocator());
+  ASSERT_EQ(idx, distance(a.begin(), rit)) << "wrong iterator returned";
+  eraseCheck(a, dsa, idx, distance(p,q));
+}
+
+//--------------------------------
+// end insert-and-erase subsection
+
+// Table 100.13: clear() empties the container without throwing and keeps
+// the allocator.
+STL_TEST("23.2.3 Table 100.13", clear, is_destructible, a) {
+
+  auto am = a.get_allocator();
+
+  try {
+    a.clear();
+  } catch (...) {
+    FAIL() << "clear must be noexcept";
+  }
+
+  ASSERT_TRUE(am == a.get_allocator());
+  ASSERT_TRUE(a.empty());
+}
+
+// Table 100.14: assign from a forward-iterator range replaces the contents.
+STL_TEST("23.2.3 Table 100.14", assignRange, is_move_assignable, a, i, j) {
+  auto fi = makeForwardIterator(i);
+  auto fj = makeForwardIterator(j);
+  const auto& cfi = fi;
+  const auto& cfj = fj;
+  auto am = a.get_allocator();
+
+  a.assign(cfi, cfj);
+
+  ASSERT_TRUE(am == a.get_allocator());
+  ASSERT_EQ(distance(i, j), a.size());
+  for (auto it = a.begin(); it != a.end(); ++it, ++i)
+    ASSERT_EQ(*i, convertToInt(*it));
+}
+
+// Table 100.14: assign from a single-pass input-iterator range.
+STL_TEST("23.2.3 Table 100.14", assignInputRange,
+          is_move_constructibleAndAssignable, a, i, j) {
+  auto ii = makeInputIterator(i);
+  auto ij = makeInputIterator(j);
+  const auto& cii = ii;
+  const auto& cij = ij;
+  auto am = a.get_allocator();
+
+  a.assign(cii, cij);
+
+  ASSERT_TRUE(am == a.get_allocator());
+  ASSERT_EQ(distance(i, j), a.size());
+  for (auto it = a.begin(); it != a.end(); ++it, ++i)
+    ASSERT_EQ(*i, convertToInt(*it));
+}
+
+// Table 100.15: initializer-list assign replaces the contents.
+STL_TEST("23.2.3 Table 100.15", assignIL,
+          is_arithmetic, a) {
+
+  // whitebox: ensure that assign(il) is implemented in terms of
+  // assign(il.begin(), il.end())
+
+  // gcc fail
+  if (Ticker::TicksLeft >= 0) return;
+
+  auto am = a.get_allocator();
+
+  a.assign({1, 4, 7});
+
+  ASSERT_TRUE(am == a.get_allocator());
+  int ila[] = { 1, 4, 7 };
+  int* i = ila;
+
+  ASSERT_EQ(3, a.size());
+  for (auto it = a.begin(); it != a.end(); ++it, ++i)
+    ASSERT_EQ(*i, convertToInt(*it));
+}
+
+// Table 100.16: assign(n, t) replaces the contents with n copies of t.
+STL_TEST("23.2.3 Table 100.16", assignN,
+          is_copy_constructibleAndAssignable, a, n, t) {
+  auto am = a.get_allocator();
+  auto const& ct = t;
+  auto tval = convertToInt(t);
+
+  a.assign(n, ct);
+
+  ASSERT_TRUE(am == a.get_allocator());
+  ASSERT_EQ(n, a.size());
+  for (auto it = a.begin(); it != a.end(); ++it)
+    ASSERT_EQ(tval, convertToInt(*it));
+}
+
+// Table 101.1: front() refers to the first element and touches nothing.
+STL_TEST("23.2.3 Table 101.1", front, is_destructible, a) {
+  if (a.empty()) return;
+
+  ASSERT_TRUE(addressof(a.front()) == a.data());
+
+  ASSERT_EQ(0, Counter::CountTotalOps);
+
+  // compile-only: non-const front is mutable, const front exists
+  if (false) {
+    mutate(a.front());
+    const Vector& ca = a;
+    ca.front();
+  }
+}
+
+// Table 101.2: back() refers to the last element and touches nothing.
+STL_TEST("23.2.3 Table 101.2", back, is_destructible, a) {
+  if (a.empty()) return;
+
+  ASSERT_TRUE(addressof(a.back()) == a.data() + a.size() - 1);
+
+  ASSERT_EQ(0, Counter::CountTotalOps);
+
+  // compile-only: non-const back is mutable, const back exists
+  if (false) {
+    mutate(a.back());
+    const Vector& ca = a;
+    ca.back();
+  }
+}
+
+// Table 101.4: emplace_back appends in place, never relocates when capacity
+// suffices, and gives the strong exception guarantee.
+STL_TEST("23.2.3 Table 101.4", emplaceBack,
+          is_move_constructible, a) {
+  DataState<Vector> dsa(a);
+  auto adata = a.data();
+  int excess = a.capacity() - a.size();
+  auto am = a.get_allocator();
+
+  try {
+    a.emplace_back(44);
+  } catch (...) {
+    ASSERT_TRUE(dsa == a) << "failed strong exception guarantee";
+    throw;
+  }
+
+  ASSERT_TRUE(am == a.get_allocator());
+  if (excess > 0) ASSERT_TRUE(a.data() == adata) << "unnecessary relocation";
+  ASSERT_EQ(dsa.size() + 1, a.size());
+  size_t i = 0;
+  auto it = a.begin();
+  for (; i < dsa.size(); ++i, ++it)
+    ASSERT_EQ(dsa[i], convertToInt(*it));
+  ASSERT_EQ(44, convertToInt(a.back()));
+}
+
+// Table 101.7: push_back (copy) with the same relocation and strong
+// exception guarantees.
+STL_TEST("23.2.3 Table 101.7", pushBack, is_copy_constructible, a, t) {
+  DataState<Vector> dsa(a);
+  int tval = convertToInt(t);
+  auto adata = a.data();
+  int excess = a.capacity() - a.size();
+  auto am = a.get_allocator();
+  const auto& ct = t;
+
+  try {
+    a.push_back(ct);
+  } catch (...) {
+    ASSERT_TRUE(dsa == a) << "failed strong exception guarantee";
+    throw;
+  }
+
+  ASSERT_TRUE(am == a.get_allocator());
+  if (excess > 0) ASSERT_TRUE(a.data() == adata) << "unnecessary relocation";
+  ASSERT_EQ(dsa.size() + 1, a.size());
+  size_t i = 0;
+  auto it = a.begin();
+  for (; i < dsa.size(); ++i, ++it)
+    ASSERT_EQ(dsa[i], convertToInt(*it));
+  ASSERT_EQ(tval, convertToInt(a.back()));
+}
+
+// Table 101.8: push_back (move) with the same relocation and strong
+// exception guarantees.
+STL_TEST("23.2.3 Table 101.8", pushBackRV,
+          is_move_constructible, a, t) {
+  DataState<Vector> dsa(a);
+  int tval = convertToInt(t);
+  auto adata = a.data();
+  int excess = a.capacity() - a.size();
+  auto am = a.get_allocator();
+
+  try {
+    a.push_back(move(t));
+  } catch (...) {
+    ASSERT_TRUE(dsa == a) << "failed strong exception guarantee";
+    throw;
+  }
+
+  ASSERT_TRUE(am == a.get_allocator());
+  if (excess > 0) ASSERT_TRUE(a.data() == adata) << "unnecessary relocation";
+  ASSERT_EQ(dsa.size() + 1, a.size());
+  size_t i = 0;
+  auto it = a.begin();
+  for (; i < dsa.size(); ++i, ++it)
+    ASSERT_EQ(dsa[i], convertToInt(*it));
+  ASSERT_EQ(tval, convertToInt(a.back()));
+}
+
+// pop_back removes exactly the last element.
+// NOTE(review): the "Table 100.10" label here duplicates iteratorInsertIL's
+// and looks mislabeled (pop_back is in Table 101) — confirm against the
+// standard's numbering.
+STL_TEST("23.2.3 Table 100.10", popBack, is_destructible, a) {
+  if (a.empty()) return;
+
+  DataState<Vector> dsa(a);
+  auto am = a.get_allocator();
+
+  a.pop_back();
+
+  ASSERT_TRUE(am == a.get_allocator());
+  ASSERT_EQ(dsa.size() - 1, a.size());
+  size_t i = 0;
+  auto it = a.begin();
+  for (; it != a.end(); ++it, ++i)
+    ASSERT_EQ(dsa[i], convertToInt(*it));
+}
+
+// operator[] indexes contiguous storage without element operations.
+// NOTE(review): the "Table 100.11/100.12" labels on these two tests collide
+// with the erase tests above and look mislabeled — confirm numbering.
+STL_TEST("23.2.3 Table 100.11", operatorBrace, is_destructible, a) {
+  const auto& ca = a;
+  for (int i = 0; i < ca.size(); ++i)
+    ASSERT_TRUE(addressof(ca[i]) == ca.data()+i);
+
+  ASSERT_EQ(0, Counter::CountTotalOps);
+
+  // compile-only: non-const operator[] is mutable
+  if (false) {
+    mutate(a[0]);
+  }
+}
+
+// at() behaves like operator[] in range, and throws out_of_range past the
+// end.
+STL_TEST("23.2.3 Table 100.12", at, is_destructible, a) {
+  const auto& ca = a;
+  for (int i = 0; i < ca.size(); ++i)
+    ASSERT_TRUE(addressof(ca.at(i)) == ca.data()+i);
+
+  ASSERT_EQ(0, Counter::CountTotalOps);
+
+  try {
+    ca.at(ca.size());
+    FAIL() << "at(size) should have thrown an error";
+  } catch (const std::out_of_range& e) {
+  } catch (...) {
+    FAIL() << "at(size) threw error other than out_of_range";
+  }
+
+  // compile-only: non-const at is mutable
+  if (false) {
+    mutate(a.at(0));
+  }
+}
+
+// Compile-only: every range-taking member must accept move_iterator-wrapped
+// forward and input iterators.
+STL_TEST("move iterators", moveIterators, is_copy_constructibleAndAssignable) {
+  if (false) {
+    int* i = nullptr;
+    int* j = nullptr;
+
+    auto mfi = make_move_iterator(makeForwardIterator(i));
+    auto mfj = make_move_iterator(makeForwardIterator(j));
+    auto mii = make_move_iterator(makeInputIterator(i));
+    auto mij = make_move_iterator(makeInputIterator(j));
+
+    Vector u1(mfi, mfj);
+    Vector u2(mii, mij);
+
+    u1.insert(u1.begin(), mfi, mfj);
+    u1.insert(u1.begin(), mii, mij);
+
+    u1.assign(mfi, mfj);
+    u1.assign(mii, mij);
+  }
+}
+
+//-----------------------------------------------------------------------------
+// Vector-specifics
+
+// 23.3.6.4: data()/capacity() underpin DataState and the populate test.
+STL_TEST("23.3.6.4", dataAndCapacity, is_destructible) {
+  // there isn't anything new to test here - data and capacity are used as the
+  // backbone of DataState. The minimal testing we might want to do is already
+  // done in the populate test
+}
+
+// 23.3.6.3: reserve is a no-op at or below current capacity, otherwise grows
+// to at least n with at most one move + destroy per element.
+STL_TEST("23.3.6.3", reserve, is_move_constructible, a, n) {
+  auto adata = a.data();
+  auto ocap = a.capacity();
+  auto am = a.get_allocator();
+
+  a.reserve(n);
+
+  ASSERT_TRUE(am == a.get_allocator());
+  if (n <= ocap) {
+    ASSERT_EQ(0, Counter::CountTotalOps);
+    ASSERT_TRUE(adata == a.data());
+  } else {
+    ASSERT_TRUE(a.capacity() >= n);
+    ASSERT_LE(Counter::CountTotalOps, 2*a.size()); // move and delete
+  }
+}
+
+// 23.3.6.3: reserving beyond max_size must throw length_error.
+STL_TEST("23.3.6.3", lengthError, is_move_constructible) {
+  auto mx = Vector().max_size();
+  auto big = mx+1;
+  if (mx >= big) return; // max_size is the biggest size_type; overflowed
+
+  Vector u;
+  try {
+    u.reserve(big);
+    FAIL() << "reserve(big) should have thrown an error";
+  } catch (const std::length_error& e) {
+  } catch (...) {
+    FAIL() << "reserve(big) threw error other than length_error";
+  }
+}
+
+// 23.3.6.3: resize(n) truncates or value-extends, preserving the surviving
+// prefix.
+STL_TEST("23.3.6.3", resize, is_copy_constructible, a, n) {
+  DataState<Vector> dsa(a);
+  int sz = a.size();
+  auto am = a.get_allocator();
+
+  a.resize(n);
+
+  ASSERT_TRUE(am == a.get_allocator());
+  ASSERT_EQ(n, a.size());
+
+  if (n <= sz) {
+    for (int i = 0; i < n; ++i) {
+      ASSERT_EQ(dsa[i], convertToInt(a[i]));
+    }
+  } else {
+    for (int i = 0; i < sz; ++i) {
+      ASSERT_EQ(dsa[i], convertToInt(a[i]));
+    }
+  }
+}
+
+// 23.3.6.3: resize(n, t) truncates or extends with copies of t.
+STL_TEST("23.3.6.3", resizeT, is_copy_constructibleAndAssignable, a, n, t) {
+  #ifdef USING_STD_VECTOR
+  // aliased fill value: skip, std::vector behavior differs here
+  if (a.data() <= addressof(t) && addressof(t) < a.data() + a.size()) return;
+  #endif
+
+  DataState<Vector> dsa(a);
+  int sz = a.size();
+  auto am = a.get_allocator();
+  const auto& ct = t;
+  int val = convertToInt(t);
+
+  a.resize(n, ct);
+
+  ASSERT_TRUE(am == a.get_allocator());
+  ASSERT_EQ(n, a.size());
+
+  if (n <= sz) {
+    for (int i = 0; i < n; ++i) {
+      ASSERT_EQ(dsa[i], convertToInt(a[i]));
+    }
+  } else {
+    int i = 0;
+    for ( ; i < sz; ++i) {
+      ASSERT_EQ(dsa[i], convertToInt(a[i]));
+    }
+    for ( ; i < n; ++i) {
+      ASSERT_EQ(val, convertToInt(a[i]));
+    }
+  }
+}
+
+// 23.3.6.3: shrink_to_fit reduces capacity, preserves contents, and must
+// swallow any exception raised while reallocating.
+STL_TEST("23.3.6.3", shrinkToFit, is_move_constructible, a) {
+  bool willThrow = Ticker::TicksLeft >= 0;
+
+  // inflate capacity so there is something to shrink
+  a.reserve(a.capacity() * 11);
+
+  auto ocap = a.capacity();
+  DataState<Vector> dsa(a);
+
+  auto am = a.get_allocator();
+
+  try {
+    a.shrink_to_fit();
+  } catch (...) {
+    FAIL() << "shrink_to_fit should swallow errors";
+  }
+
+  ASSERT_TRUE(am == a.get_allocator());
+  ASSERT_TRUE(dsa == a);
+  if (willThrow) {
+    //ASSERT_EQ(ocap, a.capacity()); might shrink in place
+    // re-raise so the throw-injection framework sees its exception consumed
+    throw TickException("I swallowed the error");
+  } else {
+    ASSERT_TRUE(a.capacity() == 0 || a.capacity() < ocap) << "Look into this";
+  }
+}
+
+#ifndef USING_STD_VECTOR
+// fbvector-specific: the default allocator must occupy no storage (empty
+// base optimization), so the vector is exactly three pointers.
+STL_TEST("EBO", ebo, is_destructible) {
+  static_assert(!is_same<Allocator, std::allocator<T>>::value ||
+                sizeof(Vector) == 3 * sizeof(void*),
+    "fbvector has default allocator, but has size != 3*sizeof(void*)");
+}
+
+// fbvector-specific: relinquish() releases ownership of the buffer to the
+// caller, leaving the vector empty; the caller must destroy and deallocate.
+STL_TEST("relinquish", relinquish, is_destructible, a) {
+  auto sz = a.size();
+  auto cap = a.capacity();
+  auto data = a.data();
+
+  auto guts = relinquish(a);
+
+  ASSERT_EQ(data, guts);
+  ASSERT_TRUE(a.empty());
+  ASSERT_EQ(0, a.capacity());
+
+  // clean up the buffer we now own
+  auto alloc = a.get_allocator();
+  for (size_t i = 0; i < sz; ++i)
+    std::allocator_traits<decltype(alloc)>::destroy(alloc, guts + i);
+  if (guts != nullptr)
+    std::allocator_traits<decltype(alloc)>::deallocate(alloc, guts, cap);
+}
+
+// fbvector-specific: attach() adopts an external buffer; round-tripping
+// through relinquish/attach must restore the original contents.
+STL_TEST("attach", attach, is_destructible, a) {
+  DataState<Vector> dsa(a);
+
+  auto sz = a.size();
+  auto cap = a.capacity();
+  auto guts = relinquish(a);
+
+  ASSERT_EQ(a.data(), nullptr);
+  attach(a, guts, sz, cap);
+
+  ASSERT_TRUE(dsa == a);
+}
+
+#endif
+
+// #endif
+
+// Test driver: initialize gtest and gflags, then run every registered test.
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  google::ParseCommandLineFlags(&argc, &argv, true);
+
+  return RUN_ALL_TESTS();
+}
+
+#else // GCC 4.7 guard
+
+// Fallback when the compiler predates the GCC 4.7 features the tests need:
+// build an empty, trivially-passing binary.
+int main(int argc, char** argv) {
+  return 0;
+}
+
+#endif // GCC 4.7 guard
+