X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=folly%2Fsmall_vector.h;h=1a8c542b33d83431c67645f9aab3aed2fceddad0;hb=46e3ed2921bd4e5de81fbc36fcb6a82d30d1499f;hp=911f0630fc936439c31bfffbcf96c264aef9b649;hpb=83da0042ba4f4c6622bde2002a0bd968de1ed701;p=folly.git

diff --git a/folly/small_vector.h b/folly/small_vector.h
index 911f0630..1a8c542b 100644
--- a/folly/small_vector.h
+++ b/folly/small_vector.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2012 Facebook, Inc.
+ * Copyright 2014 Facebook, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -15,7 +15,8 @@
  */

 /*
- * For high-level documentation and usage examples see folly/doc/small_vector.md
+ * For high-level documentation and usage examples see
+ * folly/docs/small_vector.md
  *
  * @author Jordan DeLong
  */
@@ -45,18 +46,22 @@
 #include
 #include

-#include "folly/Malloc.h"
+#include

-#if defined(__GNUC__) && defined(__x86_64__)
+#if defined(__GNUC__) && FOLLY_X64
 # include "folly/SmallLocks.h"
-# define FB_PACKED __attribute__((packed))
+# define FB_PACK_ATTR FOLLY_PACK_ATTR
+# define FB_PACK_PUSH FOLLY_PACK_PUSH
+# define FB_PACK_POP FOLLY_PACK_POP
 #else
-# define FB_PACKED
+# define FB_PACK_ATTR
+# define FB_PACK_PUSH
+# define FB_PACK_POP
 #endif

-#ifdef FOLLY_HAVE_MALLOC_SIZE
+#if FOLLY_HAVE_MALLOC_SIZE
 extern "C" std::size_t malloc_size(const void*);
-# ifndef FOLLY_HAVE_MALLOC_USABLE_SIZE
+# if !FOLLY_HAVE_MALLOC_USABLE_SIZE
 #  define malloc_usable_size malloc_size
 # endif
 # ifndef malloc_usable_size
@@ -64,6 +69,10 @@
 # endif
 #endif

+// Ignore shadowing warnings within this file, so includers can use -Wshadow.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+
 namespace folly {

 //////////////////////////////////////////////////////////////////////
@@ -78,21 +87,6 @@ namespace small_vector_policy {
  */
 struct NoHeap;

-/*
- * Passing this policy will cause small_vector to provide lock() and
- * unlock() functions using a 1-bit spin lock in the size value.
- *
- * Note that this is intended for a fairly specialized (although
- * strangely common at facebook) use case, where you have billions of
- * vectors in memory where none of them are "hot" and most of them are
- * small.  This allows you to get fine-grained locks without spending
- * a lot of memory on mutexes (the alternative of a large hashtable of
- * locks leads to extra cache misses in the lookup path).
- *
- * __x86_64__ only.
- */
-struct OneBitMutex;
-
 //////////////////////////////////////////////////////////////////////

 } // small_vector_policy
@@ -112,7 +106,7 @@ namespace detail {
    */
   template <class T>
   typename std::enable_if<
-    !boost::has_trivial_copy<T>::value
+    !FOLLY_IS_TRIVIALLY_COPYABLE(T)
   >::type
   moveToUninitialized(T* first, T* last, T* out) {
     auto const count = last - first;
@@ -134,11 +128,10 @@ namespace detail {
     }
   }

-  // Specialization for trivially copyable types. (TODO: change to
-  // std::is_trivially_copyable when that works.)
+  // Specialization for trivially copyable types.
   template <class T>
   typename std::enable_if<
-    boost::has_trivial_copy<T>::value
+    FOLLY_IS_TRIVIALLY_COPYABLE(T)
   >::type
   moveToUninitialized(T* first, T* last, T* out) {
     std::memmove(out, first, (last - first) * sizeof *first);
   }
@@ -152,7 +145,7 @@ namespace detail {
    */
   template <class T>
   typename std::enable_if<
-    !boost::has_trivial_copy<T>::value
+    !FOLLY_IS_TRIVIALLY_COPYABLE(T)
   >::type
   moveObjectsRight(T* first, T* lastConstructed, T* realLast) {
     if (lastConstructed == realLast) {
@@ -191,7 +184,7 @@ namespace detail {
   // change to std::is_trivially_copyable when that works.)
   template <class T>
   typename std::enable_if<
-    boost::has_trivial_copy<T>::value
+    FOLLY_IS_TRIVIALLY_COPYABLE(T)
   >::type
   moveObjectsRight(T* first, T* lastConstructed, T* realLast) {
     std::move_backward(first, lastConstructed, realLast);
@@ -205,7 +198,7 @@ namespace detail {
   void populateMemForward(T* mem, std::size_t n, Function const& op) {
     std::size_t idx = 0;
     try {
-      for (int i = 0; i < n; ++i) {
+      for (size_t i = 0; i < n; ++i) {
         op(&mem[idx]);
         ++idx;
       }
@@ -224,7 +217,7 @@ namespace detail {
     IntegralSizePolicy() : size_(0) {}

   protected:
-    std::size_t policyMaxSize() const {
+    static constexpr std::size_t policyMaxSize() {
       return SizeType(~kExternMask);
     }
@@ -264,65 +257,6 @@ namespace detail {
     SizeType size_;
   };

-#ifdef __x86_64__
-  template <class SizeType, bool ShouldUseHeap>
-  struct OneBitMutexImpl {
-    typedef SizeType InternalSizeType;
-
-    OneBitMutexImpl() { psl_.init(); }
-
-    void lock()     const { psl_.lock(); }
-    void unlock()   const { psl_.unlock(); }
-    bool try_lock() const { return psl_.try_lock(); }
-
-  protected:
-    static bool const kShouldUseHeap = ShouldUseHeap;
-
-    std::size_t policyMaxSize() const {
-      return SizeType(~(SizeType(1) << kLockBit | kExternMask));
-    }
-
-    std::size_t doSize() const {
-      return psl_.getData() & ~kExternMask;
-    }
-
-    std::size_t isExtern() const {
-      return psl_.getData() & kExternMask;
-    }
-
-    void setExtern(bool b) {
-      if (b) {
-        setSize(SizeType(doSize()) | kExternMask);
-      } else {
-        setSize(SizeType(doSize()) & ~kExternMask);
-      }
-    }
-
-    void setSize(std::size_t sz) {
-      assert(sz < (std::size_t(1) << kLockBit));
-      psl_.setData((kExternMask & psl_.getData()) | SizeType(sz));
-    }
-
-    void swapSizePolicy(OneBitMutexImpl& o) {
-      std::swap(psl_, o.psl_);
-    }
-
-  private:
-    static SizeType const kLockBit = sizeof(SizeType) * 8 - 1;
-    static SizeType const kExternMask =
-      kShouldUseHeap ? SizeType(1) << (sizeof(SizeType) * 8 - 2)
-                     : 0;
-
-    PicoSpinLock<SizeType, kLockBit> psl_;
-  };
-#else
-  template <class SizeType, bool ShouldUseHeap>
-  struct OneBitMutexImpl {
-    static_assert(std::is_same::value,
-                  "OneBitMutex only works on x86-64");
-  };
-#endif
-
   /*
    * If you're just trying to use this class, ignore everything about
    * this next small_vector_base class thing.
@@ -362,17 +296,6 @@ namespace detail {
                   mpl::size::value == 1,
                   "Multiple size types specified in small_vector<>");

-    /*
-     * Figure out if we're supposed to supply a one-bit mutex. :)
-     */
-    typedef typename mpl::count<
-      PolicyList,small_vector_policy::OneBitMutex
-    >::type HasMutex;
-
-    static_assert(HasMutex::value == 0 || HasMutex::value == 1,
-                  "Multiple copies of small_vector_policy::OneBitMutex "
-                  "supplied; this is probably a mistake");
-
     /*
      * Determine whether we should allow spilling to the heap or not.
      */
@@ -387,11 +310,8 @@ namespace detail {
     /*
      * Make the real policy base classes.
      */
-    typedef typename mpl::if_<
-      HasMutex,
-      OneBitMutexImpl<SizeType,ShouldUseHeap>,
-      IntegralSizePolicy<SizeType,ShouldUseHeap>
-    >::type ActualSizePolicy;
+    typedef IntegralSizePolicy<SizeType,ShouldUseHeap>
+      ActualSizePolicy;

     /*
      * Now inherit from them all.  This is done in such a convoluted
@@ -414,7 +334,8 @@ namespace detail {
   }
   template <class T> T* pointerFlagClear(T* p) {
-    return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(p) & ~1);
+    return reinterpret_cast<T*>(
+      reinterpret_cast<uintptr_t>(p) & ~uintptr_t(1));
   }
   inline void* shiftPointer(void* p, size_t sizeBytes) {
     return static_cast<char*>(p) + sizeBytes;
   }
@@ -422,7 +343,7 @@
 }

 //////////////////////////////////////////////////////////////////////
-
+FB_PACK_PUSH
 template
+      if (this->isExtern()) u.freeHeap();
+      throw;
+    }
+    this->setSize(n);
   }

   small_vector(small_vector&& o) {
-    *this = std::move(o);
+    if (o.isExtern()) {
+      swap(o);
+    } else {
+      std::uninitialized_copy(std::make_move_iterator(o.begin()),
+                              std::make_move_iterator(o.end()),
+                              begin());
+      this->setSize(o.size());
+    }
   }

   small_vector(std::initializer_list<value_type> il) {
@@ -503,16 +439,11 @@ public:
   }

   small_vector& operator=(small_vector&& o) {
+    // TODO: optimization:
+    // if both are internal, use move assignment where possible
+    if (this == &o) return *this;
     clear();
-    if (!o.isExtern()) {
-      makeSize(o.size());
-      for (std::size_t i = 0; i < o.size(); ++i) {
-        new (data() + i) value_type(std::move(o[i]));
-      }
-      this->setSize(o.size());
-    } else {
-      swap(o);
-    }
+    swap(o);
     return *this;
   }
@@ -524,9 +455,9 @@ public:
     return std::lexicographical_compare(begin(), end(), o.begin(), o.end());
   }

-  size_type max_size() const {
+  static constexpr size_type max_size() {
     return !BaseType::kShouldUseHeap ? MaxInline
-      : this->policyMaxSize();
+      : BaseType::policyMaxSize();
   }

   size_type size() const { return this->doSize(); }
@@ -587,19 +518,23 @@ public:
     }

     size_type i = oldSmall.size();
+    const size_type ci = i;
     try {
       for (; i < oldLarge.size(); ++i) {
-        new (&oldSmall[i]) value_type(std::move(oldLarge[i]));
+        auto addr = oldSmall.begin() + i;
+        new (addr) value_type(std::move(oldLarge[i]));
         oldLarge[i].~value_type();
       }
     } catch (...) {
+      oldSmall.setSize(i);
       for (; i < oldLarge.size(); ++i) {
         oldLarge[i].~value_type();
       }
-      oldLarge.setSize(oldSmall.size());
+      oldLarge.setSize(ci);
       throw;
     }
-    this->swapSizePolicy(o);
+    oldSmall.setSize(i);
+    oldLarge.setSize(ci);
     return;
   }
@@ -802,8 +737,9 @@ public:
   }

   iterator erase(const_iterator q1, const_iterator q2) {
+    if (q1 == q2) return unconst(q1);
     std::move(unconst(q2), end(), unconst(q1));
-    for (auto it = q1; it != end(); ++it) {
+    for (auto it = (end() - std::distance(q1, q2)); it != end(); ++it) {
       it->~value_type();
     }
     this->setSize(size() - (q2 - q1));
@@ -961,7 +897,7 @@ private:
     doConstruct(n, val);
   }

-  void makeSize(size_type size, value_type* v = NULL) {
+  void makeSize(size_type size, value_type* v = nullptr) {
     makeSize(size, v, size - 1);
   }
@@ -1004,12 +940,12 @@ private:
         detail::shiftPointer(newh, kHeapifyCapacitySize) :
         newh);

-    if (v != NULL) {
+    if (v != nullptr) {
       // move new element
       try {
         new (&newp[pos]) value_type(std::move(*v));
       } catch (...) {
-        std::free(newh);
+        free(newh);
         throw;
       }
@@ -1018,7 +954,7 @@ private:
         detail::moveToUninitialized(begin(), begin() + pos, newp);
       } catch (...) {
         newp[pos].~value_type();
-        std::free(newh);
+        free(newh);
         throw;
       }
@@ -1031,7 +967,7 @@ private:
         for (size_type i = 0; i <= pos; ++i) {
           newp[i].~value_type();
         }
-        std::free(newh);
+        free(newh);
         throw;
       }
     } else {
       try {
         detail::moveToUninitialized(begin(), end(), newp);
       } catch (...) {
-        std::free(newh);
+        free(newh);
         throw;
       }
     }
@@ -1081,7 +1017,7 @@ private:
     InternalSizeType* getCapacity() {
       return &capacity_;
     }
-  } FB_PACKED;
+  } FB_PACK_ATTR;

   struct HeapPtr {
     // Lower order bit of heap_ is used as flag to indicate whether capacity is
@@ -1093,9 +1029,9 @@ private:
       return static_cast<value_type*>(
         detail::pointerFlagClear(heap_));
     }
-  } FB_PACKED;
+  } FB_PACK_ATTR;

-#if defined(__x86_64_)
+#if FOLLY_X64
   typedef unsigned char InlineStorageType[sizeof(value_type) * MaxInline];
 #else
   typedef typename std::aligned_storage<
@@ -1160,10 +1096,11 @@ private:

     void freeHeap() {
       auto vp = detail::pointerFlagClear(pdata_.heap_);
-      std::free(vp);
+      free(vp);
     }
-  } FB_PACKED u;
-} FB_PACKED;
+  } FB_PACK_ATTR u;
+} FB_PACK_ATTR;
+FB_PACK_POP

 //////////////////////////////////////////////////////////////////////
@@ -1179,8 +1116,12 @@ void swap(small_vector& a,

 }

-#ifdef FB_PACKED
-# undef FB_PACKED
+#pragma GCC diagnostic pop
+
+#ifdef FB_PACK_ATTR
+# undef FB_PACK_ATTR
+# undef FB_PACK_PUSH
+# undef FB_PACK_POP
 #endif

 #endif
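
For context, a minimal usage sketch of the container this header defines; it is not part of the diff, and it only assumes the post-change public API visible above (inline storage for up to MaxInline elements, heap spill beyond that, and the small_vector_policy::NoHeap policy). Details such as the exact exception type are taken from this version of the header and may differ in later releases.

#include <folly/small_vector.h>
#include <cassert>

int main() {
  // Two ints fit inside the object itself; the third push_back spills
  // the storage to the heap (kShouldUseHeap is true by default).
  folly::small_vector<int, 2> vec;
  vec.push_back(1);
  vec.push_back(2);
  vec.push_back(3);
  assert(vec.size() == 3);

  // With the NoHeap policy the vector can never spill, so growing past
  // MaxInline throws (std::length_error in this version) instead of
  // allocating.
  folly::small_vector<int, 2, folly::small_vector_policy::NoHeap> fixed;
  fixed.push_back(1);
  fixed.push_back(2);
  return 0;
}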