X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=include%2Fllvm%2FADT%2FDenseMap.h;h=6ee1960b5c82395e9d61f69aa491ac12af671dbf;hb=HEAD;hp=4b815f697833e656e1375a0a9f20787a1c0e769b;hpb=b6bbe6320b4a60b7399eea08426aec834701d514;p=oota-llvm.git diff --git a/include/llvm/ADT/DenseMap.h b/include/llvm/ADT/DenseMap.h index 4b815f69783..6ee1960b5c8 100644 --- a/include/llvm/ADT/DenseMap.h +++ b/include/llvm/ADT/DenseMap.h @@ -14,205 +14,187 @@ #ifndef LLVM_ADT_DENSEMAP_H #define LLVM_ADT_DENSEMAP_H -#include "llvm/Support/DataTypes.h" +#include "llvm/ADT/DenseMapInfo.h" +#include "llvm/ADT/EpochTracker.h" +#include "llvm/Support/AlignOf.h" +#include "llvm/Support/Compiler.h" #include "llvm/Support/MathExtras.h" +#include "llvm/Support/PointerLikeTypeTraits.h" +#include "llvm/Support/type_traits.h" +#include #include +#include +#include +#include +#include +#include #include namespace llvm { - -template -struct DenseMapInfo { - //static inline T getEmptyKey(); - //static inline T getTombstoneKey(); - //static unsigned getHashValue(const T &Val); - //static bool isEqual(const T &LHS, const T &RHS); - //static bool isPod() -}; - -// Provide DenseMapInfo for all pointers. -template -struct DenseMapInfo { - static inline T* getEmptyKey() { return reinterpret_cast(-1); } - static inline T* getTombstoneKey() { return reinterpret_cast(-2); } - static unsigned getHashValue(const T *PtrVal) { - return (unsigned((uintptr_t)PtrVal) >> 4) ^ - (unsigned((uintptr_t)PtrVal) >> 9); - } - static bool isEqual(const T *LHS, const T *RHS) { return LHS == RHS; } - static bool isPod() { return true; } -}; - -// Provide DenseMapInfo for unsigned ints. -template<> struct DenseMapInfo { - static inline uint32_t getEmptyKey() { return ~0; } - static inline uint32_t getTombstoneKey() { return ~0 - 1; } - static unsigned getHashValue(const uint32_t& Val) { return Val * 37; } - static bool isPod() { return true; } - static bool isEqual(const uint32_t& LHS, const uint32_t& RHS) { - return LHS == RHS; - } -}; -// Provide DenseMapInfo for all pairs whose members have info. -template -struct DenseMapInfo > { - typedef std::pair Pair; - typedef DenseMapInfo FirstInfo; - typedef DenseMapInfo SecondInfo; - - static inline Pair getEmptyKey() { - return std::make_pair(FirstInfo::getEmptyKey(), - SecondInfo::getEmptyKey()); - } - static inline Pair getTombstoneKey() { - return std::make_pair(FirstInfo::getTombstoneKey(), - SecondInfo::getEmptyKey()); - } - static unsigned getHashValue(const Pair& PairVal) { - uint64_t key = (uint64_t)FirstInfo::getHashValue(PairVal.first) << 32 - | (uint64_t)SecondInfo::getHashValue(PairVal.second); - key += ~(key << 32); - key ^= (key >> 22); - key += ~(key << 13); - key ^= (key >> 8); - key += (key << 3); - key ^= (key >> 15); - key += ~(key << 27); - key ^= (key >> 31); - return (unsigned)key; - } - static bool isEqual(const Pair& LHS, const Pair& RHS) { return LHS == RHS; } - static bool isPod() { return FirstInfo::isPod() && SecondInfo::isPod(); } +namespace detail { +// We extend a pair to allow users to override the bucket type with their own +// implementation without requiring two members. 
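+// For example, a client could supply its own bucket type (the names below are
+// illustrative only) as long as it still exposes the key and value through
+// getFirst()/getSecond():
+//   struct MyBucket : llvm::detail::DenseMapPair<unsigned, int> {};
+//   llvm::DenseMap<unsigned, int, llvm::DenseMapInfo<unsigned>, MyBucket> Map;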
+template +struct DenseMapPair : public std::pair { + KeyT &getFirst() { return std::pair::first; } + const KeyT &getFirst() const { return std::pair::first; } + ValueT &getSecond() { return std::pair::second; } + const ValueT &getSecond() const { return std::pair::second; } }; +} -template, - typename ValueInfoT = DenseMapInfo > +template < + typename KeyT, typename ValueT, typename KeyInfoT = DenseMapInfo, + typename Bucket = detail::DenseMapPair, bool IsConst = false> class DenseMapIterator; -template, - typename ValueInfoT = DenseMapInfo > -class DenseMapConstIterator; - -template, - typename ValueInfoT = DenseMapInfo > -class DenseMap { - typedef std::pair BucketT; - unsigned NumBuckets; - BucketT *Buckets; - - unsigned NumEntries; - unsigned NumTombstones; + +template +class DenseMapBase : public DebugEpochBase { public: + typedef unsigned size_type; typedef KeyT key_type; typedef ValueT mapped_type; typedef BucketT value_type; - - DenseMap(const DenseMap& other) { - NumBuckets = 0; - CopyFrom(other); - } - - explicit DenseMap(unsigned NumInitBuckets = 64) { - init(NumInitBuckets); - } - - ~DenseMap() { - const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); - for (BucketT *P = Buckets, *E = Buckets+NumBuckets; P != E; ++P) { - if (!KeyInfoT::isEqual(P->first, EmptyKey) && - !KeyInfoT::isEqual(P->first, TombstoneKey)) - P->second.~ValueT(); - P->first.~KeyT(); - } - operator delete(Buckets); - } - - typedef DenseMapIterator iterator; - typedef DenseMapConstIterator const_iterator; + + typedef DenseMapIterator iterator; + typedef DenseMapIterator + const_iterator; inline iterator begin() { - return iterator(Buckets, Buckets+NumBuckets); + // When the map is empty, avoid the overhead of AdvancePastEmptyBuckets(). + return empty() ? end() : iterator(getBuckets(), getBucketsEnd(), *this); } inline iterator end() { - return iterator(Buckets+NumBuckets, Buckets+NumBuckets); + return iterator(getBucketsEnd(), getBucketsEnd(), *this, true); } inline const_iterator begin() const { - return const_iterator(Buckets, Buckets+NumBuckets); + return empty() ? end() + : const_iterator(getBuckets(), getBucketsEnd(), *this); } inline const_iterator end() const { - return const_iterator(Buckets+NumBuckets, Buckets+NumBuckets); + return const_iterator(getBucketsEnd(), getBucketsEnd(), *this, true); + } + + bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const { + return getNumEntries() == 0; } - - bool empty() const { return NumEntries == 0; } - unsigned size() const { return NumEntries; } + unsigned size() const { return getNumEntries(); } /// Grow the densemap so that it has at least Size buckets. Does not shrink - void resize(size_t Size) { grow(Size); } - + void resize(size_type Size) { + incrementEpoch(); + if (Size > getNumBuckets()) + grow(Size); + } + void clear() { + incrementEpoch(); + if (getNumEntries() == 0 && getNumTombstones() == 0) return; + // If the capacity of the array is huge, and the # elements used is small, // shrink the array. 
- if (NumEntries * 4 < NumBuckets && NumBuckets > 64) { + if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) { shrink_and_clear(); return; } - + const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); - for (BucketT *P = Buckets, *E = Buckets+NumBuckets; P != E; ++P) { - if (!KeyInfoT::isEqual(P->first, EmptyKey)) { - if (!KeyInfoT::isEqual(P->first, TombstoneKey)) { - P->second.~ValueT(); + unsigned NumEntries = getNumEntries(); + for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) { + if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) { + if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) { + P->getSecond().~ValueT(); --NumEntries; } - P->first = EmptyKey; + P->getFirst() = EmptyKey; } } assert(NumEntries == 0 && "Node count imbalance!"); - NumTombstones = 0; + setNumEntries(0); + setNumTombstones(0); } - /// count - Return true if the specified key is in the map. - bool count(const KeyT &Val) const { - BucketT *TheBucket; - return LookupBucketFor(Val, TheBucket); + /// Return 1 if the specified key is in the map, 0 otherwise. + size_type count(const KeyT &Val) const { + const BucketT *TheBucket; + return LookupBucketFor(Val, TheBucket) ? 1 : 0; } - + iterator find(const KeyT &Val) { BucketT *TheBucket; if (LookupBucketFor(Val, TheBucket)) - return iterator(TheBucket, Buckets+NumBuckets); + return iterator(TheBucket, getBucketsEnd(), *this, true); return end(); } const_iterator find(const KeyT &Val) const { + const BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return const_iterator(TheBucket, getBucketsEnd(), *this, true); + return end(); + } + + /// Alternate version of find() which allows a different, and possibly + /// less expensive, key type. + /// The DenseMapInfo is responsible for supplying methods + /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key + /// type used. + template + iterator find_as(const LookupKeyT &Val) { BucketT *TheBucket; if (LookupBucketFor(Val, TheBucket)) - return const_iterator(TheBucket, Buckets+NumBuckets); + return iterator(TheBucket, getBucketsEnd(), *this, true); + return end(); + } + template + const_iterator find_as(const LookupKeyT &Val) const { + const BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return const_iterator(TheBucket, getBucketsEnd(), *this, true); return end(); } - + /// lookup - Return the entry for the specified key, or a default /// constructed value if no such entry exists. ValueT lookup(const KeyT &Val) const { - BucketT *TheBucket; + const BucketT *TheBucket; if (LookupBucketFor(Val, TheBucket)) - return TheBucket->second; + return TheBucket->getSecond(); return ValueT(); } + // Inserts key,value pair into the map if the key isn't already in the map. + // If the key is already in the map, it returns false and doesn't update the + // value. std::pair insert(const std::pair &KV) { BucketT *TheBucket; if (LookupBucketFor(KV.first, TheBucket)) - return std::make_pair(iterator(TheBucket, Buckets+NumBuckets), + return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true), false); // Already in map. - + // Otherwise, insert the new element. TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket); - return std::make_pair(iterator(TheBucket, Buckets+NumBuckets), + return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true), + true); + } + + // Inserts key,value pair into the map if the key isn't already in the map. + // If the key is already in the map, it returns false and doesn't update the + // value. 
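+  // For example (illustrative):
+  //   DenseMap<unsigned, unsigned> M;
+  //   M.insert(std::make_pair(1u, 10u)); // -> {iterator, true}, 1 maps to 10
+  //   M.insert(std::make_pair(1u, 20u)); // -> {iterator, false}, 1 still maps to 10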
+ std::pair insert(std::pair &&KV) { + BucketT *TheBucket; + if (LookupBucketFor(KV.first, TheBucket)) + return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true), + false); // Already in map. + + // Otherwise, insert the new element. + TheBucket = InsertIntoBucket(std::move(KV.first), + std::move(KV.second), + TheBucket); + return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true), true); } - + /// insert - Range insertion of pairs. template void insert(InputIt I, InputIt E) { @@ -220,78 +202,230 @@ public: insert(*I); } - + bool erase(const KeyT &Val) { BucketT *TheBucket; if (!LookupBucketFor(Val, TheBucket)) return false; // not in map. - TheBucket->second.~ValueT(); - TheBucket->first = getTombstoneKey(); - --NumEntries; - ++NumTombstones; + TheBucket->getSecond().~ValueT(); + TheBucket->getFirst() = getTombstoneKey(); + decrementNumEntries(); + incrementNumTombstones(); return true; } - bool erase(iterator I) { + void erase(iterator I) { BucketT *TheBucket = &*I; - TheBucket->second.~ValueT(); - TheBucket->first = getTombstoneKey(); - --NumEntries; - ++NumTombstones; - return true; + TheBucket->getSecond().~ValueT(); + TheBucket->getFirst() = getTombstoneKey(); + decrementNumEntries(); + incrementNumTombstones(); } value_type& FindAndConstruct(const KeyT &Key) { BucketT *TheBucket; if (LookupBucketFor(Key, TheBucket)) return *TheBucket; - + return *InsertIntoBucket(Key, ValueT(), TheBucket); } - + ValueT &operator[](const KeyT &Key) { return FindAndConstruct(Key).second; } - - DenseMap& operator=(const DenseMap& other) { - CopyFrom(other); - return *this; + + value_type& FindAndConstruct(KeyT &&Key) { + BucketT *TheBucket; + if (LookupBucketFor(Key, TheBucket)) + return *TheBucket; + + return *InsertIntoBucket(std::move(Key), ValueT(), TheBucket); } - -private: - void CopyFrom(const DenseMap& other) { - if (NumBuckets != 0 && (!KeyInfoT::isPod() || !ValueInfoT::isPod())) { - const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); - for (BucketT *P = Buckets, *E = Buckets+NumBuckets; P != E; ++P) { - if (!KeyInfoT::isEqual(P->first, EmptyKey) && - !KeyInfoT::isEqual(P->first, TombstoneKey)) - P->second.~ValueT(); - P->first.~KeyT(); + + ValueT &operator[](KeyT &&Key) { + return FindAndConstruct(std::move(Key)).second; + } + + /// isPointerIntoBucketsArray - Return true if the specified pointer points + /// somewhere into the DenseMap's array of buckets (i.e. either to a key or + /// value in the DenseMap). + bool isPointerIntoBucketsArray(const void *Ptr) const { + return Ptr >= getBuckets() && Ptr < getBucketsEnd(); + } + + /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets + /// array. In conjunction with the previous method, this can be used to + /// determine whether an insertion caused the DenseMap to reallocate. + const void *getPointerIntoBucketsArray() const { return getBuckets(); } + +protected: + DenseMapBase() = default; + + void destroyAll() { + if (getNumBuckets() == 0) // Nothing to do. 
+ return; + + const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); + for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) { + if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) && + !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) + P->getSecond().~ValueT(); + P->getFirst().~KeyT(); + } + } + + void initEmpty() { + setNumEntries(0); + setNumTombstones(0); + + assert((getNumBuckets() & (getNumBuckets()-1)) == 0 && + "# initial buckets must be a power of two!"); + const KeyT EmptyKey = getEmptyKey(); + for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B) + ::new (&B->getFirst()) KeyT(EmptyKey); + } + + void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) { + initEmpty(); + + // Insert all the old elements. + const KeyT EmptyKey = getEmptyKey(); + const KeyT TombstoneKey = getTombstoneKey(); + for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) { + if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) && + !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) { + // Insert the key/value into the new table. + BucketT *DestBucket; + bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket); + (void)FoundVal; // silence warning. + assert(!FoundVal && "Key already in new map?"); + DestBucket->getFirst() = std::move(B->getFirst()); + ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond())); + incrementNumEntries(); + + // Free the value. + B->getSecond().~ValueT(); } + B->getFirst().~KeyT(); } - - NumEntries = other.NumEntries; - NumTombstones = other.NumTombstones; - - if (NumBuckets) - operator delete(Buckets); - Buckets = static_cast(operator new(sizeof(BucketT) * - other.NumBuckets)); - - if (KeyInfoT::isPod() && ValueInfoT::isPod()) - memcpy(Buckets, other.Buckets, other.NumBuckets * sizeof(BucketT)); + } + + template + void copyFrom( + const DenseMapBase &other) { + assert(&other != this); + assert(getNumBuckets() == other.getNumBuckets()); + + setNumEntries(other.getNumEntries()); + setNumTombstones(other.getNumTombstones()); + + if (isPodLike::value && isPodLike::value) + memcpy(getBuckets(), other.getBuckets(), + getNumBuckets() * sizeof(BucketT)); else - for (size_t i = 0; i < other.NumBuckets; ++i) { - new (&Buckets[i].first) KeyT(other.Buckets[i].first); - if (!KeyInfoT::isEqual(Buckets[i].first, getEmptyKey()) && - !KeyInfoT::isEqual(Buckets[i].first, getTombstoneKey())) - new (&Buckets[i].second) ValueT(other.Buckets[i].second); + for (size_t i = 0; i < getNumBuckets(); ++i) { + ::new (&getBuckets()[i].getFirst()) + KeyT(other.getBuckets()[i].getFirst()); + if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) && + !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey())) + ::new (&getBuckets()[i].getSecond()) + ValueT(other.getBuckets()[i].getSecond()); } - NumBuckets = other.NumBuckets; } - + + static unsigned getHashValue(const KeyT &Val) { + return KeyInfoT::getHashValue(Val); + } + template + static unsigned getHashValue(const LookupKeyT &Val) { + return KeyInfoT::getHashValue(Val); + } + static const KeyT getEmptyKey() { + return KeyInfoT::getEmptyKey(); + } + static const KeyT getTombstoneKey() { + return KeyInfoT::getTombstoneKey(); + } + +private: + unsigned getNumEntries() const { + return static_cast(this)->getNumEntries(); + } + void setNumEntries(unsigned Num) { + static_cast(this)->setNumEntries(Num); + } + void incrementNumEntries() { + setNumEntries(getNumEntries() + 1); + } + void decrementNumEntries() { + setNumEntries(getNumEntries() - 1); + } + unsigned 
getNumTombstones() const { + return static_cast(this)->getNumTombstones(); + } + void setNumTombstones(unsigned Num) { + static_cast(this)->setNumTombstones(Num); + } + void incrementNumTombstones() { + setNumTombstones(getNumTombstones() + 1); + } + void decrementNumTombstones() { + setNumTombstones(getNumTombstones() - 1); + } + const BucketT *getBuckets() const { + return static_cast(this)->getBuckets(); + } + BucketT *getBuckets() { + return static_cast(this)->getBuckets(); + } + unsigned getNumBuckets() const { + return static_cast(this)->getNumBuckets(); + } + BucketT *getBucketsEnd() { + return getBuckets() + getNumBuckets(); + } + const BucketT *getBucketsEnd() const { + return getBuckets() + getNumBuckets(); + } + + void grow(unsigned AtLeast) { + static_cast(this)->grow(AtLeast); + } + + void shrink_and_clear() { + static_cast(this)->shrink_and_clear(); + } + + BucketT *InsertIntoBucket(const KeyT &Key, const ValueT &Value, BucketT *TheBucket) { + TheBucket = InsertIntoBucketImpl(Key, TheBucket); + + TheBucket->getFirst() = Key; + ::new (&TheBucket->getSecond()) ValueT(Value); + return TheBucket; + } + + BucketT *InsertIntoBucket(const KeyT &Key, ValueT &&Value, + BucketT *TheBucket) { + TheBucket = InsertIntoBucketImpl(Key, TheBucket); + + TheBucket->getFirst() = Key; + ::new (&TheBucket->getSecond()) ValueT(std::move(Value)); + return TheBucket; + } + + BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, BucketT *TheBucket) { + TheBucket = InsertIntoBucketImpl(Key, TheBucket); + + TheBucket->getFirst() = std::move(Key); + ::new (&TheBucket->getSecond()) ValueT(std::move(Value)); + return TheBucket; + } + + BucketT *InsertIntoBucketImpl(const KeyT &Key, BucketT *TheBucket) { + incrementEpoch(); + // If the load of the hash table is more than 3/4, or if fewer than 1/8 of // the buckets are empty (meaning that many are filled with tombstones), // grow the table. @@ -301,224 +435,639 @@ private: // probe almost the entire table until it found the empty bucket. If the // table completely filled with tombstones, no lookup would ever succeed, // causing infinite loops in lookup. - if (NumEntries*4 >= NumBuckets*3 || - NumBuckets-(NumEntries+NumTombstones) < NumBuckets/8) { + unsigned NewNumEntries = getNumEntries() + 1; + unsigned NumBuckets = getNumBuckets(); + if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) { this->grow(NumBuckets * 2); LookupBucketFor(Key, TheBucket); + NumBuckets = getNumBuckets(); + } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <= + NumBuckets/8)) { + this->grow(NumBuckets); + LookupBucketFor(Key, TheBucket); } - ++NumEntries; - + assert(TheBucket); + + // Only update the state after we've grown our bucket space appropriately + // so that when growing buckets we have self-consistent entry count. + incrementNumEntries(); + // If we are writing over a tombstone, remember this. - if (!KeyInfoT::isEqual(TheBucket->first, getEmptyKey())) - --NumTombstones; - - TheBucket->first = Key; - new (&TheBucket->second) ValueT(Value); + const KeyT EmptyKey = getEmptyKey(); + if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey)) + decrementNumTombstones(); + return TheBucket; } - static unsigned getHashValue(const KeyT &Val) { - return KeyInfoT::getHashValue(Val); - } - static const KeyT getEmptyKey() { - return KeyInfoT::getEmptyKey(); - } - static const KeyT getTombstoneKey() { - return KeyInfoT::getTombstoneKey(); - } - /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in /// FoundBucket. 
If the bucket contains the key and a value, this returns /// true, otherwise it returns a bucket with an empty marker or tombstone and /// returns false. - bool LookupBucketFor(const KeyT &Val, BucketT *&FoundBucket) const { - unsigned BucketNo = getHashValue(Val); - unsigned ProbeAmt = 1; - BucketT *BucketsPtr = Buckets; - + template + bool LookupBucketFor(const LookupKeyT &Val, + const BucketT *&FoundBucket) const { + const BucketT *BucketsPtr = getBuckets(); + const unsigned NumBuckets = getNumBuckets(); + + if (NumBuckets == 0) { + FoundBucket = nullptr; + return false; + } + // FoundTombstone - Keep track of whether we find a tombstone while probing. - BucketT *FoundTombstone = 0; + const BucketT *FoundTombstone = nullptr; const KeyT EmptyKey = getEmptyKey(); const KeyT TombstoneKey = getTombstoneKey(); assert(!KeyInfoT::isEqual(Val, EmptyKey) && !KeyInfoT::isEqual(Val, TombstoneKey) && "Empty/Tombstone value shouldn't be inserted into map!"); - + + unsigned BucketNo = getHashValue(Val) & (NumBuckets-1); + unsigned ProbeAmt = 1; while (1) { - BucketT *ThisBucket = BucketsPtr + (BucketNo & (NumBuckets-1)); + const BucketT *ThisBucket = BucketsPtr + BucketNo; // Found Val's bucket? If so, return it. - if (KeyInfoT::isEqual(ThisBucket->first, Val)) { + if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) { FoundBucket = ThisBucket; return true; } - + // If we found an empty bucket, the key doesn't exist in the set. // Insert it and return the default value. - if (KeyInfoT::isEqual(ThisBucket->first, EmptyKey)) { + if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) { // If we've already seen a tombstone while probing, fill it in instead // of the empty bucket we eventually probed to. - if (FoundTombstone) ThisBucket = FoundTombstone; FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket; return false; } - + // If this is a tombstone, remember it. If Val ends up not in the map, we // prefer to return it than something that would require more probing. - if (KeyInfoT::isEqual(ThisBucket->first, TombstoneKey) && !FoundTombstone) + if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) && + !FoundTombstone) FoundTombstone = ThisBucket; // Remember the first tombstone found. - + // Otherwise, it's a hash collision or a tombstone, continue quadratic // probing. BucketNo += ProbeAmt++; + BucketNo &= (NumBuckets-1); + } + } + + template + bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) { + const BucketT *ConstFoundBucket; + bool Result = const_cast(this) + ->LookupBucketFor(Val, ConstFoundBucket); + FoundBucket = const_cast(ConstFoundBucket); + return Result; + } + +public: + /// Return the approximate size (in bytes) of the actual map. + /// This is just the raw memory used by DenseMap. + /// If entries are pointers to objects, the size of the referenced objects + /// are not included. + size_t getMemorySize() const { + return getNumBuckets() * sizeof(BucketT); + } +}; + +template , + typename BucketT = detail::DenseMapPair> +class DenseMap : public DenseMapBase, + KeyT, ValueT, KeyInfoT, BucketT> { + // Lift some types from the dependent base class into this class for + // simplicity of referring to them. 
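+  // DenseMapBase is a CRTP base: it static_casts 'this' to DerivedT (i.e. to
+  // this class) to reach the storage accessors defined below (getBuckets,
+  // getNumBuckets, grow, shrink_and_clear, ...), so no virtual calls are
+  // involved.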
+ typedef DenseMapBase BaseT; + friend class DenseMapBase; + + BucketT *Buckets; + unsigned NumEntries; + unsigned NumTombstones; + unsigned NumBuckets; + +public: + explicit DenseMap(unsigned NumInitBuckets = 0) { + init(NumInitBuckets); + } + + DenseMap(const DenseMap &other) : BaseT() { + init(0); + copyFrom(other); + } + + DenseMap(DenseMap &&other) : BaseT() { + init(0); + swap(other); + } + + template + DenseMap(const InputIt &I, const InputIt &E) { + init(NextPowerOf2(std::distance(I, E))); + this->insert(I, E); + } + + ~DenseMap() { + this->destroyAll(); + operator delete(Buckets); + } + + void swap(DenseMap& RHS) { + this->incrementEpoch(); + RHS.incrementEpoch(); + std::swap(Buckets, RHS.Buckets); + std::swap(NumEntries, RHS.NumEntries); + std::swap(NumTombstones, RHS.NumTombstones); + std::swap(NumBuckets, RHS.NumBuckets); + } + + DenseMap& operator=(const DenseMap& other) { + if (&other != this) + copyFrom(other); + return *this; + } + + DenseMap& operator=(DenseMap &&other) { + this->destroyAll(); + operator delete(Buckets); + init(0); + swap(other); + return *this; + } + + void copyFrom(const DenseMap& other) { + this->destroyAll(); + operator delete(Buckets); + if (allocateBuckets(other.NumBuckets)) { + this->BaseT::copyFrom(other); + } else { + NumEntries = 0; + NumTombstones = 0; } } void init(unsigned InitBuckets) { - NumEntries = 0; - NumTombstones = 0; - NumBuckets = InitBuckets; - assert(InitBuckets && (InitBuckets & (InitBuckets-1)) == 0 && - "# initial buckets must be a power of two!"); - Buckets = static_cast(operator new(sizeof(BucketT)*InitBuckets)); - // Initialize all the keys to EmptyKey. - const KeyT EmptyKey = getEmptyKey(); - for (unsigned i = 0; i != InitBuckets; ++i) - new (&Buckets[i].first) KeyT(EmptyKey); + if (allocateBuckets(InitBuckets)) { + this->BaseT::initEmpty(); + } else { + NumEntries = 0; + NumTombstones = 0; + } } - + void grow(unsigned AtLeast) { unsigned OldNumBuckets = NumBuckets; BucketT *OldBuckets = Buckets; - - // Double the number of buckets. - while (NumBuckets <= AtLeast) - NumBuckets <<= 1; - NumTombstones = 0; - Buckets = static_cast(operator new(sizeof(BucketT)*NumBuckets)); - - // Initialize all the keys to EmptyKey. - const KeyT EmptyKey = getEmptyKey(); - for (unsigned i = 0, e = NumBuckets; i != e; ++i) - new (&Buckets[i].first) KeyT(EmptyKey); - // Insert all the old elements. - const KeyT TombstoneKey = getTombstoneKey(); - for (BucketT *B = OldBuckets, *E = OldBuckets+OldNumBuckets; B != E; ++B) { - if (!KeyInfoT::isEqual(B->first, EmptyKey) && - !KeyInfoT::isEqual(B->first, TombstoneKey)) { - // Insert the key/value into the new table. - BucketT *DestBucket; - bool FoundVal = LookupBucketFor(B->first, DestBucket); - FoundVal = FoundVal; // silence warning. - assert(!FoundVal && "Key already in new map?"); - DestBucket->first = B->first; - new (&DestBucket->second) ValueT(B->second); - - // Free the value. - B->second.~ValueT(); - } - B->first.~KeyT(); + allocateBuckets(std::max(64, static_cast(NextPowerOf2(AtLeast-1)))); + assert(Buckets); + if (!OldBuckets) { + this->BaseT::initEmpty(); + return; } - + + this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets); + // Free the old table. operator delete(OldBuckets); } - + void shrink_and_clear() { - unsigned OldNumBuckets = NumBuckets; - BucketT *OldBuckets = Buckets; - + unsigned OldNumEntries = NumEntries; + this->destroyAll(); + // Reduce the number of buckets. - NumBuckets = NumEntries > 32 ? 
1 << (Log2_32_Ceil(NumEntries) + 1) - : 64; - NumTombstones = 0; - Buckets = static_cast(operator new(sizeof(BucketT)*NumBuckets)); + unsigned NewNumBuckets = 0; + if (OldNumEntries) + NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1)); + if (NewNumBuckets == NumBuckets) { + this->BaseT::initEmpty(); + return; + } - // Initialize all the keys to EmptyKey. - const KeyT EmptyKey = getEmptyKey(); - for (unsigned i = 0, e = NumBuckets; i != e; ++i) - new (&Buckets[i].first) KeyT(EmptyKey); + operator delete(Buckets); + init(NewNumBuckets); + } - // Free the old buckets. - const KeyT TombstoneKey = getTombstoneKey(); - for (BucketT *B = OldBuckets, *E = OldBuckets+OldNumBuckets; B != E; ++B) { - if (!KeyInfoT::isEqual(B->first, EmptyKey) && - !KeyInfoT::isEqual(B->first, TombstoneKey)) { - // Free the value. - B->second.~ValueT(); +private: + unsigned getNumEntries() const { + return NumEntries; + } + void setNumEntries(unsigned Num) { + NumEntries = Num; + } + + unsigned getNumTombstones() const { + return NumTombstones; + } + void setNumTombstones(unsigned Num) { + NumTombstones = Num; + } + + BucketT *getBuckets() const { + return Buckets; + } + + unsigned getNumBuckets() const { + return NumBuckets; + } + + bool allocateBuckets(unsigned Num) { + NumBuckets = Num; + if (NumBuckets == 0) { + Buckets = nullptr; + return false; + } + + Buckets = static_cast(operator new(sizeof(BucketT) * NumBuckets)); + return true; + } +}; + +template , + typename BucketT = detail::DenseMapPair> +class SmallDenseMap + : public DenseMapBase< + SmallDenseMap, KeyT, + ValueT, KeyInfoT, BucketT> { + // Lift some types from the dependent base class into this class for + // simplicity of referring to them. + typedef DenseMapBase BaseT; + friend class DenseMapBase; + + unsigned Small : 1; + unsigned NumEntries : 31; + unsigned NumTombstones; + + struct LargeRep { + BucketT *Buckets; + unsigned NumBuckets; + }; + + /// A "union" of an inline bucket array and the struct representing + /// a large bucket. This union will be discriminated by the 'Small' bit. + AlignedCharArrayUnion storage; + +public: + explicit SmallDenseMap(unsigned NumInitBuckets = 0) { + init(NumInitBuckets); + } + + SmallDenseMap(const SmallDenseMap &other) : BaseT() { + init(0); + copyFrom(other); + } + + SmallDenseMap(SmallDenseMap &&other) : BaseT() { + init(0); + swap(other); + } + + template + SmallDenseMap(const InputIt &I, const InputIt &E) { + init(NextPowerOf2(std::distance(I, E))); + this->insert(I, E); + } + + ~SmallDenseMap() { + this->destroyAll(); + deallocateBuckets(); + } + + void swap(SmallDenseMap& RHS) { + unsigned TmpNumEntries = RHS.NumEntries; + RHS.NumEntries = NumEntries; + NumEntries = TmpNumEntries; + std::swap(NumTombstones, RHS.NumTombstones); + + const KeyT EmptyKey = this->getEmptyKey(); + const KeyT TombstoneKey = this->getTombstoneKey(); + if (Small && RHS.Small) { + // If we're swapping inline bucket arrays, we have to cope with some of + // the tricky bits of DenseMap's storage system: the buckets are not + // fully initialized. Thus we swap every key, but we may have + // a one-directional move of the value. 
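+      // A key object exists in every inline bucket (unused buckets hold the
+      // empty or tombstone key), but a value is only constructed for live
+      // entries, so the value may have to move in one direction only.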
+ for (unsigned i = 0, e = InlineBuckets; i != e; ++i) { + BucketT *LHSB = &getInlineBuckets()[i], + *RHSB = &RHS.getInlineBuckets()[i]; + bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) && + !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey)); + bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) && + !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey)); + if (hasLHSValue && hasRHSValue) { + // Swap together if we can... + std::swap(*LHSB, *RHSB); + continue; + } + // Swap separately and handle any assymetry. + std::swap(LHSB->getFirst(), RHSB->getFirst()); + if (hasLHSValue) { + ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond())); + LHSB->getSecond().~ValueT(); + } else if (hasRHSValue) { + ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond())); + RHSB->getSecond().~ValueT(); + } + } + return; + } + if (!Small && !RHS.Small) { + std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets); + std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets); + return; + } + + SmallDenseMap &SmallSide = Small ? *this : RHS; + SmallDenseMap &LargeSide = Small ? RHS : *this; + + // First stash the large side's rep and move the small side across. + LargeRep TmpRep = std::move(*LargeSide.getLargeRep()); + LargeSide.getLargeRep()->~LargeRep(); + LargeSide.Small = true; + // This is similar to the standard move-from-old-buckets, but the bucket + // count hasn't actually rotated in this case. So we have to carefully + // move construct the keys and values into their new locations, but there + // is no need to re-hash things. + for (unsigned i = 0, e = InlineBuckets; i != e; ++i) { + BucketT *NewB = &LargeSide.getInlineBuckets()[i], + *OldB = &SmallSide.getInlineBuckets()[i]; + ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst())); + OldB->getFirst().~KeyT(); + if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) && + !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) { + ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond())); + OldB->getSecond().~ValueT(); + } + } + + // The hard part of moving the small buckets across is done, just move + // the TmpRep into its new home. + SmallSide.Small = false; + new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep)); + } + + SmallDenseMap& operator=(const SmallDenseMap& other) { + if (&other != this) + copyFrom(other); + return *this; + } + + SmallDenseMap& operator=(SmallDenseMap &&other) { + this->destroyAll(); + deallocateBuckets(); + init(0); + swap(other); + return *this; + } + + void copyFrom(const SmallDenseMap& other) { + this->destroyAll(); + deallocateBuckets(); + Small = true; + if (other.getNumBuckets() > InlineBuckets) { + Small = false; + new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets())); + } + this->BaseT::copyFrom(other); + } + + void init(unsigned InitBuckets) { + Small = true; + if (InitBuckets > InlineBuckets) { + Small = false; + new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets)); + } + this->BaseT::initEmpty(); + } + + void grow(unsigned AtLeast) { + if (AtLeast >= InlineBuckets) + AtLeast = std::max(64, NextPowerOf2(AtLeast-1)); + + if (Small) { + if (AtLeast < InlineBuckets) + return; // Nothing to do. + + // First move the inline buckets into a temporary storage. + AlignedCharArrayUnion TmpStorage; + BucketT *TmpBegin = reinterpret_cast(TmpStorage.buffer); + BucketT *TmpEnd = TmpBegin; + + // Loop over the buckets, moving non-empty, non-tombstones into the + // temporary storage. Have the loop move the TmpEnd forward as it goes. 
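+      // The temporary copy is needed because the inline buckets and the
+      // LargeRep share the 'storage' union, so the large bucket array cannot
+      // be set up until the inline entries have been moved out.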
+ const KeyT EmptyKey = this->getEmptyKey(); + const KeyT TombstoneKey = this->getTombstoneKey(); + for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) { + if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) && + !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) { + assert(size_t(TmpEnd - TmpBegin) < InlineBuckets && + "Too many inline buckets!"); + ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst())); + ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond())); + ++TmpEnd; + P->getSecond().~ValueT(); + } + P->getFirst().~KeyT(); } - B->first.~KeyT(); + + // Now make this map use the large rep, and move all the entries back + // into it. + Small = false; + new (getLargeRep()) LargeRep(allocateBuckets(AtLeast)); + this->moveFromOldBuckets(TmpBegin, TmpEnd); + return; } - + + LargeRep OldRep = std::move(*getLargeRep()); + getLargeRep()->~LargeRep(); + if (AtLeast <= InlineBuckets) { + Small = true; + } else { + new (getLargeRep()) LargeRep(allocateBuckets(AtLeast)); + } + + this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets); + // Free the old table. - operator delete(OldBuckets); - - NumEntries = 0; + operator delete(OldRep.Buckets); + } + + void shrink_and_clear() { + unsigned OldSize = this->size(); + this->destroyAll(); + + // Reduce the number of buckets. + unsigned NewNumBuckets = 0; + if (OldSize) { + NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1); + if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u) + NewNumBuckets = 64; + } + if ((Small && NewNumBuckets <= InlineBuckets) || + (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) { + this->BaseT::initEmpty(); + return; + } + + deallocateBuckets(); + init(NewNumBuckets); + } + +private: + unsigned getNumEntries() const { + return NumEntries; + } + void setNumEntries(unsigned Num) { + assert(Num < INT_MAX && "Cannot support more than INT_MAX entries"); + NumEntries = Num; + } + + unsigned getNumTombstones() const { + return NumTombstones; + } + void setNumTombstones(unsigned Num) { + NumTombstones = Num; + } + + const BucketT *getInlineBuckets() const { + assert(Small); + // Note that this cast does not violate aliasing rules as we assert that + // the memory's dynamic type is the small, inline bucket buffer, and the + // 'storage.buffer' static type is 'char *'. + return reinterpret_cast(storage.buffer); + } + BucketT *getInlineBuckets() { + return const_cast( + const_cast(this)->getInlineBuckets()); + } + const LargeRep *getLargeRep() const { + assert(!Small); + // Note, same rule about aliasing as with getInlineBuckets. + return reinterpret_cast(storage.buffer); + } + LargeRep *getLargeRep() { + return const_cast( + const_cast(this)->getLargeRep()); + } + + const BucketT *getBuckets() const { + return Small ? getInlineBuckets() : getLargeRep()->Buckets; + } + BucketT *getBuckets() { + return const_cast( + const_cast(this)->getBuckets()); + } + unsigned getNumBuckets() const { + return Small ? 
InlineBuckets : getLargeRep()->NumBuckets; + } + + void deallocateBuckets() { + if (Small) + return; + + operator delete(getLargeRep()->Buckets); + getLargeRep()->~LargeRep(); + } + + LargeRep allocateBuckets(unsigned Num) { + assert(Num > InlineBuckets && "Must allocate more buckets than are inline"); + LargeRep Rep = { + static_cast(operator new(sizeof(BucketT) * Num)), Num + }; + return Rep; } }; -template -class DenseMapIterator { - typedef std::pair BucketT; -protected: - const BucketT *Ptr, *End; +template +class DenseMapIterator : DebugEpochBase::HandleBase { + typedef DenseMapIterator ConstIterator; + friend class DenseMapIterator; + friend class DenseMapIterator; + public: - DenseMapIterator(void) : Ptr(0), End(0) {} + typedef ptrdiff_t difference_type; + typedef typename std::conditional::type + value_type; + typedef value_type *pointer; + typedef value_type &reference; + typedef std::forward_iterator_tag iterator_category; +private: + pointer Ptr, End; +public: + DenseMapIterator() : Ptr(nullptr), End(nullptr) {} - DenseMapIterator(const BucketT *Pos, const BucketT *E) : Ptr(Pos), End(E) { - AdvancePastEmptyBuckets(); + DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch, + bool NoAdvance = false) + : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) { + assert(isHandleInSync() && "invalid construction!"); + if (!NoAdvance) AdvancePastEmptyBuckets(); } - - std::pair &operator*() const { - return *const_cast(Ptr); + + // Converting ctor from non-const iterators to const iterators. SFINAE'd out + // for const iterator destinations so it doesn't end up as a user defined copy + // constructor. + template ::type> + DenseMapIterator( + const DenseMapIterator &I) + : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {} + + reference operator*() const { + assert(isHandleInSync() && "invalid iterator access!"); + return *Ptr; } - std::pair *operator->() const { - return const_cast(Ptr); + pointer operator->() const { + assert(isHandleInSync() && "invalid iterator access!"); + return Ptr; } - - bool operator==(const DenseMapIterator &RHS) const { + + bool operator==(const ConstIterator &RHS) const { + assert((!Ptr || isHandleInSync()) && "handle not in sync!"); + assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!"); + assert(getEpochAddress() == RHS.getEpochAddress() && + "comparing incomparable iterators!"); return Ptr == RHS.Ptr; } - bool operator!=(const DenseMapIterator &RHS) const { + bool operator!=(const ConstIterator &RHS) const { + assert((!Ptr || isHandleInSync()) && "handle not in sync!"); + assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!"); + assert(getEpochAddress() == RHS.getEpochAddress() && + "comparing incomparable iterators!"); return Ptr != RHS.Ptr; } - - inline DenseMapIterator& operator++() { // Preincrement + + inline DenseMapIterator& operator++() { // Preincrement + assert(isHandleInSync() && "invalid iterator access!"); ++Ptr; AdvancePastEmptyBuckets(); return *this; } - DenseMapIterator operator++(int) { // Postincrement + DenseMapIterator operator++(int) { // Postincrement + assert(isHandleInSync() && "invalid iterator access!"); DenseMapIterator tmp = *this; ++*this; return tmp; } - + private: void AdvancePastEmptyBuckets() { const KeyT Empty = KeyInfoT::getEmptyKey(); const KeyT Tombstone = KeyInfoT::getTombstoneKey(); - while (Ptr != End && - (KeyInfoT::isEqual(Ptr->first, Empty) || - KeyInfoT::isEqual(Ptr->first, Tombstone))) + while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) || + 
KeyInfoT::isEqual(Ptr->getFirst(), Tombstone))) ++Ptr; } }; -template -class DenseMapConstIterator : public DenseMapIterator { -public: - DenseMapConstIterator(void) : DenseMapIterator() {} - DenseMapConstIterator(const std::pair *Pos, - const std::pair *E) - : DenseMapIterator(Pos, E) { - } - const std::pair &operator*() const { - return *this->Ptr; - } - const std::pair *operator->() const { - return this->Ptr; - } -}; +template +static inline size_t +capacity_in_bytes(const DenseMap &X) { + return X.getMemorySize(); +} } // end namespace llvm
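
A minimal usage sketch of the containers declared in this header (illustrative
only; the function and variable names below are examples, not introduced by
this patch):

  #include "llvm/ADT/DenseMap.h"

  unsigned usage_example() {
    // DenseMap starts with no buckets and grows in powers of two as entries
    // are inserted.
    llvm::DenseMap<unsigned, const char *> M;
    M[1] = "one";                        // operator[] inserts a default value, then assigns
    M.insert(std::make_pair(2u, "two")); // returns std::pair<iterator, bool>
    unsigned Hits = M.count(1);          // 1 if present, 0 otherwise
    M.erase(2);

    // SmallDenseMap keeps its first few buckets inline (here 8) and only
    // heap-allocates once it grows beyond that.
    llvm::SmallDenseMap<unsigned, unsigned, 8> S;
    for (unsigned i = 0; i != 4; ++i)
      S[i] = i * i;

    return Hits + S.size();
  }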