X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=include%2Fllvm%2FSupport%2FAllocator.h;h=f9b5cf22f97d42f69e3f321577dce572f5a2228f;hb=034f8656645697556783a94bbb235f782131986b;hp=a28080187b6e88bc0f84c7299b76a04c7e53ae52;hpb=ebfe4d7fee96e635d2656fb664ee0b56c41d0fcc;p=oota-llvm.git

diff --git a/include/llvm/Support/Allocator.h b/include/llvm/Support/Allocator.h
index a28080187b6..f9b5cf22f97 100644
--- a/include/llvm/Support/Allocator.h
+++ b/include/llvm/Support/Allocator.h
@@ -32,12 +32,6 @@
 #include <cstdlib>
 
 namespace llvm {
-template <typename T> struct ReferenceAdder {
-  typedef T &result;
-};
-template <typename T> struct ReferenceAdder<T &> {
-  typedef T result;
-};
 
 /// \brief CRTP base class providing obvious overloads for the core \c
 /// Allocate() methods of LLVM-style allocators.
@@ -50,62 +44,68 @@ public:
   /// \brief Allocate \a Size bytes of \a Alignment aligned memory. This method
   /// must be implemented by \c DerivedT.
   void *Allocate(size_t Size, size_t Alignment) {
+#ifdef __clang__
     static_assert(static_cast<void *(AllocatorBase::*)(size_t, size_t)>(
                       &AllocatorBase::Allocate) !=
                       static_cast<void *(DerivedT::*)(size_t, size_t)>(
                           &DerivedT::Allocate),
                   "Class derives from AllocatorBase without implementing the "
                   "core Allocate(size_t, size_t) overload!");
+#endif
     return static_cast<DerivedT *>(this)->Allocate(Size, Alignment);
   }
 
-  /// \brief Allocate space for one object without constructing it.
-  template <typename T> T *Allocate() {
-    return static_cast<T *>(Allocate(sizeof(T), AlignOf<T>::Alignment));
+  /// \brief Deallocate \a Ptr to \a Size bytes of memory allocated by this
+  /// allocator.
+  void Deallocate(const void *Ptr, size_t Size) {
+#ifdef __clang__
+    static_assert(static_cast<void (AllocatorBase::*)(const void *, size_t)>(
+                      &AllocatorBase::Deallocate) !=
+                      static_cast<void (DerivedT::*)(const void *, size_t)>(
+                          &DerivedT::Deallocate),
+                  "Class derives from AllocatorBase without implementing the "
+                  "core Deallocate(void *) overload!");
+#endif
+    return static_cast<DerivedT *>(this)->Deallocate(Ptr, Size);
   }
 
-  /// \brief Allocate space for an array of objects without constructing them.
-  template <typename T> T *Allocate(size_t Num) {
+  // The rest of these methods are helpers that redirect to one of the above
+  // core methods.
+
+  /// \brief Allocate space for a sequence of objects without constructing them.
+  template <typename T> T *Allocate(size_t Num = 1) {
     return static_cast<T *>(Allocate(Num * sizeof(T), AlignOf<T>::Alignment));
   }
 
-  /// \brief Allocate space for an array of objects with the specified alignment
-  /// and without constructing them.
-  template <typename T> T *Allocate(size_t Num, size_t Alignment) {
-    // Round EltSize up to the specified alignment.
-    size_t EltSize = (sizeof(T) + Alignment - 1) & (-Alignment);
-    return static_cast<T *>(Allocate(Num * EltSize, Alignment));
+  /// \brief Deallocate space for a sequence of objects without destroying them.
+  template <typename T>
+  typename std::enable_if<
+      !std::is_same<typename std::remove_cv<T>::type, void>::value, void>::type
+  Deallocate(T *Ptr, size_t Num = 1) {
+    Deallocate(static_cast<const void *>(Ptr), Num * sizeof(T));
   }
 };
 
 class MallocAllocator : public AllocatorBase<MallocAllocator> {
 public:
-  MallocAllocator() {}
-  ~MallocAllocator() {}
-
-  void Reset() {}
-
-  void *Allocate(size_t Size, size_t /*Alignment*/) { return malloc(Size); }
+  LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size,
+                                                size_t /*Alignment*/) {
+    return malloc(Size);
+  }
 
   // Pull in base class overloads.
   using AllocatorBase<MallocAllocator>::Allocate;
 
-  void Deallocate(const void *Ptr) { free(const_cast<void *>(Ptr)); }
-
-  void PrintStats() const {}
-};
+  void Deallocate(const void *Ptr, size_t /*Size*/) {
+    free(const_cast<void *>(Ptr));
+  }
 
-/// MallocSlabAllocator - The default slab allocator for the bump allocator
-/// is an adapter class for MallocAllocator that just forwards the method
-/// calls and translates the arguments.
-class MallocSlabAllocator {
-  /// Allocator - The underlying allocator that we forward to.
-  ///
-  MallocAllocator Allocator;
+  // Pull in base class overloads.
+  using AllocatorBase<MallocAllocator>::Deallocate;
 
-public:
-  void *Allocate(size_t Size) { return Allocator.Allocate(Size, 0); }
-  void Deallocate(void *Slab, size_t Size) { Allocator.Deallocate(Slab); }
+  void PrintStats() const {}
 };
 
 namespace detail {
@@ -119,25 +119,22 @@ void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
 /// \brief Allocate memory in an ever growing pool, as if by bump-pointer.
 ///
 /// This isn't strictly a bump-pointer allocator as it uses backing slabs of
-/// memory rather than relying on boundless contiguous heap. However, it has
-/// bump-pointer semantics in that is a monotonically growing pool of memory
+/// memory rather than relying on a boundless contiguous heap. However, it has
+/// bump-pointer semantics in that it is a monotonically growing pool of memory
 /// where every allocation is found by merely allocating the next N bytes in
 /// the slab, or the next N bytes in the next slab.
 ///
 /// Note that this also has a threshold for forcing allocations above a certain
 /// size into their own slab.
 ///
-/// The BumpPtrAllocatorImpl template defaults to using a MallocSlabAllocator
+/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
 /// object, which wraps malloc, to allocate memory, but it can be changed to
 /// use a custom allocator.
-template <typename AllocatorT = MallocSlabAllocator, size_t SlabSize = 4096,
+template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
           size_t SizeThreshold = SlabSize>
 class BumpPtrAllocatorImpl
     : public AllocatorBase<
           BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold>> {
-  BumpPtrAllocatorImpl(const BumpPtrAllocatorImpl &) LLVM_DELETED_FUNCTION;
-  void operator=(const BumpPtrAllocatorImpl &) LLVM_DELETED_FUNCTION;
-
 public:
   static_assert(SizeThreshold <= SlabSize,
                 "The SizeThreshold must be at most the SlabSize to ensure "
@@ -150,14 +147,49 @@ public:
   template <typename T>
   BumpPtrAllocatorImpl(T &&Allocator)
       : CurPtr(nullptr), End(nullptr), BytesAllocated(0),
         Allocator(std::forward<T &&>(Allocator)) {}
+
+  // Manually implement a move constructor as we must clear the old allocator's
+  // slabs as a matter of correctness.
+  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
+      : CurPtr(Old.CurPtr), End(Old.End), Slabs(std::move(Old.Slabs)),
+        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
+        BytesAllocated(Old.BytesAllocated),
+        Allocator(std::move(Old.Allocator)) {
+    Old.CurPtr = Old.End = nullptr;
+    Old.BytesAllocated = 0;
+    Old.Slabs.clear();
+    Old.CustomSizedSlabs.clear();
+  }
+
   ~BumpPtrAllocatorImpl() {
     DeallocateSlabs(Slabs.begin(), Slabs.end());
     DeallocateCustomSizedSlabs();
   }
 
+  BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
+    DeallocateSlabs(Slabs.begin(), Slabs.end());
+    DeallocateCustomSizedSlabs();
+
+    CurPtr = RHS.CurPtr;
+    End = RHS.End;
+    BytesAllocated = RHS.BytesAllocated;
+    Slabs = std::move(RHS.Slabs);
+    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
+    Allocator = std::move(RHS.Allocator);
+
+    RHS.CurPtr = RHS.End = nullptr;
+    RHS.BytesAllocated = 0;
+    RHS.Slabs.clear();
+    RHS.CustomSizedSlabs.clear();
+    return *this;
+  }
+
   /// \brief Deallocate all but the current slab and reset the current pointer
   /// to the beginning of it, freeing all memory allocated so far.
   void Reset() {
+    DeallocateCustomSizedSlabs();
+    CustomSizedSlabs.clear();
+
     if (Slabs.empty())
       return;
 
@@ -166,63 +198,64 @@ public:
     CurPtr = (char *)Slabs.front();
     End = CurPtr + SlabSize;
 
-    // Deallocate all but the first slab, and all custome sized slabs.
+    // Deallocate all but the first slab, and deallocate all custom-sized slabs.
     DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
     Slabs.erase(std::next(Slabs.begin()), Slabs.end());
-    DeallocateCustomSizedSlabs();
-    CustomSizedSlabs.clear();
   }
 
   /// \brief Allocate space at the specified alignment.
-  void *Allocate(size_t Size, size_t Alignment) {
-    if (!CurPtr) // Start a new slab if we haven't allocated one already.
-      StartNewSlab();
+  LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void *
+  Allocate(size_t Size, size_t Alignment) {
+    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");
 
     // Keep track of how many bytes we've allocated.
     BytesAllocated += Size;
 
-    // 0-byte alignment means 1-byte alignment.
-    if (Alignment == 0)
-      Alignment = 1;
+    size_t Adjustment = alignmentAdjustment(CurPtr, Alignment);
+    assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");
 
-    // Allocate the aligned space, going forwards from CurPtr.
-    char *Ptr = alignPtr(CurPtr, Alignment);
-
-    // Check if we can hold it.
-    if (Ptr + Size <= End) {
-      CurPtr = Ptr + Size;
+    // Check if we have enough space.
+    if (Adjustment + Size <= size_t(End - CurPtr)) {
+      char *AlignedPtr = CurPtr + Adjustment;
+      CurPtr = AlignedPtr + Size;
       // Update the allocation point of this memory block in MemorySanitizer.
       // Without this, MemorySanitizer messages for values originated from here
      // will point to the allocation of the entire slab.
-      __msan_allocated_memory(Ptr, Size);
-      return Ptr;
+      __msan_allocated_memory(AlignedPtr, Size);
+      return AlignedPtr;
    }
 
     // If Size is really big, allocate a separate slab for it.
     size_t PaddedSize = Size + Alignment - 1;
     if (PaddedSize > SizeThreshold) {
-      void *NewSlab = Allocator.Allocate(PaddedSize);
+      void *NewSlab = Allocator.Allocate(PaddedSize, 0);
       CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));
-      Ptr = alignPtr((char *)NewSlab, Alignment);
-      assert((uintptr_t)Ptr + Size <= (uintptr_t)NewSlab + PaddedSize);
-      __msan_allocated_memory(Ptr, Size);
-      return Ptr;
+      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
+      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
+      char *AlignedPtr = (char*)AlignedAddr;
+      __msan_allocated_memory(AlignedPtr, Size);
+      return AlignedPtr;
     }
 
     // Otherwise, start a new slab and try again.
     StartNewSlab();
-    Ptr = alignPtr(CurPtr, Alignment);
-    CurPtr = Ptr + Size;
-    assert(CurPtr <= End && "Unable to allocate memory!");
-    __msan_allocated_memory(Ptr, Size);
-    return Ptr;
+    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
+    assert(AlignedAddr + Size <= (uintptr_t)End &&
+           "Unable to allocate memory!");
+    char *AlignedPtr = (char*)AlignedAddr;
+    CurPtr = AlignedPtr + Size;
+    __msan_allocated_memory(AlignedPtr, Size);
+    return AlignedPtr;
   }
 
   // Pull in base class overloads.
   using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;
 
-  void Deallocate(const void * /*Ptr*/) {}
+  void Deallocate(const void * /*Ptr*/, size_t /*Size*/) {}
+
+  // Pull in base class overloads.
+  using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;
 
   size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }
 
@@ -276,7 +309,7 @@ private:
   void StartNewSlab() {
     size_t AllocatedSlabSize = computeSlabSize(Slabs.size());
 
-    void *NewSlab = Allocator.Allocate(AllocatedSlabSize);
+    void *NewSlab = Allocator.Allocate(AllocatedSlabSize, 0);
     Slabs.push_back(NewSlab);
     CurPtr = (char *)(NewSlab);
     End = ((char *)NewSlab) + AllocatedSlabSize;
@@ -288,12 +321,6 @@ private:
     for (; I != E; ++I) {
       size_t AllocatedSlabSize =
           computeSlabSize(std::distance(Slabs.begin(), I));
-#ifndef NDEBUG
-      // Poison the memory so stale pointers crash sooner. Note we must
-      // preserve the Size and NextPtr fields at the beginning.
-      sys::Memory::setRangeWritable(*I, AllocatedSlabSize);
-      memset(*I, 0xCD, AllocatedSlabSize);
-#endif
       Allocator.Deallocate(*I, AllocatedSlabSize);
     }
   }
@@ -303,12 +330,6 @@ private:
     for (auto &PtrAndSize : CustomSizedSlabs) {
       void *Ptr = PtrAndSize.first;
       size_t Size = PtrAndSize.second;
-#ifndef NDEBUG
-      // Poison the memory so stale pointers crash sooner. Note we must
-      // preserve the Size and NextPtr fields at the beginning.
-      sys::Memory::setRangeWritable(Ptr, Size);
-      memset(Ptr, 0xCD, Size);
-#endif
       Allocator.Deallocate(Ptr, Size);
     }
   }
@@ -330,15 +351,21 @@ template <typename T> class SpecificBumpPtrAllocator {
 public:
   SpecificBumpPtrAllocator() : Allocator() {}
-
+  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
+      : Allocator(std::move(Old.Allocator)) {}
   ~SpecificBumpPtrAllocator() { DestroyAll(); }
 
+  SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
+    Allocator = std::move(RHS.Allocator);
+    return *this;
+  }
+
   /// Call the destructor of each allocated object and deallocate all but the
   /// current slab and reset the current pointer to the beginning of it, freeing
   /// all memory allocated so far.
   void DestroyAll() {
     auto DestroyElements = [](char *Begin, char *End) {
-      assert(Begin == alignPtr(Begin, alignOf<T>()));
+      assert(Begin == (char*)alignAddr(Begin, alignOf<T>()));
       for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
         reinterpret_cast<T *>(Ptr)->~T();
     };
@@ -347,7 +374,7 @@ public:
          ++I) {
       size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
           std::distance(Allocator.Slabs.begin(), I));
-      char *Begin = alignPtr((char *)*I, alignOf<T>());
+      char *Begin = (char*)alignAddr(*I, alignOf<T>());
       char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
                                                : (char *)*I + AllocatedSlabSize;
 
@@ -357,7 +384,7 @@ public:
     for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
       void *Ptr = PtrAndSize.first;
       size_t Size = PtrAndSize.second;
-      DestroyElements(alignPtr((char *)Ptr, alignOf<T>()), (char *)Ptr + Size);
+      DestroyElements((char*)alignAddr(Ptr, alignOf<T>()), (char *)Ptr + Size);
     }
 
     Allocator.Reset();
@@ -365,8 +392,6 @@ public:
   /// \brief Allocate space for an array of objects without constructing them.
   T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
-
-private:
 };
 
 } // end namespace llvm
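For context, a minimal usage sketch of the interface this patch settles on. It is not part of the patch; `CountingAllocator` is a hypothetical client type invented for illustration. It derives from `AllocatorBase`, implements the two core overloads that the new `static_assert`s check for, pulls in the templated helpers with using-declarations (exactly as `MallocAllocator` does above), and exercises `BumpPtrAllocator`'s new move support:

// Hypothetical example client; not part of the patch above.
#include "llvm/Support/Allocator.h"
#include <cstddef>
#include <cstdlib>
#include <utility>

// Defining the two core overloads -- Allocate(size_t, size_t) and
// Deallocate(const void *, size_t) -- satisfies the #ifdef __clang__
// static_asserts introduced above; the using-declarations then expose the
// templated helper overloads that redirect to these core methods.
class CountingAllocator : public llvm::AllocatorBase<CountingAllocator> {
  size_t NumAllocs = 0;

public:
  void *Allocate(size_t Size, size_t /*Alignment*/) {
    ++NumAllocs;
    return malloc(Size);
  }
  void Deallocate(const void *Ptr, size_t /*Size*/) {
    free(const_cast<void *>(Ptr));
  }

  // Pull in base class overloads, as MallocAllocator does.
  using AllocatorBase<CountingAllocator>::Allocate;
  using AllocatorBase<CountingAllocator>::Deallocate;

  size_t getNumAllocs() const { return NumAllocs; }
};

int main() {
  CountingAllocator CA;
  int *P = CA.Allocate<int>(16); // helper forwards to the core overload
  CA.Deallocate(P, 16);          // sized, typed deallocation

  // BumpPtrAllocator now defaults its slab allocator to MallocAllocator and
  // is movable; moving transfers slab ownership, so only one destructor
  // frees the slabs.
  llvm::BumpPtrAllocator A;
  char *Buf = A.Allocate<char>(128);
  (void)Buf;
  llvm::BumpPtrAllocator B = std::move(A);
  B.Reset(); // frees all but the first slab and rewinds the bump pointer
  return CA.getNumAllocs() == 1 ? 0 : 1;
}

The point of threading a size through the core `Deallocate(Ptr, Size)` overload is visible in `DeallocateSlabs()` above: the bump allocator can hand each slab's size straight back to its underlying allocator without any per-allocation bookkeeping.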