X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=include%2Fllvm%2FSupport%2FAllocator.h;h=de317719714da0196261b5173b83a501687792d6;hb=d09c631f071640130c0c91f8f8c219ea31a23f32;hp=d96c8f254cc92fde54f2c3bf95af89cbfc2d9dda;hpb=eb19b8f58b12532e736051fee46dcf2115a4888d;p=oota-llvm.git

diff --git a/include/llvm/Support/Allocator.h b/include/llvm/Support/Allocator.h
index d96c8f254cc..de317719714 100644
--- a/include/llvm/Support/Allocator.h
+++ b/include/llvm/Support/Allocator.h
@@ -32,12 +32,6 @@
 #include <cstdlib>
 
 namespace llvm {
-template <typename T> struct ReferenceAdder {
-  typedef T &result;
-};
-template <typename T> struct ReferenceAdder<T &> {
-  typedef T result;
-};
 
 /// \brief CRTP base class providing obvious overloads for the core \c
 /// Allocate() methods of LLVM-style allocators.
@@ -63,66 +57,50 @@ public:
   /// \brief Deallocate \a Ptr to \a Size bytes of memory allocated by this
   /// allocator.
-  void Deallocate(const void *Ptr) {
+  void Deallocate(const void *Ptr, size_t Size) {
 #ifdef __clang__
-    static_assert(static_cast<void (AllocatorBase::*)(const void *)>(
+    static_assert(static_cast<void (AllocatorBase::*)(const void *, size_t)>(
                       &AllocatorBase::Deallocate) !=
-                      static_cast<void (DerivedT::*)(const void *)>(
+                      static_cast<void (DerivedT::*)(const void *, size_t)>(
                           &DerivedT::Deallocate),
                   "Class derives from AllocatorBase without implementing the "
                   "core Deallocate(void *) overload!");
 #endif
-    return static_cast<DerivedT *>(this)->Deallocate(Ptr);
+    return static_cast<DerivedT *>(this)->Deallocate(Ptr, Size);
   }
 
   // The rest of these methods are helpers that redirect to one of the above
   // core methods.
 
-  /// \brief Allocate space for one object without constructing it.
-  template <typename T> T *Allocate() {
-    return static_cast<T *>(Allocate(sizeof(T), AlignOf<T>::Alignment));
-  }
-
-  /// \brief Allocate space for an array of objects without constructing them.
-  template <typename T> T *Allocate(size_t Num) {
+  /// \brief Allocate space for a sequence of objects without constructing them.
+  template <typename T> T *Allocate(size_t Num = 1) {
     return static_cast<T *>(Allocate(Num * sizeof(T), AlignOf<T>::Alignment));
   }
 
-  /// \brief Allocate space for an array of objects with the specified alignment
-  /// and without constructing them.
-  template <typename T> T *Allocate(size_t Num, size_t Alignment) {
-    // Round EltSize up to the specified alignment.
-    size_t EltSize = (sizeof(T) + Alignment - 1) & (-Alignment);
-    return static_cast<T *>(Allocate(Num * EltSize, Alignment));
-  }
-
-  /// \brief Deallocate space for one object without destroying it.
-  template <typename T>
-  typename std::enable_if<!std::is_same<T, void>::value, void>::type
-  Deallocate(T *Ptr) {
-    Deallocate(static_cast<const void *>(Ptr));
-  }
-
-  /// \brief Allocate space for an array of objects without constructing them.
+  /// \brief Deallocate space for a sequence of objects without destroying them.
   template <typename T>
-  typename std::enable_if<!std::is_same<T, void>::value, void>::type
-  Deallocate(T *Ptr, size_t /*Num*/) {
-    Deallocate(static_cast<const void *>(Ptr));
+  typename std::enable_if<
+      !std::is_same<typename std::remove_cv<T>::type, void>::value, void>::type
+  Deallocate(T *Ptr, size_t Num = 1) {
+    Deallocate(static_cast<const void *>(Ptr), Num * sizeof(T));
   }
 };
 
 class MallocAllocator : public AllocatorBase<MallocAllocator> {
 public:
-  MallocAllocator() {}
-  ~MallocAllocator() {}
-
-  void Reset() {}
-
-  void *Allocate(size_t Size, size_t /*Alignment*/) { return malloc(Size); }
+  LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size,
+                                                size_t /*Alignment*/) {
+    return malloc(Size);
+  }
 
   // Pull in base class overloads.
   using AllocatorBase<MallocAllocator>::Allocate;
 
-  void Deallocate(const void *Ptr) { free(const_cast<void *>(Ptr)); }
+  void Deallocate(const void *Ptr, size_t /*Size*/) {
+    free(const_cast<void *>(Ptr));
+  }
 
   // Pull in base class overloads.
   using AllocatorBase<MallocAllocator>::Deallocate;
@@ -130,19 +108,6 @@ public:
   void PrintStats() const {}
 };
 
-/// MallocSlabAllocator - The default slab allocator for the bump allocator
-/// is an adapter class for MallocAllocator that just forwards the method
-/// calls and translates the arguments.
-class MallocSlabAllocator {
-  /// Allocator - The underlying allocator that we forward to.
-  ///
-  MallocAllocator Allocator;
-
-public:
-  void *Allocate(size_t Size) { return Allocator.Allocate(Size, 0); }
-  void Deallocate(void *Slab, size_t Size) { Allocator.Deallocate(Slab); }
-};
-
 namespace detail {
 
 // We call out to an external function to actually print the message as the
@@ -154,25 +119,22 @@ void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
                                 size_t TotalMemory);
 } // namespace detail
 
 /// \brief Allocate memory in an ever growing pool, as if by bump-pointer.
 ///
 /// This isn't strictly a bump-pointer allocator as it uses backing slabs of
-/// memory rather than relying on boundless contiguous heap. However, it has
-/// bump-pointer semantics in that is a monotonically growing pool of memory
+/// memory rather than relying on a boundless contiguous heap. However, it has
+/// bump-pointer semantics in that it is a monotonically growing pool of memory
 /// where every allocation is found by merely allocating the next N bytes in
 /// the slab, or the next N bytes in the next slab.
 ///
 /// Note that this also has a threshold for forcing allocations above a certain
 /// size into their own slab.
 ///
-/// The BumpPtrAllocatorImpl template defaults to using a MallocSlabAllocator
+/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
 /// object, which wraps malloc, to allocate memory, but it can be changed to
 /// use a custom allocator.
-template <typename AllocatorT = MallocSlabAllocator, size_t SlabSize = 4096,
+template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
           size_t SizeThreshold = SlabSize>
 class BumpPtrAllocatorImpl
     : public AllocatorBase<
           BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold>> {
-  BumpPtrAllocatorImpl(const BumpPtrAllocatorImpl &) LLVM_DELETED_FUNCTION;
-  void operator=(const BumpPtrAllocatorImpl &) LLVM_DELETED_FUNCTION;
-
 public:
   static_assert(SizeThreshold <= SlabSize,
                 "The SizeThreshold must be at most the SlabSize to ensure "
@@ -185,11 +147,43 @@ public:
   BumpPtrAllocatorImpl()
       : CurPtr(nullptr), End(nullptr), BytesAllocated(0), Allocator() {}
   template <typename T>
   BumpPtrAllocatorImpl(T &&Allocator)
       : CurPtr(nullptr), End(nullptr), BytesAllocated(0),
         Allocator(std::forward<T &&>(Allocator)) {}
+
+  // Manually implement a move constructor as we must clear the old allocator's
+  // slabs as a matter of correctness.
+  BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
+      : CurPtr(Old.CurPtr), End(Old.End), Slabs(std::move(Old.Slabs)),
+        CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
+        BytesAllocated(Old.BytesAllocated),
+        Allocator(std::move(Old.Allocator)) {
+    Old.CurPtr = Old.End = nullptr;
+    Old.BytesAllocated = 0;
+    Old.Slabs.clear();
+    Old.CustomSizedSlabs.clear();
+  }
+
   ~BumpPtrAllocatorImpl() {
     DeallocateSlabs(Slabs.begin(), Slabs.end());
     DeallocateCustomSizedSlabs();
   }
 
+  BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
+    DeallocateSlabs(Slabs.begin(), Slabs.end());
+    DeallocateCustomSizedSlabs();
+
+    CurPtr = RHS.CurPtr;
+    End = RHS.End;
+    BytesAllocated = RHS.BytesAllocated;
+    Slabs = std::move(RHS.Slabs);
+    CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
+    Allocator = std::move(RHS.Allocator);
+
+    RHS.CurPtr = RHS.End = nullptr;
+    RHS.BytesAllocated = 0;
+    RHS.Slabs.clear();
+    RHS.CustomSizedSlabs.clear();
+    return *this;
+  }
+
   /// \brief Deallocate all but the current slab and reset the current pointer
   /// to the beginning of it, freeing all memory allocated so far.
   void Reset() {
@@ -209,55 +203,54 @@ public:
   }
 
   /// \brief Allocate space at the specified alignment.
-  void *Allocate(size_t Size, size_t Alignment) {
-    if (!CurPtr) // Start a new slab if we haven't allocated one already.
-      StartNewSlab();
+  LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size, size_t Alignment) {
+    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");
 
     // Keep track of how many bytes we've allocated.
     BytesAllocated += Size;
 
-    // 0-byte alignment means 1-byte alignment.
-    if (Alignment == 0)
-      Alignment = 1;
+    size_t Adjustment = alignmentAdjustment(CurPtr, Alignment);
+    assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");
 
-    // Allocate the aligned space, going forwards from CurPtr.
-    char *Ptr = alignPtr(CurPtr, Alignment);
-
-    // Check if we can hold it.
-    if (Ptr + Size <= End) {
-      CurPtr = Ptr + Size;
+    // Check if we have enough space.
+    if (Adjustment + Size <= size_t(End - CurPtr)) {
+      char *AlignedPtr = CurPtr + Adjustment;
+      CurPtr = AlignedPtr + Size;
       // Update the allocation point of this memory block in MemorySanitizer.
       // Without this, MemorySanitizer messages for values originated from here
       // will point to the allocation of the entire slab.
-      __msan_allocated_memory(Ptr, Size);
-      return Ptr;
+      __msan_allocated_memory(AlignedPtr, Size);
+      return AlignedPtr;
     }
 
     // If Size is really big, allocate a separate slab for it.
     size_t PaddedSize = Size + Alignment - 1;
     if (PaddedSize > SizeThreshold) {
-      void *NewSlab = Allocator.Allocate(PaddedSize);
+      void *NewSlab = Allocator.Allocate(PaddedSize, 0);
       CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));
 
-      Ptr = alignPtr((char *)NewSlab, Alignment);
-      assert((uintptr_t)Ptr + Size <= (uintptr_t)NewSlab + PaddedSize);
-      __msan_allocated_memory(Ptr, Size);
-      return Ptr;
+      uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
+      assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
+      char *AlignedPtr = (char*)AlignedAddr;
+      __msan_allocated_memory(AlignedPtr, Size);
+      return AlignedPtr;
     }
 
     // Otherwise, start a new slab and try again.
     StartNewSlab();
-    Ptr = alignPtr(CurPtr, Alignment);
-    CurPtr = Ptr + Size;
-    assert(CurPtr <= End && "Unable to allocate memory!");
-    __msan_allocated_memory(Ptr, Size);
-    return Ptr;
+    uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
+    assert(AlignedAddr + Size <= (uintptr_t)End &&
+           "Unable to allocate memory!");
+    char *AlignedPtr = (char*)AlignedAddr;
+    CurPtr = AlignedPtr + Size;
+    __msan_allocated_memory(AlignedPtr, Size);
+    return AlignedPtr;
   }
 
   // Pull in base class overloads.
   using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;
 
-  void Deallocate(const void * /*Ptr*/) {}
+  void Deallocate(const void * /*Ptr*/, size_t /*Size*/) {}
 
   // Pull in base class overloads.
   using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;
@@ -314,7 +307,7 @@ private:
   void StartNewSlab() {
     size_t AllocatedSlabSize = computeSlabSize(Slabs.size());
 
-    void *NewSlab = Allocator.Allocate(AllocatedSlabSize);
+    void *NewSlab = Allocator.Allocate(AllocatedSlabSize, 0);
     Slabs.push_back(NewSlab);
     CurPtr = (char *)(NewSlab);
     End = ((char *)NewSlab) + AllocatedSlabSize;
@@ -329,8 +322,10 @@ private:
 #ifndef NDEBUG
       // Poison the memory so stale pointers crash sooner.  Note we must
      // preserve the Size and NextPtr fields at the beginning.
-      sys::Memory::setRangeWritable(*I, AllocatedSlabSize);
-      memset(*I, 0xCD, AllocatedSlabSize);
+      if (AllocatedSlabSize != 0) {
+        sys::Memory::setRangeWritable(*I, AllocatedSlabSize);
+        memset(*I, 0xCD, AllocatedSlabSize);
+      }
 #endif
       Allocator.Deallocate(*I, AllocatedSlabSize);
     }
@@ -368,15 +363,21 @@ template <typename T> class SpecificBumpPtrAllocator {
 
 public:
   SpecificBumpPtrAllocator() : Allocator() {}
-
+  SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
+      : Allocator(std::move(Old.Allocator)) {}
   ~SpecificBumpPtrAllocator() { DestroyAll(); }
 
+  SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
+    Allocator = std::move(RHS.Allocator);
+    return *this;
+  }
+
   /// Call the destructor of each allocated object and deallocate all but the
   /// current slab and reset the current pointer to the beginning of it, freeing
   /// all memory allocated so far.
   void DestroyAll() {
     auto DestroyElements = [](char *Begin, char *End) {
-      assert(Begin == alignPtr(Begin, alignOf<T>()));
+      assert(Begin == (char*)alignAddr(Begin, alignOf<T>()));
       for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
         reinterpret_cast<T *>(Ptr)->~T();
     };
@@ -385,7 +386,7 @@ public:
          ++I) {
       size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
           std::distance(Allocator.Slabs.begin(), I));
-      char *Begin = alignPtr((char *)*I, alignOf<T>());
+      char *Begin = (char*)alignAddr(*I, alignOf<T>());
       char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
                                                : (char *)*I + AllocatedSlabSize;
 
@@ -395,7 +396,7 @@ public:
     for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
       void *Ptr = PtrAndSize.first;
      size_t Size = PtrAndSize.second;
-      DestroyElements(alignPtr((char *)Ptr, alignOf<T>()), (char *)Ptr + Size);
+      DestroyElements((char*)alignAddr(Ptr, alignOf<T>()), (char *)Ptr + Size);
     }
 
     Allocator.Reset();
@@ -403,8 +404,6 @@ public:
 
   /// \brief Allocate space for an array of objects without constructing them.
   T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
-
-private:
 };
 
 } // end namespace llvm
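
Usage sketch (illustrative, not part of the patch). The diff above changes the core allocator concept from Deallocate(const void *) to the size-aware Deallocate(const void *, size_t), folds the single-object and array Allocate<T> helpers into one Allocate<T>(size_t Num = 1), and makes BumpPtrAllocator movable. The self-contained example below shows that post-patch interface; CountingAllocator, its Live counter, and main() are hypothetical names invented here for illustration, and the sketch assumes the post-patch Allocator.h is on the include path.

// Illustrative sketch only: CountingAllocator and Live are hypothetical;
// the llvm:: entities are the ones defined by the header patched above.
#include "llvm/Support/Allocator.h"
#include <cstdio>
#include <cstdlib>
#include <utility>

// A custom allocator only needs the two core overloads. AllocatorBase's CRTP
// static_asserts (under __clang__) verify that the derived class provides the
// (Size, Alignment) Allocate and the new two-argument (Ptr, Size) Deallocate.
class CountingAllocator : public llvm::AllocatorBase<CountingAllocator> {
  using Base = llvm::AllocatorBase<CountingAllocator>;
  size_t Live = 0; // bytes currently outstanding (hypothetical bookkeeping)

public:
  void *Allocate(size_t Size, size_t /*Alignment*/) {
    Live += Size;
    return std::malloc(Size);
  }
  using Base::Allocate; // pull in the templated Allocate<T>(Num) helper

  // The Size parameter is the point of this patch: a pooling allocator could
  // use it to return the block to a size-segregated free list.
  void Deallocate(const void *Ptr, size_t Size) {
    Live -= Size;
    std::free(const_cast<void *>(Ptr));
  }
  using Base::Deallocate; // pull in the templated Deallocate(Ptr, Num) helper

  size_t live() const { return Live; }
};

int main() {
  llvm::BumpPtrAllocator BPA;
  int *One = BPA.Allocate<int>();       // Num defaults to 1 after this patch
  int *Sixteen = BPA.Allocate<int>(16); // space for sixteen ints, aligned
  (void)One;
  (void)Sixteen;

  // Move support is new in this patch; the moved-from allocator's slab lists
  // are cleared so its destructor does not free the stolen slabs again.
  llvm::BumpPtrAllocator Stolen = std::move(BPA);
  (void)Stolen;

  CountingAllocator CA;
  double *D = CA.Allocate<double>(8);
  std::printf("live bytes: %zu\n", CA.live()); // 8 * sizeof(double)
  CA.Deallocate(D, 8); // sized deallocation via the base-class helper
  return 0;
}

Note the asymmetry the patch preserves: BumpPtrAllocator's own Deallocate(Ptr, Size) remains a no-op, since bump-pointer memory is only reclaimed in bulk by Reset() or the destructor; the sized signature exists so it satisfies the same AllocatorBase concept as allocators that do recycle individual blocks.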