X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTransforms%2FScalar%2FSROA.cpp;h=04bf4f8dfc3ad37f658fb102f02efee623063293;hb=8169ef891adb27e54afeac7127a43121096e35c0;hp=85d279826832e0db8beade65a874adacc03d6cee;hpb=dd080790eb9184dd0cab95825b922a705bada613;p=oota-llvm.git

diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index 85d27982683..04bf4f8dfc3 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -23,7 +23,6 @@
 ///
 //===----------------------------------------------------------------------===//
 
-#define DEBUG_TYPE "sroa"
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SetVector.h"
@@ -32,19 +31,19 @@
 #include "llvm/Analysis/Loads.h"
 #include "llvm/Analysis/PtrUseVisitor.h"
 #include "llvm/Analysis/ValueTracking.h"
-#include "llvm/DIBuilder.h"
-#include "llvm/DebugInfo.h"
 #include "llvm/IR/Constants.h"
+#include "llvm/IR/DIBuilder.h"
 #include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
 #include "llvm/IR/DerivedTypes.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstVisitor.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Operator.h"
-#include "llvm/InstVisitor.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Compiler.h"
@@ -64,6 +63,8 @@
 
 using namespace llvm;
 
+#define DEBUG_TYPE "sroa"
+
 STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
 STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed");
 STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca");
@@ -159,8 +160,8 @@ public:
 
   Use *getUse() const { return UseAndIsSplittable.getPointer(); }
 
-  bool isDead() const { return getUse() == 0; }
-  void kill() { UseAndIsSplittable.setPointer(0); }
+  bool isDead() const { return getUse() == nullptr; }
+  void kill() { UseAndIsSplittable.setPointer(nullptr); }
 
   /// \brief Support for ordering ranges.
   ///
@@ -320,7 +321,7 @@ static Value *foldSelectInst(SelectInst &SI) {
   if (SI.getOperand(1) == SI.getOperand(2))
     return SI.getOperand(1);
 
-  return 0;
+  return nullptr;
 }
 
 /// \brief Builder for the alloca slices.
@@ -556,7 +557,7 @@ private:
     // they both point to the same alloca.
    bool Inserted;
    SmallDenseMap<Instruction *, unsigned>::iterator MTPI;
-    llvm::tie(MTPI, Inserted) =
+    std::tie(MTPI, Inserted) =
        MemTransferSliceMap.insert(std::make_pair(&II, S.Slices.size()));
    unsigned PrevIdx = MTPI->second;
    if (!Inserted) {
@@ -615,7 +616,7 @@ private:
     Size = 0;
     do {
       Instruction *I, *UsedI;
-      llvm::tie(UsedI, I) = Uses.pop_back_val();
+      std::tie(UsedI, I) = Uses.pop_back_val();
 
       if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
         Size = std::max(Size, DL.getTypeStoreSize(LI->getType()));
@@ -637,13 +638,12 @@ private:
         return I;
       }
 
-      for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE;
-           ++UI)
-        if (Visited.insert(cast<Instruction>(*UI)))
-          Uses.push_back(std::make_pair(I, cast<Instruction>(*UI)));
+      for (User *U : I->users())
+        if (Visited.insert(cast<Instruction>(U)))
+          Uses.push_back(std::make_pair(I, cast<Instruction>(U)));
     } while (!Uses.empty());
 
-    return 0;
+    return nullptr;
   }
 
   void visitPHINode(PHINode &PN) {
@@ -725,7 +725,7 @@ AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
       AI(AI),
 #endif
-      PointerEscapingInstr(0) {
+      PointerEscapingInstr(nullptr) {
   SliceBuilder PB(DL, AI, *this);
   SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI);
   if (PtrI.isEscaped() || PtrI.isAborted()) {
@@ -817,12 +817,10 @@ public:
     // Retain the debug information attached to the alloca for use when
     // rewriting loads and stores.
     if (MDNode *DebugNode = MDNode::getIfExists(AI.getContext(), &AI)) {
-      for (Value::use_iterator UI = DebugNode->use_begin(),
-                               UE = DebugNode->use_end();
-           UI != UE; ++UI)
-        if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
+      for (User *U : DebugNode->users())
+        if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
           DDIs.push_back(DDI);
-        else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(*UI))
+        else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
           DVIs.push_back(DVI);
     }
 
@@ -836,8 +834,8 @@ public:
       DVIs.pop_back_val()->eraseFromParent();
   }
 
-  virtual bool isInstInList(Instruction *I,
-                            const SmallVectorImpl<Instruction *> &Insts) const {
+  bool isInstInList(Instruction *I,
+                    const SmallVectorImpl<Instruction *> &Insts) const override {
     Value *Ptr;
     if (LoadInst *LI = dyn_cast<LoadInst>(I))
       Ptr = LI->getOperand(0);
@@ -864,7 +862,7 @@ public:
     return false;
   }
 
-  virtual void updateDebugInfo(Instruction *Inst) const {
+  void updateDebugInfo(Instruction *Inst) const override {
    for (SmallVectorImpl<DbgDeclareInst *>::const_iterator I = DDIs.begin(),
           E = DDIs.end(); I != E; ++I) {
      DbgDeclareInst *DDI = *I;
@@ -876,7 +874,7 @@ public:
    for (SmallVectorImpl<DbgValueInst *>::const_iterator I = DVIs.begin(),
           E = DVIs.end(); I != E; ++I) {
      DbgValueInst *DVI = *I;
-      Value *Arg = 0;
+      Value *Arg = nullptr;
      if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        // If an argument is zero extended then use argument directly. The ZExt
        // may be zapped by an optimization pass in future.
@@ -972,13 +970,13 @@ class SROA : public FunctionPass {
 public:
   SROA(bool RequiresDomTree = true)
       : FunctionPass(ID), RequiresDomTree(RequiresDomTree),
-        C(0), DL(0), DT(0) {
+        C(nullptr), DL(nullptr), DT(nullptr) {
     initializeSROAPass(*PassRegistry::getPassRegistry());
   }
-  bool runOnFunction(Function &F);
-  void getAnalysisUsage(AnalysisUsage &AU) const;
+  bool runOnFunction(Function &F) override;
+  void getAnalysisUsage(AnalysisUsage &AU) const override;
 
-  const char *getPassName() const { return "SROA"; }
+  const char *getPassName() const override { return "SROA"; }
   static char ID;
 
 private:
@@ -1014,9 +1012,9 @@ INITIALIZE_PASS_END(SROA, "sroa", "Scalar Replacement Of Aggregates",
 static Type *findCommonType(AllocaSlices::const_iterator B,
                             AllocaSlices::const_iterator E,
                             uint64_t EndOffset) {
-  Type *Ty = 0;
+  Type *Ty = nullptr;
   bool TyIsCommon = true;
-  IntegerType *ITy = 0;
+  IntegerType *ITy = nullptr;
 
   // Note that we need to look at *every* alloca slice's Use to ensure we
   // always get consistent results regardless of the order of slices.
@@ -1027,7 +1025,7 @@ static Type *findCommonType(AllocaSlices::const_iterator B,
     if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset)
       continue;
 
-    Type *UserTy = 0;
+    Type *UserTy = nullptr;
     if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
       UserTy = LI->getType();
     } else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
@@ -1077,7 +1075,7 @@ static Type *findCommonType(AllocaSlices::const_iterator B,
 /// FIXME: This should be hoisted into a generic utility, likely in
 /// Transforms/Util/Local.h
 static bool isSafePHIToSpeculate(PHINode &PN,
-                                 const DataLayout *DL = 0) {
+                                 const DataLayout *DL = nullptr) {
   // For now, we can only do this promotion if the load is in the same block
   // as the PHI, and if there are no stores between the phi and load.
   // TODO: Allow recursive phi users.
@@ -1085,10 +1083,9 @@ static bool isSafePHIToSpeculate(PHINode &PN,
   BasicBlock *BB = PN.getParent();
   unsigned MaxAlign = 0;
   bool HaveLoad = false;
-  for (Value::use_iterator UI = PN.use_begin(), UE = PN.use_end(); UI != UE;
-       ++UI) {
-    LoadInst *LI = dyn_cast<LoadInst>(*UI);
-    if (LI == 0 || !LI->isSimple())
+  for (User *U : PN.users()) {
+    LoadInst *LI = dyn_cast<LoadInst>(U);
+    if (!LI || !LI->isSimple())
       return false;
 
     // For now we only allow loads in the same block as the PHI. This is
@@ -1151,13 +1148,13 @@ static void speculatePHINodeLoads(PHINode &PN) {
 
   // Get the TBAA tag and alignment to use from one of the loads. It doesn't
   // matter which one we get and if any differ.
-  LoadInst *SomeLoad = cast<LoadInst>(*PN.use_begin());
+  LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
   MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa);
   unsigned Align = SomeLoad->getAlignment();
 
   // Rewrite all loads of the PN to use the new PHI.
   while (!PN.use_empty()) {
-    LoadInst *LI = cast<LoadInst>(*PN.use_begin());
+    LoadInst *LI = cast<LoadInst>(PN.user_back());
     LI->replaceAllUsesWith(NewPN);
     LI->eraseFromParent();
   }
@@ -1195,16 +1192,16 @@ static void speculatePHINodeLoads(PHINode &PN) {
 ///
 /// We can do this to a select if its only uses are loads and if the operand
 /// to the select can be loaded unconditionally.
-static bool isSafeSelectToSpeculate(SelectInst &SI, const DataLayout *DL = 0) {
+static bool isSafeSelectToSpeculate(SelectInst &SI,
+                                    const DataLayout *DL = nullptr) {
   Value *TValue = SI.getTrueValue();
   Value *FValue = SI.getFalseValue();
   bool TDerefable = TValue->isDereferenceablePointer();
   bool FDerefable = FValue->isDereferenceablePointer();
 
-  for (Value::use_iterator UI = SI.use_begin(), UE = SI.use_end(); UI != UE;
-       ++UI) {
-    LoadInst *LI = dyn_cast<LoadInst>(*UI);
-    if (LI == 0 || !LI->isSimple())
+  for (User *U : SI.users()) {
+    LoadInst *LI = dyn_cast<LoadInst>(U);
+    if (!LI || !LI->isSimple())
      return false;
 
    // Both operands to the select need to be dereferencable, either
@@ -1229,7 +1226,7 @@ static void speculateSelectInstLoads(SelectInst &SI) {
   Value *FV = SI.getFalseValue();
   // Replace the loads of the select with a select of two loads.
   while (!SI.use_empty()) {
-    LoadInst *LI = cast<LoadInst>(*SI.use_begin());
+    LoadInst *LI = cast<LoadInst>(SI.user_back());
     assert(LI->isSimple() && "We only speculate simple loads");
 
     IRB.SetInsertPoint(LI);
@@ -1290,6 +1287,9 @@ static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
   if (Ty == TargetTy)
     return buildGEP(IRB, BasePtr, Indices, NamePrefix);
 
+  // Pointer size to use for the indices.
+  unsigned PtrSize = DL.getPointerTypeSizeInBits(BasePtr->getType());
+
   // See if we can descend into a struct and locate a field with the correct
   // type.
   unsigned NumLayers = 0;
@@ -1297,11 +1297,13 @@ static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
   do {
     if (ElementTy->isPointerTy())
       break;
-    if (SequentialType *SeqTy = dyn_cast<SequentialType>(ElementTy)) {
-      ElementTy = SeqTy->getElementType();
-      // Note that we use the default address space as this index is over an
-      // array or a vector, not a pointer.
-      Indices.push_back(IRB.getInt(APInt(DL.getPointerSizeInBits(0), 0)));
+
+    if (ArrayType *ArrayTy = dyn_cast<ArrayType>(ElementTy)) {
+      ElementTy = ArrayTy->getElementType();
+      Indices.push_back(IRB.getIntN(PtrSize, 0));
+    } else if (VectorType *VectorTy = dyn_cast<VectorType>(ElementTy)) {
+      ElementTy = VectorTy->getElementType();
+      Indices.push_back(IRB.getInt32(0));
     } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
       if (STy->element_begin() == STy->element_end())
         break; // Nothing left to descend into.
@@ -1332,19 +1334,21 @@ static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
 
   // We can't recurse through pointer types.
   if (Ty->isPointerTy())
-    return 0;
+    return nullptr;
 
   // We try to analyze GEPs over vectors here, but note that these GEPs are
   // extremely poorly defined currently. The long-term goal is to remove GEPing
   // over a vector from the IR completely.
   if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
     unsigned ElementSizeInBits = DL.getTypeSizeInBits(VecTy->getScalarType());
-    if (ElementSizeInBits % 8)
-      return 0; // GEPs over non-multiple of 8 size vector elements are invalid.
+    if (ElementSizeInBits % 8 != 0) {
+      // GEPs over non-multiple of 8 size vector elements are invalid.
+      return nullptr;
+    }
     APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
     APInt NumSkippedElements = Offset.sdiv(ElementSize);
     if (NumSkippedElements.ugt(VecTy->getNumElements()))
-      return 0;
+      return nullptr;
     Offset -= NumSkippedElements * ElementSize;
     Indices.push_back(IRB.getInt(NumSkippedElements));
     return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(),
@@ -1356,7 +1360,7 @@ static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
     APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
     APInt NumSkippedElements = Offset.sdiv(ElementSize);
     if (NumSkippedElements.ugt(ArrTy->getNumElements()))
-      return 0;
+      return nullptr;
 
     Offset -= NumSkippedElements * ElementSize;
     Indices.push_back(IRB.getInt(NumSkippedElements));
@@ -1366,17 +1370,17 @@ static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
 
   StructType *STy = dyn_cast<StructType>(Ty);
   if (!STy)
-    return 0;
+    return nullptr;
 
   const StructLayout *SL = DL.getStructLayout(STy);
   uint64_t StructOffset = Offset.getZExtValue();
   if (StructOffset >= SL->getSizeInBytes())
-    return 0;
+    return nullptr;
   unsigned Index = SL->getElementContainingOffset(StructOffset);
   Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
   Type *ElementTy = STy->getElementType(Index);
   if (Offset.uge(DL.getTypeAllocSize(ElementTy)))
-    return 0; // The offset points into alignment padding.
+    return nullptr; // The offset points into alignment padding.
 
   Indices.push_back(IRB.getInt32(Index));
   return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
@@ -1401,15 +1405,15 @@ static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
 
   // Don't consider any GEPs through an i8* as natural unless the TargetTy is
   // an i8.
-  if (Ty == IRB.getInt8PtrTy() && TargetTy->isIntegerTy(8))
-    return 0;
+  if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8))
+    return nullptr;
 
   Type *ElementTy = Ty->getElementType();
   if (!ElementTy->isSized())
-    return 0; // We can't GEP through an unsized element.
+    return nullptr; // We can't GEP through an unsized element.
   APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
   if (ElementSize == 0)
-    return 0; // Zero-length arrays can't help us build a natural GEP.
+    return nullptr; // Zero-length arrays can't help us build a natural GEP.
   APInt NumSkippedElements = Offset.sdiv(ElementSize);
 
   Offset -= NumSkippedElements * ElementSize;
@@ -1445,11 +1449,11 @@ static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
   // We may end up computing an offset pointer that has the wrong type. If we
   // never are able to compute one directly that has the correct type, we'll
   // fall back to it, so keep it around here.
-  Value *OffsetPtr = 0;
+  Value *OffsetPtr = nullptr;
 
   // Remember any i8 pointer we come across to re-use if we need to do a raw
   // byte offset.
-  Value *Int8Ptr = 0;
+  Value *Int8Ptr = nullptr;
   APInt Int8PtrOffset(Offset.getBitWidth(), 0);
 
   Type *TargetTy = PointerTy->getPointerElementType();
@@ -1503,8 +1507,9 @@ static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
 
   if (!OffsetPtr) {
     if (!Int8Ptr) {
-      Int8Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy(),
-                                  NamePrefix + "sroa_raw_cast");
+      Int8Ptr = IRB.CreateBitCast(
+          Ptr, IRB.getInt8PtrTy(PointerTy->getPointerAddressSpace()),
+          NamePrefix + "sroa_raw_cast");
       Int8PtrOffset = Offset;
     }
 
@@ -2042,14 +2047,14 @@ public:
         NewAllocaBeginOffset(NewAllocaBeginOffset),
         NewAllocaEndOffset(NewAllocaEndOffset),
         NewAllocaTy(NewAI.getAllocatedType()),
-        VecTy(IsVectorPromotable ? cast<VectorType>(NewAllocaTy) : 0),
-        ElementTy(VecTy ? VecTy->getElementType() : 0),
+        VecTy(IsVectorPromotable ? cast<VectorType>(NewAllocaTy) : nullptr),
+        ElementTy(VecTy ? VecTy->getElementType() : nullptr),
         ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy) / 8 : 0),
         IntTy(IsIntegerPromotable
                   ? Type::getIntNTy(
                         NewAI.getContext(),
                         DL.getTypeSizeInBits(NewAI.getAllocatedType()))
-                  : 0),
+                  : nullptr),
         BeginOffset(), EndOffset(), IsSplittable(), IsSplit(), OldUse(),
         OldPtr(), PHIUsers(PHIUsers), SelectUsers(SelectUsers),
         IRB(NewAI.getContext(), ConstantFolder()) {
@@ -2143,7 +2148,7 @@ private:
   ///
   /// You can optionally pass a type to this routine and if that type's ABI
   /// alignment is itself suitable, this will return zero.
-  unsigned getSliceAlign(Type *Ty = 0) {
+  unsigned getSliceAlign(Type *Ty = nullptr) {
     unsigned NewAIAlign = NewAI.getAlignment();
     if (!NewAIAlign)
       NewAIAlign = DL.getABITypeAlignment(NewAI.getAllocatedType());
@@ -2226,7 +2231,7 @@ private:
                DL.getTypeStoreSizeInBits(LI.getType()) &&
            "Non-byte-multiple bit width");
     // Move the insertion point just past the load so that we can refer to it.
-    IRB.SetInsertPoint(llvm::next(BasicBlock::iterator(&LI)));
+    IRB.SetInsertPoint(std::next(BasicBlock::iterator(&LI)));
     // Create a placeholder value with the same type as LI to use as the
     // basis for the new value. This allows us to replace the uses of LI with
     // the computed value, and then replace the placeholder with LI, leaving
@@ -2494,14 +2499,7 @@ private:
     assert((IsDest && II.getRawDest() == OldPtr) ||
            (!IsDest && II.getRawSource() == OldPtr));
 
-    // Compute the relative offset within the transfer.
-    unsigned IntPtrWidth = DL.getPointerSizeInBits();
-    APInt RelOffset(IntPtrWidth, NewBeginOffset - BeginOffset);
-
-    unsigned Align = II.getAlignment();
-    if (Align > 1)
-      Align = MinAlign(RelOffset.zextOrTrunc(64).getZExtValue(),
-                       MinAlign(II.getAlignment(), getSliceAlign()));
+    unsigned SliceAlign = getSliceAlign();
 
     // For unsplit intrinsics, we simply modify the source and destination
     // pointers in place. This isn't just an optimization, it is a matter of
@@ -2517,8 +2515,11 @@ private:
     else
       II.setSource(AdjustedPtr);
 
-    Type *CstTy = II.getAlignmentCst()->getType();
-    II.setAlignment(ConstantInt::get(CstTy, Align));
+    if (II.getAlignment() > SliceAlign) {
+      Type *CstTy = II.getAlignmentCst()->getType();
+      II.setAlignment(
+          ConstantInt::get(CstTy, MinAlign(II.getAlignment(), SliceAlign)));
+    }
 
     DEBUG(dbgs() << " to: " << II << "\n");
     deleteIfTriviallyDead(OldPtr);
@@ -2563,32 +2564,33 @@ private:
       Pass.Worklist.insert(AI);
     }
 
-    if (EmitMemCpy) {
-      Type *OtherPtrTy = OtherPtr->getType();
+    Type *OtherPtrTy = OtherPtr->getType();
+    unsigned OtherAS = OtherPtrTy->getPointerAddressSpace();
+
+    // Compute the relative offset for the other pointer within the transfer.
+    unsigned IntPtrWidth = DL.getPointerSizeInBits(OtherAS);
+    APInt OtherOffset(IntPtrWidth, NewBeginOffset - BeginOffset);
+    unsigned OtherAlign = MinAlign(II.getAlignment() ? II.getAlignment() : 1,
+                                   OtherOffset.zextOrTrunc(64).getZExtValue());
+
+    if (EmitMemCpy) {
       // Compute the other pointer, folding as much as possible to produce
       // a single, simple GEP in most cases.
-      OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, RelOffset, OtherPtrTy,
+      OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
                                 OtherPtr->getName() + ".");
 
       Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
       Type *SizeTy = II.getLength()->getType();
       Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
 
-      CallInst *New = IRB.CreateMemCpy(IsDest ? OurPtr : OtherPtr,
-                                       IsDest ? OtherPtr : OurPtr,
-                                       Size, Align, II.isVolatile());
+      CallInst *New = IRB.CreateMemCpy(
+          IsDest ? OurPtr : OtherPtr, IsDest ? OtherPtr : OurPtr, Size,
+          MinAlign(SliceAlign, OtherAlign), II.isVolatile());
       (void)New;
       DEBUG(dbgs() << " to: " << *New << "\n");
       return false;
     }
 
-    // Note that we clamp the alignment to 1 here as a 0 alignment for a memcpy
-    // is equivalent to 1, but that isn't true if we end up rewriting this as
-    // a load or store.
-    if (!Align)
-      Align = 1;
-
     bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset &&
                          NewEndOffset == NewAllocaEndOffset;
     uint64_t Size = NewEndOffset - NewBeginOffset;
@@ -2596,25 +2598,32 @@ private:
     unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0;
     unsigned NumElements = EndIndex - BeginIndex;
     IntegerType *SubIntTy
-      = IntTy ? Type::getIntNTy(IntTy->getContext(), Size*8) : 0;
+      = IntTy ? Type::getIntNTy(IntTy->getContext(), Size*8) : nullptr;
 
-    Type *OtherPtrTy = NewAI.getType();
+    // Reset the other pointer type to match the register type we're going to
+    // use, but using the address space of the original other pointer.
     if (VecTy && !IsWholeAlloca) {
       if (NumElements == 1)
         OtherPtrTy = VecTy->getElementType();
       else
         OtherPtrTy = VectorType::get(VecTy->getElementType(), NumElements);
 
-      OtherPtrTy = OtherPtrTy->getPointerTo();
+      OtherPtrTy = OtherPtrTy->getPointerTo(OtherAS);
     } else if (IntTy && !IsWholeAlloca) {
-      OtherPtrTy = SubIntTy->getPointerTo();
+      OtherPtrTy = SubIntTy->getPointerTo(OtherAS);
+    } else {
+      OtherPtrTy = NewAllocaTy->getPointerTo(OtherAS);
     }
 
-    Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, RelOffset, OtherPtrTy,
+    Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
                                    OtherPtr->getName() + ".");
+    unsigned SrcAlign = OtherAlign;
     Value *DstPtr = &NewAI;
-    if (!IsDest)
+    unsigned DstAlign = SliceAlign;
+    if (!IsDest) {
       std::swap(SrcPtr, DstPtr);
+      std::swap(SrcAlign, DstAlign);
+    }
 
     Value *Src;
     if (VecTy && !IsWholeAlloca && !IsDest) {
@@ -2628,7 +2637,7 @@ private:
       uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
       Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract");
     } else {
-      Src = IRB.CreateAlignedLoad(SrcPtr, Align, II.isVolatile(),
+      Src = IRB.CreateAlignedLoad(SrcPtr, SrcAlign, II.isVolatile(),
                                   "copyload");
     }
 
@@ -2646,7 +2655,7 @@ private:
     }
 
     StoreInst *Store = cast<StoreInst>(
-        IRB.CreateAlignedStore(Src, DstPtr, Align, II.isVolatile()));
+        IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile()));
     (void)Store;
     DEBUG(dbgs() << " to: " << *Store << "\n");
     return !II.isVolatile();
@@ -2772,10 +2781,9 @@ private:
   /// Enqueue all the users of the given instruction for further processing.
   /// This uses a set to de-duplicate users.
   void enqueueUsers(Instruction &I) {
-    for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE;
-         ++UI)
-      if (Visited.insert(*UI))
-        Queue.push_back(&UI.getUse());
+    for (Use &U : I.uses())
+      if (Visited.insert(U.getUser()))
+        Queue.push_back(&U);
   }
 
   // Conservative default is to not rewrite anything.
@@ -2988,22 +2996,22 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty,
     return stripAggregateTypeWrapping(DL, Ty);
   if (Offset > DL.getTypeAllocSize(Ty) ||
       (DL.getTypeAllocSize(Ty) - Offset) < Size)
-    return 0;
+    return nullptr;
 
   if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) {
     // We can't partition pointers...
     if (SeqTy->isPointerTy())
-      return 0;
+      return nullptr;
 
     Type *ElementTy = SeqTy->getElementType();
     uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
     uint64_t NumSkippedElements = Offset / ElementSize;
     if (ArrayType *ArrTy = dyn_cast<ArrayType>(SeqTy)) {
       if (NumSkippedElements >= ArrTy->getNumElements())
-        return 0;
+        return nullptr;
     } else if (VectorType *VecTy = dyn_cast<VectorType>(SeqTy)) {
       if (NumSkippedElements >= VecTy->getNumElements())
-        return 0;
+        return nullptr;
     }
     Offset -= NumSkippedElements * ElementSize;
@@ -3011,7 +3019,7 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty,
     if (Offset > 0 || Size < ElementSize) {
       // Bail if the partition ends in a different array element.
       if ((Offset + Size) > ElementSize)
-        return 0;
+        return nullptr;
       // Recurse through the element type trying to peel off offset bytes.
       return getTypePartition(DL, ElementTy, Offset, Size);
     }
@@ -3022,20 +3030,20 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty,
     assert(Size > ElementSize);
     uint64_t NumElements = Size / ElementSize;
     if (NumElements * ElementSize != Size)
-      return 0;
+      return nullptr;
     return ArrayType::get(ElementTy, NumElements);
   }
 
   StructType *STy = dyn_cast<StructType>(Ty);
   if (!STy)
-    return 0;
+    return nullptr;
 
   const StructLayout *SL = DL.getStructLayout(STy);
   if (Offset >= SL->getSizeInBytes())
-    return 0;
+    return nullptr;
   uint64_t EndOffset = Offset + Size;
   if (EndOffset > SL->getSizeInBytes())
-    return 0;
+    return nullptr;
 
   unsigned Index = SL->getElementContainingOffset(Offset);
   Offset -= SL->getElementOffset(Index);
@@ -3043,12 +3051,12 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty,
   Type *ElementTy = STy->getElementType(Index);
   uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
   if (Offset >= ElementSize)
-    return 0; // The offset points into alignment padding.
+    return nullptr; // The offset points into alignment padding.
 
   // See if any partition must be contained by the element.
   if (Offset > 0 || Size < ElementSize) {
     if ((Offset + Size) > ElementSize)
-      return 0;
+      return nullptr;
     return getTypePartition(DL, ElementTy, Offset, Size);
   }
   assert(Offset == 0);
@@ -3061,14 +3069,14 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty,
   if (EndOffset < SL->getSizeInBytes()) {
     unsigned EndIndex = SL->getElementContainingOffset(EndOffset);
     if (Index == EndIndex)
-      return 0; // Within a single element and its padding.
+      return nullptr; // Within a single element and its padding.
 
     // Don't try to form "natural" types if the elements don't line up with the
    // expected size.
    // FIXME: We could potentially recurse down through the last element in the
    // sub-struct to find a natural end point.
    if (SL->getElementOffset(EndIndex) != EndOffset)
-      return 0;
+      return nullptr;
 
     assert(Index < EndIndex);
     EE = STy->element_begin() + EndIndex;
@@ -3079,7 +3087,7 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty,
                                       STy->isPacked());
   const StructLayout *SubSL = DL.getStructLayout(SubTy);
   if (Size != SubSL->getSizeInBytes())
-    return 0; // The sub-struct doesn't have quite the size needed.
+    return nullptr; // The sub-struct doesn't have quite the size needed.
 
   return SubTy;
 }
@@ -3104,7 +3112,7 @@ bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &S,
   // Try to compute a friendly type for this partition of the alloca. This
   // won't always succeed, in which case we fall back to a legal integer type
   // or an i8 array of an appropriate size.
-  Type *SliceTy = 0;
+  Type *SliceTy = nullptr;
   if (Type *CommonUseTy = findCommonType(B, E, EndOffset))
     if (DL->getTypeAllocSize(CommonUseTy) >= SliceSize)
       SliceTy = CommonUseTy;
@@ -3151,7 +3159,7 @@ bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &S,
     // the alloca's alignment unconstrained.
     if (Alignment <= DL->getABITypeAlignment(SliceTy))
       Alignment = 0;
-    NewAI = new AllocaInst(SliceTy, 0, Alignment,
+    NewAI = new AllocaInst(SliceTy, nullptr, Alignment,
                            AI.getName() + ".sroa." + Twine(B - S.begin()), &AI);
     ++NumNewAllocas;
   }
@@ -3245,18 +3253,6 @@ bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &S,
   return true;
 }
 
-namespace {
-struct IsSliceEndLessOrEqualTo {
-  uint64_t UpperBound;
-
-  IsSliceEndLessOrEqualTo(uint64_t UpperBound) : UpperBound(UpperBound) {}
-
-  bool operator()(const AllocaSlices::iterator &I) {
-    return I->endOffset() <= UpperBound;
-  }
-};
-}
-
 static void
 removeFinishedSplitUses(SmallVectorImpl<AllocaSlices::iterator> &SplitUses,
                         uint64_t &MaxSplitUseEndOffset, uint64_t Offset) {
@@ -3268,7 +3264,9 @@ removeFinishedSplitUses(SmallVectorImpl<AllocaSlices::iterator> &SplitUses,
 
   size_t SplitUsesOldSize = SplitUses.size();
   SplitUses.erase(std::remove_if(SplitUses.begin(), SplitUses.end(),
-                                 IsSliceEndLessOrEqualTo(Offset)),
+                                 [Offset](const AllocaSlices::iterator &I) {
+                                   return I->endOffset() <= Offset;
+                                 }),
                   SplitUses.end());
   if (SplitUsesOldSize == SplitUses.size())
     return;
@@ -3295,7 +3293,7 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &S) {
 
   uint64_t BeginOffset = S.begin()->beginOffset();
 
-  for (AllocaSlices::iterator SI = S.begin(), SJ = llvm::next(SI), SE = S.end();
+  for (AllocaSlices::iterator SI = S.begin(), SJ = std::next(SI), SE = S.end();
        SI != SE; SI = SJ) {
     uint64_t MaxEndOffset = SI->endOffset();
@@ -3447,9 +3445,8 @@ bool SROA::runOnAlloca(AllocaInst &AI) {
                                         DE = S.dead_user_end();
        DI != DE; ++DI) {
     // Free up everything used by this instruction.
-    for (User::op_iterator DOI = (*DI)->op_begin(), DOE = (*DI)->op_end();
-         DOI != DOE; ++DOI)
-      clobberUse(*DOI);
+    for (Use &DeadOp : (*DI)->operands())
+      clobberUse(DeadOp);
 
     // Now replace the uses of this instruction.
     (*DI)->replaceAllUsesWith(UndefValue::get((*DI)->getType()));
@@ -3498,10 +3495,10 @@ void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst *, 4> &DeletedAllocas) {
 
     I->replaceAllUsesWith(UndefValue::get(I->getType()));
 
-    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
-      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
+    for (Use &Operand : I->operands())
+      if (Instruction *U = dyn_cast<Instruction>(Operand)) {
        // Zero out the operand and see if it becomes trivially dead.
-        *OI = 0;
+        Operand = nullptr;
        if (isInstructionTriviallyDead(U))
          DeadInsts.insert(U);
      }
@@ -3517,10 +3514,9 @@ void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst *, 4> &DeletedAllocas) {
 static void enqueueUsersInWorklist(Instruction &I,
                                    SmallVectorImpl<Instruction *> &Worklist,
                                    SmallPtrSet<Instruction *, 8> &Visited) {
-  for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE;
-       ++UI)
-    if (Visited.insert(cast<Instruction>(*UI)))
-      Worklist.push_back(cast<Instruction>(*UI));
+  for (User *U : I.users())
+    if (Visited.insert(cast<Instruction>(U)))
+      Worklist.push_back(cast<Instruction>(U));
 }
 
 /// \brief Promote the allocas, using the best available technique.
@@ -3606,20 +3602,6 @@ bool SROA::promoteAllocas(Function &F) {
   return true;
 }
 
-namespace {
-  /// \brief A predicate to test whether an alloca belongs to a set.
-  class IsAllocaInSet {
-    typedef SmallPtrSet<AllocaInst *, 4> SetType;
-    const SetType &Set;
-
-  public:
-    typedef AllocaInst *argument_type;
-
-    IsAllocaInSet(const SetType &Set) : Set(Set) {}
-    bool operator()(AllocaInst *AI) const { return Set.count(AI); }
-  };
-}
-
 bool SROA::runOnFunction(Function &F) {
   if (skipOptnoneFunction(F))
     return false;
@@ -3634,10 +3616,10 @@ bool SROA::runOnFunction(Function &F) {
     DL = &DLP->getDataLayout();
 
   DominatorTreeWrapperPass *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
-  DT = DTWP ? &DTWP->getDomTree() : 0;
+  DT = DTWP ? &DTWP->getDomTree() : nullptr;
 
   BasicBlock &EntryBB = F.getEntryBlock();
-  for (BasicBlock::iterator I = EntryBB.begin(), E = llvm::prior(EntryBB.end());
+  for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
       I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      Worklist.insert(AI);
@@ -3655,11 +3637,14 @@ bool SROA::runOnFunction(Function &F) {
     // Remove the deleted allocas from various lists so that we don't try to
     // continue processing them.
     if (!DeletedAllocas.empty()) {
-      Worklist.remove_if(IsAllocaInSet(DeletedAllocas));
-      PostPromotionWorklist.remove_if(IsAllocaInSet(DeletedAllocas));
+      auto IsInSet = [&](AllocaInst *AI) {
+        return DeletedAllocas.count(AI);
+      };
+      Worklist.remove_if(IsInSet);
+      PostPromotionWorklist.remove_if(IsInSet);
       PromotableAllocas.erase(std::remove_if(PromotableAllocas.begin(),
                                              PromotableAllocas.end(),
-                                             IsAllocaInSet(DeletedAllocas)),
+                                             IsInSet),
                               PromotableAllocas.end());
       DeletedAllocas.clear();
     }