From: Rafael Espindola
Date: Mon, 24 Feb 2014 23:12:18 +0000 (+0000)
Subject: Make some DataLayout pointers const.
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=ec89b9fb9ed78dbb783897774e5d7bab376c169a;p=oota-llvm.git

Make some DataLayout pointers const.

No functionality change. Just reduces the noise of an upcoming patch.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@202087 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/include/llvm/Analysis/IVUsers.h b/include/llvm/Analysis/IVUsers.h
index 21a85eb002d..58d347ed19b 100644
--- a/include/llvm/Analysis/IVUsers.h
+++ b/include/llvm/Analysis/IVUsers.h
@@ -122,7 +122,7 @@ class IVUsers : public LoopPass {
   LoopInfo *LI;
   DominatorTree *DT;
   ScalarEvolution *SE;
-  DataLayout *DL;
+  const DataLayout *DL;
   SmallPtrSet<Instruction*,16> Processed;
 
   /// IVUses - A list of all tracked IV uses of induction variable expressions
diff --git a/include/llvm/Analysis/LazyValueInfo.h b/include/llvm/Analysis/LazyValueInfo.h
index 99f0bb8c2df..5db048d0524 100644
--- a/include/llvm/Analysis/LazyValueInfo.h
+++ b/include/llvm/Analysis/LazyValueInfo.h
@@ -26,7 +26,7 @@ namespace llvm {
/// LazyValueInfo - This pass computes, caches, and vends lazy value constraint
/// information.
 class LazyValueInfo : public FunctionPass {
-  class DataLayout *DL;
+  const DataLayout *DL;
   class TargetLibraryInfo *TLI;
   void *PImpl;
   LazyValueInfo(const LazyValueInfo&) LLVM_DELETED_FUNCTION;
diff --git a/include/llvm/Analysis/MemoryDependenceAnalysis.h b/include/llvm/Analysis/MemoryDependenceAnalysis.h
index 786948a8d89..b4b401a1e6b 100644
--- a/include/llvm/Analysis/MemoryDependenceAnalysis.h
+++ b/include/llvm/Analysis/MemoryDependenceAnalysis.h
@@ -323,7 +323,7 @@ namespace llvm {
 
    /// Current AA implementation, just a cache.
    AliasAnalysis *AA;
-    DataLayout *DL;
+    const DataLayout *DL;
    DominatorTree *DT;
    OwningPtr<PredIteratorCache> PredCache;
  public:
diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h
index b00ffa9f67c..e1d4861a555 100644
--- a/include/llvm/Analysis/ScalarEvolution.h
+++ b/include/llvm/Analysis/ScalarEvolution.h
@@ -227,7 +227,7 @@ namespace llvm {
 
    /// The DataLayout information for the target we are targeting.
    ///
-    DataLayout *DL;
+    const DataLayout *DL;
 
    /// TLI - The target library information for the target we are targeting.
    ///
diff --git a/lib/Analysis/Lint.cpp b/lib/Analysis/Lint.cpp
index 84707b4dd54..069fe113afc 100644
--- a/lib/Analysis/Lint.cpp
+++ b/lib/Analysis/Lint.cpp
@@ -102,7 +102,7 @@ namespace {
    Module *Mod;
    AliasAnalysis *AA;
    DominatorTree *DT;
-    DataLayout *DL;
+    const DataLayout *DL;
    TargetLibraryInfo *TLI;
 
    std::string Messages;
@@ -503,7 +503,7 @@ void Lint::visitShl(BinaryOperator &I) {
          "Undefined result: Shift count out of range", &I);
 }
 
-static bool isZero(Value *V, DataLayout *DL) {
+static bool isZero(Value *V, const DataLayout *DL) {
   // Assume undef could be zero.
   if (isa<UndefValue>(V)) return true;
 
diff --git a/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp b/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
index fd07c7fb254..fab84cc9172 100644
--- a/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
+++ b/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
@@ -104,7 +104,7 @@ bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
   SmallVector aggrMemcpys;
   SmallVector aggrMemsets;
 
-  DataLayout *DL = &getAnalysis<DataLayout>();
+  const DataLayout *DL = &getAnalysis<DataLayout>();
   LLVMContext &Context = F.getParent()->getContext();
 
   //
diff --git a/lib/Target/PowerPC/PPCCTRLoops.cpp b/lib/Target/PowerPC/PPCCTRLoops.cpp
index 4c8aaf05efb..491d067078c 100644
--- a/lib/Target/PowerPC/PPCCTRLoops.cpp
+++ b/lib/Target/PowerPC/PPCCTRLoops.cpp
@@ -109,7 +109,7 @@ namespace {
    PPCTargetMachine *TM;
    LoopInfo *LI;
    ScalarEvolution *SE;
-    DataLayout *DL;
+    const DataLayout *DL;
    DominatorTree *DT;
    const TargetLibraryInfo *LibInfo;
  };
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index 8b20fcd2a4b..b4b8a0c650b 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -84,7 +84,7 @@ namespace {
                               const GlobalStatus &GS);
    bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);
 
-    DataLayout *DL;
+    const DataLayout *DL;
    TargetLibraryInfo *TLI;
  };
 }
@@ -266,7 +266,8 @@ static bool CleanupPointerRootUsers(GlobalVariable *GV,
 /// quick scan over the use list to clean up the easy and obvious cruft. This
 /// returns true if it made a change.
 static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
-                                       DataLayout *DL, TargetLibraryInfo *TLI) {
+                                       const DataLayout *DL,
+                                       TargetLibraryInfo *TLI) {
   bool Changed = false;
   // Note that we need to use a weak value handle for the worklist items. When
   // we delete a constant array, we may also be holding pointer to one of its
@@ -743,7 +744,7 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
 /// if the loaded value is dynamically null, then we know that they cannot be
 /// reachable with a null optimize away the load.
 static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
-                                            DataLayout *DL,
+                                            const DataLayout *DL,
                                             TargetLibraryInfo *TLI) {
   bool Changed = false;
 
@@ -806,8 +807,8 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
 
 /// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
 /// instructions that are foldable.
-static void ConstantPropUsersOf(Value *V,
-                                DataLayout *DL, TargetLibraryInfo *TLI) {
+static void ConstantPropUsersOf(Value *V, const DataLayout *DL,
+                                TargetLibraryInfo *TLI) {
   for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )
     if (Instruction *I = dyn_cast<Instruction>(*UI++))
       if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
@@ -830,7 +831,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
                                                     CallInst *CI,
                                                     Type *AllocTy,
                                                     ConstantInt *NElements,
-                                                     DataLayout *DL,
+                                                     const DataLayout *DL,
                                                     TargetLibraryInfo *TLI) {
   DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI << '\n');
 
@@ -1278,7 +1279,7 @@ static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
 
 /// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break
 /// it up into multiple allocations of arrays of the fields.
 static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
-                                            Value *NElems, DataLayout *DL,
+                                            Value *NElems, const DataLayout *DL,
                                             const TargetLibraryInfo *TLI) {
   DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n');
   Type *MAT = getMallocAllocatedType(CI, TLI);
@@ -1470,7 +1471,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
                                               Type *AllocTy,
                                               AtomicOrdering Ordering,
                                               Module::global_iterator &GVI,
-                                               DataLayout *DL,
+                                               const DataLayout *DL,
                                               TargetLibraryInfo *TLI) {
   if (!DL)
     return false;
@@ -1569,7 +1570,8 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
 static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
                                     AtomicOrdering Ordering,
                                     Module::global_iterator &GVI,
-                                     DataLayout *DL, TargetLibraryInfo *TLI) {
+                                     const DataLayout *DL,
+                                     TargetLibraryInfo *TLI) {
   // Ignore no-op GEPs and bitcasts.
   StoredOnceVal = StoredOnceVal->stripPointerCasts();
 
diff --git a/lib/Transforms/IPO/MergeFunctions.cpp b/lib/Transforms/IPO/MergeFunctions.cpp
index d7d4cc914d7..d518cc3de23 100644
--- a/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/lib/Transforms/IPO/MergeFunctions.cpp
@@ -108,12 +108,12 @@ public:
   static const ComparableFunction TombstoneKey;
   static DataLayout * const LookupOnly;
 
-  ComparableFunction(Function *Func, DataLayout *DL)
+  ComparableFunction(Function *Func, const DataLayout *DL)
     : Func(Func), Hash(profileFunction(Func)), DL(DL) {}
 
   Function *getFunc() const { return Func; }
   unsigned getHash() const { return Hash; }
-  DataLayout *getDataLayout() const { return DL; }
+  const DataLayout *getDataLayout() const { return DL; }
 
   // Drops AssertingVH reference to the function. Outside of debug mode, this
   // does nothing.
@@ -129,7 +129,7 @@ private:
 
   AssertingVH<Function> Func;
   unsigned Hash;
-  DataLayout *DL;
+  const DataLayout *DL;
 };
 
 const ComparableFunction ComparableFunction::EmptyKey = ComparableFunction(0);
diff --git a/lib/Transforms/InstCombine/InstCombine.h b/lib/Transforms/InstCombine/InstCombine.h
index 241db7a6a50..2a4b8a146d8 100644
--- a/lib/Transforms/InstCombine/InstCombine.h
+++ b/lib/Transforms/InstCombine/InstCombine.h
@@ -81,7 +81,7 @@ public:
 class LLVM_LIBRARY_VISIBILITY InstCombiner
                             : public FunctionPass,
                               public InstVisitor {
-  DataLayout *DL;
+  const DataLayout *DL;
   TargetLibraryInfo *TLI;
   bool MadeIRChange;
   LibCallSimplifier *Simplifier;
@@ -108,7 +108,7 @@ public:
 
   virtual void getAnalysisUsage(AnalysisUsage &AU) const;
 
-  DataLayout *getDataLayout() const { return DL; }
+  const DataLayout *getDataLayout() const { return DL; }
 
   TargetLibraryInfo *getTargetLibraryInfo() const { return TLI; }
 
diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 5c1d1b136b3..063ab171e9f 100644
--- a/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -235,7 +235,7 @@ isEliminableCastPair(
   const CastInst *CI,  ///< The first cast instruction
   unsigned opcode,     ///< The opcode of the second cast instruction
   Type *DstTy,         ///< The target type for the second cast instruction
-  DataLayout *DL       ///< The target data for pointer size
+  const DataLayout *DL ///< The target data for pointer size
 ) {
 
   Type *SrcTy = CI->getOperand(0)->getType();   // A from above
diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 4ad58c40e4b..dcd295718e9 100644
--- a/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -503,7 +503,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
 /// If we can't emit an optimized form for this expression, this returns null.
 ///
 static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
-  DataLayout &DL = *IC.getDataLayout();
+  const DataLayout &DL = *IC.getDataLayout();
   gep_type_iterator GTI = gep_type_begin(GEP);
 
   // Check to see if this gep only has a single variable index. If so, and if
diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 1e2f4b7e5cb..5b8df9c466f 100644
--- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -334,7 +334,7 @@ struct AddressSanitizer : public FunctionPass {
   SmallString<64> BlacklistFile;
 
   LLVMContext *C;
-  DataLayout *DL;
+  const DataLayout *DL;
   int LongSize;
   Type *IntptrTy;
   ShadowMapping Mapping;
@@ -383,7 +383,7 @@ class AddressSanitizerModule : public ModulePass {
   SetOfDynamicallyInitializedGlobals DynamicallyInitializedGlobals;
   Type *IntptrTy;
   LLVMContext *C;
-  DataLayout *DL;
+  const DataLayout *DL;
   ShadowMapping Mapping;
   Function *AsanPoisonGlobals;
   Function *AsanUnpoisonGlobals;
diff --git a/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index 338584c31cd..7dab7e30f7c 100644
--- a/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -164,7 +164,7 @@ class DataFlowSanitizer : public ModulePass {
    WK_Custom
  };
 
-  DataLayout *DL;
+  const DataLayout *DL;
  Module *Mod;
  LLVMContext *Ctx;
  IntegerType *ShadowTy;
diff --git a/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 7eea4bff596..13f431c7c73 100644
--- a/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -222,7 +222,7 @@ class MemorySanitizer : public FunctionPass {
   /// \brief Track origins (allocation points) of uninitialized values.
   bool TrackOrigins;
 
-  DataLayout *DL;
+  const DataLayout *DL;
   LLVMContext *C;
   Type *IntptrTy;
   Type *OriginTy;
diff --git a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index f2d43f28776..5e53ffb7934 100644
--- a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -96,7 +96,7 @@ struct ThreadSanitizer : public FunctionPass {
   bool addrPointsToConstantData(Value *Addr);
   int getMemoryAccessFuncIndex(Value *Addr);
 
-  DataLayout *DL;
+  const DataLayout *DL;
   Type *IntptrTy;
   SmallString<64> BlacklistFile;
   OwningPtr BL;
diff --git a/lib/Transforms/Scalar/ConstantProp.cpp b/lib/Transforms/Scalar/ConstantProp.cpp
index c15217c093e..76815a4cf0b 100644
--- a/lib/Transforms/Scalar/ConstantProp.cpp
+++ b/lib/Transforms/Scalar/ConstantProp.cpp
@@ -67,7 +67,7 @@ bool ConstantPropagation::runOnFunction(Function &F) {
      WorkList.insert(&*i);
  }
  bool Changed = false;
-  DataLayout *DL = getAnalysisIfAvailable<DataLayout>();
+  const DataLayout *DL = getAnalysisIfAvailable<DataLayout>();
  TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
 
  while (!WorkList.empty()) {
diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp
index 1543e5f1eb1..95cb670d904 100644
--- a/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -71,7 +71,7 @@ namespace {
    LoopInfo *LI;
    ScalarEvolution *SE;
    DominatorTree *DT;
-    DataLayout *DL;
+    const DataLayout *DL;
    TargetLibraryInfo *TLI;
 
    SmallVector DeadInsts;
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index e4b088d82be..d8ac289a7d4 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp
@@ -76,7 +76,7 @@ namespace {
  /// revectored to the false side of the second if.
  ///
  class JumpThreading : public FunctionPass {
-    DataLayout *DL;
+    const DataLayout *DL;
    TargetLibraryInfo *TLI;
    LazyValueInfo *LVI;
 #ifdef NDEBUG
diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp
index 2ba6210b47c..4a3fe42aa9c 100644
--- a/lib/Transforms/Scalar/LICM.cpp
+++ b/lib/Transforms/Scalar/LICM.cpp
@@ -108,7 +108,7 @@ namespace {
    LoopInfo      *LI;       // Current LoopInfo
    DominatorTree *DT;       // Dominator Tree for the current Loop.
 
-    DataLayout *DL;          // DataLayout for constant folding.
+    const DataLayout *DL;    // DataLayout for constant folding.
    TargetLibraryInfo *TLI;  // TargetLibraryInfo for constant folding.
 
    // State that is updated as we process loops.
diff --git a/lib/Transforms/Scalar/LoopRerollPass.cpp b/lib/Transforms/Scalar/LoopRerollPass.cpp
index 7245e0a5c50..1cd346f790b 100644
--- a/lib/Transforms/Scalar/LoopRerollPass.cpp
+++ b/lib/Transforms/Scalar/LoopRerollPass.cpp
@@ -141,7 +141,7 @@ protected:
    AliasAnalysis *AA;
    LoopInfo *LI;
    ScalarEvolution *SE;
-    DataLayout *DL;
+    const DataLayout *DL;
    TargetLibraryInfo *TLI;
    DominatorTree *DT;
 
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index ac7e7773aa1..8f954d5c848 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -87,7 +87,7 @@ namespace {
  private:
    bool HasDomTree;
 
-    DataLayout *DL;
+    const DataLayout *DL;
 
    /// DeadInsts - Keep track of instructions we have made dead, so that
    /// we can remove them after we are done working.
diff --git a/lib/Transforms/Vectorize/BBVectorize.cpp b/lib/Transforms/Vectorize/BBVectorize.cpp
index 865694611f5..ceb9f8c5193 100644
--- a/lib/Transforms/Vectorize/BBVectorize.cpp
+++ b/lib/Transforms/Vectorize/BBVectorize.cpp
@@ -214,7 +214,7 @@ namespace {
    AliasAnalysis *AA;
    DominatorTree *DT;
    ScalarEvolution *SE;
-    DataLayout *DL;
+    const DataLayout *DL;
    const TargetTransformInfo *TTI;
 
    // FIXME: const correct?
diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp
index d7d66e79161..31d11548d4f 100644
--- a/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -219,7 +219,7 @@ class LoopVectorizationCostModel;
 class InnerLoopVectorizer {
 public:
   InnerLoopVectorizer(Loop *OrigLoop, ScalarEvolution *SE, LoopInfo *LI,
-                      DominatorTree *DT, DataLayout *DL,
+                      DominatorTree *DT, const DataLayout *DL,
                      const TargetLibraryInfo *TLI, unsigned VecWidth,
                      unsigned UnrollFactor)
      : OrigLoop(OrigLoop), SE(SE), LI(LI), DT(DT), DL(DL), TLI(TLI),
@@ -379,7 +379,7 @@ protected:
   /// Dominator Tree.
   DominatorTree *DT;
   /// Data Layout.
-  DataLayout *DL;
+  const DataLayout *DL;
   /// Target Library Info.
   const TargetLibraryInfo *TLI;
 
@@ -428,7 +428,7 @@ protected:
 class InnerLoopUnroller : public InnerLoopVectorizer {
 public:
   InnerLoopUnroller(Loop *OrigLoop, ScalarEvolution *SE, LoopInfo *LI,
-                    DominatorTree *DT, DataLayout *DL,
+                    DominatorTree *DT, const DataLayout *DL,
                    const TargetLibraryInfo *TLI, unsigned UnrollFactor) :
    InnerLoopVectorizer(OrigLoop, SE, LI, DT, DL, TLI, 1, UnrollFactor) { }
 
@@ -487,7 +487,7 @@ public:
   unsigned NumStores;
   unsigned NumPredStores;
 
-  LoopVectorizationLegality(Loop *L, ScalarEvolution *SE, DataLayout *DL,
+  LoopVectorizationLegality(Loop *L, ScalarEvolution *SE, const DataLayout *DL,
                            DominatorTree *DT, TargetLibraryInfo *TLI)
      : NumLoads(0), NumStores(0), NumPredStores(0), TheLoop(L), SE(SE), DL(DL),
        DT(DT), TLI(TLI), Induction(0), WidestIndTy(0), HasFunNoNaNAttr(false),
@@ -725,7 +725,7 @@ private:
   /// Scev analysis.
   ScalarEvolution *SE;
   /// DataLayout analysis.
-  DataLayout *DL;
+  const DataLayout *DL;
   /// Dominators.
   DominatorTree *DT;
   /// Target Library Info.
@@ -775,7 +775,7 @@ public:
   LoopVectorizationCostModel(Loop *L, ScalarEvolution *SE, LoopInfo *LI,
                             LoopVectorizationLegality *Legal,
                             const TargetTransformInfo &TTI,
-                             DataLayout *DL, const TargetLibraryInfo *TLI)
+                             const DataLayout *DL, const TargetLibraryInfo *TLI)
      : TheLoop(L), SE(SE), LI(LI), Legal(Legal), TTI(TTI), DL(DL), TLI(TLI) {}
 
   /// Information about vectorization costs
@@ -848,7 +848,7 @@ private:
   /// Vector target information.
   const TargetTransformInfo &TTI;
   /// Target data layout information.
-  DataLayout *DL;
+  const DataLayout *DL;
   /// Target Library Info.
   const TargetLibraryInfo *TLI;
 };
@@ -1009,7 +1009,7 @@ struct LoopVectorize : public FunctionPass {
  }
 
  ScalarEvolution *SE;
-  DataLayout *DL;
+  const DataLayout *DL;
  LoopInfo *LI;
  TargetTransformInfo *TTI;
  DominatorTree *DT;
@@ -1283,7 +1283,7 @@ Value *InnerLoopVectorizer::getConsecutiveVector(Value* Val, int StartIdx,
 /// \brief Find the operand of the GEP that should be checked for consecutive
 /// stores. This ignores trailing indices that have no effect on the final
 /// pointer.
-static unsigned getGEPInductionOperand(DataLayout *DL,
+static unsigned getGEPInductionOperand(const DataLayout *DL,
                                        const GetElementPtrInst *Gep) {
   unsigned LastOperand = Gep->getNumOperands() - 1;
   unsigned GEPAllocSize = DL->getTypeAllocSize(
@@ -3298,7 +3298,7 @@ bool LoopVectorizationLegality::canVectorize() {
   return true;
 }
 
-static Type *convertPointerToIntegerType(DataLayout &DL, Type *Ty) {
+static Type *convertPointerToIntegerType(const DataLayout &DL, Type *Ty) {
   if (Ty->isPointerTy())
     return DL.getIntPtrType(Ty);
 
@@ -3310,7 +3310,7 @@ static Type *convertPointerToIntegerType(DataLayout &DL, Type *Ty) {
   return Ty;
 }
 
-static Type* getWiderType(DataLayout &DL, Type *Ty0, Type *Ty1) {
+static Type* getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
   Ty0 = convertPointerToIntegerType(DL, Ty0);
   Ty1 = convertPointerToIntegerType(DL, Ty1);
   if (Ty0->getScalarSizeInBits() > Ty1->getScalarSizeInBits())
@@ -3508,7 +3508,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
 ///\brief Remove GEPs whose indices but the last one are loop invariant and
 /// return the induction operand of the gep pointer.
 static Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE,
-                                 DataLayout *DL, Loop *Lp) {
+                                 const DataLayout *DL, Loop *Lp) {
   GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
   if (!GEP)
     return Ptr;
@@ -3544,7 +3544,7 @@ static Value *getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
 /// Looks for symbolic strides "a[i*stride]". Returns the symbolic stride as a
 /// pointer to the Value, or null otherwise.
 static Value *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE,
-                                   DataLayout *DL, Loop *Lp) {
+                                   const DataLayout *DL, Loop *Lp) {
   const PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
   if (!PtrTy || PtrTy->isAggregateType())
     return 0;
@@ -3679,7 +3679,7 @@ public:
   /// \brief Set of potential dependent memory accesses.
   typedef EquivalenceClasses DepCandidates;
 
-  AccessAnalysis(DataLayout *Dl, DepCandidates &DA) :
+  AccessAnalysis(const DataLayout *Dl, DepCandidates &DA) :
    DL(Dl), DepCands(DA), AreAllWritesIdentified(true),
    AreAllReadsIdentified(true), IsRTCheckNeeded(false) {}
 
@@ -3745,7 +3745,7 @@ private:
   /// Set of underlying objects already written to.
   SmallPtrSet WriteObjects;
 
-  DataLayout *DL;
+  const DataLayout *DL;
 
   /// Sets of potentially dependent accesses - members of one set share an
   /// underlying pointer. The set "CheckDeps" identfies which sets really need a
@@ -3772,7 +3772,7 @@ static bool hasComputableBounds(ScalarEvolution *SE, ValueToValueMap &Strides,
 
 /// \brief Check the stride of the pointer and ensure that it does not wrap in
 /// the address space.
-static int isStridedPtr(ScalarEvolution *SE, DataLayout *DL, Value *Ptr,
+static int isStridedPtr(ScalarEvolution *SE, const DataLayout *DL, Value *Ptr,
                        const Loop *Lp, ValueToValueMap &StridesMap);
 
 bool AccessAnalysis::canCheckPtrAtRT(
@@ -3992,7 +3992,7 @@ public:
   typedef PointerIntPair MemAccessInfo;
   typedef SmallPtrSet MemAccessInfoSet;
 
-  MemoryDepChecker(ScalarEvolution *Se, DataLayout *Dl, const Loop *L)
+  MemoryDepChecker(ScalarEvolution *Se, const DataLayout *Dl, const Loop *L)
      : SE(Se), DL(Dl), InnermostLoop(L), AccessIdx(0),
        ShouldRetryWithRuntimeCheck(false) {}
 
@@ -4030,7 +4030,7 @@ public:
 
 private:
   ScalarEvolution *SE;
-  DataLayout *DL;
+  const DataLayout *DL;
   const Loop *InnermostLoop;
 
   /// \brief Maps access locations (ptr, read/write) to program order.
@@ -4079,7 +4079,7 @@ static bool isInBoundsGep(Value *Ptr) {
 }
 
 /// \brief Check whether the access through \p Ptr has a constant stride.
-static int isStridedPtr(ScalarEvolution *SE, DataLayout *DL, Value *Ptr,
+static int isStridedPtr(ScalarEvolution *SE, const DataLayout *DL, Value *Ptr,
                        const Loop *Lp, ValueToValueMap &StridesMap) {
   const Type *Ty = Ptr->getType();
   assert(Ty->isPointerTy() && "Unexpected non-ptr");
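
For readers skimming the patch: every signature touched above only reads from DataLayout (type sizes, alignments, pointer widths), which is why a const pointer or reference suffices everywhere. Below is a minimal sketch of that usage pattern; the helper name and surrounding code are illustrative only and are not part of this commit.

// Illustrative only: a helper that, like the functions in this patch, needs
// nothing beyond read-only DataLayout queries, so it takes a const pointer.
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"

static uint64_t storeSizeInBytes(const llvm::StoreInst *SI,
                                 const llvm::DataLayout *DL) {
  // DataLayout may be unavailable; several functions above guard on !DL too.
  if (!DL)
    return 0;
  llvm::Type *Ty = SI->getValueOperand()->getType();
  return DL->getTypeStoreSize(Ty); // read-only query; const DataLayout is enough
}

Since none of the call sites mutate the layout, the change is pure const-propagation, consistent with the "No functionality change" note in the commit message.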