diff --git a/lib/Analysis/Lint.cpp b/lib/Analysis/Lint.cpp
index 72ae7abd551..8ee9b8af51e 100644
--- a/lib/Analysis/Lint.cpp
+++ b/lib/Analysis/Lint.cpp
@@ -16,7 +16,7 @@
 // those aren't comprehensive either. Second, many conditions cannot be
 // checked statically. This pass does no dynamic instrumentation, so it
 // can't check for all possible problems.
-// 
+//
 // Another limitation is that it assumes all code will be executed. A store
 // through a null pointer in a basic block which is never reached is harmless,
 // but this pass will warn about it anyway. This is the main reason why most
@@ -26,33 +26,34 @@
 // less obvious. If an optimization pass appears to be introducing a warning,
 // it may be that the optimization pass is merely exposing an existing
 // condition in the code.
-// 
+//
 // This code may be run before instcombine. In many cases, instcombine checks
 // for the same kinds of things and turns instructions with undefined behavior
 // into unreachable (or equivalent). Because of this, this pass makes some
 // effort to look through bitcasts and so on.
-// 
+//
 //===----------------------------------------------------------------------===//
 
-#include "llvm/Analysis/Passes.h"
+#include "llvm/Analysis/Lint.h"
+#include "llvm/ADT/STLExtras.h"
 #include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/AssumptionTracker.h"
 #include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/Dominators.h"
-#include "llvm/Analysis/Lint.h"
+#include "llvm/Analysis/InstructionSimplify.h"
 #include "llvm/Analysis/Loads.h"
+#include "llvm/Analysis/Passes.h"
 #include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Assembly/Writer.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/IntrinsicInst.h"
 #include "llvm/Pass.h"
 #include "llvm/PassManager.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/Function.h"
-#include "llvm/Support/CallSite.h"
 #include "llvm/Support/Debug.h"
-#include "llvm/Support/InstVisitor.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/Target/TargetLibraryInfo.h"
 using namespace llvm;
 
 namespace {
@@ -71,7 +72,7 @@
     void visitCallSite(CallSite CS);
     void visitMemoryReference(Instruction &I, Value *Ptr,
                               uint64_t Size, unsigned Align,
-                              const Type *Ty, unsigned Flags);
+                              Type *Ty, unsigned Flags);
 
     void visitCallInst(CallInst &I);
     void visitInvokeInst(InvokeInst &I);
@@ -96,13 +97,15 @@
 
     Value *findValue(Value *V, bool OffsetOk) const;
     Value *findValueImpl(Value *V, bool OffsetOk,
-                         SmallPtrSet<Value *, 4> &Visited) const;
+                         SmallPtrSetImpl<Value *> &Visited) const;
 
   public:
     Module *Mod;
     AliasAnalysis *AA;
+    AssumptionTracker *AT;
     DominatorTree *DT;
-    TargetData *TD;
+    const DataLayout *DL;
+    TargetLibraryInfo *TLI;
 
     std::string Messages;
     raw_string_ostream MessagesStr;
@@ -112,21 +115,23 @@
       initializeLintPass(*PassRegistry::getPassRegistry());
     }
 
-    virtual bool runOnFunction(Function &F);
+    bool runOnFunction(Function &F) override;
 
-    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+    void getAnalysisUsage(AnalysisUsage &AU) const override {
       AU.setPreservesAll();
       AU.addRequired<AliasAnalysis>();
-      AU.addRequired<DominatorTree>();
+      AU.addRequired<AssumptionTracker>();
+      AU.addRequired<TargetLibraryInfo>();
+      AU.addRequired<DominatorTreeWrapperPass>();
     }
-    virtual void print(raw_ostream &O, const Module *M) const {}
+    void print(raw_ostream &O, const Module *M) const override {}
 
     void WriteValue(const Value *V) {
       if (!V) return;
       if (isa<Instruction>(V)) {
         MessagesStr << *V << '\n';
       } else {
-        WriteAsOperand(MessagesStr, V, true, Mod);
+        V->printAsOperand(MessagesStr, true, Mod);
         MessagesStr << '\n';
       }
     }
@@ -135,8 +140,8 @@
     // that failed.  This provides a nice place to put a breakpoint if you want
     // to see why something is not correct.
     void CheckFailed(const Twine &Message,
-                     const Value *V1 = 0, const Value *V2 = 0,
-                     const Value *V3 = 0, const Value *V4 = 0) {
+                     const Value *V1 = nullptr, const Value *V2 = nullptr,
+                     const Value *V3 = nullptr, const Value *V4 = nullptr) {
       MessagesStr << Message.str() << "\n";
       WriteValue(V1);
       WriteValue(V2);
@@ -149,7 +154,9 @@
 char Lint::ID = 0;
 INITIALIZE_PASS_BEGIN(Lint, "lint", "Statically lint-checks LLVM IR",
                       false, true)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(AssumptionTracker)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
 INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
 INITIALIZE_PASS_END(Lint, "lint", "Statically lint-checks LLVM IR",
                     false, true)
@@ -172,8 +179,11 @@ INITIALIZE_PASS_END(Lint, "lint", "Statically lint-checks LLVM IR",
 bool Lint::runOnFunction(Function &F) {
   Mod = F.getParent();
   AA = &getAnalysis<AliasAnalysis>();
-  DT = &getAnalysis<DominatorTree>();
-  TD = getAnalysisIfAvailable<TargetData>();
+  AT = &getAnalysis<AssumptionTracker>();
+  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+  DL = DLP ? &DLP->getDataLayout() : nullptr;
+  TLI = &getAnalysis<TargetLibraryInfo>();
   visit(F);
   dbgs() << MessagesStr.str();
   Messages.clear();
@@ -194,15 +204,15 @@ void Lint::visitCallSite(CallSite CS) {
   Value *Callee = CS.getCalledValue();
 
   visitMemoryReference(I, Callee, AliasAnalysis::UnknownSize,
-                       0, 0, MemRef::Callee);
+                       0, nullptr, MemRef::Callee);
 
   if (Function *F = dyn_cast<Function>(findValue(Callee, /*OffsetOk=*/false))) {
     Assert1(CS.getCallingConv() == F->getCallingConv(),
             "Undefined behavior: Caller and callee calling convention differ",
             &I);
 
-    const FunctionType *FT = F->getFunctionType();
-    unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
+    FunctionType *FT = F->getFunctionType();
+    unsigned NumActualArgs = CS.arg_size();
 
     Assert1(FT->isVarArg() ?
             FT->getNumParams() <= NumActualArgs :
@@ -240,10 +250,10 @@ void Lint::visitCallSite(CallSite CS) {
 
       // Check that an sret argument points to valid memory.
       if (Formal->hasStructRetAttr() && Actual->getType()->isPointerTy()) {
-        const Type *Ty =
+        Type *Ty =
           cast<PointerType>(Formal->getType())->getElementType();
         visitMemoryReference(I, Actual, AA->getTypeStoreSize(Ty),
-                             TD ? TD->getABITypeAlignment(Ty) : 0,
+                             DL ? DL->getABITypeAlignment(Ty) : 0,
                              Ty, MemRef::Read | MemRef::Write);
       }
     }
@@ -270,10 +280,10 @@ void Lint::visitCallSite(CallSite CS) {
       MemCpyInst *MCI = cast<MemCpyInst>(&I);
       // TODO: If the size is known, use it.
       visitMemoryReference(I, MCI->getDest(), AliasAnalysis::UnknownSize,
-                           MCI->getAlignment(), 0,
+                           MCI->getAlignment(), nullptr,
                            MemRef::Write);
      visitMemoryReference(I, MCI->getSource(), AliasAnalysis::UnknownSize,
-                           MCI->getAlignment(), 0,
+                           MCI->getAlignment(), nullptr,
                            MemRef::Read);
 
       // Check that the memcpy arguments don't overlap. The AliasAnalysis API
@@ -294,10 +304,10 @@ void Lint::visitCallSite(CallSite CS) {
       MemMoveInst *MMI = cast<MemMoveInst>(&I);
       // TODO: If the size is known, use it.
       visitMemoryReference(I, MMI->getDest(), AliasAnalysis::UnknownSize,
-                           MMI->getAlignment(), 0,
+                           MMI->getAlignment(), nullptr,
                            MemRef::Write);
       visitMemoryReference(I, MMI->getSource(), AliasAnalysis::UnknownSize,
-                           MMI->getAlignment(), 0,
+                           MMI->getAlignment(), nullptr,
                            MemRef::Read);
       break;
     }
@@ -305,7 +315,7 @@ void Lint::visitCallSite(CallSite CS) {
       MemSetInst *MSI = cast<MemSetInst>(&I);
       // TODO: If the size is known, use it.
       visitMemoryReference(I, MSI->getDest(), AliasAnalysis::UnknownSize,
-                           MSI->getAlignment(), 0,
+                           MSI->getAlignment(), nullptr,
                            MemRef::Write);
       break;
     }
@@ -316,17 +326,17 @@ void Lint::visitCallSite(CallSite CS) {
               &I);
 
       visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
-                           0, 0, MemRef::Read | MemRef::Write);
+                           0, nullptr, MemRef::Read | MemRef::Write);
       break;
     case Intrinsic::vacopy:
       visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
-                           0, 0, MemRef::Write);
+                           0, nullptr, MemRef::Write);
       visitMemoryReference(I, CS.getArgument(1), AliasAnalysis::UnknownSize,
-                           0, 0, MemRef::Read);
+                           0, nullptr, MemRef::Read);
       break;
     case Intrinsic::vaend:
       visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
-                           0, 0, MemRef::Read | MemRef::Write);
+                           0, nullptr, MemRef::Read | MemRef::Write);
       break;
 
     case Intrinsic::stackrestore:
@@ -334,7 +344,7 @@ void Lint::visitCallSite(CallSite CS) {
       // stack pointer, which the compiler may read from or write to
       // at any time, so check it for both readability and writeability.
      visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
-                           0, 0, MemRef::Read | MemRef::Write);
+                           0, nullptr, MemRef::Read | MemRef::Write);
       break;
     }
 }
@@ -364,7 +374,7 @@ void Lint::visitReturnInst(ReturnInst &I) {
 // TODO: Check readnone/readonly function attributes.
 void Lint::visitMemoryReference(Instruction &I,
                                 Value *Ptr, uint64_t Size, unsigned Align,
-                                const Type *Ty, unsigned Flags) {
+                                Type *Ty, unsigned Flags) {
   // If no memory is being referenced, it doesn't matter if the pointer
   // is valid.
   if (Size == 0)
@@ -406,17 +416,50 @@ void Lint::visitMemoryReference(Instruction &I,
             "Undefined behavior: Branch to non-blockaddress", &I);
   }
 
-  if (TD) {
-    if (Align == 0 && Ty) Align = TD->getABITypeAlignment(Ty);
-
-    if (Align != 0) {
-      unsigned BitWidth = TD->getTypeSizeInBits(Ptr->getType());
-      APInt Mask = APInt::getAllOnesValue(BitWidth),
-            KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
-      ComputeMaskedBits(Ptr, Mask, KnownZero, KnownOne, TD);
-      Assert1(!(KnownOne & APInt::getLowBitsSet(BitWidth, Log2_32(Align))),
-              "Undefined behavior: Memory reference address is misaligned", &I);
-    }
+  // Check for buffer overflows and misalignment.
+  // Only handles memory references that read/write something simple like an
+  // alloca instruction or a global variable.
+  int64_t Offset = 0;
+  if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, DL)) {
+    // OK, so the access is to a constant offset from Ptr.  Check that Ptr is
+    // something we can handle and if so extract the size of this base object
+    // along with its alignment.
+    uint64_t BaseSize = AliasAnalysis::UnknownSize;
+    unsigned BaseAlign = 0;
+
+    if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
+      Type *ATy = AI->getAllocatedType();
+      if (DL && !AI->isArrayAllocation() && ATy->isSized())
+        BaseSize = DL->getTypeAllocSize(ATy);
+      BaseAlign = AI->getAlignment();
+      if (DL && BaseAlign == 0 && ATy->isSized())
+        BaseAlign = DL->getABITypeAlignment(ATy);
+    } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
+      // If the global may be defined differently in another compilation unit
+      // then don't warn about funky memory accesses.
+      if (GV->hasDefinitiveInitializer()) {
+        Type *GTy = GV->getType()->getElementType();
+        if (DL && GTy->isSized())
+          BaseSize = DL->getTypeAllocSize(GTy);
+        BaseAlign = GV->getAlignment();
+        if (DL && BaseAlign == 0 && GTy->isSized())
+          BaseAlign = DL->getABITypeAlignment(GTy);
+      }
+    }
+
+    // Accesses from before the start or after the end of the object are not
+    // defined.
+    Assert1(Size == AliasAnalysis::UnknownSize ||
+            BaseSize == AliasAnalysis::UnknownSize ||
+            (Offset >= 0 && Offset + Size <= BaseSize),
+            "Undefined behavior: Buffer overflow", &I);
+
+    // Accesses that say that the memory is more aligned than it is are not
+    // defined.
+    if (DL && Align == 0 && Ty && Ty->isSized())
+      Align = DL->getABITypeAlignment(Ty);
+    Assert1(!BaseAlign || Align <= MinAlign(BaseAlign, Offset),
+            "Undefined behavior: Memory reference address is misaligned", &I);
   }
 }
 
@@ -466,34 +509,63 @@ void Lint::visitShl(BinaryOperator &I) {
           "Undefined result: Shift count out of range", &I);
 }
 
-static bool isZero(Value *V, TargetData *TD) {
+static bool isZero(Value *V, const DataLayout *DL, DominatorTree *DT,
+                   AssumptionTracker *AT) {
   // Assume undef could be zero.
-  if (isa<UndefValue>(V)) return true;
+  if (isa<UndefValue>(V))
+    return true;
+
+  VectorType *VecTy = dyn_cast<VectorType>(V->getType());
+  if (!VecTy) {
+    unsigned BitWidth = V->getType()->getIntegerBitWidth();
+    APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
+    computeKnownBits(V, KnownZero, KnownOne, DL,
+                     0, AT, dyn_cast<Instruction>(V), DT);
+    return KnownZero.isAllOnesValue();
+  }
+
+  // Per-component check doesn't work with zeroinitializer
+  Constant *C = dyn_cast<Constant>(V);
+  if (!C)
+    return false;
+
+  if (C->isZeroValue())
+    return true;
+
+  // For a vector, KnownZero will only be true if all values are zero, so check
+  // this per component
+  unsigned BitWidth = VecTy->getElementType()->getIntegerBitWidth();
+  for (unsigned I = 0, N = VecTy->getNumElements(); I != N; ++I) {
+    Constant *Elem = C->getAggregateElement(I);
+    if (isa<UndefValue>(Elem))
+      return true;
+
+    APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
+    computeKnownBits(Elem, KnownZero, KnownOne, DL);
+    if (KnownZero.isAllOnesValue())
+      return true;
+  }
 
-  unsigned BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
-  APInt Mask = APInt::getAllOnesValue(BitWidth),
-        KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
-  ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD);
-  return KnownZero.isAllOnesValue();
+  return false;
 }
 
 void Lint::visitSDiv(BinaryOperator &I) {
-  Assert1(!isZero(I.getOperand(1), TD),
+  Assert1(!isZero(I.getOperand(1), DL, DT, AT),
           "Undefined behavior: Division by zero", &I);
 }
 
 void Lint::visitUDiv(BinaryOperator &I) {
-  Assert1(!isZero(I.getOperand(1), TD),
+  Assert1(!isZero(I.getOperand(1), DL, DT, AT),
           "Undefined behavior: Division by zero", &I);
 }
 
 void Lint::visitSRem(BinaryOperator &I) {
-  Assert1(!isZero(I.getOperand(1), TD),
+  Assert1(!isZero(I.getOperand(1), DL, DT, AT),
           "Undefined behavior: Division by zero", &I);
 }
 
 void Lint::visitURem(BinaryOperator &I) {
-  Assert1(!isZero(I.getOperand(1), TD),
+  Assert1(!isZero(I.getOperand(1), DL, DT, AT),
           "Undefined behavior: Division by zero", &I);
 }
 
@@ -507,13 +579,13 @@ void Lint::visitAllocaInst(AllocaInst &I) {
 }
 
 void Lint::visitVAArgInst(VAArgInst &I) {
-  visitMemoryReference(I, I.getOperand(0), AliasAnalysis::UnknownSize, 0, 0,
-                       MemRef::Read | MemRef::Write);
+  visitMemoryReference(I, I.getOperand(0), AliasAnalysis::UnknownSize, 0,
+                       nullptr, MemRef::Read | MemRef::Write);
 }
 
 void Lint::visitIndirectBrInst(IndirectBrInst &I) {
-  visitMemoryReference(I, I.getAddress(), AliasAnalysis::UnknownSize, 0, 0,
-                       MemRef::Branchee);
+  visitMemoryReference(I, I.getAddress(), AliasAnalysis::UnknownSize, 0,
+                       nullptr, MemRef::Branchee);
 
   Assert1(I.getNumDestinations() != 0,
           "Undefined behavior: indirectbr with no destinations", &I);
@@ -538,7 +610,7 @@ void Lint::visitInsertElementInst(InsertElementInst &I) {
 void Lint::visitUnreachableInst(UnreachableInst &I) {
   // This isn't undefined behavior, it's merely suspicious.
   Assert1(&I == I.getParent()->begin() ||
-          prior(BasicBlock::iterator(&I))->mayHaveSideEffects(),
+          std::prev(BasicBlock::iterator(&I))->mayHaveSideEffects(),
           "Unusual: unreachable immediately preceded by instruction without "
           "side effects", &I);
 }
@@ -557,9 +629,9 @@ Value *Lint::findValue(Value *V, bool OffsetOk) const {
 
 /// findValueImpl - Implementation helper for findValue.
 Value *Lint::findValueImpl(Value *V, bool OffsetOk,
-                           SmallPtrSet<Value *, 4> &Visited) const {
+                           SmallPtrSetImpl<Value *> &Visited) const {
   // Detect self-referential values.
-  if (!Visited.insert(V))
+  if (!Visited.insert(V).second)
     return UndefValue::get(V->getType());
 
   // TODO: Look through sext or zext cast, when the result is known to
@@ -567,13 +639,14 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
   // TODO: Look through eliminable cast pairs.
   // TODO: Look through calls with unique return values.
   // TODO: Look through vector insert/extract/shuffle.
-  V = OffsetOk ? V->getUnderlyingObject() : V->stripPointerCasts();
+  V = OffsetOk ? GetUnderlyingObject(V, DL) : V->stripPointerCasts();
   if (LoadInst *L = dyn_cast<LoadInst>(V)) {
     BasicBlock::iterator BBI = L;
     BasicBlock *BB = L->getParent();
     SmallPtrSet<BasicBlock *, 4> VisitedBlocks;
     for (;;) {
-      if (!VisitedBlocks.insert(BB)) break;
+      if (!VisitedBlocks.insert(BB).second)
+        break;
       if (Value *U = FindAvailableLoadedValue(L->getPointerOperand(),
                                               BB, BBI, 6, AA))
         return findValueImpl(U, OffsetOk, Visited);
@@ -587,13 +660,11 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
       if (W != V)
         return findValueImpl(W, OffsetOk, Visited);
   } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
-    if (CI->isNoopCast(TD ? TD->getIntPtrType(V->getContext()) :
-                            Type::getInt64Ty(V->getContext())))
+    if (CI->isNoopCast(DL))
      return findValueImpl(CI->getOperand(0), OffsetOk, Visited);
   } else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) {
     if (Value *W = FindInsertedValue(Ex->getAggregateOperand(),
-                                     Ex->idx_begin(),
-                                     Ex->idx_end()))
+                                     Ex->getIndices()))
       if (W != V)
         return findValueImpl(W, OffsetOk, Visited);
   } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
@@ -602,14 +673,12 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
     if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),
                              CE->getOperand(0)->getType(),
                              CE->getType(),
-                             TD ? TD->getIntPtrType(V->getContext()) :
+                             DL ? DL->getIntPtrType(V->getType()) :
                                   Type::getInt64Ty(V->getContext())))
       return findValueImpl(CE->getOperand(0), OffsetOk, Visited);
     } else if (CE->getOpcode() == Instruction::ExtractValue) {
-      const SmallVector<unsigned, 4> &Indices = CE->getIndices();
-      if (Value *W = FindInsertedValue(CE->getOperand(0),
-                                       Indices.begin(),
-                                       Indices.end()))
+      ArrayRef<unsigned> Indices = CE->getIndices();
+      if (Value *W = FindInsertedValue(CE->getOperand(0), Indices))
        if (W != V)
          return findValueImpl(W, OffsetOk, Visited);
     }
@@ -617,10 +686,10 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
 
   // As a last resort, try SimplifyInstruction or constant folding.
   if (Instruction *Inst = dyn_cast<Instruction>(V)) {
-    if (Value *W = SimplifyInstruction(Inst, TD, DT))
+    if (Value *W = SimplifyInstruction(Inst, DL, TLI, DT, AT))
       return findValueImpl(W, OffsetOk, Visited);
   } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
-    if (Value *W = ConstantFoldConstantExpression(CE, TD))
+    if (Value *W = ConstantFoldConstantExpression(CE, DL, TLI))
       if (W != V)
         return findValueImpl(W, OffsetOk, Visited);
   }