diff --git a/lib/Analysis/Lint.cpp b/lib/Analysis/Lint.cpp
index 7bd945733b4..5365fe5a3ee 100644
--- a/lib/Analysis/Lint.cpp
+++ b/lib/Analysis/Lint.cpp
@@ -16,7 +16,7 @@
 // those aren't comprehensive either. Second, many conditions cannot be
 // checked statically. This pass does no dynamic instrumentation, so it
 // can't check for all possible problems.
-// 
+//
 // Another limitation is that it assumes all code will be executed. A store
 // through a null pointer in a basic block which is never reached is harmless,
 // but this pass will warn about it anyway. This is the main reason why most
@@ -26,34 +26,33 @@
 // less obvious. If an optimization pass appears to be introducing a warning,
 // it may be that the optimization pass is merely exposing an existing
 // condition in the code.
-// 
+//
 // This code may be run before instcombine. In many cases, instcombine checks
 // for the same kinds of things and turns instructions with undefined behavior
 // into unreachable (or equivalent). Because of this, this pass makes some
 // effort to look through bitcasts and so on.
-// 
+//
 //===----------------------------------------------------------------------===//

-#include "llvm/Analysis/Passes.h"
+#include "llvm/Analysis/Lint.h"
+#include "llvm/ADT/STLExtras.h"
 #include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/InstructionSimplify.h"
 #include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/Dominators.h"
-#include "llvm/Analysis/Lint.h"
+#include "llvm/Analysis/InstructionSimplify.h"
 #include "llvm/Analysis/Loads.h"
+#include "llvm/Analysis/Passes.h"
 #include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Assembly/Writer.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/IntrinsicInst.h"
 #include "llvm/Pass.h"
 #include "llvm/PassManager.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/Function.h"
-#include "llvm/Support/CallSite.h"
 #include "llvm/Support/Debug.h"
-#include "llvm/Support/InstVisitor.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/Target/TargetLibraryInfo.h"
 using namespace llvm;

 namespace {
@@ -103,7 +102,7 @@ namespace {
     Module *Mod;
     AliasAnalysis *AA;
     DominatorTree *DT;
-    TargetData *TD;
+    const DataLayout *DL;
     TargetLibraryInfo *TLI;

     std::string Messages;
@@ -114,22 +113,22 @@ namespace {
       initializeLintPass(*PassRegistry::getPassRegistry());
     }

-    virtual bool runOnFunction(Function &F);
+    bool runOnFunction(Function &F) override;

-    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+    void getAnalysisUsage(AnalysisUsage &AU) const override {
       AU.setPreservesAll();
       AU.addRequired<AliasAnalysis>();
       AU.addRequired<TargetLibraryInfo>();
-      AU.addRequired<DominatorTree>();
+      AU.addRequired<DominatorTreeWrapperPass>();
     }
-    virtual void print(raw_ostream &O, const Module *M) const {}
+    void print(raw_ostream &O, const Module *M) const override {}

     void WriteValue(const Value *V) {
       if (!V) return;
       if (isa<MDNode>(V)) {
         MessagesStr << *V << '\n';
       } else {
-        WriteAsOperand(MessagesStr, V, true, Mod);
+        V->printAsOperand(MessagesStr, true, Mod);
         MessagesStr << '\n';
       }
     }
@@ -138,8 +137,8 @@ namespace {
     // that failed.  This provides a nice place to put a breakpoint if you want
     // to see why something is not correct.
     void CheckFailed(const Twine &Message,
-                     const Value *V1 = 0, const Value *V2 = 0,
-                     const Value *V3 = 0, const Value *V4 = 0) {
+                     const Value *V1 = nullptr, const Value *V2 = nullptr,
+                     const Value *V3 = nullptr, const Value *V4 = nullptr) {
       MessagesStr << Message.str() << "\n";
       WriteValue(V1);
       WriteValue(V2);
@@ -153,7 +152,7 @@ char Lint::ID = 0;
 INITIALIZE_PASS_BEGIN(Lint, "lint", "Statically lint-checks LLVM IR",
                       false, true)
 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
-INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
 INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
 INITIALIZE_PASS_END(Lint, "lint", "Statically lint-checks LLVM IR",
                     false, true)
@@ -176,8 +175,9 @@ INITIALIZE_PASS_END(Lint, "lint", "Statically lint-checks LLVM IR",
 bool Lint::runOnFunction(Function &F) {
   Mod = F.getParent();
   AA = &getAnalysis<AliasAnalysis>();
-  DT = &getAnalysis<DominatorTree>();
-  TD = getAnalysisIfAvailable<TargetData>();
+  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+  DL = DLP ? &DLP->getDataLayout() : nullptr;
   TLI = &getAnalysis<TargetLibraryInfo>();
   visit(F);
   dbgs() << MessagesStr.str();
@@ -199,7 +199,7 @@ void Lint::visitCallSite(CallSite CS) {
   Value *Callee = CS.getCalledValue();

   visitMemoryReference(I, Callee, AliasAnalysis::UnknownSize,
-                       0, 0, MemRef::Callee);
+                       0, nullptr, MemRef::Callee);

   if (Function *F = dyn_cast<Function>(findValue(Callee, /*OffsetOk=*/false))) {
     Assert1(CS.getCallingConv() == F->getCallingConv(),
@@ -207,7 +207,7 @@ void Lint::visitCallSite(CallSite CS) {
             &I);

     FunctionType *FT = F->getFunctionType();
-    unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
+    unsigned NumActualArgs = CS.arg_size();

     Assert1(FT->isVarArg() ?
               FT->getNumParams() <= NumActualArgs :
@@ -248,7 +248,7 @@ void Lint::visitCallSite(CallSite CS) {
         Type *Ty =
           cast<PointerType>(Formal->getType())->getElementType();
         visitMemoryReference(I, Actual, AA->getTypeStoreSize(Ty),
-                             TD ? TD->getABITypeAlignment(Ty) : 0,
+                             DL ? DL->getABITypeAlignment(Ty) : 0,
                              Ty, MemRef::Read | MemRef::Write);
       }
     }
@@ -275,10 +275,10 @@ void Lint::visitCallSite(CallSite CS) {
       MemCpyInst *MCI = cast<MemCpyInst>(&I);
       // TODO: If the size is known, use it.
       visitMemoryReference(I, MCI->getDest(), AliasAnalysis::UnknownSize,
-                           MCI->getAlignment(), 0,
+                           MCI->getAlignment(), nullptr,
                            MemRef::Write);
       visitMemoryReference(I, MCI->getSource(), AliasAnalysis::UnknownSize,
-                           MCI->getAlignment(), 0,
+                           MCI->getAlignment(), nullptr,
                            MemRef::Read);

       // Check that the memcpy arguments don't overlap. The AliasAnalysis API
@@ -299,10 +299,10 @@ void Lint::visitCallSite(CallSite CS) {
       MemMoveInst *MMI = cast<MemMoveInst>(&I);
       // TODO: If the size is known, use it.
       visitMemoryReference(I, MMI->getDest(), AliasAnalysis::UnknownSize,
-                           MMI->getAlignment(), 0,
+                           MMI->getAlignment(), nullptr,
                            MemRef::Write);
       visitMemoryReference(I, MMI->getSource(), AliasAnalysis::UnknownSize,
-                           MMI->getAlignment(), 0,
+                           MMI->getAlignment(), nullptr,
                            MemRef::Read);
       break;
     }
@@ -310,7 +310,7 @@ void Lint::visitCallSite(CallSite CS) {
       MemSetInst *MSI = cast<MemSetInst>(&I);
       // TODO: If the size is known, use it.
       visitMemoryReference(I, MSI->getDest(), AliasAnalysis::UnknownSize,
-                           MSI->getAlignment(), 0,
+                           MSI->getAlignment(), nullptr,
                            MemRef::Write);
       break;
     }
@@ -321,17 +321,17 @@ void Lint::visitCallSite(CallSite CS) {
              &I);

       visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
-                           0, 0, MemRef::Read | MemRef::Write);
+                           0, nullptr, MemRef::Read | MemRef::Write);
       break;
     case Intrinsic::vacopy:
       visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
-                           0, 0, MemRef::Write);
+                           0, nullptr, MemRef::Write);
       visitMemoryReference(I, CS.getArgument(1), AliasAnalysis::UnknownSize,
-                           0, 0, MemRef::Read);
+                           0, nullptr, MemRef::Read);
       break;
     case Intrinsic::vaend:
       visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
-                           0, 0, MemRef::Read | MemRef::Write);
+                           0, nullptr, MemRef::Read | MemRef::Write);
       break;

     case Intrinsic::stackrestore:
@@ -339,7 +339,7 @@ void Lint::visitCallSite(CallSite CS) {
       // stack pointer, which the compiler may read from or write to
       // at any time, so check it for both readability and writeability.
       visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
-                           0, 0, MemRef::Read | MemRef::Write);
+                           0, nullptr, MemRef::Read | MemRef::Write);
       break;
     }
 }
@@ -412,51 +412,49 @@ void Lint::visitMemoryReference(Instruction &I,
   }

   // Check for buffer overflows and misalignment.
-  if (TD) {
-    // Only handles memory references that read/write something simple like an
-    // alloca instruction or a global variable.
-    int64_t Offset = 0;
-    if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, *TD)) {
-      // OK, so the access is to a constant offset from Ptr. Check that Ptr is
-      // something we can handle and if so extract the size of this base object
-      // along with its alignment.
-      uint64_t BaseSize = AliasAnalysis::UnknownSize;
-      unsigned BaseAlign = 0;
-
-      if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
-        Type *ATy = AI->getAllocatedType();
-        if (!AI->isArrayAllocation() && ATy->isSized())
-          BaseSize = TD->getTypeAllocSize(ATy);
-        BaseAlign = AI->getAlignment();
-        if (BaseAlign == 0 && ATy->isSized())
-          BaseAlign = TD->getABITypeAlignment(ATy);
-      } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
-        // If the global may be defined differently in another compilation unit
-        // then don't warn about funky memory accesses.
-        if (GV->hasDefinitiveInitializer()) {
-          Type *GTy = GV->getType()->getElementType();
-          if (GTy->isSized())
-            BaseSize = TD->getTypeAllocSize(GTy);
-          BaseAlign = GV->getAlignment();
-          if (BaseAlign == 0 && GTy->isSized())
-            BaseAlign = TD->getABITypeAlignment(GTy);
-        }
+  // Only handles memory references that read/write something simple like an
+  // alloca instruction or a global variable.
+  int64_t Offset = 0;
+  if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, DL)) {
+    // OK, so the access is to a constant offset from Ptr. Check that Ptr is
+    // something we can handle and if so extract the size of this base object
+    // along with its alignment.
+    uint64_t BaseSize = AliasAnalysis::UnknownSize;
+    unsigned BaseAlign = 0;
+
+    if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
+      Type *ATy = AI->getAllocatedType();
+      if (DL && !AI->isArrayAllocation() && ATy->isSized())
+        BaseSize = DL->getTypeAllocSize(ATy);
+      BaseAlign = AI->getAlignment();
+      if (DL && BaseAlign == 0 && ATy->isSized())
+        BaseAlign = DL->getABITypeAlignment(ATy);
+    } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
+      // If the global may be defined differently in another compilation unit
+      // then don't warn about funky memory accesses.
+      if (GV->hasDefinitiveInitializer()) {
+        Type *GTy = GV->getType()->getElementType();
+        if (DL && GTy->isSized())
+          BaseSize = DL->getTypeAllocSize(GTy);
+        BaseAlign = GV->getAlignment();
+        if (DL && BaseAlign == 0 && GTy->isSized())
+          BaseAlign = DL->getABITypeAlignment(GTy);
       }
-
-      // Accesses from before the start or after the end of the object are not
-      // defined.
-      Assert1(Size == AliasAnalysis::UnknownSize ||
-              BaseSize == AliasAnalysis::UnknownSize ||
-              (Offset >= 0 && Offset + Size <= BaseSize),
-              "Undefined behavior: Buffer overflow", &I);
-
-      // Accesses that say that the memory is more aligned than it is are not
-      // defined.
-      if (Align == 0 && Ty && Ty->isSized())
-        Align = TD->getABITypeAlignment(Ty);
-      Assert1(!BaseAlign || Align <= MinAlign(BaseAlign, Offset),
-              "Undefined behavior: Memory reference address is misaligned", &I);
     }
+
+    // Accesses from before the start or after the end of the object are not
+    // defined.
+    Assert1(Size == AliasAnalysis::UnknownSize ||
+            BaseSize == AliasAnalysis::UnknownSize ||
+            (Offset >= 0 && Offset + Size <= BaseSize),
+            "Undefined behavior: Buffer overflow", &I);
+
+    // Accesses that say that the memory is more aligned than it is are not
+    // defined.
+    if (DL && Align == 0 && Ty && Ty->isSized())
+      Align = DL->getABITypeAlignment(Ty);
+    Assert1(!BaseAlign || Align <= MinAlign(BaseAlign, Offset),
+            "Undefined behavior: Memory reference address is misaligned", &I);
   }
 }
@@ -506,33 +504,61 @@ void Lint::visitShl(BinaryOperator &I) {
           "Undefined result: Shift count out of range", &I);
 }

-static bool isZero(Value *V, TargetData *TD) {
+static bool isZero(Value *V, const DataLayout *DL) {
   // Assume undef could be zero.
-  if (isa<UndefValue>(V)) return true;
+  if (isa<UndefValue>(V))
+    return true;
+
+  VectorType *VecTy = dyn_cast<VectorType>(V->getType());
+  if (!VecTy) {
+    unsigned BitWidth = V->getType()->getIntegerBitWidth();
+    APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
+    ComputeMaskedBits(V, KnownZero, KnownOne, DL);
+    return KnownZero.isAllOnesValue();
+  }

-  unsigned BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
-  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
-  ComputeMaskedBits(V, KnownZero, KnownOne, TD);
-  return KnownZero.isAllOnesValue();
+  // Per-component check doesn't work with zeroinitializer
+  Constant *C = dyn_cast<Constant>(V);
+  if (!C)
+    return false;
+
+  if (C->isZeroValue())
+    return true;
+
+  // For a vector, KnownZero will only be true if all values are zero, so check
+  // this per component
+  unsigned BitWidth = VecTy->getElementType()->getIntegerBitWidth();
+  for (unsigned I = 0, N = VecTy->getNumElements(); I != N; ++I) {
+    Constant *Elem = C->getAggregateElement(I);
+    if (isa<UndefValue>(Elem))
+      return true;
+
+    APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
+    ComputeMaskedBits(Elem, KnownZero, KnownOne, DL);
+    if (KnownZero.isAllOnesValue())
+      return true;
+  }
+
+  return false;
 }

 void Lint::visitSDiv(BinaryOperator &I) {
-  Assert1(!isZero(I.getOperand(1), TD),
+  Assert1(!isZero(I.getOperand(1), DL),
           "Undefined behavior: Division by zero", &I);
 }

 void Lint::visitUDiv(BinaryOperator &I) {
-  Assert1(!isZero(I.getOperand(1), TD),
+  Assert1(!isZero(I.getOperand(1), DL),
           "Undefined behavior: Division by zero", &I);
 }

 void Lint::visitSRem(BinaryOperator &I) {
-  Assert1(!isZero(I.getOperand(1), TD),
+  Assert1(!isZero(I.getOperand(1), DL),
           "Undefined behavior: Division by zero", &I);
 }

 void Lint::visitURem(BinaryOperator &I) {
-  Assert1(!isZero(I.getOperand(1), TD),
+  Assert1(!isZero(I.getOperand(1), DL),
           "Undefined behavior: Division by zero", &I);
 }

@@ -546,13 +572,13 @@ void Lint::visitAllocaInst(AllocaInst &I) {
 }

 void Lint::visitVAArgInst(VAArgInst &I) {
-  visitMemoryReference(I, I.getOperand(0), AliasAnalysis::UnknownSize, 0, 0,
-                       MemRef::Read | MemRef::Write);
+  visitMemoryReference(I, I.getOperand(0), AliasAnalysis::UnknownSize, 0,
+                       nullptr, MemRef::Read | MemRef::Write);
 }

 void Lint::visitIndirectBrInst(IndirectBrInst &I) {
-  visitMemoryReference(I, I.getAddress(), AliasAnalysis::UnknownSize, 0, 0,
-                       MemRef::Branchee);
+  visitMemoryReference(I, I.getAddress(), AliasAnalysis::UnknownSize, 0,
+                       nullptr, MemRef::Branchee);

   Assert1(I.getNumDestinations() != 0,
           "Undefined behavior: indirectbr with no destinations", &I);
@@ -577,7 +603,7 @@ void Lint::visitInsertElementInst(InsertElementInst &I) {
 void Lint::visitUnreachableInst(UnreachableInst &I) {
   // This isn't undefined behavior, it's merely suspicious.
   Assert1(&I == I.getParent()->begin() ||
-          prior(BasicBlock::iterator(&I))->mayHaveSideEffects(),
+          std::prev(BasicBlock::iterator(&I))->mayHaveSideEffects(),
           "Unusual: unreachable immediately preceded by instruction without "
           "side effects", &I);
 }
@@ -606,7 +632,7 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
   // TODO: Look through eliminable cast pairs.
   // TODO: Look through calls with unique return values.
   // TODO: Look through vector insert/extract/shuffle.
-  V = OffsetOk ? GetUnderlyingObject(V, TD) : V->stripPointerCasts();
+  V = OffsetOk ? GetUnderlyingObject(V, DL) : V->stripPointerCasts();
   if (LoadInst *L = dyn_cast<LoadInst>(V)) {
     BasicBlock::iterator BBI = L;
     BasicBlock *BB = L->getParent();
@@ -626,8 +652,7 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
       if (W != V)
         return findValueImpl(W, OffsetOk, Visited);
   } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
-    if (CI->isNoopCast(TD ? TD->getIntPtrType(V->getContext()) :
-                            Type::getInt64Ty(V->getContext())))
+    if (CI->isNoopCast(DL))
      return findValueImpl(CI->getOperand(0), OffsetOk, Visited);
   } else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) {
     if (Value *W = FindInsertedValue(Ex->getAggregateOperand(),
@@ -640,7 +665,7 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
       if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),
                                CE->getOperand(0)->getType(),
                                CE->getType(),
-                               TD ? TD->getIntPtrType(V->getContext()) :
+                               DL ? DL->getIntPtrType(V->getType()) :
                                     Type::getInt64Ty(V->getContext())))
         return findValueImpl(CE->getOperand(0), OffsetOk, Visited);
     } else if (CE->getOpcode() == Instruction::ExtractValue) {
@@ -653,10 +678,10 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,

   // As a last resort, try SimplifyInstruction or constant folding.
   if (Instruction *Inst = dyn_cast<Instruction>(V)) {
-    if (Value *W = SimplifyInstruction(Inst, TD, TLI, DT))
+    if (Value *W = SimplifyInstruction(Inst, DL, TLI, DT))
       return findValueImpl(W, OffsetOk, Visited);
   } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
-    if (Value *W = ConstantFoldConstantExpression(CE, TD, TLI))
+    if (Value *W = ConstantFoldConstantExpression(CE, DL, TLI))
       if (W != V)
         return findValueImpl(W, OffsetOk, Visited);
   }
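
For reference, a minimal sketch of how the checks in this file are typically driven from client code, assuming the entry points declared in llvm/Analysis/Lint.h at this revision (lintFunction() and createLintPass()); the helper name lintAllFunctions below is illustrative only and not part of the patch:

#include "llvm/Analysis/Lint.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Run the lint checks over every defined function in a module.
// lintFunction() wraps the Lint pass in a temporary FunctionPassManager
// and prints any warnings it finds to dbgs(); declarations are skipped
// because lintFunction() only accepts function definitions.
static void lintAllFunctions(const Module &M) {
  for (const Function &F : M)
    if (!F.isDeclaration())
      lintFunction(F);
}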