1 //===-- Lint.cpp - Check for common errors in LLVM IR ---------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This pass statically checks for common and easily-identified constructs
11 // which produce undefined or likely unintended behavior in LLVM IR.
13 // It is not a guarantee of correctness, in two ways. First, it isn't
14 // comprehensive. There are checks which could be done statically which are
15 // not yet implemented. Some of these are indicated by TODO comments, but
16 // those aren't comprehensive either. Second, many conditions cannot be
17 // checked statically. This pass does no dynamic instrumentation, so it
18 // can't check for all possible problems.
20 // Another limitation is that it assumes all code will be executed. A store
21 // through a null pointer in a basic block which is never reached is harmless,
22 // but this pass will warn about it anyway. This is the main reason why most
23 // of these checks live here instead of in the Verifier pass.
25 // Optimization passes may make conditions that this pass checks for more or
26 // less obvious. If an optimization pass appears to be introducing a warning,
27 // it may be that the optimization pass is merely exposing an existing
28 // condition in the code.
30 // This code may be run before instcombine. In many cases, instcombine checks
31 // for the same kinds of things and turns instructions with undefined behavior
32 // into unreachable (or equivalent). Because of this, this pass makes some
33 // effort to look through bitcasts and so on.
35 //===----------------------------------------------------------------------===//
37 #include "llvm/Analysis/Lint.h"
38 #include "llvm/ADT/STLExtras.h"
39 #include "llvm/ADT/SmallSet.h"
40 #include "llvm/Analysis/AliasAnalysis.h"
41 #include "llvm/Analysis/AssumptionCache.h"
42 #include "llvm/Analysis/ConstantFolding.h"
43 #include "llvm/Analysis/InstructionSimplify.h"
44 #include "llvm/Analysis/Loads.h"
45 #include "llvm/Analysis/Passes.h"
46 #include "llvm/Analysis/TargetLibraryInfo.h"
47 #include "llvm/Analysis/ValueTracking.h"
48 #include "llvm/IR/CallSite.h"
49 #include "llvm/IR/DataLayout.h"
50 #include "llvm/IR/Dominators.h"
51 #include "llvm/IR/Function.h"
52 #include "llvm/IR/InstVisitor.h"
53 #include "llvm/IR/IntrinsicInst.h"
54 #include "llvm/IR/LegacyPassManager.h"
55 #include "llvm/Pass.h"
56 #include "llvm/Support/Debug.h"
57 #include "llvm/Support/raw_ostream.h"
// Flags describing how an instruction may use a memory operand.  These
// are combined bitwise and passed as the Flags argument of
// visitMemoryReference.  They are logically constants, so declare them
// const (the original left them mutable for no reason).
static const unsigned Read = 1;     // The pointer is loaded from.
static const unsigned Write = 2;    // The pointer is stored to.
static const unsigned Callee = 4;   // The pointer is the callee of a call.
static const unsigned Branchee = 8; // The pointer is an indirect branch target.
// Lint is a FunctionPass that walks each function with InstVisitor
// callbacks and accumulates warnings about suspicious or undefined IR
// constructs into MessagesStr (backed by the Messages string).
// NOTE(review): several member declarations and closing braces from the
// original file are not present in this excerpt.
68 class Lint : public FunctionPass, public InstVisitor<Lint> {
69 friend class InstVisitor<Lint>;
71 void visitFunction(Function &F);
// Shared checking logic for both calls and invokes.
73 void visitCallSite(CallSite CS);
// Checks one memory access: null/undef base, write-to-constant,
// buffer overflow, and misalignment.  Flags is a bitmask of the
// MemRef constants.
74 void visitMemoryReference(Instruction &I, Value *Ptr,
75 uint64_t Size, unsigned Align,
76 Type *Ty, unsigned Flags);
77 void visitEHBeginCatch(IntrinsicInst *II);
78 void visitEHEndCatch(IntrinsicInst *II);
80 void visitCallInst(CallInst &I);
81 void visitInvokeInst(InvokeInst &I);
82 void visitReturnInst(ReturnInst &I);
83 void visitLoadInst(LoadInst &I);
84 void visitStoreInst(StoreInst &I);
85 void visitXor(BinaryOperator &I);
86 void visitSub(BinaryOperator &I);
87 void visitLShr(BinaryOperator &I);
88 void visitAShr(BinaryOperator &I);
89 void visitShl(BinaryOperator &I);
90 void visitSDiv(BinaryOperator &I);
91 void visitUDiv(BinaryOperator &I);
92 void visitSRem(BinaryOperator &I);
93 void visitURem(BinaryOperator &I);
94 void visitAllocaInst(AllocaInst &I);
95 void visitVAArgInst(VAArgInst &I);
96 void visitIndirectBrInst(IndirectBrInst &I);
97 void visitExtractElementInst(ExtractElementInst &I);
98 void visitInsertElementInst(InsertElementInst &I);
99 void visitUnreachableInst(UnreachableInst &I);
// Looks through bitcasts, loads, phis, etc. for a more informative
// equivalent value; see the definition below.
101 Value *findValue(Value *V, bool OffsetOk) const;
102 Value *findValueImpl(Value *V, bool OffsetOk,
103 SmallPtrSetImpl<Value *> &Visited) const;
// Cached per-function analyses and the buffer diagnostics are written to.
110 const DataLayout *DL;
111 TargetLibraryInfo *TLI;
113 std::string Messages;
114 raw_string_ostream MessagesStr;
116 static char ID; // Pass identification, replacement for typeid
117 Lint() : FunctionPass(ID), MessagesStr(Messages) {
118 initializeLintPass(*PassRegistry::getPassRegistry());
121 bool runOnFunction(Function &F) override;
// The pass only reports problems; it preserves everything and requires
// alias analysis, the assumption cache, TLI, and the dominator tree.
123 void getAnalysisUsage(AnalysisUsage &AU) const override {
124 AU.setPreservesAll();
125 AU.addRequired<AliasAnalysis>();
126 AU.addRequired<AssumptionCacheTracker>();
127 AU.addRequired<TargetLibraryInfoWrapperPass>();
128 AU.addRequired<DominatorTreeWrapperPass>();
130 void print(raw_ostream &O, const Module *M) const override {}
// Appends a description of V to the message stream: instructions in
// full, other values in operand form.
132 void WriteValue(const Value *V) {
134 if (isa<Instruction>(V)) {
135 MessagesStr << *V << '\n';
137 V->printAsOperand(MessagesStr, true, Mod);
142 // CheckFailed - A check failed, so print out the condition and the message
143 // that failed. This provides a nice place to put a breakpoint if you want
144 // to see why something is not correct.
145 void CheckFailed(const Twine &Message,
146 const Value *V1 = nullptr, const Value *V2 = nullptr,
147 const Value *V3 = nullptr, const Value *V4 = nullptr) {
148 MessagesStr << Message.str() << "\n";
// Register the Lint pass with the legacy pass registry and declare the
// analyses it depends on (assumption cache, TLI, dominator tree, and the
// AliasAnalysis analysis group).
158 INITIALIZE_PASS_BEGIN(Lint, "lint", "Statically lint-checks LLVM IR",
160 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
161 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
162 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
163 INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
164 INITIALIZE_PASS_END(Lint, "lint", "Statically lint-checks LLVM IR",
167 // Assert - We know that cond should be true, if not print an error message.
// The AssertN variants record the failure message plus up to N values of
// context via CheckFailed, then return from the current visit method so
// that later checks in the same visitor don't pile on.
168 #define Assert(C, M) \
169 do { if (!(C)) { CheckFailed(M); return; } } while (0)
170 #define Assert1(C, M, V1) \
171 do { if (!(C)) { CheckFailed(M, V1); return; } } while (0)
172 #define Assert2(C, M, V1, V2) \
173 do { if (!(C)) { CheckFailed(M, V1, V2); return; } } while (0)
174 #define Assert3(C, M, V1, V2, V3) \
175 do { if (!(C)) { CheckFailed(M, V1, V2, V3); return; } } while (0)
176 #define Assert4(C, M, V1, V2, V3, V4) \
177 do { if (!(C)) { CheckFailed(M, V1, V2, V3, V4); return; } } while (0)
179 // Lint::run - This is the main Analysis entry point for a
182 bool Lint::runOnFunction(Function &F) {
// Cache the analysis results this function's checks will consult.
184 AA = &getAnalysis<AliasAnalysis>();
185 AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
186 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
187 DL = &F.getParent()->getDataLayout();
188 TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
// Diagnostics accumulated during visitation are flushed to the debug
// stream.  NOTE(review): the instruction-visiting call itself is on a
// line not present in this excerpt.
190 dbgs() << MessagesStr.str();
195 void Lint::visitFunction(Function &F) {
196 // This isn't undefined behavior, it's just a little unusual, and it's a
197 // fairly common mistake to neglect to name a function.
198 Assert1(F.hasName() || F.hasLocalLinkage(),
199 "Unusual: Unnamed function with non-local linkage", &F);
201 // TODO: Check for irreducible control flow.
// Common checks for calls and invokes: callee validity, calling
// convention, argument/return type agreement, noalias/sret argument
// properties, tail-call restrictions, and a handful of intrinsics.
// NOTE(review): many lines of the original body (declarations such as
// `Argument *Actual`, `break`s, and closing braces) are not present in
// this excerpt.
204 void Lint::visitCallSite(CallSite CS) {
205 Instruction &I = *CS.getInstruction();
206 Value *Callee = CS.getCalledValue();
// The callee pointer itself is a memory reference of kind Callee.
208 visitMemoryReference(I, Callee, AliasAnalysis::UnknownSize,
209 0, nullptr, MemRef::Callee);
// If we can see through to the actual Function, check call/declaration
// agreement.
211 if (Function *F = dyn_cast<Function>(findValue(Callee, /*OffsetOk=*/false))) {
212 Assert1(CS.getCallingConv() == F->getCallingConv(),
213 "Undefined behavior: Caller and callee calling convention differ",
216 FunctionType *FT = F->getFunctionType();
217 unsigned NumActualArgs = CS.arg_size();
// Varargs callees accept at least the fixed parameters; others must
// match exactly.
219 Assert1(FT->isVarArg() ?
220 FT->getNumParams() <= NumActualArgs :
221 FT->getNumParams() == NumActualArgs,
222 "Undefined behavior: Call argument count mismatches callee "
223 "argument count", &I);
225 Assert1(FT->getReturnType() == I.getType(),
226 "Undefined behavior: Call return type mismatches "
227 "callee return type", &I);
229 // Check argument types (in case the callee was casted) and attributes.
230 // TODO: Verify that caller and callee attributes are compatible.
231 Function::arg_iterator PI = F->arg_begin(), PE = F->arg_end();
232 CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
233 for (; AI != AE; ++AI) {
236 Argument *Formal = PI++;
237 Assert1(Formal->getType() == Actual->getType(),
238 "Undefined behavior: Call argument type mismatches "
239 "callee parameter type", &I);
241 // Check that noalias arguments don't alias other arguments. This is
242 // not fully precise because we don't know the sizes of the dereferenced
244 if (Formal->hasNoAliasAttr() && Actual->getType()->isPointerTy())
245 for (CallSite::arg_iterator BI = CS.arg_begin(); BI != AE; ++BI)
246 if (AI != BI && (*BI)->getType()->isPointerTy()) {
247 AliasAnalysis::AliasResult Result = AA->alias(*AI, *BI);
248 Assert1(Result != AliasAnalysis::MustAlias &&
249 Result != AliasAnalysis::PartialAlias,
250 "Unusual: noalias argument aliases another argument", &I);
253 // Check that an sret argument points to valid memory.
254 if (Formal->hasStructRetAttr() && Actual->getType()->isPointerTy()) {
256 cast<PointerType>(Formal->getType())->getElementType();
257 visitMemoryReference(I, Actual, AA->getTypeStoreSize(Ty),
258 DL ? DL->getABITypeAlignment(Ty) : 0,
259 Ty, MemRef::Read | MemRef::Write);
// A tail call must not reference the caller's stack; check each
// argument's underlying object.
265 if (CS.isCall() && cast<CallInst>(CS.getInstruction())->isTailCall())
266 for (CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
268 Value *Obj = findValue(*AI, /*OffsetOk=*/true);
269 Assert1(!isa<AllocaInst>(Obj),
270 "Undefined behavior: Call with \"tail\" keyword references "
// Intrinsic-specific checks.
275 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I))
276 switch (II->getIntrinsicID()) {
279 // TODO: Check more intrinsics
281 case Intrinsic::memcpy: {
282 MemCpyInst *MCI = cast<MemCpyInst>(&I);
283 // TODO: If the size is known, use it.
284 visitMemoryReference(I, MCI->getDest(), AliasAnalysis::UnknownSize,
285 MCI->getAlignment(), nullptr,
287 visitMemoryReference(I, MCI->getSource(), AliasAnalysis::UnknownSize,
288 MCI->getAlignment(), nullptr,
291 // Check that the memcpy arguments don't overlap. The AliasAnalysis API
292 // isn't expressive enough for what we really want to do. Known partial
293 // overlap is not distinguished from the case where nothing is known.
// Use the constant length, if small enough, to sharpen the alias query.
295 if (const ConstantInt *Len =
296 dyn_cast<ConstantInt>(findValue(MCI->getLength(),
297 /*OffsetOk=*/false)))
298 if (Len->getValue().isIntN(32))
299 Size = Len->getValue().getZExtValue();
300 Assert1(AA->alias(MCI->getSource(), Size, MCI->getDest(), Size) !=
301 AliasAnalysis::MustAlias,
302 "Undefined behavior: memcpy source and destination overlap", &I);
305 case Intrinsic::memmove: {
306 MemMoveInst *MMI = cast<MemMoveInst>(&I);
307 // TODO: If the size is known, use it.
308 visitMemoryReference(I, MMI->getDest(), AliasAnalysis::UnknownSize,
309 MMI->getAlignment(), nullptr,
311 visitMemoryReference(I, MMI->getSource(), AliasAnalysis::UnknownSize,
312 MMI->getAlignment(), nullptr,
316 case Intrinsic::memset: {
317 MemSetInst *MSI = cast<MemSetInst>(&I);
318 // TODO: If the size is known, use it.
319 visitMemoryReference(I, MSI->getDest(), AliasAnalysis::UnknownSize,
320 MSI->getAlignment(), nullptr,
325 case Intrinsic::vastart:
326 Assert1(I.getParent()->getParent()->isVarArg(),
327 "Undefined behavior: va_start called in a non-varargs function",
330 visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
331 0, nullptr, MemRef::Read | MemRef::Write);
333 case Intrinsic::vacopy:
334 visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
335 0, nullptr, MemRef::Write);
336 visitMemoryReference(I, CS.getArgument(1), AliasAnalysis::UnknownSize,
337 0, nullptr, MemRef::Read);
339 case Intrinsic::vaend:
340 visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
341 0, nullptr, MemRef::Read | MemRef::Write);
344 case Intrinsic::stackrestore:
345 // Stackrestore doesn't read or write memory, but it sets the
346 // stack pointer, which the compiler may read from or write to
347 // at any time, so check it for both readability and writeability.
348 visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
349 0, nullptr, MemRef::Read | MemRef::Write);
352 case Intrinsic::eh_begincatch:
353 visitEHBeginCatch(II);
355 case Intrinsic::eh_endcatch:
361 void Lint::visitCallInst(CallInst &I) {
362 return visitCallSite(&I);
365 void Lint::visitInvokeInst(InvokeInst &I) {
366 return visitCallSite(&I);
369 void Lint::visitReturnInst(ReturnInst &I) {
370 Function *F = I.getParent()->getParent();
371 Assert1(!F->doesNotReturn(),
372 "Unusual: Return statement in function with noreturn attribute",
375 if (Value *V = I.getReturnValue()) {
376 Value *Obj = findValue(V, /*OffsetOk=*/true);
377 Assert1(!isa<AllocaInst>(Obj),
378 "Unusual: Returning alloca value", &I);
382 // TODO: Check that the reference is in bounds.
383 // TODO: Check readnone/readonly function attributes.
// Checks one memory access of Size bytes at Ptr with alignment Align.
// Ty, if non-null, is the accessed type; Flags is a MemRef bitmask.
// NOTE(review): several lines of the original body (the UnknownSize
// early-out, `int64_t Offset` declaration, and closing braces) are not
// present in this excerpt.
384 void Lint::visitMemoryReference(Instruction &I,
385 Value *Ptr, uint64_t Size, unsigned Align,
386 Type *Ty, unsigned Flags) {
387 // If no memory is being referenced, it doesn't matter if the pointer
// Strip casts and constant GEP offsets to find what is really accessed.
392 Value *UnderlyingObject = findValue(Ptr, /*OffsetOk=*/true);
393 Assert1(!isa<ConstantPointerNull>(UnderlyingObject),
394 "Undefined behavior: Null pointer dereference", &I);
395 Assert1(!isa<UndefValue>(UnderlyingObject),
396 "Undefined behavior: Undef pointer dereference", &I);
// -1 and +1 are common "invalid pointer" sentinels; dereferencing them
// is suspicious though not formally undefined.
397 Assert1(!isa<ConstantInt>(UnderlyingObject) ||
398 !cast<ConstantInt>(UnderlyingObject)->isAllOnesValue(),
399 "Unusual: All-ones pointer dereference", &I);
400 Assert1(!isa<ConstantInt>(UnderlyingObject) ||
401 !cast<ConstantInt>(UnderlyingObject)->isOne(),
402 "Unusual: Address one pointer dereference", &I);
404 if (Flags & MemRef::Write) {
405 if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(UnderlyingObject))
406 Assert1(!GV->isConstant(),
407 "Undefined behavior: Write to read-only memory", &I);
408 Assert1(!isa<Function>(UnderlyingObject) &&
409 !isa<BlockAddress>(UnderlyingObject),
410 "Undefined behavior: Write to text section", &I);
412 if (Flags & MemRef::Read) {
413 Assert1(!isa<Function>(UnderlyingObject),
414 "Unusual: Load from function body", &I);
415 Assert1(!isa<BlockAddress>(UnderlyingObject),
416 "Undefined behavior: Load from block address", &I);
418 if (Flags & MemRef::Callee) {
419 Assert1(!isa<BlockAddress>(UnderlyingObject),
420 "Undefined behavior: Call to block address", &I);
422 if (Flags & MemRef::Branchee) {
423 Assert1(!isa<Constant>(UnderlyingObject) ||
424 isa<BlockAddress>(UnderlyingObject),
425 "Undefined behavior: Branch to non-blockaddress", &I);
428 // Check for buffer overflows and misalignment.
429 // Only handles memory references that read/write something simple like an
430 // alloca instruction or a global variable.
432 if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, DL)) {
433 // OK, so the access is to a constant offset from Ptr. Check that Ptr is
434 // something we can handle and if so extract the size of this base object
435 // along with its alignment.
436 uint64_t BaseSize = AliasAnalysis::UnknownSize;
437 unsigned BaseAlign = 0;
439 if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
440 Type *ATy = AI->getAllocatedType();
441 if (DL && !AI->isArrayAllocation() && ATy->isSized())
442 BaseSize = DL->getTypeAllocSize(ATy);
443 BaseAlign = AI->getAlignment();
// Fall back to the ABI alignment when the alloca doesn't specify one.
444 if (DL && BaseAlign == 0 && ATy->isSized())
445 BaseAlign = DL->getABITypeAlignment(ATy);
446 } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
447 // If the global may be defined differently in another compilation unit
448 // then don't warn about funky memory accesses.
449 if (GV->hasDefinitiveInitializer()) {
450 Type *GTy = GV->getType()->getElementType();
451 if (DL && GTy->isSized())
452 BaseSize = DL->getTypeAllocSize(GTy);
453 BaseAlign = GV->getAlignment();
454 if (DL && BaseAlign == 0 && GTy->isSized())
455 BaseAlign = DL->getABITypeAlignment(GTy);
459 // Accesses from before the start or after the end of the object are not
461 Assert1(Size == AliasAnalysis::UnknownSize ||
462 BaseSize == AliasAnalysis::UnknownSize ||
463 (Offset >= 0 && Offset + Size <= BaseSize),
464 "Undefined behavior: Buffer overflow", &I);
466 // Accesses that say that the memory is more aligned than it is are not
468 if (DL && Align == 0 && Ty && Ty->isSized())
469 Align = DL->getABITypeAlignment(Ty);
470 Assert1(!BaseAlign || Align <= MinAlign(BaseAlign, Offset),
471 "Undefined behavior: Memory reference address is misaligned", &I);
475 void Lint::visitLoadInst(LoadInst &I) {
476 visitMemoryReference(I, I.getPointerOperand(),
477 AA->getTypeStoreSize(I.getType()), I.getAlignment(),
478 I.getType(), MemRef::Read);
481 void Lint::visitStoreInst(StoreInst &I) {
482 visitMemoryReference(I, I.getPointerOperand(),
483 AA->getTypeStoreSize(I.getOperand(0)->getType()),
485 I.getOperand(0)->getType(), MemRef::Write);
488 void Lint::visitXor(BinaryOperator &I) {
489 Assert1(!isa<UndefValue>(I.getOperand(0)) ||
490 !isa<UndefValue>(I.getOperand(1)),
491 "Undefined result: xor(undef, undef)", &I);
494 void Lint::visitSub(BinaryOperator &I) {
495 Assert1(!isa<UndefValue>(I.getOperand(0)) ||
496 !isa<UndefValue>(I.getOperand(1)),
497 "Undefined result: sub(undef, undef)", &I);
500 void Lint::visitLShr(BinaryOperator &I) {
501 if (ConstantInt *CI =
502 dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
503 Assert1(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
504 "Undefined result: Shift count out of range", &I);
507 void Lint::visitAShr(BinaryOperator &I) {
508 if (ConstantInt *CI =
509 dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
510 Assert1(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
511 "Undefined result: Shift count out of range", &I);
514 void Lint::visitShl(BinaryOperator &I) {
515 if (ConstantInt *CI =
516 dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
517 Assert1(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
518 "Undefined result: Shift count out of range", &I);
// Walks backwards from BB checking that every path originates at a
// landing pad block.  VisitedBlocks guards against CFG cycles.
// NOTE(review): the return type line and several return/continue lines
// of the original are not present in this excerpt.
522 allPredsCameFromLandingPad(BasicBlock *BB,
523 SmallSet<BasicBlock *, 4> &VisitedBlocks) {
524 VisitedBlocks.insert(BB);
// A landing pad block satisfies the property for this path.
525 if (BB->isLandingPad())
527 // If we find a block with no predecessors, the search failed.
530 for (BasicBlock *Pred : predecessors(BB)) {
531 if (VisitedBlocks.count(Pred))
533 if (!allPredsCameFromLandingPad(Pred, VisitedBlocks))
// Walks forward from InstBegin in BB verifying that every path reaches a
// call to llvm.eh.endcatch; a second llvm.eh.begincatch found first is
// reported through SecondBeginCatch.  NOTE(review): the return type line
// and several return lines of the original are not present in this
// excerpt.
540 allSuccessorsReachEndCatch(BasicBlock *BB, BasicBlock::iterator InstBegin,
541 IntrinsicInst **SecondBeginCatch,
542 SmallSet<BasicBlock *, 4> &VisitedBlocks) {
543 VisitedBlocks.insert(BB);
544 for (BasicBlock::iterator I = InstBegin, E = BB->end(); I != E; ++I) {
545 IntrinsicInst *IC = dyn_cast<IntrinsicInst>(I);
546 if (IC && IC->getIntrinsicID() == Intrinsic::eh_endcatch)
548 // If we find another begincatch while looking for an endcatch,
549 // that's also an error.
550 if (IC && IC->getIntrinsicID() == Intrinsic::eh_begincatch) {
551 *SecondBeginCatch = IC;
556 // If we reach a block with no successors while searching, the
557 // search has failed.
560 // Otherwise, search all of the successors.
561 for (BasicBlock *Succ : successors(BB)) {
562 if (VisitedBlocks.count(Succ))
564 if (!allSuccessorsReachEndCatch(Succ, Succ->begin(), SecondBeginCatch,
// Verifies the CFG context of an llvm.eh.begincatch call: it must be
// reachable only via a landing pad, and every path onward must reach
// llvm.eh.endcatch without hitting a second begincatch.
571 void Lint::visitEHBeginCatch(IntrinsicInst *II) {
572 // The checks in this function make a potentially dubious assumption about
573 // the CFG, namely that any block involved in a catch is only used for the
574 // catch. This will very likely be true of IR generated by a front end,
575 // but it may cease to be true, for example, if the IR is run through a
576 // pass which combines similar blocks.
578 // In general, if we encounter a block the isn't dominated by the catch
579 // block while we are searching the catch block's successors for a call
580 // to end catch intrinsic, then it is possible that it will be legal for
581 // a path through this block to never reach a call to llvm.eh.endcatch.
582 // An analogous statement could be made about our search for a landing
583 // pad among the catch block's predecessors.
585 // What is actually required is that no path is possible at runtime that
586 // reaches a call to llvm.eh.begincatch without having previously visited
587 // a landingpad instruction and that no path is possible at runtime that
588 // calls llvm.eh.begincatch and does not subsequently call llvm.eh.endcatch
589 // (mentally adjusting for the fact that in reality these calls will be
590 // removed before code generation).
592 // Because this is a lint check, we take a pessimistic approach and warn if
593 // the control flow is potentially incorrect.
595 SmallSet<BasicBlock *, 4> VisitedBlocks;
596 BasicBlock *CatchBB = II->getParent();
598 // The begin catch must occur in a landing pad block or all paths
599 // to it must have come from a landing pad.
600 Assert1(allPredsCameFromLandingPad(CatchBB, VisitedBlocks),
601 "llvm.eh.begincatch may be reachable without passing a landingpad",
604 // Reset the visited block list.
605 VisitedBlocks.clear();
607 IntrinsicInst *SecondBeginCatch = nullptr;
609 // This has to be called before it is asserted. Otherwise, the first assert
610 // below can never be hit.
611 bool EndCatchFound = allSuccessorsReachEndCatch(
612 CatchBB, std::next(static_cast<BasicBlock::iterator>(II)),
613 &SecondBeginCatch, VisitedBlocks);
615 SecondBeginCatch == nullptr,
616 "llvm.eh.begincatch may be called a second time before llvm.eh.endcatch",
617 II, SecondBeginCatch);
618 Assert1(EndCatchFound,
619 "Some paths from llvm.eh.begincatch may not reach llvm.eh.endcatch",
// Walks backwards from InstRbegin in BB verifying that every path came
// through a call to llvm.eh.begincatch; a second llvm.eh.endcatch found
// first is reported through SecondEndCatch.  NOTE(review): several
// return/continue lines and closing braces of the original are not
// present in this excerpt.
623 static bool allPredCameFromBeginCatch(
624 BasicBlock *BB, BasicBlock::reverse_iterator InstRbegin,
625 IntrinsicInst **SecondEndCatch, SmallSet<BasicBlock *, 4> &VisitedBlocks) {
626 VisitedBlocks.insert(BB);
627 // Look for a begincatch in this block.
628 for (BasicBlock::reverse_iterator RI = InstRbegin, RE = BB->rend(); RI != RE;
630 IntrinsicInst *IC = dyn_cast<IntrinsicInst>(&*RI);
631 if (IC && IC->getIntrinsicID() == Intrinsic::eh_begincatch)
633 // If we find another end catch before we find a begin catch, that's
635 if (IC && IC->getIntrinsicID() == Intrinsic::eh_endcatch) {
636 *SecondEndCatch = IC;
639 // If we encounter a landingpad instruction, the search failed.
640 if (isa<LandingPadInst>(*RI))
643 // If while searching we find a block with no predeccesors,
644 // the search failed.
647 // Search any predecessors we haven't seen before.
648 for (BasicBlock *Pred : predecessors(BB)) {
649 if (VisitedBlocks.count(Pred))
651 if (!allPredCameFromBeginCatch(Pred, Pred->rbegin(), SecondEndCatch,
// Verifies the CFG context of an llvm.eh.endcatch call: every path to it
// must pass through llvm.eh.begincatch, with no intervening endcatch.
658 void Lint::visitEHEndCatch(IntrinsicInst *II) {
659 // The check in this function makes a potentially dubious assumption about
660 // the CFG, namely that any block involved in a catch is only used for the
661 // catch. This will very likely be true of IR generated by a front end,
662 // but it may cease to be true, for example, if the IR is run through a
663 // pass which combines similar blocks.
665 // In general, if we encounter a block the isn't post-dominated by the
666 // end catch block while we are searching the end catch block's predecessors
667 // for a call to the begin catch intrinsic, then it is possible that it will
668 // be legal for a path to reach the end catch block without ever having
669 // called llvm.eh.begincatch.
671 // What is actually required is that no path is possible at runtime that
672 // reaches a call to llvm.eh.endcatch without having previously visited
673 // a call to llvm.eh.begincatch (mentally adjusting for the fact that in
674 // reality these calls will be removed before code generation).
676 // Because this is a lint check, we take a pessimistic approach and warn if
677 // the control flow is potentially incorrect.
679 BasicBlock *EndCatchBB = II->getParent();
681 // Alls paths to the end catch call must pass through a begin catch call.
683 // If llvm.eh.begincatch wasn't called in the current block, we'll use this
684 // lambda to recursively look for it in predecessors.
685 SmallSet<BasicBlock *, 4> VisitedBlocks;
686 IntrinsicInst *SecondEndCatch = nullptr;
688 // This has to be called before it is asserted. Otherwise, the first assert
689 // below can never be hit.
690 bool BeginCatchFound =
691 allPredCameFromBeginCatch(EndCatchBB, BasicBlock::reverse_iterator(II),
692 &SecondEndCatch, VisitedBlocks);
694 SecondEndCatch == nullptr,
695 "llvm.eh.endcatch may be called a second time after llvm.eh.begincatch",
699 "llvm.eh.endcatch may be reachable without passing llvm.eh.begincatch",
// Returns true if V is known to be zero (conservatively treating undef
// as possibly-zero), handling both scalar integers and integer vectors.
// NOTE(review): several return lines and closing braces of the original
// are not present in this excerpt.
703 static bool isZero(Value *V, const DataLayout *DL, DominatorTree *DT,
704 AssumptionCache *AC) {
705 // Assume undef could be zero.
706 if (isa<UndefValue>(V))
709 VectorType *VecTy = dyn_cast<VectorType>(V->getType());
// Scalar case: all bits known zero means the value is exactly zero.
711 unsigned BitWidth = V->getType()->getIntegerBitWidth();
712 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
713 computeKnownBits(V, KnownZero, KnownOne, DL, 0, AC,
714 dyn_cast<Instruction>(V), DT);
715 return KnownZero.isAllOnesValue();
718 // Per-component check doesn't work with zeroinitializer
719 Constant *C = dyn_cast<Constant>(V);
723 if (C->isZeroValue())
726 // For a vector, KnownZero will only be true if all values are zero, so check
727 // this per component
728 unsigned BitWidth = VecTy->getElementType()->getIntegerBitWidth();
729 for (unsigned I = 0, N = VecTy->getNumElements(); I != N; ++I) {
730 Constant *Elem = C->getAggregateElement(I);
731 if (isa<UndefValue>(Elem))
734 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
735 computeKnownBits(Elem, KnownZero, KnownOne, DL);
736 if (KnownZero.isAllOnesValue())
743 void Lint::visitSDiv(BinaryOperator &I) {
744 Assert1(!isZero(I.getOperand(1), DL, DT, AC),
745 "Undefined behavior: Division by zero", &I);
748 void Lint::visitUDiv(BinaryOperator &I) {
749 Assert1(!isZero(I.getOperand(1), DL, DT, AC),
750 "Undefined behavior: Division by zero", &I);
753 void Lint::visitSRem(BinaryOperator &I) {
754 Assert1(!isZero(I.getOperand(1), DL, DT, AC),
755 "Undefined behavior: Division by zero", &I);
758 void Lint::visitURem(BinaryOperator &I) {
759 Assert1(!isZero(I.getOperand(1), DL, DT, AC),
760 "Undefined behavior: Division by zero", &I);
763 void Lint::visitAllocaInst(AllocaInst &I) {
764 if (isa<ConstantInt>(I.getArraySize()))
765 // This isn't undefined behavior, it's just an obvious pessimization.
766 Assert1(&I.getParent()->getParent()->getEntryBlock() == I.getParent(),
767 "Pessimization: Static alloca outside of entry block", &I);
769 // TODO: Check for an unusual size (MSB set?)
772 void Lint::visitVAArgInst(VAArgInst &I) {
773 visitMemoryReference(I, I.getOperand(0), AliasAnalysis::UnknownSize, 0,
774 nullptr, MemRef::Read | MemRef::Write);
777 void Lint::visitIndirectBrInst(IndirectBrInst &I) {
778 visitMemoryReference(I, I.getAddress(), AliasAnalysis::UnknownSize, 0,
779 nullptr, MemRef::Branchee);
781 Assert1(I.getNumDestinations() != 0,
782 "Undefined behavior: indirectbr with no destinations", &I);
785 void Lint::visitExtractElementInst(ExtractElementInst &I) {
786 if (ConstantInt *CI =
787 dyn_cast<ConstantInt>(findValue(I.getIndexOperand(),
788 /*OffsetOk=*/false)))
789 Assert1(CI->getValue().ult(I.getVectorOperandType()->getNumElements()),
790 "Undefined result: extractelement index out of range", &I);
793 void Lint::visitInsertElementInst(InsertElementInst &I) {
794 if (ConstantInt *CI =
795 dyn_cast<ConstantInt>(findValue(I.getOperand(2),
796 /*OffsetOk=*/false)))
797 Assert1(CI->getValue().ult(I.getType()->getNumElements()),
798 "Undefined result: insertelement index out of range", &I);
// Flags an unreachable that directly follows a side-effect-free
// instruction: legal, but usually indicates a dropped diagnostic or
// similar mistake.  NOTE(review): the tail of the message string and the
// closing of this function are not present in this excerpt.
801 void Lint::visitUnreachableInst(UnreachableInst &I) {
802 // This isn't undefined behavior, it's merely suspicious.
803 Assert1(&I == I.getParent()->begin() ||
804 std::prev(BasicBlock::iterator(&I))->mayHaveSideEffects(),
805 "Unusual: unreachable immediately preceded by instruction without "
809 /// findValue - Look through bitcasts and simple memory reference patterns
810 /// to identify an equivalent, but more informative, value. If OffsetOk
811 /// is true, look through getelementptrs with non-zero offsets too.
813 /// Most analysis passes don't require this logic, because instcombine
814 /// will simplify most of these kinds of things away. But it's a goal of
815 /// this Lint pass to be useful even on non-optimized IR.
816 Value *Lint::findValue(Value *V, bool OffsetOk) const {
817 SmallPtrSet<Value *, 4> Visited;
818 return findValueImpl(V, OffsetOk, Visited);
821 /// findValueImpl - Implementation helper for findValue.
// Recursively strips casts, forwards loads to available stored values,
// collapses constant phis, unwraps extractvalue, and finally falls back
// to instruction simplification / constant folding.  NOTE(review):
// several lines of the original body (loop headers, break/return lines,
// and closing braces) are not present in this excerpt.
822 Value *Lint::findValueImpl(Value *V, bool OffsetOk,
823 SmallPtrSetImpl<Value *> &Visited) const {
824 // Detect self-referential values.
825 if (!Visited.insert(V).second)
826 return UndefValue::get(V->getType());
828 // TODO: Look through sext or zext cast, when the result is known to
829 // be interpreted as signed or unsigned, respectively.
830 // TODO: Look through eliminable cast pairs.
831 // TODO: Look through calls with unique return values.
832 // TODO: Look through vector insert/extract/shuffle.
833 V = OffsetOk ? GetUnderlyingObject(V, DL) : V->stripPointerCasts();
// For a load, walk back through unique predecessors looking for the
// value that was stored to the same address.
834 if (LoadInst *L = dyn_cast<LoadInst>(V)) {
835 BasicBlock::iterator BBI = L;
836 BasicBlock *BB = L->getParent();
837 SmallPtrSet<BasicBlock *, 4> VisitedBlocks;
839 if (!VisitedBlocks.insert(BB).second)
841 if (Value *U = FindAvailableLoadedValue(L->getPointerOperand(),
843 return findValueImpl(U, OffsetOk, Visited);
844 if (BBI != BB->begin()) break;
845 BB = BB->getUniquePredecessor();
849 } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
850 if (Value *W = PN->hasConstantValue())
852 return findValueImpl(W, OffsetOk, Visited);
853 } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
// Casts that don't change the representation can be looked through.
854 if (CI->isNoopCast(DL))
855 return findValueImpl(CI->getOperand(0), OffsetOk, Visited);
856 } else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) {
857 if (Value *W = FindInsertedValue(Ex->getAggregateOperand(),
860 return findValueImpl(W, OffsetOk, Visited);
861 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
862 // Same as above, but for ConstantExpr instead of Instruction.
863 if (Instruction::isCast(CE->getOpcode())) {
864 if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),
865 CE->getOperand(0)->getType(),
// Without a DataLayout, fall back to i64 as the intptr type.
867 DL ? DL->getIntPtrType(V->getType()) :
868 Type::getInt64Ty(V->getContext())))
869 return findValueImpl(CE->getOperand(0), OffsetOk, Visited);
870 } else if (CE->getOpcode() == Instruction::ExtractValue) {
871 ArrayRef<unsigned> Indices = CE->getIndices();
872 if (Value *W = FindInsertedValue(CE->getOperand(0), Indices))
874 return findValueImpl(W, OffsetOk, Visited);
878 // As a last resort, try SimplifyInstruction or constant folding.
879 if (Instruction *Inst = dyn_cast<Instruction>(V)) {
880 if (Value *W = SimplifyInstruction(Inst, DL, TLI, DT, AC))
881 return findValueImpl(W, OffsetOk, Visited);
882 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
883 if (Value *W = ConstantFoldConstantExpression(CE, DL, TLI))
885 return findValueImpl(W, OffsetOk, Visited);
891 //===----------------------------------------------------------------------===//
892 // Implement the public interfaces to this file...
893 //===----------------------------------------------------------------------===//
// Factory for clients that schedule the pass themselves.  NOTE(review):
// the body of this function is not present in this excerpt.
895 FunctionPass *llvm::createLintPass() {
899 /// lintFunction - Check a function for errors, printing messages on stderr.
// Convenience wrapper: runs the Lint pass over a single function via a
// temporary legacy FunctionPassManager.  NOTE(review): the FPM.add/run
// lines of the original are not present in this excerpt.
901 void llvm::lintFunction(const Function &f) {
// The pass manager API is non-const; linting itself doesn't modify F.
902 Function &F = const_cast<Function&>(f);
903 assert(!F.isDeclaration() && "Cannot lint external functions");
905 legacy::FunctionPassManager FPM(F.getParent());
906 Lint *V = new Lint();
911 /// lintModule - Check a module for errors, printing messages on stderr.
// Convenience wrapper: runs the Lint pass over every function in M via a
// temporary legacy PassManager.  NOTE(review): the PM.add line of the
// original is not present in this excerpt.
913 void llvm::lintModule(const Module &M) {
914 legacy::PassManager PM;
915 Lint *V = new Lint();
// The pass manager API is non-const; linting itself doesn't modify M.
917 PM.run(const_cast<Module&>(M));