1 //===-- AddressSanitizer.cpp - memory error detector ------------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is a part of AddressSanitizer, an address sanity checker.
11 // Details of the algorithm:
12 // http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm
14 //===----------------------------------------------------------------------===//
16 #include "llvm/Transforms/Instrumentation.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/DenseMap.h"
19 #include "llvm/ADT/DenseSet.h"
20 #include "llvm/ADT/DepthFirstIterator.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/SmallString.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/ADT/StringExtras.h"
26 #include "llvm/ADT/Triple.h"
27 #include "llvm/IR/CallSite.h"
28 #include "llvm/IR/DIBuilder.h"
29 #include "llvm/IR/DataLayout.h"
30 #include "llvm/IR/Dominators.h"
31 #include "llvm/IR/Function.h"
32 #include "llvm/IR/IRBuilder.h"
33 #include "llvm/IR/InlineAsm.h"
34 #include "llvm/IR/InstVisitor.h"
35 #include "llvm/IR/IntrinsicInst.h"
36 #include "llvm/IR/LLVMContext.h"
37 #include "llvm/IR/MDBuilder.h"
38 #include "llvm/IR/Module.h"
39 #include "llvm/IR/Type.h"
40 #include "llvm/MC/MCSectionMachO.h"
41 #include "llvm/Support/CommandLine.h"
42 #include "llvm/Support/DataTypes.h"
43 #include "llvm/Support/Debug.h"
44 #include "llvm/Support/Endian.h"
45 #include "llvm/Support/SwapByteOrder.h"
46 #include "llvm/Transforms/Scalar.h"
47 #include "llvm/Transforms/Utils/ASanStackFrameLayout.h"
48 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
49 #include "llvm/Transforms/Utils/Cloning.h"
50 #include "llvm/Transforms/Utils/Local.h"
51 #include "llvm/Transforms/Utils/ModuleUtils.h"
54 #include <system_error>
58 #define DEBUG_TYPE "asan"
60 static const uint64_t kDefaultShadowScale = 3;
61 static const uint64_t kDefaultShadowOffset32 = 1ULL << 29;
62 static const uint64_t kIOSShadowOffset32 = 1ULL << 30;
63 static const uint64_t kDefaultShadowOffset64 = 1ULL << 44;
64 static const uint64_t kSmallX86_64ShadowOffset = 0x7FFF8000; // < 2G.
65 static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 41;
66 static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
67 static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 36;
68 static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
69 static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
71 static const size_t kMinStackMallocSize = 1 << 6; // 64B
72 static const size_t kMaxStackMallocSize = 1 << 16; // 64K
73 static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3;
74 static const uintptr_t kRetiredStackFrameMagic = 0x45E0360E;
76 static const char *const kAsanModuleCtorName = "asan.module_ctor";
77 static const char *const kAsanModuleDtorName = "asan.module_dtor";
78 static const uint64_t kAsanCtorAndDtorPriority = 1;
79 static const char *const kAsanReportErrorTemplate = "__asan_report_";
80 static const char *const kAsanReportLoadN = "__asan_report_load_n";
81 static const char *const kAsanReportStoreN = "__asan_report_store_n";
82 static const char *const kAsanRegisterGlobalsName = "__asan_register_globals";
83 static const char *const kAsanUnregisterGlobalsName =
84 "__asan_unregister_globals";
85 static const char *const kAsanPoisonGlobalsName = "__asan_before_dynamic_init";
86 static const char *const kAsanUnpoisonGlobalsName = "__asan_after_dynamic_init";
87 static const char *const kAsanInitName = "__asan_init_v5";
88 static const char *const kAsanPtrCmp = "__sanitizer_ptr_cmp";
89 static const char *const kAsanPtrSub = "__sanitizer_ptr_sub";
90 static const char *const kAsanHandleNoReturnName = "__asan_handle_no_return";
91 static const int kMaxAsanStackMallocSizeClass = 10;
92 static const char *const kAsanStackMallocNameTemplate = "__asan_stack_malloc_";
93 static const char *const kAsanStackFreeNameTemplate = "__asan_stack_free_";
94 static const char *const kAsanGenPrefix = "__asan_gen_";
95 static const char *const kSanCovGenPrefix = "__sancov_gen_";
96 static const char *const kAsanPoisonStackMemoryName =
97 "__asan_poison_stack_memory";
98 static const char *const kAsanUnpoisonStackMemoryName =
99 "__asan_unpoison_stack_memory";
101 static const char *const kAsanOptionDetectUAR =
102 "__asan_option_detect_stack_use_after_return";
105 static const int kAsanStackAfterReturnMagic = 0xf5;
108 // Access sizes are powers of two: 1, 2, 4, 8, 16.
109 static const size_t kNumberOfAccessSizes = 5;
111 static const unsigned kAllocaRzSize = 32;
112 static const unsigned kAsanAllocaLeftMagic = 0xcacacacaU;
113 static const unsigned kAsanAllocaRightMagic = 0xcbcbcbcbU;
114 static const unsigned kAsanAllocaPartialVal1 = 0xcbcbcb00U;
115 static const unsigned kAsanAllocaPartialVal2 = 0x000000cbU;
117 // Command-line flags.
119 // This flag may need to be replaced with -f[no-]asan-reads.
120 static cl::opt<bool> ClInstrumentReads("asan-instrument-reads",
121 cl::desc("instrument read instructions"), cl::Hidden, cl::init(true));
122 static cl::opt<bool> ClInstrumentWrites("asan-instrument-writes",
123 cl::desc("instrument write instructions"), cl::Hidden, cl::init(true));
124 static cl::opt<bool> ClInstrumentAtomics("asan-instrument-atomics",
125 cl::desc("instrument atomic instructions (rmw, cmpxchg)"),
126 cl::Hidden, cl::init(true));
127 static cl::opt<bool> ClAlwaysSlowPath("asan-always-slow-path",
128 cl::desc("use instrumentation with slow path for all accesses"),
129 cl::Hidden, cl::init(false));
130 // This flag limits the number of instructions to be instrumented
131 // in any given BB. Normally, this should be set to unlimited (INT_MAX),
132 // but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporarily limit it.
134 static cl::opt<int> ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb",
136 cl::desc("maximal number of instructions to instrument in any given BB"),
138 // This flag may need to be replaced with -f[no]asan-stack.
139 static cl::opt<bool> ClStack("asan-stack",
140 cl::desc("Handle stack memory"), cl::Hidden, cl::init(true));
141 static cl::opt<bool> ClUseAfterReturn("asan-use-after-return",
142 cl::desc("Check return-after-free"), cl::Hidden, cl::init(true));
143 // This flag may need to be replaced with -f[no]asan-globals.
144 static cl::opt<bool> ClGlobals("asan-globals",
145 cl::desc("Handle global objects"), cl::Hidden, cl::init(true));
146 static cl::opt<bool> ClInitializers("asan-initialization-order",
147 cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(true));
148 static cl::opt<bool> ClInvalidPointerPairs("asan-detect-invalid-pointer-pair",
149 cl::desc("Instrument <, <=, >, >=, - with pointer operands"),
150 cl::Hidden, cl::init(false));
151 static cl::opt<unsigned> ClRealignStack("asan-realign-stack",
152 cl::desc("Realign stack to the value of this flag (power of two)"),
153 cl::Hidden, cl::init(32));
154 static cl::opt<int> ClInstrumentationWithCallsThreshold(
155 "asan-instrumentation-with-call-threshold",
156 cl::desc("If the function being instrumented contains more than "
157 "this number of memory accesses, use callbacks instead of "
158 "inline checks (-1 means never use callbacks)."),
159 cl::Hidden, cl::init(7000));
160 static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
161 "asan-memory-access-callback-prefix",
162 cl::desc("Prefix for memory access callbacks"), cl::Hidden,
163 cl::init("__asan_"));
164 static cl::opt<bool> ClInstrumentAllocas("asan-instrument-allocas",
165 cl::desc("instrument dynamic allocas"), cl::Hidden, cl::init(false));
167 // These flags allow changing the shadow mapping.
168 // The shadow mapping looks like
169 // Shadow = (Mem >> scale) + (1 << offset_log)
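// For example, with the x86-64 Linux values defined above (scale = 3,
// offset = kSmallX86_64ShadowOffset = 0x7fff8000), the application address
// 0x602000 maps to shadow address (0x602000 >> 3) + 0x7fff8000 = 0x800b8400,
// and each shadow byte describes an 8-byte granule of application memory.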
170 static cl::opt<int> ClMappingScale("asan-mapping-scale",
171 cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0));
173 // Optimization flags. Not user visible, used mostly for testing
174 // and benchmarking the tool.
175 static cl::opt<bool> ClOpt("asan-opt",
176 cl::desc("Optimize instrumentation"), cl::Hidden, cl::init(true));
177 static cl::opt<bool> ClOptSameTemp("asan-opt-same-temp",
178 cl::desc("Instrument the same temp just once"), cl::Hidden,
180 static cl::opt<bool> ClOptGlobals("asan-opt-globals",
181 cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true));
183 static cl::opt<bool> ClCheckLifetime("asan-check-lifetime",
184 cl::desc("Use llvm.lifetime intrinsics to insert extra checks"),
185 cl::Hidden, cl::init(false));
187 static cl::opt<bool> ClDynamicAllocaStack(
188 "asan-stack-dynamic-alloca",
189 cl::desc("Use dynamic alloca to represent stack variables"), cl::Hidden,
193 static cl::opt<int> ClDebug("asan-debug", cl::desc("debug"), cl::Hidden,
195 static cl::opt<int> ClDebugStack("asan-debug-stack", cl::desc("debug stack"),
196 cl::Hidden, cl::init(0));
197 static cl::opt<std::string> ClDebugFunc("asan-debug-func",
198 cl::Hidden, cl::desc("Debug func"));
199 static cl::opt<int> ClDebugMin("asan-debug-min", cl::desc("Debug min inst"),
200 cl::Hidden, cl::init(-1));
201 static cl::opt<int> ClDebugMax("asan-debug-max", cl::desc("Debug max inst"),
202 cl::Hidden, cl::init(-1));
204 STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
205 STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
206 STATISTIC(NumInstrumentedDynamicAllocas,
207 "Number of instrumented dynamic allocas");
208 STATISTIC(NumOptimizedAccessesToGlobalArray,
209 "Number of optimized accesses to global arrays");
210 STATISTIC(NumOptimizedAccessesToGlobalVar,
211 "Number of optimized accesses to global vars");
214 /// Frontend-provided metadata for source location.
215 struct LocationMetadata {
220 LocationMetadata() : Filename(), LineNo(0), ColumnNo(0) {}
222 bool empty() const { return Filename.empty(); }
224 void parse(MDNode *MDN) {
225 assert(MDN->getNumOperands() == 3);
226 MDString *MDFilename = cast<MDString>(MDN->getOperand(0));
227 Filename = MDFilename->getString();
229 mdconst::extract<ConstantInt>(MDN->getOperand(1))->getLimitedValue();
231 mdconst::extract<ConstantInt>(MDN->getOperand(2))->getLimitedValue();
235 /// Frontend-provided metadata for global variables.
236 class GlobalsMetadata {
240 : SourceLoc(), Name(), IsDynInit(false),
241 IsBlacklisted(false) {}
242 LocationMetadata SourceLoc;
248 GlobalsMetadata() : inited_(false) {}
250 void init(Module& M) {
253 NamedMDNode *Globals = M.getNamedMetadata("llvm.asan.globals");
256 for (auto MDN : Globals->operands()) {
257 // Metadata node contains the global and the fields of "Entry".
258 assert(MDN->getNumOperands() == 5);
259 auto *GV = mdconst::extract_or_null<GlobalVariable>(MDN->getOperand(0));
260 // The optimizer may optimize away a global entirely.
263 // We can already have an entry for GV if it was merged with another global.
265 Entry &E = Entries[GV];
266 if (auto *Loc = cast_or_null<MDNode>(MDN->getOperand(1)))
267 E.SourceLoc.parse(Loc);
268 if (auto *Name = cast_or_null<MDString>(MDN->getOperand(2)))
269 E.Name = Name->getString();
270 ConstantInt *IsDynInit =
271 mdconst::extract<ConstantInt>(MDN->getOperand(3));
272 E.IsDynInit |= IsDynInit->isOne();
273 ConstantInt *IsBlacklisted =
274 mdconst::extract<ConstantInt>(MDN->getOperand(4));
275 E.IsBlacklisted |= IsBlacklisted->isOne();
279 /// Returns metadata entry for a given global.
280 Entry get(GlobalVariable *G) const {
281 auto Pos = Entries.find(G);
282 return (Pos != Entries.end()) ? Pos->second : Entry();
287 DenseMap<GlobalVariable*, Entry> Entries;
290 /// This struct defines the shadow mapping using the rule:
291 /// shadow = (mem >> Scale) ADD-or-OR Offset.
292 struct ShadowMapping {
298 static ShadowMapping getShadowMapping(Triple &TargetTriple, int LongSize) {
299 bool IsAndroid = TargetTriple.getEnvironment() == llvm::Triple::Android;
300 bool IsIOS = TargetTriple.isiOS();
301 bool IsFreeBSD = TargetTriple.isOSFreeBSD();
302 bool IsLinux = TargetTriple.isOSLinux();
303 bool IsPPC64 = TargetTriple.getArch() == llvm::Triple::ppc64 ||
304 TargetTriple.getArch() == llvm::Triple::ppc64le;
305 bool IsX86_64 = TargetTriple.getArch() == llvm::Triple::x86_64;
306 bool IsMIPS32 = TargetTriple.getArch() == llvm::Triple::mips ||
307 TargetTriple.getArch() == llvm::Triple::mipsel;
308 bool IsMIPS64 = TargetTriple.getArch() == llvm::Triple::mips64 ||
309 TargetTriple.getArch() == llvm::Triple::mips64el;
311 ShadowMapping Mapping;
313 if (LongSize == 32) {
317 Mapping.Offset = kMIPS32_ShadowOffset32;
319 Mapping.Offset = kFreeBSD_ShadowOffset32;
321 Mapping.Offset = kIOSShadowOffset32;
323 Mapping.Offset = kDefaultShadowOffset32;
324 } else { // LongSize == 64
326 Mapping.Offset = kPPC64_ShadowOffset64;
328 Mapping.Offset = kFreeBSD_ShadowOffset64;
329 else if (IsLinux && IsX86_64)
330 Mapping.Offset = kSmallX86_64ShadowOffset;
332 Mapping.Offset = kMIPS64_ShadowOffset64;
334 Mapping.Offset = kDefaultShadowOffset64;
337 Mapping.Scale = kDefaultShadowScale;
338 if (ClMappingScale) {
339 Mapping.Scale = ClMappingScale;
342 // OR-ing the shadow offset is more efficient (at least on x86) if the offset
343 // is a power of two, but on ppc64 we have to use add since the shadow
344 // offset is not necessarily 1/8-th of the address space.
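// For example, kDefaultShadowOffset64 (1ULL << 44) is a power of two, so the
// OR form can be used, while kSmallX86_64ShadowOffset (0x7fff8000) is not,
// so those targets fall back to ADD; ppc64 always uses ADD.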
345 Mapping.OrShadowOffset = !IsPPC64 && !(Mapping.Offset & (Mapping.Offset - 1));
350 static size_t RedzoneSizeForScale(int MappingScale) {
351 // Redzone used for stack and globals is at least 32 bytes.
352 // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively.
353 return std::max(32U, 1U << MappingScale);
356 /// AddressSanitizer: instrument the code in module to find memory bugs.
357 struct AddressSanitizer : public FunctionPass {
358 AddressSanitizer() : FunctionPass(ID) {
359 initializeAddressSanitizerPass(*PassRegistry::getPassRegistry());
361 const char *getPassName() const override {
362 return "AddressSanitizerFunctionPass";
364 void getAnalysisUsage(AnalysisUsage &AU) const override {
365 AU.addRequired<DominatorTreeWrapperPass>();
367 void instrumentMop(Instruction *I, bool UseCalls);
368 void instrumentPointerComparisonOrSubtraction(Instruction *I);
369 void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore,
370 Value *Addr, uint32_t TypeSize, bool IsWrite,
371 Value *SizeArgument, bool UseCalls);
372 Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
373 Value *ShadowValue, uint32_t TypeSize);
374 Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr,
375 bool IsWrite, size_t AccessSizeIndex,
376 Value *SizeArgument);
377 void instrumentMemIntrinsic(MemIntrinsic *MI);
378 Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
379 bool runOnFunction(Function &F) override;
380 bool maybeInsertAsanInitAtFunctionEntry(Function &F);
381 bool doInitialization(Module &M) override;
382 static char ID; // Pass identification, replacement for typeid
384 DominatorTree &getDominatorTree() const { return *DT; }
387 void initializeCallbacks(Module &M);
389 bool LooksLikeCodeInBug11395(Instruction *I);
390 bool GlobalIsLinkerInitialized(GlobalVariable *G);
393 const DataLayout *DL;
397 ShadowMapping Mapping;
399 Function *AsanCtorFunction;
400 Function *AsanInitFunction;
401 Function *AsanHandleNoReturnFunc;
402 Function *AsanPtrCmpFunction, *AsanPtrSubFunction;
403 // This array is indexed by AccessIsWrite and log2(AccessSize).
404 Function *AsanErrorCallback[2][kNumberOfAccessSizes];
405 Function *AsanMemoryAccessCallback[2][kNumberOfAccessSizes];
406 // This array is indexed by AccessIsWrite.
407 Function *AsanErrorCallbackSized[2],
408 *AsanMemoryAccessCallbackSized[2];
409 Function *AsanMemmove, *AsanMemcpy, *AsanMemset;
411 GlobalsMetadata GlobalsMD;
413 friend struct FunctionStackPoisoner;
416 class AddressSanitizerModule : public ModulePass {
418 AddressSanitizerModule() : ModulePass(ID) {}
419 bool runOnModule(Module &M) override;
420 static char ID; // Pass identification, replacement for typeid
421 const char *getPassName() const override {
422 return "AddressSanitizerModule";
426 void initializeCallbacks(Module &M);
428 bool InstrumentGlobals(IRBuilder<> &IRB, Module &M);
429 bool ShouldInstrumentGlobal(GlobalVariable *G);
430 void poisonOneInitializer(Function &GlobalInit, GlobalValue *ModuleName);
431 void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName);
432 size_t MinRedzoneSizeForGlobal() const {
433 return RedzoneSizeForScale(Mapping.Scale);
436 GlobalsMetadata GlobalsMD;
439 const DataLayout *DL;
441 ShadowMapping Mapping;
442 Function *AsanPoisonGlobals;
443 Function *AsanUnpoisonGlobals;
444 Function *AsanRegisterGlobals;
445 Function *AsanUnregisterGlobals;
448 // Stack poisoning does not play well with exception handling.
449 // When an exception is thrown, we essentially bypass the code
450 // that unpoisons the stack. This is why the run-time library has
451 // to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire
452 // stack in the interceptor. This however does not work inside the
453 // actual function which catches the exception. Most likely because the
454 // compiler hoists the load of the shadow value somewhere too high.
455 // This causes asan to report a non-existing bug on 453.povray.
456 // It sounds like an LLVM bug.
457 struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
459 AddressSanitizer &ASan;
464 ShadowMapping Mapping;
466 SmallVector<AllocaInst*, 16> AllocaVec;
467 SmallVector<Instruction*, 8> RetVec;
468 unsigned StackAlignment;
470 Function *AsanStackMallocFunc[kMaxAsanStackMallocSizeClass + 1],
471 *AsanStackFreeFunc[kMaxAsanStackMallocSizeClass + 1];
472 Function *AsanPoisonStackMemoryFunc, *AsanUnpoisonStackMemoryFunc;
474 // Stores the insertion point and arguments of a poisoning/unpoisoning call for an alloca.
475 struct AllocaPoisonCall {
476 IntrinsicInst *InsBefore;
481 SmallVector<AllocaPoisonCall, 8> AllocaPoisonCallVec;
483 // Stores left and right redzone shadow addresses for dynamic alloca
484 // and a pointer to the alloca instruction itself.
485 // LeftRzAddr is a shadow address for alloca left redzone.
486 // RightRzAddr is a shadow address for alloca right redzone.
487 struct DynamicAllocaCall {
492 explicit DynamicAllocaCall(AllocaInst *AI,
493 Value *LeftRzAddr = nullptr,
494 Value *RightRzAddr = nullptr)
495 : AI(AI), LeftRzAddr(LeftRzAddr), RightRzAddr(RightRzAddr), Poison(true)
498 SmallVector<DynamicAllocaCall, 1> DynamicAllocaVec;
500 // Maps Value to the AllocaInst from which the Value originates.
501 typedef DenseMap<Value*, AllocaInst*> AllocaForValueMapTy;
502 AllocaForValueMapTy AllocaForValue;
504 bool HasNonEmptyInlineAsm;
505 std::unique_ptr<CallInst> EmptyInlineAsm;
507 FunctionStackPoisoner(Function &F, AddressSanitizer &ASan)
508 : F(F), ASan(ASan), DIB(*F.getParent(), /*AllowUnresolved*/ false),
509 C(ASan.C), IntptrTy(ASan.IntptrTy),
510 IntptrPtrTy(PointerType::get(IntptrTy, 0)), Mapping(ASan.Mapping),
511 StackAlignment(1 << Mapping.Scale), HasNonEmptyInlineAsm(false),
512 EmptyInlineAsm(CallInst::Create(ASan.EmptyAsm)) {}
514 bool runOnFunction() {
515 if (!ClStack) return false;
516 // Collect alloca, ret, lifetime instructions etc.
517 for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
520 if (AllocaVec.empty() && DynamicAllocaVec.empty()) return false;
522 initializeCallbacks(*F.getParent());
532 // Finds all Alloca instructions and puts
533 // poisoned red zones around all of them.
534 // Then unpoisons everything before the function returns.
537 // ----------------------- Visitors.
538 /// \brief Collect all Ret instructions.
539 void visitReturnInst(ReturnInst &RI) {
540 RetVec.push_back(&RI);
543 // Unpoison dynamic allocas redzones.
544 void unpoisonDynamicAlloca(DynamicAllocaCall &AllocaCall) {
545 if (!AllocaCall.Poison)
547 for (auto Ret : RetVec) {
548 IRBuilder<> IRBRet(Ret);
549 PointerType *Int32PtrTy = PointerType::getUnqual(IRBRet.getInt32Ty());
550 Value *Zero = Constant::getNullValue(IRBRet.getInt32Ty());
551 Value *PartialRzAddr = IRBRet.CreateSub(AllocaCall.RightRzAddr,
552 ConstantInt::get(IntptrTy, 4));
553 IRBRet.CreateStore(Zero, IRBRet.CreateIntToPtr(AllocaCall.LeftRzAddr,
555 IRBRet.CreateStore(Zero, IRBRet.CreateIntToPtr(PartialRzAddr,
557 IRBRet.CreateStore(Zero, IRBRet.CreateIntToPtr(AllocaCall.RightRzAddr,
562 // Right shift for BigEndian and left shift for LittleEndian.
563 Value *shiftAllocaMagic(Value *Val, IRBuilder<> &IRB, Value *Shift) {
564 return ASan.DL->isLittleEndian() ? IRB.CreateShl(Val, Shift)
565 : IRB.CreateLShr(Val, Shift);
568 // Compute PartialRzMagic for dynamic alloca call. Since we don't know the
569 // size of the requested memory until runtime, we have to compute it dynamically.
570 // If PartialSize is 0, PartialRzMagic would contain kAsanAllocaRightMagic,
571 // otherwise it would contain the value that we will use to poison the
572 // partial redzone for the alloca call.
573 Value *computePartialRzMagic(Value *PartialSize, IRBuilder<> &IRB);
575 // Deploy and poison redzones around dynamic alloca call. To do this, we
576 // should replace this call with another one with changed parameters and
577 // replace all its uses with new address, so
578 // addr = alloca type, old_size, align
580 // new_size = (old_size + additional_size) * sizeof(type)
581 // tmp = alloca i8, new_size, max(align, 32)
582 // addr = tmp + 32 (first 32 bytes are for the left redzone).
583 // additional_size is added so that the new allocation contains not only the
584 // requested memory, but also the left, partial and right redzones.
585 // After that, we should poison redzones:
586 // (1) Left redzone with kAsanAllocaLeftMagic.
587 // (2) Partial redzone with the value computed at runtime by the
588 // computePartialRzMagic function.
589 // (3) Right redzone with kAsanAllocaRightMagic.
590 void handleDynamicAllocaCall(DynamicAllocaCall &AllocaCall);
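// As a rough sketch, for "addr = alloca type, old_size, align" the rewritten
// allocation produced by handleDynamicAllocaCall looks like:
//   tmp  = alloca i8, new_size, max(align, kAllocaRzSize)
//   addr = tmp + kAllocaRzSize
//   [ left rz (32 bytes) | old_size * sizeof(type) user bytes | partial rz | right rz (32 bytes) ]
// with the shadow of the three redzones poisoned as described in (1)-(3) above.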
592 /// \brief Collect Alloca instructions we want (and can) handle.
593 void visitAllocaInst(AllocaInst &AI) {
594 if (!isInterestingAlloca(AI)) return;
596 StackAlignment = std::max(StackAlignment, AI.getAlignment());
597 if (isDynamicAlloca(AI))
598 DynamicAllocaVec.push_back(DynamicAllocaCall(&AI));
600 AllocaVec.push_back(&AI);
603 /// \brief Collect lifetime intrinsic calls to check for use-after-scope errors.
605 void visitIntrinsicInst(IntrinsicInst &II) {
606 if (!ClCheckLifetime) return;
607 Intrinsic::ID ID = II.getIntrinsicID();
608 if (ID != Intrinsic::lifetime_start &&
609 ID != Intrinsic::lifetime_end)
611 // Found lifetime intrinsic, add ASan instrumentation if necessary.
612 ConstantInt *Size = dyn_cast<ConstantInt>(II.getArgOperand(0));
613 // If size argument is undefined, don't do anything.
614 if (Size->isMinusOne()) return;
615 // Check that size doesn't saturate uint64_t and can
616 // be stored in IntptrTy.
617 const uint64_t SizeValue = Size->getValue().getLimitedValue();
618 if (SizeValue == ~0ULL ||
619 !ConstantInt::isValueValidForType(IntptrTy, SizeValue))
621 // Find alloca instruction that corresponds to llvm.lifetime argument.
622 AllocaInst *AI = findAllocaForValue(II.getArgOperand(1));
624 bool DoPoison = (ID == Intrinsic::lifetime_end);
625 AllocaPoisonCall APC = {&II, AI, SizeValue, DoPoison};
626 AllocaPoisonCallVec.push_back(APC);
629 void visitCallInst(CallInst &CI) {
630 HasNonEmptyInlineAsm |=
631 CI.isInlineAsm() && !CI.isIdenticalTo(EmptyInlineAsm.get());
634 // ---------------------- Helpers.
635 void initializeCallbacks(Module &M);
637 bool doesDominateAllExits(const Instruction *I) const {
638 for (auto Ret : RetVec) {
639 if (!ASan.getDominatorTree().dominates(I, Ret))
645 bool isDynamicAlloca(AllocaInst &AI) const {
646 return AI.isArrayAllocation() || !AI.isStaticAlloca();
649 // Check if we want (and can) handle this alloca.
650 bool isInterestingAlloca(AllocaInst &AI) const {
651 return (AI.getAllocatedType()->isSized() &&
652 // alloca() may be called with 0 size, ignore it.
653 getAllocaSizeInBytes(&AI) > 0);
656 uint64_t getAllocaSizeInBytes(AllocaInst *AI) const {
657 Type *Ty = AI->getAllocatedType();
658 uint64_t SizeInBytes = ASan.DL->getTypeAllocSize(Ty);
661 /// Finds alloca where the value comes from.
662 AllocaInst *findAllocaForValue(Value *V);
663 void poisonRedZones(ArrayRef<uint8_t> ShadowBytes, IRBuilder<> &IRB,
664 Value *ShadowBase, bool DoPoison);
665 void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> &IRB, bool DoPoison);
667 void SetShadowToStackAfterReturnInlined(IRBuilder<> &IRB, Value *ShadowBase,
669 Value *createAllocaForLayout(IRBuilder<> &IRB, const ASanStackFrameLayout &L,
671 PHINode *createPHI(IRBuilder<> &IRB, Value *Cond, Value *ValueIfTrue,
672 Instruction *ThenTerm, Value *ValueIfFalse);
677 char AddressSanitizer::ID = 0;
678 INITIALIZE_PASS_BEGIN(AddressSanitizer, "asan",
679 "AddressSanitizer: detects use-after-free and out-of-bounds bugs.",
681 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
682 INITIALIZE_PASS_END(AddressSanitizer, "asan",
683 "AddressSanitizer: detects use-after-free and out-of-bounds bugs.",
685 FunctionPass *llvm::createAddressSanitizerFunctionPass() {
686 return new AddressSanitizer();
689 char AddressSanitizerModule::ID = 0;
690 INITIALIZE_PASS(AddressSanitizerModule, "asan-module",
691 "AddressSanitizer: detects use-after-free and out-of-bounds bugs."
692 "ModulePass", false, false)
693 ModulePass *llvm::createAddressSanitizerModulePass() {
694 return new AddressSanitizerModule();
697 static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
698 size_t Res = countTrailingZeros(TypeSize / 8);
699 assert(Res < kNumberOfAccessSizes);
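// E.g. a 32-bit access gives 32 / 8 == 4 and countTrailingZeros(4) == 2, so it
// selects the size-index-2 callbacks such as __asan_report_load4.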
703 // \brief Create a constant for Str so that we can pass it to the run-time lib.
704 static GlobalVariable *createPrivateGlobalForString(
705 Module &M, StringRef Str, bool AllowMerging) {
706 Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
707 // We use private linkage for module-local strings. If they can be merged
708 // with another one, we set the unnamed_addr attribute.
710 new GlobalVariable(M, StrConst->getType(), true,
711 GlobalValue::PrivateLinkage, StrConst, kAsanGenPrefix);
713 GV->setUnnamedAddr(true);
714 GV->setAlignment(1); // Strings may not be merged w/o setting align 1.
718 /// \brief Create a global describing a source location.
719 static GlobalVariable *createPrivateGlobalForSourceLoc(Module &M,
720 LocationMetadata MD) {
721 Constant *LocData[] = {
722 createPrivateGlobalForString(M, MD.Filename, true),
723 ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.LineNo),
724 ConstantInt::get(Type::getInt32Ty(M.getContext()), MD.ColumnNo),
726 auto LocStruct = ConstantStruct::getAnon(LocData);
727 auto GV = new GlobalVariable(M, LocStruct->getType(), true,
728 GlobalValue::PrivateLinkage, LocStruct,
730 GV->setUnnamedAddr(true);
734 static bool GlobalWasGeneratedByAsan(GlobalVariable *G) {
735 return G->getName().find(kAsanGenPrefix) == 0 ||
736 G->getName().find(kSanCovGenPrefix) == 0;
739 Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
741 Shadow = IRB.CreateLShr(Shadow, Mapping.Scale);
742 if (Mapping.Offset == 0)
744 // (Shadow >> scale) | offset
745 if (Mapping.OrShadowOffset)
746 return IRB.CreateOr(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset));
748 return IRB.CreateAdd(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset));
751 // Instrument memset/memmove/memcpy
752 void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
754 if (isa<MemTransferInst>(MI)) {
756 isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
757 IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
758 IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
759 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false));
760 } else if (isa<MemSetInst>(MI)) {
763 IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
764 IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
765 IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false));
767 MI->eraseFromParent();
770 // If I is an interesting memory access, return the PointerOperand
771 // and set IsWrite/Alignment. Otherwise return nullptr.
772 static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
773 unsigned *Alignment) {
774 // Skip memory accesses inserted by another instrumentation.
775 if (I->getMetadata("nosanitize"))
777 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
778 if (!ClInstrumentReads) return nullptr;
780 *Alignment = LI->getAlignment();
781 return LI->getPointerOperand();
783 if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
784 if (!ClInstrumentWrites) return nullptr;
786 *Alignment = SI->getAlignment();
787 return SI->getPointerOperand();
789 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
790 if (!ClInstrumentAtomics) return nullptr;
793 return RMW->getPointerOperand();
795 if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
796 if (!ClInstrumentAtomics) return nullptr;
799 return XCHG->getPointerOperand();
804 static bool isPointerOperand(Value *V) {
805 return V->getType()->isPointerTy() || isa<PtrToIntInst>(V);
808 // This is a rough heuristic; it may cause both false positives and
809 // false negatives. The proper implementation requires cooperation with the frontend.
811 static bool isInterestingPointerComparisonOrSubtraction(Instruction *I) {
812 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(I)) {
813 if (!Cmp->isRelational())
815 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
816 if (BO->getOpcode() != Instruction::Sub)
821 if (!isPointerOperand(I->getOperand(0)) ||
822 !isPointerOperand(I->getOperand(1)))
827 bool AddressSanitizer::GlobalIsLinkerInitialized(GlobalVariable *G) {
828 // If a global variable does not have dynamic initialization we don't
829 // have to instrument it. However, if a global does not have an initializer
830 // at all, we assume it has a dynamic initializer (in another TU).
831 return G->hasInitializer() && !GlobalsMD.get(G).IsDynInit;
835 AddressSanitizer::instrumentPointerComparisonOrSubtraction(Instruction *I) {
837 Function *F = isa<ICmpInst>(I) ? AsanPtrCmpFunction : AsanPtrSubFunction;
838 Value *Param[2] = {I->getOperand(0), I->getOperand(1)};
839 for (int i = 0; i < 2; i++) {
840 if (Param[i]->getType()->isPointerTy())
841 Param[i] = IRB.CreatePointerCast(Param[i], IntptrTy);
843 IRB.CreateCall2(F, Param[0], Param[1]);
846 void AddressSanitizer::instrumentMop(Instruction *I, bool UseCalls) {
847 bool IsWrite = false;
848 unsigned Alignment = 0;
849 Value *Addr = isInterestingMemoryAccess(I, &IsWrite, &Alignment);
851 if (ClOpt && ClOptGlobals) {
852 if (GlobalVariable *G = dyn_cast<GlobalVariable>(Addr)) {
853 // If initialization order checking is disabled, a simple access to a
854 // dynamically initialized global is always valid.
855 if (!ClInitializers || GlobalIsLinkerInitialized(G)) {
856 NumOptimizedAccessesToGlobalVar++;
860 ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr);
861 if (CE && CE->isGEPWithNoNotionalOverIndexing()) {
862 if (GlobalVariable *G = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
863 if (CE->getOperand(1)->isNullValue() && GlobalIsLinkerInitialized(G)) {
864 NumOptimizedAccessesToGlobalArray++;
871 Type *OrigPtrTy = Addr->getType();
872 Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
874 assert(OrigTy->isSized());
875 uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
877 assert((TypeSize % 8) == 0);
880 NumInstrumentedWrites++;
882 NumInstrumentedReads++;
884 unsigned Granularity = 1 << Mapping.Scale;
885 // Instrument a 1-, 2-, 4-, 8-, or 16-byte access with one check
886 // if the data is properly aligned.
887 if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
889 (Alignment >= Granularity || Alignment == 0 || Alignment >= TypeSize / 8))
890 return instrumentAddress(I, I, Addr, TypeSize, IsWrite, nullptr, UseCalls);
891 // Instrument unusual size or unusual alignment.
892 // We cannot do this with a single check, so we do a 1-byte check for the first
893 // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
894 // to report the actual access size.
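// For example, an 80-bit (10-byte) access checks the shadow of Addr and of
// Addr + 9 with 1-byte checks and passes Size == 10 to the report callback.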
896 Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8);
897 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
899 IRB.CreateCall2(AsanMemoryAccessCallbackSized[IsWrite], AddrLong, Size);
901 Value *LastByte = IRB.CreateIntToPtr(
902 IRB.CreateAdd(AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)),
904 instrumentAddress(I, I, Addr, 8, IsWrite, Size, false);
905 instrumentAddress(I, I, LastByte, 8, IsWrite, Size, false);
909 // Validate the result of Module::getOrInsertFunction called for an interface
910 // function of AddressSanitizer. If the instrumented module defines a function
911 // with the same name, their prototypes must match, otherwise
912 // getOrInsertFunction returns a bitcast.
913 static Function *checkInterfaceFunction(Constant *FuncOrBitcast) {
914 if (isa<Function>(FuncOrBitcast)) return cast<Function>(FuncOrBitcast);
915 FuncOrBitcast->dump();
916 report_fatal_error("trying to redefine an AddressSanitizer "
917 "interface function");
920 Instruction *AddressSanitizer::generateCrashCode(
921 Instruction *InsertBefore, Value *Addr,
922 bool IsWrite, size_t AccessSizeIndex, Value *SizeArgument) {
923 IRBuilder<> IRB(InsertBefore);
924 CallInst *Call = SizeArgument
925 ? IRB.CreateCall2(AsanErrorCallbackSized[IsWrite], Addr, SizeArgument)
926 : IRB.CreateCall(AsanErrorCallback[IsWrite][AccessSizeIndex], Addr);
928 // We don't do Call->setDoesNotReturn() because the BB already has
929 // UnreachableInst at the end.
930 // This EmptyAsm is required to avoid callback merge.
931 IRB.CreateCall(EmptyAsm);
935 Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong,
938 size_t Granularity = 1 << Mapping.Scale;
939 // Addr & (Granularity - 1)
940 Value *LastAccessedByte = IRB.CreateAnd(
941 AddrLong, ConstantInt::get(IntptrTy, Granularity - 1));
942 // (Addr & (Granularity - 1)) + size - 1
943 if (TypeSize / 8 > 1)
944 LastAccessedByte = IRB.CreateAdd(
945 LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1));
946 // (uint8_t) ((Addr & (Granularity-1)) + size - 1)
947 LastAccessedByte = IRB.CreateIntCast(
948 LastAccessedByte, ShadowValue->getType(), false);
949 // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue
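// For example, with Granularity == 8, a 4-byte access at (Addr & 7) == 5
// against a shadow value of 6 (only the first 6 bytes of the granule are
// addressable) yields 5 + 4 - 1 == 8 >= 6 and is reported, while the same
// access at (Addr & 7) == 0 yields 3 < 6 and passes.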
950 return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue);
953 void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
954 Instruction *InsertBefore, Value *Addr,
955 uint32_t TypeSize, bool IsWrite,
956 Value *SizeArgument, bool UseCalls) {
957 IRBuilder<> IRB(InsertBefore);
958 Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
959 size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
962 IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][AccessSizeIndex],
967 Type *ShadowTy = IntegerType::get(
968 *C, std::max(8U, TypeSize >> Mapping.Scale));
969 Type *ShadowPtrTy = PointerType::get(ShadowTy, 0);
970 Value *ShadowPtr = memToShadow(AddrLong, IRB);
971 Value *CmpVal = Constant::getNullValue(ShadowTy);
972 Value *ShadowValue = IRB.CreateLoad(
973 IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy));
975 Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
976 size_t Granularity = 1 << Mapping.Scale;
977 TerminatorInst *CrashTerm = nullptr;
979 if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
980 // We use branch weights for the slow path check, to indicate that the slow
981 // path is rarely taken. This seems to be the case for SPEC benchmarks.
982 TerminatorInst *CheckTerm =
983 SplitBlockAndInsertIfThen(Cmp, InsertBefore, false,
984 MDBuilder(*C).createBranchWeights(1, 100000));
985 assert(dyn_cast<BranchInst>(CheckTerm)->isUnconditional());
986 BasicBlock *NextBB = CheckTerm->getSuccessor(0);
987 IRB.SetInsertPoint(CheckTerm);
988 Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize);
989 BasicBlock *CrashBlock =
990 BasicBlock::Create(*C, "", NextBB->getParent(), NextBB);
991 CrashTerm = new UnreachableInst(*C, CrashBlock);
992 BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2);
993 ReplaceInstWithInst(CheckTerm, NewTerm);
995 CrashTerm = SplitBlockAndInsertIfThen(Cmp, InsertBefore, true);
998 Instruction *Crash = generateCrashCode(
999 CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument);
1000 Crash->setDebugLoc(OrigIns->getDebugLoc());
1003 void AddressSanitizerModule::poisonOneInitializer(Function &GlobalInit,
1004 GlobalValue *ModuleName) {
1005 // Set up the arguments to our poison/unpoison functions.
1006 IRBuilder<> IRB(GlobalInit.begin()->getFirstInsertionPt());
1008 // Add a call to poison all external globals before the given function starts.
1009 Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy);
1010 IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr);
1012 // Add calls to unpoison all globals before each return instruction.
1013 for (auto &BB : GlobalInit.getBasicBlockList())
1014 if (ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
1015 CallInst::Create(AsanUnpoisonGlobals, "", RI);
1018 void AddressSanitizerModule::createInitializerPoisonCalls(
1019 Module &M, GlobalValue *ModuleName) {
1020 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
1022 ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
1023 for (Use &OP : CA->operands()) {
1024 if (isa<ConstantAggregateZero>(OP))
1026 ConstantStruct *CS = cast<ConstantStruct>(OP);
1028 // Must have a function or null ptr.
1029 if (Function* F = dyn_cast<Function>(CS->getOperand(1))) {
1030 if (F->getName() == kAsanModuleCtorName) continue;
1031 ConstantInt *Priority = dyn_cast<ConstantInt>(CS->getOperand(0));
1032 // Don't instrument CTORs that will run before asan.module_ctor.
1033 if (Priority->getLimitedValue() <= kAsanCtorAndDtorPriority) continue;
1034 poisonOneInitializer(*F, ModuleName);
1039 bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) {
1040 Type *Ty = cast<PointerType>(G->getType())->getElementType();
1041 DEBUG(dbgs() << "GLOBAL: " << *G << "\n");
1043 if (GlobalsMD.get(G).IsBlacklisted) return false;
1044 if (!Ty->isSized()) return false;
1045 if (!G->hasInitializer()) return false;
1046 if (GlobalWasGeneratedByAsan(G)) return false; // Our own global.
1047 // Touch only those globals that will not be defined in other modules.
1048 // Don't handle ODR linkage types and COMDATs since other modules may be built w/o ASan.
1050 if (G->getLinkage() != GlobalVariable::ExternalLinkage &&
1051 G->getLinkage() != GlobalVariable::PrivateLinkage &&
1052 G->getLinkage() != GlobalVariable::InternalLinkage)
1056 // Two problems with thread-locals:
1057 // - The address of the main thread's copy can't be computed at link-time.
1058 // - Need to poison all copies, not just the main thread's one.
1059 if (G->isThreadLocal())
1061 // For now, just ignore this Global if the alignment is large.
1062 if (G->getAlignment() > MinRedzoneSizeForGlobal()) return false;
1064 if (G->hasSection()) {
1065 StringRef Section(G->getSection());
1067 if (TargetTriple.isOSBinFormatMachO()) {
1068 StringRef ParsedSegment, ParsedSection;
1069 unsigned TAA = 0, StubSize = 0;
1071 std::string ErrorCode =
1072 MCSectionMachO::ParseSectionSpecifier(Section, ParsedSegment,
1073 ParsedSection, TAA, TAAParsed,
1075 if (!ErrorCode.empty()) {
1076 report_fatal_error("Invalid section specifier '" + ParsedSection +
1077 "': " + ErrorCode + ".");
1080 // Ignore the globals from the __OBJC section. The ObjC runtime assumes
1081 // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to them.
1083 if (ParsedSegment == "__OBJC" ||
1084 (ParsedSegment == "__DATA" && ParsedSection.startswith("__objc_"))) {
1085 DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G << "\n");
1088 // See http://code.google.com/p/address-sanitizer/issues/detail?id=32
1089 // Constant CFString instances are compiled in the following way:
1090 // -- the string buffer is emitted into
1091 // __TEXT,__cstring,cstring_literals
1092 // -- the constant NSConstantString structure referencing that buffer
1093 // is placed into __DATA,__cfstring
1094 // Therefore there's no point in placing redzones into __DATA,__cfstring.
1095 // Moreover, it causes the linker to crash on OS X 10.7
1096 if (ParsedSegment == "__DATA" && ParsedSection == "__cfstring") {
1097 DEBUG(dbgs() << "Ignoring CFString: " << *G << "\n");
1100 // The linker merges the contents of cstring_literals and removes the trailing zeroes.
1102 if (ParsedSegment == "__TEXT" && (TAA & MachO::S_CSTRING_LITERALS)) {
1103 DEBUG(dbgs() << "Ignoring a cstring literal: " << *G << "\n");
1108 // Callbacks put into the CRT initializer/terminator sections
1109 // should not be instrumented.
1110 // See https://code.google.com/p/address-sanitizer/issues/detail?id=305
1111 // and http://msdn.microsoft.com/en-US/en-en/library/bb918180(v=vs.120).aspx
1112 if (Section.startswith(".CRT")) {
1113 DEBUG(dbgs() << "Ignoring a global initializer callback: " << *G << "\n");
1117 // Globals from llvm.metadata aren't emitted, do not instrument them.
1118 if (Section == "llvm.metadata") return false;
1124 void AddressSanitizerModule::initializeCallbacks(Module &M) {
1125 IRBuilder<> IRB(*C);
1126 // Declare our poisoning and unpoisoning functions.
1127 AsanPoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction(
1128 kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy, nullptr));
1129 AsanPoisonGlobals->setLinkage(Function::ExternalLinkage);
1130 AsanUnpoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction(
1131 kAsanUnpoisonGlobalsName, IRB.getVoidTy(), nullptr));
1132 AsanUnpoisonGlobals->setLinkage(Function::ExternalLinkage);
1133 // Declare functions that register/unregister globals.
1134 AsanRegisterGlobals = checkInterfaceFunction(M.getOrInsertFunction(
1135 kAsanRegisterGlobalsName, IRB.getVoidTy(),
1136 IntptrTy, IntptrTy, nullptr));
1137 AsanRegisterGlobals->setLinkage(Function::ExternalLinkage);
1138 AsanUnregisterGlobals = checkInterfaceFunction(M.getOrInsertFunction(
1139 kAsanUnregisterGlobalsName,
1140 IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
1141 AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage);
1144 // This function replaces all global variables with new variables that have
1145 // trailing redzones. It also creates a function that poisons
1146 // redzones and inserts this function into llvm.global_ctors.
1147 bool AddressSanitizerModule::InstrumentGlobals(IRBuilder<> &IRB, Module &M) {
1150 SmallVector<GlobalVariable *, 16> GlobalsToChange;
1152 for (auto &G : M.globals()) {
1153 if (ShouldInstrumentGlobal(&G))
1154 GlobalsToChange.push_back(&G);
1157 size_t n = GlobalsToChange.size();
1158 if (n == 0) return false;
1160 // A global is described by a structure
//   size_t beg;
//   size_t size;
1163 //   size_t size_with_redzone;
1164 // const char *name;
1165 // const char *module_name;
1166 // size_t has_dynamic_init;
1167 // void *source_location;
1168 // We initialize an array of such structures and pass it to a run-time call.
1169 StructType *GlobalStructTy =
1170 StructType::get(IntptrTy, IntptrTy, IntptrTy, IntptrTy, IntptrTy,
1171 IntptrTy, IntptrTy, nullptr);
1172 SmallVector<Constant *, 16> Initializers(n);
1174 bool HasDynamicallyInitializedGlobals = false;
1176 // We shouldn't merge identical module names, as this string serves as a unique
1177 // module ID at runtime.
1178 GlobalVariable *ModuleName = createPrivateGlobalForString(
1179 M, M.getModuleIdentifier(), /*AllowMerging*/false);
1181 for (size_t i = 0; i < n; i++) {
1182 static const uint64_t kMaxGlobalRedzone = 1 << 18;
1183 GlobalVariable *G = GlobalsToChange[i];
1185 auto MD = GlobalsMD.get(G);
1186 // Create string holding the global name (use global name from metadata
1187 // if it's available, otherwise just write the name of global variable).
1188 GlobalVariable *Name = createPrivateGlobalForString(
1189 M, MD.Name.empty() ? G->getName() : MD.Name,
1190 /*AllowMerging*/ true);
1192 PointerType *PtrTy = cast<PointerType>(G->getType());
1193 Type *Ty = PtrTy->getElementType();
1194 uint64_t SizeInBytes = DL->getTypeAllocSize(Ty);
1195 uint64_t MinRZ = MinRedzoneSizeForGlobal();
1196 // MinRZ <= RZ <= kMaxGlobalRedzone
1197 // and we try to make RZ ~ 1/4 of SizeInBytes.
1198 uint64_t RZ = std::max(MinRZ,
1199 std::min(kMaxGlobalRedzone,
1200 (SizeInBytes / MinRZ / 4) * MinRZ));
1201 uint64_t RightRedzoneSize = RZ;
1202 // Round up to MinRZ
1203 if (SizeInBytes % MinRZ)
1204 RightRedzoneSize += MinRZ - (SizeInBytes % MinRZ);
1205 assert(((RightRedzoneSize + SizeInBytes) % MinRZ) == 0);
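// For example, with SizeInBytes == 1000 and MinRZ == 32: RZ == (1000 / 32 / 4) * 32
// == 224, and rounding 1000 up to a multiple of 32 adds another 24 bytes, so
// RightRedzoneSize == 248 and the padded global occupies 1248 bytes.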
1206 Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize);
1208 StructType *NewTy = StructType::get(Ty, RightRedZoneTy, nullptr);
1209 Constant *NewInitializer = ConstantStruct::get(
1210 NewTy, G->getInitializer(),
1211 Constant::getNullValue(RightRedZoneTy), nullptr);
1213 // Create a new global variable with enough space for a redzone.
1214 GlobalValue::LinkageTypes Linkage = G->getLinkage();
1215 if (G->isConstant() && Linkage == GlobalValue::PrivateLinkage)
1216 Linkage = GlobalValue::InternalLinkage;
1217 GlobalVariable *NewGlobal = new GlobalVariable(
1218 M, NewTy, G->isConstant(), Linkage,
1219 NewInitializer, "", G, G->getThreadLocalMode());
1220 NewGlobal->copyAttributesFrom(G);
1221 NewGlobal->setAlignment(MinRZ);
1224 Indices2[0] = IRB.getInt32(0);
1225 Indices2[1] = IRB.getInt32(0);
1227 G->replaceAllUsesWith(
1228 ConstantExpr::getGetElementPtr(NewGlobal, Indices2, true));
1229 NewGlobal->takeName(G);
1230 G->eraseFromParent();
1232 Constant *SourceLoc;
1233 if (!MD.SourceLoc.empty()) {
1234 auto SourceLocGlobal = createPrivateGlobalForSourceLoc(M, MD.SourceLoc);
1235 SourceLoc = ConstantExpr::getPointerCast(SourceLocGlobal, IntptrTy);
1237 SourceLoc = ConstantInt::get(IntptrTy, 0);
1240 Initializers[i] = ConstantStruct::get(
1241 GlobalStructTy, ConstantExpr::getPointerCast(NewGlobal, IntptrTy),
1242 ConstantInt::get(IntptrTy, SizeInBytes),
1243 ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize),
1244 ConstantExpr::getPointerCast(Name, IntptrTy),
1245 ConstantExpr::getPointerCast(ModuleName, IntptrTy),
1246 ConstantInt::get(IntptrTy, MD.IsDynInit), SourceLoc, nullptr);
1248 if (ClInitializers && MD.IsDynInit)
1249 HasDynamicallyInitializedGlobals = true;
1251 DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n");
1254 ArrayType *ArrayOfGlobalStructTy = ArrayType::get(GlobalStructTy, n);
1255 GlobalVariable *AllGlobals = new GlobalVariable(
1256 M, ArrayOfGlobalStructTy, false, GlobalVariable::InternalLinkage,
1257 ConstantArray::get(ArrayOfGlobalStructTy, Initializers), "");
1259 // Create calls for poisoning before initializers run and unpoisoning after.
1260 if (HasDynamicallyInitializedGlobals)
1261 createInitializerPoisonCalls(M, ModuleName);
1262 IRB.CreateCall2(AsanRegisterGlobals,
1263 IRB.CreatePointerCast(AllGlobals, IntptrTy),
1264 ConstantInt::get(IntptrTy, n));
1266 // We also need to unregister globals at the end, e.g. when a shared library gets unloaded.
1268 Function *AsanDtorFunction = Function::Create(
1269 FunctionType::get(Type::getVoidTy(*C), false),
1270 GlobalValue::InternalLinkage, kAsanModuleDtorName, &M);
1271 BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction);
1272 IRBuilder<> IRB_Dtor(ReturnInst::Create(*C, AsanDtorBB));
1273 IRB_Dtor.CreateCall2(AsanUnregisterGlobals,
1274 IRB.CreatePointerCast(AllGlobals, IntptrTy),
1275 ConstantInt::get(IntptrTy, n));
1276 appendToGlobalDtors(M, AsanDtorFunction, kAsanCtorAndDtorPriority);
1282 bool AddressSanitizerModule::runOnModule(Module &M) {
1283 DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
1286 DL = &DLP->getDataLayout();
1287 C = &(M.getContext());
1288 int LongSize = DL->getPointerSizeInBits();
1289 IntptrTy = Type::getIntNTy(*C, LongSize);
1290 TargetTriple = Triple(M.getTargetTriple());
1291 Mapping = getShadowMapping(TargetTriple, LongSize);
1292 initializeCallbacks(M);
1294 bool Changed = false;
1296 Function *CtorFunc = M.getFunction(kAsanModuleCtorName);
1298 IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator());
1301 Changed |= InstrumentGlobals(IRB, M);
1306 void AddressSanitizer::initializeCallbacks(Module &M) {
1307 IRBuilder<> IRB(*C);
1308 // Create __asan_report* callbacks.
1309 for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
1310 for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
1311 AccessSizeIndex++) {
1312 // IsWrite and TypeSize are encoded in the function name.
1313 std::string Suffix =
1314 (AccessIsWrite ? "store" : "load") + itostr(1 << AccessSizeIndex);
1315 AsanErrorCallback[AccessIsWrite][AccessSizeIndex] =
1316 checkInterfaceFunction(
1317 M.getOrInsertFunction(kAsanReportErrorTemplate + Suffix,
1318 IRB.getVoidTy(), IntptrTy, nullptr));
1319 AsanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
1320 checkInterfaceFunction(
1321 M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + Suffix,
1322 IRB.getVoidTy(), IntptrTy, nullptr));
1325 AsanErrorCallbackSized[0] = checkInterfaceFunction(M.getOrInsertFunction(
1326 kAsanReportLoadN, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
1327 AsanErrorCallbackSized[1] = checkInterfaceFunction(M.getOrInsertFunction(
1328 kAsanReportStoreN, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
1330 AsanMemoryAccessCallbackSized[0] = checkInterfaceFunction(
1331 M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "loadN",
1332 IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
1333 AsanMemoryAccessCallbackSized[1] = checkInterfaceFunction(
1334 M.getOrInsertFunction(ClMemoryAccessCallbackPrefix + "storeN",
1335 IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
1337 AsanMemmove = checkInterfaceFunction(M.getOrInsertFunction(
1338 ClMemoryAccessCallbackPrefix + "memmove", IRB.getInt8PtrTy(),
1339 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr));
1340 AsanMemcpy = checkInterfaceFunction(M.getOrInsertFunction(
1341 ClMemoryAccessCallbackPrefix + "memcpy", IRB.getInt8PtrTy(),
1342 IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IntptrTy, nullptr));
1343 AsanMemset = checkInterfaceFunction(M.getOrInsertFunction(
1344 ClMemoryAccessCallbackPrefix + "memset", IRB.getInt8PtrTy(),
1345 IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy, nullptr));
1347 AsanHandleNoReturnFunc = checkInterfaceFunction(
1348 M.getOrInsertFunction(kAsanHandleNoReturnName, IRB.getVoidTy(), nullptr));
1350 AsanPtrCmpFunction = checkInterfaceFunction(M.getOrInsertFunction(
1351 kAsanPtrCmp, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
1352 AsanPtrSubFunction = checkInterfaceFunction(M.getOrInsertFunction(
1353 kAsanPtrSub, IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
1354 // We insert an empty inline asm after __asan_report* to avoid callback merge.
1355 EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
1356 StringRef(""), StringRef(""),
1357 /*hasSideEffects=*/true);
1361 bool AddressSanitizer::doInitialization(Module &M) {
1362 // Initialize the private fields. No one has accessed them before.
1363 DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
1365 report_fatal_error("data layout missing");
1366 DL = &DLP->getDataLayout();
1370 C = &(M.getContext());
1371 LongSize = DL->getPointerSizeInBits();
1372 IntptrTy = Type::getIntNTy(*C, LongSize);
1373 TargetTriple = Triple(M.getTargetTriple());
1375 AsanCtorFunction = Function::Create(
1376 FunctionType::get(Type::getVoidTy(*C), false),
1377 GlobalValue::InternalLinkage, kAsanModuleCtorName, &M);
1378 BasicBlock *AsanCtorBB = BasicBlock::Create(*C, "", AsanCtorFunction);
1379 // call __asan_init in the module ctor.
1380 IRBuilder<> IRB(ReturnInst::Create(*C, AsanCtorBB));
1381 AsanInitFunction = checkInterfaceFunction(
1382 M.getOrInsertFunction(kAsanInitName, IRB.getVoidTy(), nullptr));
1383 AsanInitFunction->setLinkage(Function::ExternalLinkage);
1384 IRB.CreateCall(AsanInitFunction);
1386 Mapping = getShadowMapping(TargetTriple, LongSize);
1388 appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndDtorPriority);
1392 bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
1393 // For each NSObject descendant having a +load method, this method is invoked
1394 // by the ObjC runtime before any of the static constructors is called.
1395 // Therefore we need to instrument such methods with a call to __asan_init
1396 // at the beginning in order to initialize our runtime before any access to
1397 // the shadow memory.
1398 // We cannot just ignore these methods, because they may call other
1399 // instrumented functions.
1400 if (F.getName().find(" load]") != std::string::npos) {
1401 IRBuilder<> IRB(F.begin()->begin());
1402 IRB.CreateCall(AsanInitFunction);
1408 bool AddressSanitizer::runOnFunction(Function &F) {
1409 if (&F == AsanCtorFunction) return false;
1410 if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false;
1411 DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n");
1412 initializeCallbacks(*F.getParent());
1414 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1416 // If needed, insert __asan_init before checking for SanitizeAddress attr.
1417 maybeInsertAsanInitAtFunctionEntry(F);
1419 if (!F.hasFnAttribute(Attribute::SanitizeAddress))
1422 if (!ClDebugFunc.empty() && ClDebugFunc != F.getName())
1425 // We want to instrument every address only once per basic block (unless there
1426 // are calls between uses).
1427 SmallSet<Value*, 16> TempsToInstrument;
1428 SmallVector<Instruction*, 16> ToInstrument;
1429 SmallVector<Instruction*, 8> NoReturnCalls;
1430 SmallVector<BasicBlock*, 16> AllBlocks;
1431 SmallVector<Instruction*, 16> PointerComparisonsOrSubtracts;
1436 // Fill the set of memory operations to instrument.
1437 for (auto &BB : F) {
1438 AllBlocks.push_back(&BB);
1439 TempsToInstrument.clear();
1440 int NumInsnsPerBB = 0;
1441 for (auto &Inst : BB) {
1442 if (LooksLikeCodeInBug11395(&Inst)) return false;
1444 isInterestingMemoryAccess(&Inst, &IsWrite, &Alignment)) {
1445 if (ClOpt && ClOptSameTemp) {
1446 if (!TempsToInstrument.insert(Addr).second)
1447 continue; // We've seen this temp in the current BB.
1449 } else if (ClInvalidPointerPairs &&
1450 isInterestingPointerComparisonOrSubtraction(&Inst)) {
1451 PointerComparisonsOrSubtracts.push_back(&Inst);
1453 } else if (isa<MemIntrinsic>(Inst)) {
1456 if (isa<AllocaInst>(Inst))
1460 // A call inside BB.
1461 TempsToInstrument.clear();
1462 if (CS.doesNotReturn())
1463 NoReturnCalls.push_back(CS.getInstruction());
1467 ToInstrument.push_back(&Inst);
1469 if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB)
1474 bool UseCalls = false;
1475 if (ClInstrumentationWithCallsThreshold >= 0 &&
1476 ToInstrument.size() > (unsigned)ClInstrumentationWithCallsThreshold)
1480 int NumInstrumented = 0;
1481 for (auto Inst : ToInstrument) {
1482 if (ClDebugMin < 0 || ClDebugMax < 0 ||
1483 (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
1484 if (isInterestingMemoryAccess(Inst, &IsWrite, &Alignment))
1485 instrumentMop(Inst, UseCalls);
1487 instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));
1492 FunctionStackPoisoner FSP(F, *this);
1493 bool ChangedStack = FSP.runOnFunction();
1495 // We must unpoison the stack before every NoReturn call (throw, _exit, etc).
1496 // See e.g. http://code.google.com/p/address-sanitizer/issues/detail?id=37
1497 for (auto CI : NoReturnCalls) {
1498 IRBuilder<> IRB(CI);
1499 IRB.CreateCall(AsanHandleNoReturnFunc);
1502 for (auto Inst : PointerComparisonsOrSubtracts) {
1503 instrumentPointerComparisonOrSubtraction(Inst);
1507 bool res = NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty();
1509 DEBUG(dbgs() << "ASAN done instrumenting: " << res << " " << F << "\n");
1514 // Workaround for bug 11395: we don't want to instrument stack in functions
1515 // with large assembly blobs (32-bit only), otherwise reg alloc may crash.
1516 // FIXME: remove once the bug 11395 is fixed.
1517 bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) {
1518 if (LongSize != 32) return false;
1519 CallInst *CI = dyn_cast<CallInst>(I);
1520 if (!CI || !CI->isInlineAsm()) return false;
1521 if (CI->getNumArgOperands() <= 5) return false;
1522 // We have inline assembly with quite a few arguments.
1526 void FunctionStackPoisoner::initializeCallbacks(Module &M) {
1527 IRBuilder<> IRB(*C);
1528 for (int i = 0; i <= kMaxAsanStackMallocSizeClass; i++) {
1529 std::string Suffix = itostr(i);
1530 AsanStackMallocFunc[i] = checkInterfaceFunction(M.getOrInsertFunction(
1531 kAsanStackMallocNameTemplate + Suffix, IntptrTy, IntptrTy, nullptr));
1532 AsanStackFreeFunc[i] = checkInterfaceFunction(
1533 M.getOrInsertFunction(kAsanStackFreeNameTemplate + Suffix,
1534 IRB.getVoidTy(), IntptrTy, IntptrTy, nullptr));
1536 AsanPoisonStackMemoryFunc = checkInterfaceFunction(
1537 M.getOrInsertFunction(kAsanPoisonStackMemoryName, IRB.getVoidTy(),
1538 IntptrTy, IntptrTy, nullptr));
1539 AsanUnpoisonStackMemoryFunc = checkInterfaceFunction(
1540 M.getOrInsertFunction(kAsanUnpoisonStackMemoryName, IRB.getVoidTy(),
1541                             IntptrTy, IntptrTy, nullptr));
1542 }
1544 void
1545 FunctionStackPoisoner::poisonRedZones(ArrayRef<uint8_t> ShadowBytes,
1546                                       IRBuilder<> &IRB, Value *ShadowBase,
1547                                       bool DoPoison) {
1548   size_t n = ShadowBytes.size();
1549   size_t i = 0;
1550 // We need to (un)poison n bytes of stack shadow. Poison as many as we can
1551 // using 64-bit stores (if we are on 64-bit arch), then poison the rest
1552 // with 32-bit stores, then with 16-bit stores, and finally with 8-bit stores.
1553 for (size_t LargeStoreSizeInBytes = ASan.LongSize / 8;
1554 LargeStoreSizeInBytes != 0; LargeStoreSizeInBytes /= 2) {
1555     for (; i + LargeStoreSizeInBytes - 1 < n; i += LargeStoreSizeInBytes) {
1556       uint64_t Val = 0;
1557       for (size_t j = 0; j < LargeStoreSizeInBytes; j++) {
1558         if (ASan.DL->isLittleEndian())
1559           Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
1560         else
1561           Val = (Val << 8) | ShadowBytes[i + j];
1562       }
1564 Value *Ptr = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
1565 Type *StoreTy = Type::getIntNTy(*C, LargeStoreSizeInBytes * 8);
1566 Value *Poison = ConstantInt::get(StoreTy, DoPoison ? Val : 0);
1567       IRB.CreateStore(Poison, IRB.CreateIntToPtr(Ptr, StoreTy->getPointerTo()));
1568     }
1569   }
1570 }
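// For illustration (an assumed example, not part of the original comments):
// on a little-endian 64-bit target, ShadowBytes = {0,0,0,0,0xf2,0xf2,0xf2,0xf3}
// is packed in one 8-byte iteration into Val = 0xf3f2f2f200000000 and emitted
// as a single i64 store at ShadowBase + 0; with DoPoison == false the same
// slot is simply overwritten with 0.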
1572 // The fake stack allocator (asan_fake_stack.h) has 11 size classes: one for
1573 // each power of 2 from kMinStackMallocSize (class 0) up to class kMaxAsanStackMallocSizeClass.
1574 static int StackMallocSizeClass(uint64_t LocalStackSize) {
1575 assert(LocalStackSize <= kMaxStackMallocSize);
1576 uint64_t MaxSize = kMinStackMallocSize;
1577 for (int i = 0; ; i++, MaxSize *= 2)
1578     if (LocalStackSize <= MaxSize)
1579       return i;
1580   llvm_unreachable("impossible LocalStackSize");
1581 }
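// For illustration (derived from the constants above): with
// kMinStackMallocSize == 64 the classes cover 64, 128, 256, ..., 65536 bytes,
// so StackMallocSizeClass(64) == 0, StackMallocSizeClass(65) == 1,
// StackMallocSizeClass(300) == 3 (the 512-byte class) and
// StackMallocSizeClass(65536) == 10.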
1583 // Set Size bytes starting from ShadowBase to kAsanStackAfterReturnMagic.
1584 // memset. We cannot use the MemSet intrinsic because it may end up calling the actual
1585 // memset. Size is a multiple of 8.
1586 // Currently this generates 8-byte stores on x86_64; it may be better to
1587 // generate wider stores.
1588 void FunctionStackPoisoner::SetShadowToStackAfterReturnInlined(
1589 IRBuilder<> &IRB, Value *ShadowBase, int Size) {
1590 assert(!(Size % 8));
1591 assert(kAsanStackAfterReturnMagic == 0xf5);
1592 for (int i = 0; i < Size; i += 8) {
1593 Value *p = IRB.CreateAdd(ShadowBase, ConstantInt::get(IntptrTy, i));
1594 IRB.CreateStore(ConstantInt::get(IRB.getInt64Ty(), 0xf5f5f5f5f5f5f5f5ULL),
1595                     IRB.CreateIntToPtr(p, IRB.getInt64Ty()->getPointerTo()));
1596   }
1597 }
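// For illustration (an assumed example): a call with Size == 32 emits four
// i64 stores of 0xf5f5f5f5f5f5f5f5 at ShadowBase + 0, + 8, + 16 and + 24,
// i.e. an unrolled memset(ShadowBase, 0xf5, 32) that cannot be intercepted.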
1599 static DebugLoc getFunctionEntryDebugLocation(Function &F) {
1600 for (const auto &Inst : F.getEntryBlock())
1601 if (!isa<AllocaInst>(Inst))
1602       return Inst.getDebugLoc();
1603   return DebugLoc();
1604 }
1606 PHINode *FunctionStackPoisoner::createPHI(IRBuilder<> &IRB, Value *Cond,
1607                                           Value *ValueIfTrue,
1608                                           Instruction *ThenTerm,
1609 Value *ValueIfFalse) {
1610 PHINode *PHI = IRB.CreatePHI(IntptrTy, 2);
1611 BasicBlock *CondBlock = cast<Instruction>(Cond)->getParent();
1612 PHI->addIncoming(ValueIfFalse, CondBlock);
1613 BasicBlock *ThenBlock = ThenTerm->getParent();
1614   PHI->addIncoming(ValueIfTrue, ThenBlock);
1615   return PHI;
1616 }
1618 Value *FunctionStackPoisoner::createAllocaForLayout(
1619     IRBuilder<> &IRB, const ASanStackFrameLayout &L, bool Dynamic) {
1620   AllocaInst *Alloca;
1621   if (Dynamic) {
1622     Alloca = IRB.CreateAlloca(IRB.getInt8Ty(),
1623                               ConstantInt::get(IRB.getInt64Ty(), L.FrameSize),
1624                               "MyAlloca");
1625   } else {
1626     Alloca = IRB.CreateAlloca(ArrayType::get(IRB.getInt8Ty(), L.FrameSize),
1627                               nullptr, "MyAlloca");
1628     assert(Alloca->isStaticAlloca());
1629   }
1630 assert((ClRealignStack & (ClRealignStack - 1)) == 0);
1631 size_t FrameAlignment = std::max(L.FrameAlignment, (size_t)ClRealignStack);
1632 Alloca->setAlignment(FrameAlignment);
1633   return IRB.CreatePointerCast(Alloca, IntptrTy);
1634 }
1636 void FunctionStackPoisoner::poisonStack() {
1637 assert(AllocaVec.size() > 0 || DynamicAllocaVec.size() > 0);
1639 if (ClInstrumentAllocas)
1640 // Handle dynamic allocas.
1641 for (auto &AllocaCall : DynamicAllocaVec)
1642 handleDynamicAllocaCall(AllocaCall);
1644 if (AllocaVec.size() == 0) return;
1646 int StackMallocIdx = -1;
1647 DebugLoc EntryDebugLocation = getFunctionEntryDebugLocation(F);
1649 Instruction *InsBefore = AllocaVec[0];
1650 IRBuilder<> IRB(InsBefore);
1651 IRB.SetCurrentDebugLocation(EntryDebugLocation);
1653 SmallVector<ASanStackVariableDescription, 16> SVD;
1654 SVD.reserve(AllocaVec.size());
1655 for (AllocaInst *AI : AllocaVec) {
1656 ASanStackVariableDescription D = { AI->getName().data(),
1657 getAllocaSizeInBytes(AI),
1658                                        AI->getAlignment(), AI, 0};
1659     SVD.push_back(D);
1660   }
1661 // Minimal header size (left redzone) is 4 pointers,
1662 // i.e. 32 bytes on 64-bit platforms and 16 bytes on 32-bit platforms.
1663 size_t MinHeaderSize = ASan.LongSize / 2;
1664 ASanStackFrameLayout L;
1665 ComputeASanStackFrameLayout(SVD, 1UL << Mapping.Scale, MinHeaderSize, &L);
1666 DEBUG(dbgs() << L.DescriptionString << " --- " << L.FrameSize << "\n");
1667 uint64_t LocalStackSize = L.FrameSize;
1668 bool DoStackMalloc =
1669 ClUseAfterReturn && LocalStackSize <= kMaxStackMallocSize;
1670   // Don't do dynamic alloca in the presence of inline asm: too often it
1671 // makes assumptions on which registers are available.
1672 bool DoDynamicAlloca = ClDynamicAllocaStack && !HasNonEmptyInlineAsm;
1674 Value *StaticAlloca =
1675 DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
1677   Value *FakeStack;
1678   Value *LocalStackBase;
1680 if (DoStackMalloc) {
1681 // void *FakeStack = __asan_option_detect_stack_use_after_return
1682 // ? __asan_stack_malloc_N(LocalStackSize)
1683 //     : nullptr;
1684 // void *LocalStackBase = (FakeStack) ? FakeStack : alloca(LocalStackSize);
1685 Constant *OptionDetectUAR = F.getParent()->getOrInsertGlobal(
1686 kAsanOptionDetectUAR, IRB.getInt32Ty());
1687 Value *UARIsEnabled =
1688 IRB.CreateICmpNE(IRB.CreateLoad(OptionDetectUAR),
1689 Constant::getNullValue(IRB.getInt32Ty()));
1690     Instruction *Term =
1691         SplitBlockAndInsertIfThen(UARIsEnabled, InsBefore, false);
1692 IRBuilder<> IRBIf(Term);
1693 IRBIf.SetCurrentDebugLocation(EntryDebugLocation);
1694 StackMallocIdx = StackMallocSizeClass(LocalStackSize);
1695 assert(StackMallocIdx <= kMaxAsanStackMallocSizeClass);
1696 Value *FakeStackValue =
1697 IRBIf.CreateCall(AsanStackMallocFunc[StackMallocIdx],
1698 ConstantInt::get(IntptrTy, LocalStackSize));
1699 IRB.SetInsertPoint(InsBefore);
1700 IRB.SetCurrentDebugLocation(EntryDebugLocation);
1701 FakeStack = createPHI(IRB, UARIsEnabled, FakeStackValue, Term,
1702 ConstantInt::get(IntptrTy, 0));
1704 Value *NoFakeStack =
1705 IRB.CreateICmpEQ(FakeStack, Constant::getNullValue(IntptrTy));
1706 Term = SplitBlockAndInsertIfThen(NoFakeStack, InsBefore, false);
1707 IRBIf.SetInsertPoint(Term);
1708 IRBIf.SetCurrentDebugLocation(EntryDebugLocation);
1709 Value *AllocaValue =
1710 DoDynamicAlloca ? createAllocaForLayout(IRBIf, L, true) : StaticAlloca;
1711 IRB.SetInsertPoint(InsBefore);
1712 IRB.SetCurrentDebugLocation(EntryDebugLocation);
1713 LocalStackBase = createPHI(IRB, NoFakeStack, AllocaValue, Term, FakeStack);
1714   } else {
1715     // void *FakeStack = nullptr;
1716 // void *LocalStackBase = alloca(LocalStackSize);
1717 FakeStack = ConstantInt::get(IntptrTy, 0);
1718     LocalStackBase =
1719         DoDynamicAlloca ? createAllocaForLayout(IRB, L, true) : StaticAlloca;
1720   }
1722 // Insert poison calls for lifetime intrinsics for alloca.
1723 bool HavePoisonedAllocas = false;
1724 for (const auto &APC : AllocaPoisonCallVec) {
1725     assert(APC.InsBefore);
1726     assert(APC.AI);
1727 IRBuilder<> IRB(APC.InsBefore);
1728 poisonAlloca(APC.AI, APC.Size, IRB, APC.DoPoison);
1729     HavePoisonedAllocas |= APC.DoPoison;
1730   }
1732 // Replace Alloca instructions with base+offset.
1733 for (const auto &Desc : SVD) {
1734 AllocaInst *AI = Desc.AI;
1735 Value *NewAllocaPtr = IRB.CreateIntToPtr(
1736         IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Desc.Offset)),
1737         AI->getType());
1738     replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB);
1739     AI->replaceAllUsesWith(NewAllocaPtr);
1740   }
1742 // The left-most redzone has enough space for at least 4 pointers.
1743 // Write the Magic value to redzone[0].
1744 Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
1745 IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
1746                   BasePlus0);
1747   // Write the frame description constant to redzone[1].
1748 Value *BasePlus1 = IRB.CreateIntToPtr(
1749 IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, ASan.LongSize/8)),
1750     IntptrPtrTy);
1751   GlobalVariable *StackDescriptionGlobal =
1752 createPrivateGlobalForString(*F.getParent(), L.DescriptionString,
1753 /*AllowMerging*/true);
1754 Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal,
1755                                              IntptrTy);
1756   IRB.CreateStore(Description, BasePlus1);
1757 // Write the PC to redzone[2].
1758 Value *BasePlus2 = IRB.CreateIntToPtr(
1759 IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy,
1760 2 * ASan.LongSize/8)),
1761     IntptrPtrTy);
1762   IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
1764 // Poison the stack redzones at the entry.
1765 Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
1766 poisonRedZones(L.ShadowBytes, IRB, ShadowBase, true);
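// For illustration (a rough sketch; exact offsets come from
// ComputeASanStackFrameLayout): for one 8-byte local on a 64-bit target the
// instrumented frame looks roughly like
//   [ 0.. 7]  kCurrentStackFrameMagic           (redzone word 0)
//   [ 8..15]  pointer to the description string (redzone word 1)
//   [16..23]  pointer to the function           (redzone word 2)
//   [24..31]  rest of the left redzone
//   [32..39]  the local variable
//   [40..63]  right redzone
// and only the variable's bytes are left addressable in shadow memory.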
1768 // (Un)poison the stack before all ret instructions.
1769 for (auto Ret : RetVec) {
1770 IRBuilder<> IRBRet(Ret);
1771 // Mark the current frame as retired.
1772 IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
1773                        BasePlus0);
1774     if (DoStackMalloc) {
1775 assert(StackMallocIdx >= 0);
1776 // if FakeStack != 0 // LocalStackBase == FakeStack
1777 // // In use-after-return mode, poison the whole stack frame.
1778 // if StackMallocIdx <= 4
1779 // // For small sizes inline the whole thing:
1780 // memset(ShadowBase, kAsanStackAfterReturnMagic, ShadowSize);
1781 // **SavedFlagPtr(FakeStack) = 0
1782       //     else
1783       //         __asan_stack_free_N(FakeStack, LocalStackSize)
1784       // else
1785       //     <This is not a fake stack; unpoison the redzones>
1786       Value *Cmp =
1787           IRBRet.CreateICmpNE(FakeStack, Constant::getNullValue(IntptrTy));
1788 TerminatorInst *ThenTerm, *ElseTerm;
1789 SplitBlockAndInsertIfThenElse(Cmp, Ret, &ThenTerm, &ElseTerm);
1791 IRBuilder<> IRBPoison(ThenTerm);
1792 if (StackMallocIdx <= 4) {
1793 int ClassSize = kMinStackMallocSize << StackMallocIdx;
1794 SetShadowToStackAfterReturnInlined(IRBPoison, ShadowBase,
1795 ClassSize >> Mapping.Scale);
1796 Value *SavedFlagPtrPtr = IRBPoison.CreateAdd(
1797             FakeStack,
1798             ConstantInt::get(IntptrTy, ClassSize - ASan.LongSize / 8));
1799 Value *SavedFlagPtr = IRBPoison.CreateLoad(
1800 IRBPoison.CreateIntToPtr(SavedFlagPtrPtr, IntptrPtrTy));
1801 IRBPoison.CreateStore(
1802 Constant::getNullValue(IRBPoison.getInt8Ty()),
1803 IRBPoison.CreateIntToPtr(SavedFlagPtr, IRBPoison.getInt8PtrTy()));
1804       } else {
1805         // For larger frames call __asan_stack_free_*.
1806 IRBPoison.CreateCall2(AsanStackFreeFunc[StackMallocIdx], FakeStack,
1807                               ConstantInt::get(IntptrTy, LocalStackSize));
1808       }
1810 IRBuilder<> IRBElse(ElseTerm);
1811 poisonRedZones(L.ShadowBytes, IRBElse, ShadowBase, false);
1812 } else if (HavePoisonedAllocas) {
1813       // If we poisoned some allocas during llvm.lifetime analysis,
1814       // unpoison the whole stack frame now.
1815 poisonAlloca(LocalStackBase, LocalStackSize, IRBRet, false);
1816     } else {
1817       poisonRedZones(L.ShadowBytes, IRBRet, ShadowBase, false);
1818     }
1819   }
1821 if (ClInstrumentAllocas)
1822 // Unpoison dynamic allocas.
1823 for (auto &AllocaCall : DynamicAllocaVec)
1824 unpoisonDynamicAlloca(AllocaCall);
1826 // We are done. Remove the old unused alloca instructions.
1827 for (auto AI : AllocaVec)
1828     AI->eraseFromParent();
1829 }
1831 void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
1832 IRBuilder<> &IRB, bool DoPoison) {
1833 // For now just insert the call to ASan runtime.
1834 Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
1835 Value *SizeArg = ConstantInt::get(IntptrTy, Size);
1836 IRB.CreateCall2(DoPoison ? AsanPoisonStackMemoryFunc
1837                            : AsanUnpoisonStackMemoryFunc,
1838                   AddrArg, SizeArg);
1839 }
1841 // Handling llvm.lifetime intrinsics for a given %alloca:
1842 // (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
1843 // (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
1844 // invalid accesses) and unpoison it for llvm.lifetime.start (the memory
1845 // could be poisoned by previous llvm.lifetime.end instruction, as the
1846 // variable may go in and out of scope several times, e.g. in loops).
1847 // (3) if we poisoned at least one %alloca in a function,
1848 // unpoison the whole stack frame at function exit.
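// For illustration (a sketch of the effect, using assumed IR): given
//   %buf = alloca [128 x i8]
//   call void @llvm.lifetime.start(i64 128, i8* %buf.i8)
//   ...
//   call void @llvm.lifetime.end(i64 128, i8* %buf.i8)
// the poisoner emits __asan_unpoison_stack_memory(%buf, 128) at the
// lifetime.start and __asan_poison_stack_memory(%buf, 128) at the
// lifetime.end, and step (3) unpoisons the whole frame again on return.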
1850 AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
1851 if (AllocaInst *AI = dyn_cast<AllocaInst>(V))
1852     // We're interested only in allocas we can handle.
1853 return isInterestingAlloca(*AI) ? AI : nullptr;
1854   // See if we've already calculated (or started to calculate) alloca for a
1855   // given value.
1856 AllocaForValueMapTy::iterator I = AllocaForValue.find(V);
1857 if (I != AllocaForValue.end())
1858     return I->second;
1859   // Store 0 while we're calculating alloca for value V to avoid
1860 // infinite recursion if the value references itself.
1861 AllocaForValue[V] = nullptr;
1862 AllocaInst *Res = nullptr;
1863 if (CastInst *CI = dyn_cast<CastInst>(V))
1864 Res = findAllocaForValue(CI->getOperand(0));
1865 else if (PHINode *PN = dyn_cast<PHINode>(V)) {
1866 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1867 Value *IncValue = PN->getIncomingValue(i);
1868 // Allow self-referencing phi-nodes.
1869 if (IncValue == PN) continue;
1870 AllocaInst *IncValueAI = findAllocaForValue(IncValue);
1871 // AI for incoming values should exist and should all be equal.
1872       if (IncValueAI == nullptr || (Res != nullptr && IncValueAI != Res))
1873         return nullptr;
1874       Res = IncValueAI;
1875     }
1876   }
1877   if (Res)
1878     AllocaForValue[V] = Res;
1879   return Res;
1880 }
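// For illustration (an assumed example): the walk above maps IR such as
//   %a   = alloca i64
//   %c   = bitcast i64* %a to i8*
//   %phi = phi i8* [ %c, %entry ], [ %phi, %loop ]
// back to %a; the bitcast is looked through and the self-referencing
// incoming value of the phi is ignored.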
1882 // Compute PartialRzMagic for a dynamic alloca call. PartialRzMagic is
1883 // constructed from two separate 32-bit numbers: PartialRzMagic = Val1 | Val2.
1884 // (1) Val1 is responsible for forming the base value of PartialRzMagic: it
1885 //     contains 0x00 for every fully addressable 8-byte chunk of user memory
1886 //     and 0xcb for every fully poisoned one.
1887 // (2) Val2 forms the value that marks the first partially poisoned shadow
1888 //     byte with the appropriate value (0x01 - 0x07, or 0xcb if Padding % 8 == 0).
1890 // Shift = Padding & ~7; // the number of bits we need to shift to reach the
1891 //                          first chunk in shadow memory containing nonzero bytes.
1892 // Example:
1893 // Padding = 21                       Padding = 16
1894 // Shadow:  |00|00|05|cb|             Shadow:  |00|00|cb|cb|
1897 // Shift = 21 & ~7 = 16               Shift = 16 & ~7 = 16
1899 // Val1 = 0xcbcbcbcb << Shift;
1900 // PartialBits = Padding ? Padding & 7 : 0xcb;
1901 // Val2 = PartialBits << Shift;
1902 // Result = Val1 | Val2;
1903 Value *FunctionStackPoisoner::computePartialRzMagic(Value *PartialSize,
1904                                                      IRBuilder<> &IRB) {
1905   PartialSize = IRB.CreateIntCast(PartialSize, IRB.getInt32Ty(), false);
1906 Value *Shift = IRB.CreateAnd(PartialSize, IRB.getInt32(~7));
1907 unsigned Val1Int = kAsanAllocaPartialVal1;
1908 unsigned Val2Int = kAsanAllocaPartialVal2;
1909 if (!ASan.DL->isLittleEndian()) {
1910 Val1Int = sys::getSwappedBytes(Val1Int);
1911 Val2Int = sys::getSwappedBytes(Val2Int);
1912   }
1913   Value *Val1 = shiftAllocaMagic(IRB.getInt32(Val1Int), IRB, Shift);
1914 Value *PartialBits = IRB.CreateAnd(PartialSize, IRB.getInt32(7));
1915 // For BigEndian get 0x000000YZ -> 0xYZ000000.
1916 if (ASan.DL->isBigEndian())
1917 PartialBits = IRB.CreateShl(PartialBits, IRB.getInt32(24));
1918 Value *Val2 = IRB.getInt32(Val2Int);
1919   Value *Cond =
1920       IRB.CreateICmpNE(PartialBits, Constant::getNullValue(IRB.getInt32Ty()));
1921 Val2 = IRB.CreateSelect(Cond, shiftAllocaMagic(PartialBits, IRB, Shift),
1922 shiftAllocaMagic(Val2, IRB, Shift));
1923   return IRB.CreateOr(Val1, Val2);
1924 }
1926 void FunctionStackPoisoner::handleDynamicAllocaCall(
1927 DynamicAllocaCall &AllocaCall) {
1928 AllocaInst *AI = AllocaCall.AI;
1929 if (!doesDominateAllExits(AI)) {
1930 // We do not yet handle complex allocas
1931     AllocaCall.Poison = false;
1932     return;
1933   }
1935 IRBuilder<> IRB(AI);
1937 PointerType *Int32PtrTy = PointerType::getUnqual(IRB.getInt32Ty());
1938 const unsigned Align = std::max(kAllocaRzSize, AI->getAlignment());
1939 const uint64_t AllocaRedzoneMask = kAllocaRzSize - 1;
1941 Value *Zero = Constant::getNullValue(IntptrTy);
1942 Value *AllocaRzSize = ConstantInt::get(IntptrTy, kAllocaRzSize);
1943 Value *AllocaRzMask = ConstantInt::get(IntptrTy, AllocaRedzoneMask);
1944 Value *NotAllocaRzMask = ConstantInt::get(IntptrTy, ~AllocaRedzoneMask);
1946   // We need to extend the alloca with additional memory to hold the
1947   // redzones. OldSize is the number of allocated elements, each of
1948   // ElementSize bytes, so the currently allocated memory size in bytes
1949   // is OldSize * ElementSize.
1950 unsigned ElementSize = ASan.DL->getTypeAllocSize(AI->getAllocatedType());
1951 Value *OldSize = IRB.CreateMul(AI->getArraySize(),
1952 ConstantInt::get(IntptrTy, ElementSize));
1954 // PartialSize = OldSize % 32
1955 Value *PartialSize = IRB.CreateAnd(OldSize, AllocaRzMask);
1957 // Misalign = kAllocaRzSize - PartialSize;
1958 Value *Misalign = IRB.CreateSub(AllocaRzSize, PartialSize);
1960 // PartialPadding = Misalign != kAllocaRzSize ? Misalign : 0;
1961 Value *Cond = IRB.CreateICmpNE(Misalign, AllocaRzSize);
1962 Value *PartialPadding = IRB.CreateSelect(Cond, Misalign, Zero);
1964 // AdditionalChunkSize = Align + PartialPadding + kAllocaRzSize
1965 // Align is added to locate left redzone, PartialPadding for possible
1966 // partial redzone and kAllocaRzSize for right redzone respectively.
1967 Value *AdditionalChunkSize = IRB.CreateAdd(
1968 ConstantInt::get(IntptrTy, Align + kAllocaRzSize), PartialPadding);
1970 Value *NewSize = IRB.CreateAdd(OldSize, AdditionalChunkSize);
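// For illustration (a worked example, assuming kAllocaRzSize == 32): for
// "alloca i8, i64 21" we get ElementSize = 1, OldSize = 21, PartialSize = 21,
// Misalign = 11, PartialPadding = 11 and Align = 32, so
// NewSize = 21 + (32 + 32 + 11) = 96: a 32-byte left redzone, 21 user bytes,
// 11 bytes of partial padding and a 32-byte right redzone.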
1972 // Insert new alloca with new NewSize and Align params.
1973 AllocaInst *NewAlloca = IRB.CreateAlloca(IRB.getInt8Ty(), NewSize);
1974 NewAlloca->setAlignment(Align);
1976 // NewAddress = Address + Align
1977 Value *NewAddress = IRB.CreateAdd(IRB.CreatePtrToInt(NewAlloca, IntptrTy),
1978 ConstantInt::get(IntptrTy, Align));
1980 Value *NewAddressPtr = IRB.CreateIntToPtr(NewAddress, AI->getType());
1982 // LeftRzAddress = NewAddress - kAllocaRzSize
1983 Value *LeftRzAddress = IRB.CreateSub(NewAddress, AllocaRzSize);
1985 // Poisoning left redzone.
1986 AllocaCall.LeftRzAddr = ASan.memToShadow(LeftRzAddress, IRB);
1987 IRB.CreateStore(ConstantInt::get(IRB.getInt32Ty(), kAsanAllocaLeftMagic),
1988 IRB.CreateIntToPtr(AllocaCall.LeftRzAddr, Int32PtrTy));
1990 // PartialRzAligned = PartialRzAddr & ~AllocaRzMask
1991 Value *PartialRzAddr = IRB.CreateAdd(NewAddress, OldSize);
1992 Value *PartialRzAligned = IRB.CreateAnd(PartialRzAddr, NotAllocaRzMask);
1994 // Poisoning partial redzone.
1995 Value *PartialRzMagic = computePartialRzMagic(PartialSize, IRB);
1996 Value *PartialRzShadowAddr = ASan.memToShadow(PartialRzAligned, IRB);
1997 IRB.CreateStore(PartialRzMagic,
1998 IRB.CreateIntToPtr(PartialRzShadowAddr, Int32PtrTy));
2001   // RightRzAddress = (PartialRzAddr + AllocaRzMask) & ~AllocaRzMask
2002 Value *RightRzAddress = IRB.CreateAnd(
2003 IRB.CreateAdd(PartialRzAddr, AllocaRzMask), NotAllocaRzMask);
2005 // Poisoning right redzone.
2006 AllocaCall.RightRzAddr = ASan.memToShadow(RightRzAddress, IRB);
2007 IRB.CreateStore(ConstantInt::get(IRB.getInt32Ty(), kAsanAllocaRightMagic),
2008 IRB.CreateIntToPtr(AllocaCall.RightRzAddr, Int32PtrTy));
2010   // Replace all uses of AddressReturnedByAlloca with NewAddress.
2011 AI->replaceAllUsesWith(NewAddressPtr);
2013 // We are done. Erase old alloca and store left, partial and right redzones
2014 // shadow addresses for future unpoisoning.
2015 AI->eraseFromParent();
2016   NumInstrumentedDynamicAllocas++;
2017 }