X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTransforms%2FInstrumentation%2FAddressSanitizer.cpp;h=22851b46e8ac699122b0c1caca8f0b0bf6d323cb;hb=3e1d45bf44f882f3ee139d452dd50305d831a341;hp=375a2ae7b0ad787e38f4c03b88179e524f3841e9;hpb=ce718ff9f42c7da092eaa01dd0242e8d5ba84713;p=oota-llvm.git diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp index 375a2ae7b0a..22851b46e8a 100644 --- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -15,40 +15,48 @@ #define DEBUG_TYPE "asan" -#include "FunctionBlackList.h" +#include "llvm/Transforms/Instrumentation.h" #include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/DepthFirstIterator.h" #include "llvm/ADT/OwningPtr.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/Triple.h" -#include "llvm/Function.h" -#include "llvm/IntrinsicInst.h" -#include "llvm/LLVMContext.h" -#include "llvm/Module.h" +#include "llvm/DIBuilder.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/InlineAsm.h" +#include "llvm/IR/IntrinsicInst.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/Type.h" +#include "llvm/InstVisitor.h" +#include "llvm/Support/CallSite.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/DataTypes.h" #include "llvm/Support/Debug.h" -#include "llvm/Support/IRBuilder.h" +#include "llvm/Support/Endian.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Support/system_error.h" -#include "llvm/Target/TargetData.h" -#include "llvm/Target/TargetMachine.h" -#include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" +#include "llvm/Transforms/Utils/BlackList.h" +#include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/ModuleUtils.h" -#include "llvm/Type.h" - -#include #include +#include using namespace llvm; static const uint64_t kDefaultShadowScale = 3; static const uint64_t kDefaultShadowOffset32 = 1ULL << 29; static const uint64_t kDefaultShadowOffset64 = 1ULL << 44; -static const uint64_t kDefaultShadowOffsetAndroid = 0; +static const uint64_t kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G. 
+static const uint64_t kPPC64_ShadowOffset64 = 1ULL << 41; +static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa8000; static const size_t kMaxStackMallocSize = 1 << 16; // 64K static const uintptr_t kCurrentStackFrameMagic = 0x41B58AB3; @@ -58,20 +66,31 @@ static const char *kAsanModuleCtorName = "asan.module_ctor"; static const char *kAsanModuleDtorName = "asan.module_dtor"; static const int kAsanCtorAndCtorPriority = 1; static const char *kAsanReportErrorTemplate = "__asan_report_"; +static const char *kAsanReportLoadN = "__asan_report_load_n"; +static const char *kAsanReportStoreN = "__asan_report_store_n"; static const char *kAsanRegisterGlobalsName = "__asan_register_globals"; static const char *kAsanUnregisterGlobalsName = "__asan_unregister_globals"; -static const char *kAsanInitName = "__asan_init"; +static const char *kAsanPoisonGlobalsName = "__asan_before_dynamic_init"; +static const char *kAsanUnpoisonGlobalsName = "__asan_after_dynamic_init"; +static const char *kAsanInitName = "__asan_init_v3"; static const char *kAsanHandleNoReturnName = "__asan_handle_no_return"; static const char *kAsanMappingOffsetName = "__asan_mapping_offset"; static const char *kAsanMappingScaleName = "__asan_mapping_scale"; static const char *kAsanStackMallocName = "__asan_stack_malloc"; static const char *kAsanStackFreeName = "__asan_stack_free"; +static const char *kAsanGenPrefix = "__asan_gen_"; +static const char *kAsanPoisonStackMemoryName = "__asan_poison_stack_memory"; +static const char *kAsanUnpoisonStackMemoryName = + "__asan_unpoison_stack_memory"; static const int kAsanStackLeftRedzoneMagic = 0xf1; static const int kAsanStackMidRedzoneMagic = 0xf2; static const int kAsanStackRightRedzoneMagic = 0xf3; static const int kAsanStackPartialRedzoneMagic = 0xf4; +// Accesses sizes are powers of two: 1, 2, 4, 8, 16. +static const size_t kNumberOfAccessSizes = 5; + // Command-line flags. // This flag may need to be replaced with -f[no-]asan-reads. @@ -82,6 +101,17 @@ static cl::opt ClInstrumentWrites("asan-instrument-writes", static cl::opt ClInstrumentAtomics("asan-instrument-atomics", cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden, cl::init(true)); +static cl::opt ClAlwaysSlowPath("asan-always-slow-path", + cl::desc("use instrumentation with slow path for all accesses"), + cl::Hidden, cl::init(false)); +// This flag limits the number of instructions to be instrumented +// in any given BB. Normally, this should be set to unlimited (INT_MAX), +// but due to http://llvm.org/bugs/show_bug.cgi?id=12652 we temporary +// set it to 10000. +static cl::opt ClMaxInsnsToInstrumentPerBB("asan-max-ins-per-bb", + cl::init(10000), + cl::desc("maximal number of instructions to instrument in any given BB"), + cl::Hidden); // This flag may need to be replaced with -f[no]asan-stack. static cl::opt ClStack("asan-stack", cl::desc("Handle stack memory"), cl::Hidden, cl::init(true)); @@ -91,11 +121,14 @@ static cl::opt ClUseAfterReturn("asan-use-after-return", // This flag may need to be replaced with -f[no]asan-globals. static cl::opt ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true)); +static cl::opt ClInitializers("asan-initialization-order", + cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(false)); static cl::opt ClMemIntrin("asan-memintrin", cl::desc("Handle memset/memcpy/memmove"), cl::Hidden, cl::init(true)); -// This flag may need to be replaced with -fasan-blacklist. 
-static cl::opt ClBlackListFile("asan-blacklist", - cl::desc("File containing the list of functions to ignore " +static cl::opt ClRealignStack("asan-realign-stack", + cl::desc("Realign stack to 32"), cl::Hidden, cl::init(true)); +static cl::opt ClBlacklistFile("asan-blacklist", + cl::desc("File containing the list of objects to ignore " "during instrumentation"), cl::Hidden); // These flags allow to change the shadow mapping. @@ -105,6 +138,9 @@ static cl::opt ClMappingScale("asan-mapping-scale", cl::desc("scale of asan shadow mapping"), cl::Hidden, cl::init(0)); static cl::opt ClMappingOffsetLog("asan-mapping-offset-log", cl::desc("offset of asan shadow mapping"), cl::Hidden, cl::init(-1)); +static cl::opt ClShort64BitOffset("asan-short-64bit-mapping-offset", + cl::desc("Use short immediate constant as the mapping offset for 64bit"), + cl::Hidden, cl::init(true)); // Optimization flags. Not user visible, used mostly for testing // and benchmarking the tool. @@ -116,6 +152,10 @@ static cl::opt ClOptSameTemp("asan-opt-same-temp", static cl::opt ClOptGlobals("asan-opt-globals", cl::desc("Don't instrument scalar globals"), cl::Hidden, cl::init(true)); +static cl::opt ClCheckLifetime("asan-check-lifetime", + cl::desc("Use llvm.lifetime intrinsics to insert extra checks"), + cl::Hidden, cl::init(false)); + // Debug flags. static cl::opt ClDebug("asan-debug", cl::desc("debug"), cl::Hidden, cl::init(0)); @@ -129,149 +169,414 @@ static cl::opt ClDebugMax("asan-debug-max", cl::desc("Debug man inst"), cl::Hidden, cl::init(-1)); namespace { +/// A set of dynamically initialized globals extracted from metadata. +class SetOfDynamicallyInitializedGlobals { + public: + void Init(Module& M) { + // Clang generates metadata identifying all dynamically initialized globals. + NamedMDNode *DynamicGlobals = + M.getNamedMetadata("llvm.asan.dynamically_initialized_globals"); + if (!DynamicGlobals) + return; + for (int i = 0, n = DynamicGlobals->getNumOperands(); i < n; ++i) { + MDNode *MDN = DynamicGlobals->getOperand(i); + assert(MDN->getNumOperands() == 1); + Value *VG = MDN->getOperand(0); + // The optimizer may optimize away a global entirely, in which case we + // cannot instrument access to it. + if (!VG) + continue; + DynInitGlobals.insert(cast(VG)); + } + } + bool Contains(GlobalVariable *G) { return DynInitGlobals.count(G) != 0; } + private: + SmallSet DynInitGlobals; +}; + +/// This struct defines the shadow mapping using the rule: +/// shadow = (mem >> Scale) ADD-or-OR Offset. +struct ShadowMapping { + int Scale; + uint64_t Offset; + bool OrShadowOffset; +}; + +static ShadowMapping getShadowMapping(const Module &M, int LongSize, + bool ZeroBaseShadow) { + llvm::Triple TargetTriple(M.getTargetTriple()); + bool IsAndroid = TargetTriple.getEnvironment() == llvm::Triple::Android; + bool IsMacOSX = TargetTriple.getOS() == llvm::Triple::MacOSX; + bool IsPPC64 = TargetTriple.getArch() == llvm::Triple::ppc64; + bool IsX86_64 = TargetTriple.getArch() == llvm::Triple::x86_64; + bool IsMIPS32 = TargetTriple.getArch() == llvm::Triple::mips || + TargetTriple.getArch() == llvm::Triple::mipsel; + + ShadowMapping Mapping; + + // OR-ing shadow offset if more efficient (at least on x86), + // but on ppc64 we have to use add since the shadow offset is not neccesary + // 1/8-th of the address space. + Mapping.OrShadowOffset = !IsPPC64 && !ClShort64BitOffset; + + Mapping.Offset = (IsAndroid || ZeroBaseShadow) ? 0 : + (LongSize == 32 ? + (IsMIPS32 ? kMIPS32_ShadowOffset32 : kDefaultShadowOffset32) : + IsPPC64 ? 
kPPC64_ShadowOffset64 : kDefaultShadowOffset64); + if (!ZeroBaseShadow && ClShort64BitOffset && IsX86_64 && !IsMacOSX) { + assert(LongSize == 64); + Mapping.Offset = kDefaultShort64bitShadowOffset; + } + if (!ZeroBaseShadow && ClMappingOffsetLog >= 0) { + // Zero offset log is the special case. + Mapping.Offset = (ClMappingOffsetLog == 0) ? 0 : 1ULL << ClMappingOffsetLog; + } + + Mapping.Scale = kDefaultShadowScale; + if (ClMappingScale) { + Mapping.Scale = ClMappingScale; + } + + return Mapping; +} + +static size_t RedzoneSizeForScale(int MappingScale) { + // Redzone used for stack and globals is at least 32 bytes. + // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively. + return std::max(32U, 1U << MappingScale); +} /// AddressSanitizer: instrument the code in module to find memory bugs. -struct AddressSanitizer : public ModulePass { - AddressSanitizer(); - virtual const char *getPassName() const; +struct AddressSanitizer : public FunctionPass { + AddressSanitizer(bool CheckInitOrder = true, + bool CheckUseAfterReturn = false, + bool CheckLifetime = false, + StringRef BlacklistFile = StringRef(), + bool ZeroBaseShadow = false) + : FunctionPass(ID), + CheckInitOrder(CheckInitOrder || ClInitializers), + CheckUseAfterReturn(CheckUseAfterReturn || ClUseAfterReturn), + CheckLifetime(CheckLifetime || ClCheckLifetime), + BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile + : BlacklistFile), + ZeroBaseShadow(ZeroBaseShadow) {} + virtual const char *getPassName() const { + return "AddressSanitizerFunctionPass"; + } void instrumentMop(Instruction *I); - void instrumentAddress(Instruction *OrigIns, IRBuilder<> &IRB, - Value *Addr, uint32_t TypeSize, bool IsWrite); - Instruction *generateCrashCode(IRBuilder<> &IRB, Value *Addr, - bool IsWrite, uint32_t TypeSize); + void instrumentAddress(Instruction *OrigIns, Instruction *InsertBefore, + Value *Addr, uint32_t TypeSize, bool IsWrite, + Value *SizeArgument); + Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong, + Value *ShadowValue, uint32_t TypeSize); + Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr, + bool IsWrite, size_t AccessSizeIndex, + Value *SizeArgument); bool instrumentMemIntrinsic(MemIntrinsic *MI); void instrumentMemIntrinsicParam(Instruction *OrigIns, Value *Addr, - Value *Size, + Value *Size, Instruction *InsertBefore, bool IsWrite); Value *memToShadow(Value *Shadow, IRBuilder<> &IRB); - bool handleFunction(Module &M, Function &F); + bool runOnFunction(Function &F); bool maybeInsertAsanInitAtFunctionEntry(Function &F); - bool poisonStackInFunction(Module &M, Function &F); - virtual bool runOnModule(Module &M); - bool insertGlobalRedzones(Module &M); - BranchInst *splitBlockAndInsertIfThen(Instruction *SplitBefore, Value *Cmp); + void emitShadowMapping(Module &M, IRBuilder<> &IRB) const; + virtual bool doInitialization(Module &M); + static char ID; // Pass identification, replacement for typeid + + private: + void initializeCallbacks(Module &M); + + bool ShouldInstrumentGlobal(GlobalVariable *G); + bool LooksLikeCodeInBug11395(Instruction *I); + void FindDynamicInitializers(Module &M); + + bool CheckInitOrder; + bool CheckUseAfterReturn; + bool CheckLifetime; + SmallString<64> BlacklistFile; + bool ZeroBaseShadow; + + LLVMContext *C; + DataLayout *TD; + int LongSize; + Type *IntptrTy; + ShadowMapping Mapping; + Function *AsanCtorFunction; + Function *AsanInitFunction; + Function *AsanHandleNoReturnFunc; + OwningPtr BL; + // This array is indexed by AccessIsWrite and log2(AccessSize). 
+ Function *AsanErrorCallback[2][kNumberOfAccessSizes]; + // This array is indexed by AccessIsWrite. + Function *AsanErrorCallbackSized[2]; + InlineAsm *EmptyAsm; + SetOfDynamicallyInitializedGlobals DynamicallyInitializedGlobals; + + friend struct FunctionStackPoisoner; +}; + +class AddressSanitizerModule : public ModulePass { + public: + AddressSanitizerModule(bool CheckInitOrder = true, + StringRef BlacklistFile = StringRef(), + bool ZeroBaseShadow = false) + : ModulePass(ID), + CheckInitOrder(CheckInitOrder || ClInitializers), + BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile + : BlacklistFile), + ZeroBaseShadow(ZeroBaseShadow) {} + bool runOnModule(Module &M); static char ID; // Pass identification, replacement for typeid + virtual const char *getPassName() const { + return "AddressSanitizerModule"; + } private: + void initializeCallbacks(Module &M); + + bool ShouldInstrumentGlobal(GlobalVariable *G); + void createInitializerPoisonCalls(Module &M, GlobalValue *ModuleName); + size_t RedzoneSize() const { + return RedzoneSizeForScale(Mapping.Scale); + } + + bool CheckInitOrder; + SmallString<64> BlacklistFile; + bool ZeroBaseShadow; + + OwningPtr BL; + SetOfDynamicallyInitializedGlobals DynamicallyInitializedGlobals; + Type *IntptrTy; + LLVMContext *C; + DataLayout *TD; + ShadowMapping Mapping; + Function *AsanPoisonGlobals; + Function *AsanUnpoisonGlobals; + Function *AsanRegisterGlobals; + Function *AsanUnregisterGlobals; +}; + +// Stack poisoning does not play well with exception handling. +// When an exception is thrown, we essentially bypass the code +// that unpoisones the stack. This is why the run-time library has +// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire +// stack in the interceptor. This however does not work inside the +// actual function which catches the exception. Most likely because the +// compiler hoists the load of the shadow value somewhere too high. +// This causes asan to report a non-existing bug on 453.povray. +// It sounds like an LLVM bug. +struct FunctionStackPoisoner : public InstVisitor { + Function &F; + AddressSanitizer &ASan; + DIBuilder DIB; + LLVMContext *C; + Type *IntptrTy; + Type *IntptrPtrTy; + ShadowMapping Mapping; + + SmallVector AllocaVec; + SmallVector RetVec; + uint64_t TotalStackSize; + unsigned StackAlignment; + + Function *AsanStackMallocFunc, *AsanStackFreeFunc; + Function *AsanPoisonStackMemoryFunc, *AsanUnpoisonStackMemoryFunc; + + // Stores a place and arguments of poisoning/unpoisoning call for alloca. + struct AllocaPoisonCall { + IntrinsicInst *InsBefore; + uint64_t Size; + bool DoPoison; + }; + SmallVector AllocaPoisonCallVec; + + // Maps Value to an AllocaInst from which the Value is originated. + typedef DenseMap AllocaForValueMapTy; + AllocaForValueMapTy AllocaForValue; + + FunctionStackPoisoner(Function &F, AddressSanitizer &ASan) + : F(F), ASan(ASan), DIB(*F.getParent()), C(ASan.C), + IntptrTy(ASan.IntptrTy), IntptrPtrTy(PointerType::get(IntptrTy, 0)), + Mapping(ASan.Mapping), + TotalStackSize(0), StackAlignment(1 << Mapping.Scale) {} + + bool runOnFunction() { + if (!ClStack) return false; + // Collect alloca, ret, lifetime instructions etc. 
+ for (df_iterator DI = df_begin(&F.getEntryBlock()), + DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) { + BasicBlock *BB = *DI; + visit(*BB); + } + if (AllocaVec.empty()) return false; + + initializeCallbacks(*F.getParent()); + + poisonStack(); + + if (ClDebugStack) { + DEBUG(dbgs() << F); + } + return true; + } + + // Finds all static Alloca instructions and puts + // poisoned red zones around all of them. + // Then unpoison everything back before the function returns. + void poisonStack(); + + // ----------------------- Visitors. + /// \brief Collect all Ret instructions. + void visitReturnInst(ReturnInst &RI) { + RetVec.push_back(&RI); + } + + /// \brief Collect Alloca instructions we want (and can) handle. + void visitAllocaInst(AllocaInst &AI) { + if (!isInterestingAlloca(AI)) return; + + StackAlignment = std::max(StackAlignment, AI.getAlignment()); + AllocaVec.push_back(&AI); + uint64_t AlignedSize = getAlignedAllocaSize(&AI); + TotalStackSize += AlignedSize; + } + + /// \brief Collect lifetime intrinsic calls to check for use-after-scope + /// errors. + void visitIntrinsicInst(IntrinsicInst &II) { + if (!ASan.CheckLifetime) return; + Intrinsic::ID ID = II.getIntrinsicID(); + if (ID != Intrinsic::lifetime_start && + ID != Intrinsic::lifetime_end) + return; + // Found lifetime intrinsic, add ASan instrumentation if necessary. + ConstantInt *Size = dyn_cast(II.getArgOperand(0)); + // If size argument is undefined, don't do anything. + if (Size->isMinusOne()) return; + // Check that size doesn't saturate uint64_t and can + // be stored in IntptrTy. + const uint64_t SizeValue = Size->getValue().getLimitedValue(); + if (SizeValue == ~0ULL || + !ConstantInt::isValueValidForType(IntptrTy, SizeValue)) + return; + // Find alloca instruction that corresponds to llvm.lifetime argument. + AllocaInst *AI = findAllocaForValue(II.getArgOperand(1)); + if (!AI) return; + bool DoPoison = (ID == Intrinsic::lifetime_end); + AllocaPoisonCall APC = {&II, SizeValue, DoPoison}; + AllocaPoisonCallVec.push_back(APC); + } + + // ---------------------- Helpers. + void initializeCallbacks(Module &M); + + // Check if we want (and can) handle this alloca. + bool isInterestingAlloca(AllocaInst &AI) { + return (!AI.isArrayAllocation() && + AI.isStaticAlloca() && + AI.getAllocatedType()->isSized()); + } + size_t RedzoneSize() const { + return RedzoneSizeForScale(Mapping.Scale); + } uint64_t getAllocaSizeInBytes(AllocaInst *AI) { Type *Ty = AI->getAllocatedType(); - uint64_t SizeInBytes = TD->getTypeAllocSize(Ty); + uint64_t SizeInBytes = ASan.TD->getTypeAllocSize(Ty); return SizeInBytes; } uint64_t getAlignedSize(uint64_t SizeInBytes) { - return ((SizeInBytes + RedzoneSize - 1) - / RedzoneSize) * RedzoneSize; + size_t RZ = RedzoneSize(); + return ((SizeInBytes + RZ - 1) / RZ) * RZ; } uint64_t getAlignedAllocaSize(AllocaInst *AI) { uint64_t SizeInBytes = getAllocaSizeInBytes(AI); return getAlignedSize(SizeInBytes); } - - Function *checkInterfaceFunction(Constant *FuncOrBitcast); - void PoisonStack(const ArrayRef &AllocaVec, IRBuilder<> IRB, - Value *ShadowBase, bool DoPoison); - bool LooksLikeCodeInBug11395(Instruction *I); - - Module *CurrentModule; - LLVMContext *C; - TargetData *TD; - uint64_t MappingOffset; - int MappingScale; - size_t RedzoneSize; - int LongSize; - Type *IntptrTy; - Type *IntptrPtrTy; - Function *AsanCtorFunction; - Function *AsanInitFunction; - Instruction *CtorInsertBefore; - OwningPtr BL; + /// Finds alloca where the value comes from. 
+ AllocaInst *findAllocaForValue(Value *V); + void poisonRedZones(const ArrayRef &AllocaVec, IRBuilder<> IRB, + Value *ShadowBase, bool DoPoison); + void poisonAlloca(Value *V, uint64_t Size, IRBuilder<> IRB, bool DoPoison); }; + } // namespace char AddressSanitizer::ID = 0; INITIALIZE_PASS(AddressSanitizer, "asan", "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false, false) -AddressSanitizer::AddressSanitizer() : ModulePass(ID) { } -ModulePass *llvm::createAddressSanitizerPass() { - return new AddressSanitizer(); +FunctionPass *llvm::createAddressSanitizerFunctionPass( + bool CheckInitOrder, bool CheckUseAfterReturn, bool CheckLifetime, + StringRef BlacklistFile, bool ZeroBaseShadow) { + return new AddressSanitizer(CheckInitOrder, CheckUseAfterReturn, + CheckLifetime, BlacklistFile, ZeroBaseShadow); } -const char *AddressSanitizer::getPassName() const { - return "AddressSanitizer"; +char AddressSanitizerModule::ID = 0; +INITIALIZE_PASS(AddressSanitizerModule, "asan-module", + "AddressSanitizer: detects use-after-free and out-of-bounds bugs." + "ModulePass", false, false) +ModulePass *llvm::createAddressSanitizerModulePass( + bool CheckInitOrder, StringRef BlacklistFile, bool ZeroBaseShadow) { + return new AddressSanitizerModule(CheckInitOrder, BlacklistFile, + ZeroBaseShadow); +} + +static size_t TypeSizeToSizeIndex(uint32_t TypeSize) { + size_t Res = countTrailingZeros(TypeSize / 8); + assert(Res < kNumberOfAccessSizes); + return Res; } // Create a constant for Str so that we can pass it to the run-time lib. static GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str) { Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str); - return new GlobalVariable(M, StrConst->getType(), true, - GlobalValue::PrivateLinkage, StrConst, ""); -} - -// Split the basic block and insert an if-then code. -// Before: -// Head -// SplitBefore -// Tail -// After: -// Head -// if (Cmp) -// NewBasicBlock -// SplitBefore -// Tail -// -// Returns the NewBasicBlock's terminator. -BranchInst *AddressSanitizer::splitBlockAndInsertIfThen( - Instruction *SplitBefore, Value *Cmp) { - BasicBlock *Head = SplitBefore->getParent(); - BasicBlock *Tail = Head->splitBasicBlock(SplitBefore); - TerminatorInst *HeadOldTerm = Head->getTerminator(); - BasicBlock *NewBasicBlock = - BasicBlock::Create(*C, "", Head->getParent()); - BranchInst *HeadNewTerm = BranchInst::Create(/*ifTrue*/NewBasicBlock, - /*ifFalse*/Tail, - Cmp); - ReplaceInstWithInst(HeadOldTerm, HeadNewTerm); - - BranchInst *CheckTerm = BranchInst::Create(Tail, NewBasicBlock); - return CheckTerm; + GlobalVariable *GV = new GlobalVariable(M, StrConst->getType(), true, + GlobalValue::PrivateLinkage, StrConst, + kAsanGenPrefix); + GV->setUnnamedAddr(true); // Ok to merge these. + GV->setAlignment(1); // Strings may not be merged w/o setting align 1. 
+ return GV; +} + +static bool GlobalWasGeneratedByAsan(GlobalVariable *G) { + return G->getName().find(kAsanGenPrefix) == 0; } Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) { // Shadow >> scale - Shadow = IRB.CreateLShr(Shadow, MappingScale); - if (MappingOffset == 0) + Shadow = IRB.CreateLShr(Shadow, Mapping.Scale); + if (Mapping.Offset == 0) return Shadow; // (Shadow >> scale) | offset - return IRB.CreateOr(Shadow, ConstantInt::get(IntptrTy, - MappingOffset)); + if (Mapping.OrShadowOffset) + return IRB.CreateOr(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset)); + else + return IRB.CreateAdd(Shadow, ConstantInt::get(IntptrTy, Mapping.Offset)); } -void AddressSanitizer::instrumentMemIntrinsicParam(Instruction *OrigIns, +void AddressSanitizer::instrumentMemIntrinsicParam( + Instruction *OrigIns, Value *Addr, Value *Size, Instruction *InsertBefore, bool IsWrite) { + IRBuilder<> IRB(InsertBefore); + if (Size->getType() != IntptrTy) + Size = IRB.CreateIntCast(Size, IntptrTy, false); // Check the first byte. - { - IRBuilder<> IRB(InsertBefore); - instrumentAddress(OrigIns, IRB, Addr, 8, IsWrite); - } + instrumentAddress(OrigIns, InsertBefore, Addr, 8, IsWrite, Size); // Check the last byte. - { - IRBuilder<> IRB(InsertBefore); - Value *SizeMinusOne = IRB.CreateSub( - Size, ConstantInt::get(Size->getType(), 1)); - SizeMinusOne = IRB.CreateIntCast(SizeMinusOne, IntptrTy, false); - Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); - Value *AddrPlusSizeMinisOne = IRB.CreateAdd(AddrLong, SizeMinusOne); - instrumentAddress(OrigIns, IRB, AddrPlusSizeMinisOne, 8, IsWrite); - } + IRB.SetInsertPoint(InsertBefore); + Value *SizeMinusOne = IRB.CreateSub(Size, ConstantInt::get(IntptrTy, 1)); + Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); + Value *AddrLast = IRB.CreateAdd(AddrLong, SizeMinusOne); + instrumentAddress(OrigIns, InsertBefore, AddrLast, 8, IsWrite, Size); } // Instrument memset/memmove/memcpy bool AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) { Value *Dst = MI->getDest(); MemTransferInst *MemTran = dyn_cast(MI); - Value *Src = MemTran ? MemTran->getSource() : NULL; + Value *Src = MemTran ? MemTran->getSource() : 0; Value *Length = MI->getLength(); Constant *ConstLength = dyn_cast(Length); @@ -283,8 +588,8 @@ bool AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) { IRBuilder<> IRB(InsertBefore); Value *Cmp = IRB.CreateICmpNE(Length, - Constant::getNullValue(Length->getType())); - InsertBefore = splitBlockAndInsertIfThen(InsertBefore, Cmp); + Constant::getNullValue(Length->getType())); + InsertBefore = SplitBlockAndInsertIfThen(cast(Cmp), false); } instrumentMemIntrinsicParam(MI, Dst, Length, InsertBefore, true); @@ -320,34 +625,54 @@ static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite) { } void AddressSanitizer::instrumentMop(Instruction *I) { - bool IsWrite; + bool IsWrite = false; Value *Addr = isInterestingMemoryAccess(I, &IsWrite); assert(Addr); - if (ClOpt && ClOptGlobals && isa(Addr)) { - // We are accessing a global scalar variable. Nothing to catch here. - return; + if (ClOpt && ClOptGlobals) { + if (GlobalVariable *G = dyn_cast(Addr)) { + // If initialization order checking is disabled, a simple access to a + // dynamically initialized global is always valid. + if (!CheckInitOrder) + return; + // If a global variable does not have dynamic initialization we don't + // have to instrument it. However, if a global does not have initailizer + // at all, we assume it has dynamic initializer (in other TU). 
+ if (G->hasInitializer() && !DynamicallyInitializedGlobals.Contains(G)) + return; + } } + Type *OrigPtrTy = Addr->getType(); Type *OrigTy = cast(OrigPtrTy)->getElementType(); assert(OrigTy->isSized()); uint32_t TypeSize = TD->getTypeStoreSizeInBits(OrigTy); - if (TypeSize != 8 && TypeSize != 16 && - TypeSize != 32 && TypeSize != 64 && TypeSize != 128) { - // Ignore all unusual sizes. - return; - } + assert((TypeSize % 8) == 0); + // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check. + if (TypeSize == 8 || TypeSize == 16 || + TypeSize == 32 || TypeSize == 64 || TypeSize == 128) + return instrumentAddress(I, I, Addr, TypeSize, IsWrite, 0); + // Instrument unusual size (but still multiple of 8). + // We can not do it with a single check, so we do 1-byte check for the first + // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able + // to report the actual access size. IRBuilder<> IRB(I); - instrumentAddress(I, IRB, Addr, TypeSize, IsWrite); + Value *LastByte = IRB.CreateIntToPtr( + IRB.CreateAdd(IRB.CreatePointerCast(Addr, IntptrTy), + ConstantInt::get(IntptrTy, TypeSize / 8 - 1)), + OrigPtrTy); + Value *Size = ConstantInt::get(IntptrTy, TypeSize / 8); + instrumentAddress(I, I, Addr, 8, IsWrite, Size); + instrumentAddress(I, I, LastByte, 8, IsWrite, Size); } // Validate the result of Module::getOrInsertFunction called for an interface // function of AddressSanitizer. If the instrumented module defines a function // with the same name, their prototypes must match, otherwise // getOrInsertFunction returns a bitcast. -Function *AddressSanitizer::checkInterfaceFunction(Constant *FuncOrBitcast) { +static Function *checkInterfaceFunction(Constant *FuncOrBitcast) { if (isa(FuncOrBitcast)) return cast(FuncOrBitcast); FuncOrBitcast->dump(); report_fatal_error("trying to redefine an AddressSanitizer " @@ -355,24 +680,47 @@ Function *AddressSanitizer::checkInterfaceFunction(Constant *FuncOrBitcast) { } Instruction *AddressSanitizer::generateCrashCode( - IRBuilder<> &IRB, Value *Addr, bool IsWrite, uint32_t TypeSize) { - // IsWrite and TypeSize are encoded in the function name. - std::string FunctionName = std::string(kAsanReportErrorTemplate) + - (IsWrite ? "store" : "load") + itostr(TypeSize / 8); - Value *ReportWarningFunc = CurrentModule->getOrInsertFunction( - FunctionName, IRB.getVoidTy(), IntptrTy, NULL); - CallInst *Call = IRB.CreateCall(ReportWarningFunc, Addr); - Call->setDoesNotReturn(); + Instruction *InsertBefore, Value *Addr, + bool IsWrite, size_t AccessSizeIndex, Value *SizeArgument) { + IRBuilder<> IRB(InsertBefore); + CallInst *Call = SizeArgument + ? IRB.CreateCall2(AsanErrorCallbackSized[IsWrite], Addr, SizeArgument) + : IRB.CreateCall(AsanErrorCallback[IsWrite][AccessSizeIndex], Addr); + + // We don't do Call->setDoesNotReturn() because the BB already has + // UnreachableInst at the end. + // This EmptyAsm is required to avoid callback merge. 
+ IRB.CreateCall(EmptyAsm); return Call; } +Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong, + Value *ShadowValue, + uint32_t TypeSize) { + size_t Granularity = 1 << Mapping.Scale; + // Addr & (Granularity - 1) + Value *LastAccessedByte = IRB.CreateAnd( + AddrLong, ConstantInt::get(IntptrTy, Granularity - 1)); + // (Addr & (Granularity - 1)) + size - 1 + if (TypeSize / 8 > 1) + LastAccessedByte = IRB.CreateAdd( + LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)); + // (uint8_t) ((Addr & (Granularity-1)) + size - 1) + LastAccessedByte = IRB.CreateIntCast( + LastAccessedByte, ShadowValue->getType(), false); + // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue + return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue); +} + void AddressSanitizer::instrumentAddress(Instruction *OrigIns, - IRBuilder<> &IRB, Value *Addr, - uint32_t TypeSize, bool IsWrite) { + Instruction *InsertBefore, + Value *Addr, uint32_t TypeSize, + bool IsWrite, Value *SizeArgument) { + IRBuilder<> IRB(InsertBefore); Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); Type *ShadowTy = IntegerType::get( - *C, std::max(8U, TypeSize >> MappingScale)); + *C, std::max(8U, TypeSize >> Mapping.Scale)); Type *ShadowPtrTy = PointerType::get(ShadowTy, 0); Value *ShadowPtr = memToShadow(AddrLong, IRB); Value *CmpVal = Constant::getNullValue(ShadowTy); @@ -380,97 +728,158 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns, IRB.CreateIntToPtr(ShadowPtr, ShadowPtrTy)); Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal); + size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize); + size_t Granularity = 1 << Mapping.Scale; + TerminatorInst *CrashTerm = 0; + + if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) { + TerminatorInst *CheckTerm = + SplitBlockAndInsertIfThen(cast(Cmp), false); + assert(dyn_cast(CheckTerm)->isUnconditional()); + BasicBlock *NextBB = CheckTerm->getSuccessor(0); + IRB.SetInsertPoint(CheckTerm); + Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize); + BasicBlock *CrashBlock = + BasicBlock::Create(*C, "", NextBB->getParent(), NextBB); + CrashTerm = new UnreachableInst(*C, CrashBlock); + BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2); + ReplaceInstWithInst(CheckTerm, NewTerm); + } else { + CrashTerm = SplitBlockAndInsertIfThen(cast(Cmp), true); + } - Instruction *CheckTerm = splitBlockAndInsertIfThen( - cast(Cmp)->getNextNode(), Cmp); - IRBuilder<> IRB2(CheckTerm); - - size_t Granularity = 1 << MappingScale; - if (TypeSize < 8 * Granularity) { - // Addr & (Granularity - 1) - Value *LastAccessedByte = IRB2.CreateAnd( - AddrLong, ConstantInt::get(IntptrTy, Granularity - 1)); - // (Addr & (Granularity - 1)) + size - 1 - if (TypeSize / 8 > 1) - LastAccessedByte = IRB2.CreateAdd( - LastAccessedByte, ConstantInt::get(IntptrTy, TypeSize / 8 - 1)); - // (uint8_t) ((Addr & (Granularity-1)) + size - 1) - LastAccessedByte = IRB2.CreateIntCast( - LastAccessedByte, IRB.getInt8Ty(), false); - // ((uint8_t) ((Addr & (Granularity-1)) + size - 1)) >= ShadowValue - Value *Cmp2 = IRB2.CreateICmpSGE(LastAccessedByte, ShadowValue); - - CheckTerm = splitBlockAndInsertIfThen(CheckTerm, Cmp2); - } - - IRBuilder<> IRB1(CheckTerm); - Instruction *Crash = generateCrashCode(IRB1, AddrLong, IsWrite, TypeSize); + Instruction *Crash = generateCrashCode( + CrashTerm, AddrLong, IsWrite, AccessSizeIndex, SizeArgument); Crash->setDebugLoc(OrigIns->getDebugLoc()); - ReplaceInstWithInst(CheckTerm, new UnreachableInst(*C)); } -// This 
function replaces all global variables with new variables that have -// trailing redzones. It also creates a function that poisons -// redzones and inserts this function into llvm.global_ctors. -bool AddressSanitizer::insertGlobalRedzones(Module &M) { - SmallVector GlobalsToChange; +void AddressSanitizerModule::createInitializerPoisonCalls( + Module &M, GlobalValue *ModuleName) { + // We do all of our poisoning and unpoisoning within _GLOBAL__I_a. + Function *GlobalInit = M.getFunction("_GLOBAL__I_a"); + // If that function is not present, this TU contains no globals, or they have + // all been optimized away + if (!GlobalInit) + return; + + // Set up the arguments to our poison/unpoison functions. + IRBuilder<> IRB(GlobalInit->begin()->getFirstInsertionPt()); + + // Add a call to poison all external globals before the given function starts. + Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy); + IRB.CreateCall(AsanPoisonGlobals, ModuleNameAddr); - for (Module::GlobalListType::iterator G = M.getGlobalList().begin(), - E = M.getGlobalList().end(); G != E; ++G) { - Type *Ty = cast(G->getType())->getElementType(); - DEBUG(dbgs() << "GLOBAL: " << *G); - - if (!Ty->isSized()) continue; - if (!G->hasInitializer()) continue; - // Touch only those globals that will not be defined in other modules. - // Don't handle ODR type linkages since other modules may be built w/o asan. - if (G->getLinkage() != GlobalVariable::ExternalLinkage && - G->getLinkage() != GlobalVariable::PrivateLinkage && - G->getLinkage() != GlobalVariable::InternalLinkage) - continue; - // Two problems with thread-locals: - // - The address of the main thread's copy can't be computed at link-time. - // - Need to poison all copies, not just the main thread's one. - if (G->isThreadLocal()) - continue; - // For now, just ignore this Alloca if the alignment is large. - if (G->getAlignment() > RedzoneSize) continue; - - // Ignore all the globals with the names starting with "\01L_OBJC_". - // Many of those are put into the .cstring section. The linker compresses - // that section by removing the spare \0s after the string terminator, so - // our redzones get broken. - if ((G->getName().find("\01L_OBJC_") == 0) || - (G->getName().find("\01l_OBJC_") == 0)) { - DEBUG(dbgs() << "Ignoring \\01L_OBJC_* global: " << *G); - continue; + // Add calls to unpoison all globals before each return instruction. + for (Function::iterator I = GlobalInit->begin(), E = GlobalInit->end(); + I != E; ++I) { + if (ReturnInst *RI = dyn_cast(I->getTerminator())) { + CallInst::Create(AsanUnpoisonGlobals, "", RI); } + } +} - if (G->hasSection()) { - StringRef Section(G->getSection()); - // Ignore the globals from the __OBJC section. The ObjC runtime assumes - // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to - // them. - if ((Section.find("__OBJC,") == 0) || - (Section.find("__DATA, __objc_") == 0)) { - DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G); - continue; - } - // See http://code.google.com/p/address-sanitizer/issues/detail?id=32 - // Constant CFString instances are compiled in the following way: - // -- the string buffer is emitted into - // __TEXT,__cstring,cstring_literals - // -- the constant NSConstantString structure referencing that buffer - // is placed into __DATA,__cfstring - // Therefore there's no point in placing redzones into __DATA,__cfstring. 
- // Moreover, it causes the linker to crash on OS X 10.7 - if (Section.find("__DATA,__cfstring") == 0) { - DEBUG(dbgs() << "Ignoring CFString: " << *G); - continue; - } +bool AddressSanitizerModule::ShouldInstrumentGlobal(GlobalVariable *G) { + Type *Ty = cast(G->getType())->getElementType(); + DEBUG(dbgs() << "GLOBAL: " << *G << "\n"); + + if (BL->isIn(*G)) return false; + if (!Ty->isSized()) return false; + if (!G->hasInitializer()) return false; + if (GlobalWasGeneratedByAsan(G)) return false; // Our own global. + // Touch only those globals that will not be defined in other modules. + // Don't handle ODR type linkages since other modules may be built w/o asan. + if (G->getLinkage() != GlobalVariable::ExternalLinkage && + G->getLinkage() != GlobalVariable::PrivateLinkage && + G->getLinkage() != GlobalVariable::InternalLinkage) + return false; + // Two problems with thread-locals: + // - The address of the main thread's copy can't be computed at link-time. + // - Need to poison all copies, not just the main thread's one. + if (G->isThreadLocal()) + return false; + // For now, just ignore this Alloca if the alignment is large. + if (G->getAlignment() > RedzoneSize()) return false; + + // Ignore all the globals with the names starting with "\01L_OBJC_". + // Many of those are put into the .cstring section. The linker compresses + // that section by removing the spare \0s after the string terminator, so + // our redzones get broken. + if ((G->getName().find("\01L_OBJC_") == 0) || + (G->getName().find("\01l_OBJC_") == 0)) { + DEBUG(dbgs() << "Ignoring \\01L_OBJC_* global: " << *G); + return false; + } + + if (G->hasSection()) { + StringRef Section(G->getSection()); + // Ignore the globals from the __OBJC section. The ObjC runtime assumes + // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to + // them. + if ((Section.find("__OBJC,") == 0) || + (Section.find("__DATA, __objc_") == 0)) { + DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G); + return false; + } + // See http://code.google.com/p/address-sanitizer/issues/detail?id=32 + // Constant CFString instances are compiled in the following way: + // -- the string buffer is emitted into + // __TEXT,__cstring,cstring_literals + // -- the constant NSConstantString structure referencing that buffer + // is placed into __DATA,__cfstring + // Therefore there's no point in placing redzones into __DATA,__cfstring. + // Moreover, it causes the linker to crash on OS X 10.7 + if (Section.find("__DATA,__cfstring") == 0) { + DEBUG(dbgs() << "Ignoring CFString: " << *G); + return false; } + } + + return true; +} + +void AddressSanitizerModule::initializeCallbacks(Module &M) { + IRBuilder<> IRB(*C); + // Declare our poisoning and unpoisoning functions. + AsanPoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction( + kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy, NULL)); + AsanPoisonGlobals->setLinkage(Function::ExternalLinkage); + AsanUnpoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction( + kAsanUnpoisonGlobalsName, IRB.getVoidTy(), NULL)); + AsanUnpoisonGlobals->setLinkage(Function::ExternalLinkage); + // Declare functions that register/unregister globals. 
+ AsanRegisterGlobals = checkInterfaceFunction(M.getOrInsertFunction( + kAsanRegisterGlobalsName, IRB.getVoidTy(), + IntptrTy, IntptrTy, NULL)); + AsanRegisterGlobals->setLinkage(Function::ExternalLinkage); + AsanUnregisterGlobals = checkInterfaceFunction(M.getOrInsertFunction( + kAsanUnregisterGlobalsName, + IRB.getVoidTy(), IntptrTy, IntptrTy, NULL)); + AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage); +} - GlobalsToChange.push_back(G); +// This function replaces all global variables with new variables that have +// trailing redzones. It also creates a function that poisons +// redzones and inserts this function into llvm.global_ctors. +bool AddressSanitizerModule::runOnModule(Module &M) { + if (!ClGlobals) return false; + TD = getAnalysisIfAvailable(); + if (!TD) + return false; + BL.reset(new BlackList(BlacklistFile)); + if (BL->isIn(M)) return false; + C = &(M.getContext()); + int LongSize = TD->getPointerSizeInBits(); + IntptrTy = Type::getIntNTy(*C, LongSize); + Mapping = getShadowMapping(M, LongSize, ZeroBaseShadow); + initializeCallbacks(M); + DynamicallyInitializedGlobals.Init(M); + + SmallVector GlobalsToChange; + + for (Module::GlobalListType::iterator G = M.global_begin(), + E = M.global_end(); G != E; ++G) { + if (ShouldInstrumentGlobal(G)) + GlobalsToChange.push_back(G); } size_t n = GlobalsToChange.size(); @@ -481,39 +890,64 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) { // size_t size; // size_t size_with_redzone; // const char *name; + // const char *module_name; + // size_t has_dynamic_init; // We initialize an array of such structures and pass it to a run-time call. StructType *GlobalStructTy = StructType::get(IntptrTy, IntptrTy, + IntptrTy, IntptrTy, IntptrTy, IntptrTy, NULL); - SmallVector Initializers(n); + SmallVector Initializers(n), DynamicInit; + + + Function *CtorFunc = M.getFunction(kAsanModuleCtorName); + assert(CtorFunc); + IRBuilder<> IRB(CtorFunc->getEntryBlock().getTerminator()); - IRBuilder<> IRB(CtorInsertBefore); + bool HasDynamicallyInitializedGlobals = false; + + GlobalVariable *ModuleName = createPrivateGlobalForString( + M, M.getModuleIdentifier()); + // We shouldn't merge same module names, as this string serves as unique + // module ID in runtime. + ModuleName->setUnnamedAddr(false); for (size_t i = 0; i < n; i++) { + static const uint64_t kMaxGlobalRedzone = 1 << 18; GlobalVariable *G = GlobalsToChange[i]; PointerType *PtrTy = cast(G->getType()); Type *Ty = PtrTy->getElementType(); uint64_t SizeInBytes = TD->getTypeAllocSize(Ty); - uint64_t RightRedzoneSize = RedzoneSize + - (RedzoneSize - (SizeInBytes % RedzoneSize)); + uint64_t MinRZ = RedzoneSize(); + // MinRZ <= RZ <= kMaxGlobalRedzone + // and trying to make RZ to be ~ 1/4 of SizeInBytes. + uint64_t RZ = std::max(MinRZ, + std::min(kMaxGlobalRedzone, + (SizeInBytes / MinRZ / 4) * MinRZ)); + uint64_t RightRedzoneSize = RZ; + // Round up to MinRZ + if (SizeInBytes % MinRZ) + RightRedzoneSize += MinRZ - (SizeInBytes % MinRZ); + assert(((RightRedzoneSize + SizeInBytes) % MinRZ) == 0); Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize); + // Determine whether this global should be poisoned in initialization. + bool GlobalHasDynamicInitializer = + DynamicallyInitializedGlobals.Contains(G); + // Don't check initialization order if this global is blacklisted. 
+ GlobalHasDynamicInitializer &= !BL->isInInit(*G); StructType *NewTy = StructType::get(Ty, RightRedZoneTy, NULL); Constant *NewInitializer = ConstantStruct::get( NewTy, G->getInitializer(), Constant::getNullValue(RightRedZoneTy), NULL); - SmallString<2048> DescriptionOfGlobal = G->getName(); - DescriptionOfGlobal += " ("; - DescriptionOfGlobal += M.getModuleIdentifier(); - DescriptionOfGlobal += ")"; - GlobalVariable *Name = createPrivateGlobalForString(M, DescriptionOfGlobal); + GlobalVariable *Name = createPrivateGlobalForString(M, G->getName()); // Create a new global variable with enough space for a redzone. GlobalVariable *NewGlobal = new GlobalVariable( M, NewTy, G->isConstant(), G->getLinkage(), NewInitializer, "", G, G->getThreadLocalMode()); NewGlobal->copyAttributesFrom(G); - NewGlobal->setAlignment(RedzoneSize); + NewGlobal->setAlignment(MinRZ); Value *Indices2[2]; Indices2[0] = IRB.getInt32(0); @@ -530,8 +964,15 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) { ConstantInt::get(IntptrTy, SizeInBytes), ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize), ConstantExpr::getPointerCast(Name, IntptrTy), + ConstantExpr::getPointerCast(ModuleName, IntptrTy), + ConstantInt::get(IntptrTy, GlobalHasDynamicInitializer), NULL); - DEBUG(dbgs() << "NEW GLOBAL:\n" << *NewGlobal); + + // Populate the first and last globals declared in this TU. + if (CheckInitOrder && GlobalHasDynamicInitializer) + HasDynamicallyInitializedGlobals = true; + + DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n"); } ArrayType *ArrayOfGlobalStructTy = ArrayType::get(GlobalStructTy, n); @@ -539,10 +980,9 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) { M, ArrayOfGlobalStructTy, false, GlobalVariable::PrivateLinkage, ConstantArray::get(ArrayOfGlobalStructTy, Initializers), ""); - Function *AsanRegisterGlobals = checkInterfaceFunction(M.getOrInsertFunction( - kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL)); - AsanRegisterGlobals->setLinkage(Function::ExternalLinkage); - + // Create calls for poisoning before initializers run and unpoisoning after. + if (CheckInitOrder && HasDynamicallyInitializedGlobals) + createInitializerPoisonCalls(M, ModuleName); IRB.CreateCall2(AsanRegisterGlobals, IRB.CreatePointerCast(AllGlobals, IntptrTy), ConstantInt::get(IntptrTy, n)); @@ -554,12 +994,6 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) { GlobalValue::InternalLinkage, kAsanModuleDtorName, &M); BasicBlock *AsanDtorBB = BasicBlock::Create(*C, "", AsanDtorFunction); IRBuilder<> IRB_Dtor(ReturnInst::Create(*C, AsanDtorBB)); - Function *AsanUnregisterGlobals = - checkInterfaceFunction(M.getOrInsertFunction( - kAsanUnregisterGlobalsName, - IRB.getVoidTy(), IntptrTy, IntptrTy, NULL)); - AsanUnregisterGlobals->setLinkage(Function::ExternalLinkage); - IRB_Dtor.CreateCall2(AsanUnregisterGlobals, IRB.CreatePointerCast(AllGlobals, IntptrTy), ConstantInt::get(IntptrTy, n)); @@ -569,86 +1003,81 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) { return true; } +void AddressSanitizer::initializeCallbacks(Module &M) { + IRBuilder<> IRB(*C); + // Create __asan_report* callbacks. + for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) { + for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes; + AccessSizeIndex++) { + // IsWrite and TypeSize are encoded in the function name. + std::string FunctionName = std::string(kAsanReportErrorTemplate) + + (AccessIsWrite ? 
"store" : "load") + itostr(1 << AccessSizeIndex); + // If we are merging crash callbacks, they have two parameters. + AsanErrorCallback[AccessIsWrite][AccessSizeIndex] = + checkInterfaceFunction(M.getOrInsertFunction( + FunctionName, IRB.getVoidTy(), IntptrTy, NULL)); + } + } + AsanErrorCallbackSized[0] = checkInterfaceFunction(M.getOrInsertFunction( + kAsanReportLoadN, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL)); + AsanErrorCallbackSized[1] = checkInterfaceFunction(M.getOrInsertFunction( + kAsanReportStoreN, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL)); + + AsanHandleNoReturnFunc = checkInterfaceFunction(M.getOrInsertFunction( + kAsanHandleNoReturnName, IRB.getVoidTy(), NULL)); + // We insert an empty inline asm after __asan_report* to avoid callback merge. + EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false), + StringRef(""), StringRef(""), + /*hasSideEffects=*/true); +} + +void AddressSanitizer::emitShadowMapping(Module &M, IRBuilder<> &IRB) const { + // Tell the values of mapping offset and scale to the run-time. + GlobalValue *asan_mapping_offset = + new GlobalVariable(M, IntptrTy, true, GlobalValue::LinkOnceODRLinkage, + ConstantInt::get(IntptrTy, Mapping.Offset), + kAsanMappingOffsetName); + // Read the global, otherwise it may be optimized away. + IRB.CreateLoad(asan_mapping_offset, true); + + GlobalValue *asan_mapping_scale = + new GlobalVariable(M, IntptrTy, true, GlobalValue::LinkOnceODRLinkage, + ConstantInt::get(IntptrTy, Mapping.Scale), + kAsanMappingScaleName); + // Read the global, otherwise it may be optimized away. + IRB.CreateLoad(asan_mapping_scale, true); +} + // virtual -bool AddressSanitizer::runOnModule(Module &M) { +bool AddressSanitizer::doInitialization(Module &M) { // Initialize the private fields. No one has accessed them before. - TD = getAnalysisIfAvailable(); + TD = getAnalysisIfAvailable(); + if (!TD) return false; - BL.reset(new FunctionBlackList(ClBlackListFile)); + BL.reset(new BlackList(BlacklistFile)); + DynamicallyInitializedGlobals.Init(M); - CurrentModule = &M; C = &(M.getContext()); LongSize = TD->getPointerSizeInBits(); IntptrTy = Type::getIntNTy(*C, LongSize); - IntptrPtrTy = PointerType::get(IntptrTy, 0); AsanCtorFunction = Function::Create( FunctionType::get(Type::getVoidTy(*C), false), GlobalValue::InternalLinkage, kAsanModuleCtorName, &M); BasicBlock *AsanCtorBB = BasicBlock::Create(*C, "", AsanCtorFunction); - CtorInsertBefore = ReturnInst::Create(*C, AsanCtorBB); - // call __asan_init in the module ctor. - IRBuilder<> IRB(CtorInsertBefore); + IRBuilder<> IRB(ReturnInst::Create(*C, AsanCtorBB)); AsanInitFunction = checkInterfaceFunction( M.getOrInsertFunction(kAsanInitName, IRB.getVoidTy(), NULL)); AsanInitFunction->setLinkage(Function::ExternalLinkage); IRB.CreateCall(AsanInitFunction); - llvm::Triple targetTriple(M.getTargetTriple()); - bool isAndroid = targetTriple.getEnvironment() == llvm::Triple::ANDROIDEABI; - - MappingOffset = isAndroid ? kDefaultShadowOffsetAndroid : - (LongSize == 32 ? kDefaultShadowOffset32 : kDefaultShadowOffset64); - if (ClMappingOffsetLog >= 0) { - if (ClMappingOffsetLog == 0) { - // special case - MappingOffset = 0; - } else { - MappingOffset = 1ULL << ClMappingOffsetLog; - } - } - MappingScale = kDefaultShadowScale; - if (ClMappingScale) { - MappingScale = ClMappingScale; - } - // Redzone used for stack and globals is at least 32 bytes. - // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively. 
- RedzoneSize = std::max(32, (int)(1 << MappingScale)); - - bool Res = false; - - if (ClGlobals) - Res |= insertGlobalRedzones(M); - - if (ClMappingOffsetLog >= 0) { - // Tell the run-time the current values of mapping offset and scale. - GlobalValue *asan_mapping_offset = - new GlobalVariable(M, IntptrTy, true, GlobalValue::LinkOnceODRLinkage, - ConstantInt::get(IntptrTy, MappingOffset), - kAsanMappingOffsetName); - // Read the global, otherwise it may be optimized away. - IRB.CreateLoad(asan_mapping_offset, true); - } - if (ClMappingScale) { - GlobalValue *asan_mapping_scale = - new GlobalVariable(M, IntptrTy, true, GlobalValue::LinkOnceODRLinkage, - ConstantInt::get(IntptrTy, MappingScale), - kAsanMappingScaleName); - // Read the global, otherwise it may be optimized away. - IRB.CreateLoad(asan_mapping_scale, true); - } - - - for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) { - if (F->isDeclaration()) continue; - Res |= handleFunction(M, *F); - } + Mapping = getShadowMapping(M, LongSize, ZeroBaseShadow); + emitShadowMapping(M, IRB); appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndCtorPriority); - - return Res; + return true; } bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) { @@ -667,19 +1096,25 @@ bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) { return false; } -bool AddressSanitizer::handleFunction(Module &M, Function &F) { +bool AddressSanitizer::runOnFunction(Function &F) { if (BL->isIn(F)) return false; if (&F == AsanCtorFunction) return false; + if (F.getLinkage() == GlobalValue::AvailableExternallyLinkage) return false; + DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n"); + initializeCallbacks(*F.getParent()); - // If needed, insert __asan_init before checking for AddressSafety attr. + // If needed, insert __asan_init before checking for SanitizeAddress attr. maybeInsertAsanInitAtFunctionEntry(F); - if (!F.hasFnAttr(Attribute::AddressSafety)) return false; + if (!F.getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::SanitizeAddress)) + return false; if (!ClDebugFunc.empty() && ClDebugFunc != F.getName()) return false; - // We want to instrument every address only once per basic block - // (unless there are calls between uses). + + // We want to instrument every address only once per basic block (unless there + // are calls between uses). SmallSet TempsToInstrument; SmallVector ToInstrument; SmallVector NoReturnCalls; @@ -689,6 +1124,7 @@ bool AddressSanitizer::handleFunction(Module &M, Function &F) { for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) { TempsToInstrument.clear(); + int NumInsnsPerBB = 0; for (BasicBlock::iterator BI = FI->begin(), BE = FI->end(); BI != BE; ++BI) { if (LooksLikeCodeInBug11395(BI)) return false; @@ -700,16 +1136,19 @@ bool AddressSanitizer::handleFunction(Module &M, Function &F) { } else if (isa(BI) && ClMemIntrin) { // ok, take it. } else { - if (CallInst *CI = dyn_cast(BI)) { + CallSite CS(BI); + if (CS) { // A call inside BB. 
TempsToInstrument.clear(); - if (CI->doesNotReturn()) { - NoReturnCalls.push_back(CI); - } + if (CS.doesNotReturn()) + NoReturnCalls.push_back(CS.getInstruction()); } continue; } ToInstrument.push_back(BI); + NumInsnsPerBB++; + if (NumInsnsPerBB >= ClMaxInsnsToInstrumentPerBB) + break; } } @@ -727,18 +1166,17 @@ bool AddressSanitizer::handleFunction(Module &M, Function &F) { NumInstrumented++; } - DEBUG(dbgs() << F); - - bool ChangedStack = poisonStackInFunction(M, F); + FunctionStackPoisoner FSP(F, *this); + bool ChangedStack = FSP.runOnFunction(); // We must unpoison the stack before every NoReturn call (throw, _exit, etc). // See e.g. http://code.google.com/p/address-sanitizer/issues/detail?id=37 for (size_t i = 0, n = NoReturnCalls.size(); i != n; i++) { Instruction *CI = NoReturnCalls[i]; IRBuilder<> IRB(CI); - IRB.CreateCall(M.getOrInsertFunction(kAsanHandleNoReturnName, - IRB.getVoidTy(), NULL)); + IRB.CreateCall(AsanHandleNoReturnFunc); } + DEBUG(dbgs() << "ASAN done instrumenting:\n" << F << "\n"); return NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty(); } @@ -754,10 +1192,10 @@ static uint64_t ValueForPoison(uint64_t PoisonByte, size_t ShadowRedzoneSize) { static void PoisonShadowPartialRightRedzone(uint8_t *Shadow, size_t Size, - size_t RedzoneSize, + size_t RZSize, size_t ShadowGranularity, uint8_t Magic) { - for (size_t i = 0; i < RedzoneSize; + for (size_t i = 0; i < RZSize; i+= ShadowGranularity, Shadow++) { if (i + ShadowGranularity <= Size) { *Shadow = 0; // fully addressable @@ -769,10 +1207,35 @@ static void PoisonShadowPartialRightRedzone(uint8_t *Shadow, } } -void AddressSanitizer::PoisonStack(const ArrayRef &AllocaVec, - IRBuilder<> IRB, - Value *ShadowBase, bool DoPoison) { - size_t ShadowRZSize = RedzoneSize >> MappingScale; +// Workaround for bug 11395: we don't want to instrument stack in functions +// with large assembly blobs (32-bit only), otherwise reg alloc may crash. +// FIXME: remove once the bug 11395 is fixed. +bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) { + if (LongSize != 32) return false; + CallInst *CI = dyn_cast(I); + if (!CI || !CI->isInlineAsm()) return false; + if (CI->getNumArgOperands() <= 5) return false; + // We have inline assembly with quite a few arguments. + return true; +} + +void FunctionStackPoisoner::initializeCallbacks(Module &M) { + IRBuilder<> IRB(*C); + AsanStackMallocFunc = checkInterfaceFunction(M.getOrInsertFunction( + kAsanStackMallocName, IntptrTy, IntptrTy, IntptrTy, NULL)); + AsanStackFreeFunc = checkInterfaceFunction(M.getOrInsertFunction( + kAsanStackFreeName, IRB.getVoidTy(), + IntptrTy, IntptrTy, IntptrTy, NULL)); + AsanPoisonStackMemoryFunc = checkInterfaceFunction(M.getOrInsertFunction( + kAsanPoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL)); + AsanUnpoisonStackMemoryFunc = checkInterfaceFunction(M.getOrInsertFunction( + kAsanUnpoisonStackMemoryName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL)); +} + +void FunctionStackPoisoner::poisonRedZones( + const ArrayRef &AllocaVec, IRBuilder<> IRB, Value *ShadowBase, + bool DoPoison) { + size_t ShadowRZSize = RedzoneSize() >> Mapping.Scale; assert(ShadowRZSize >= 1 && ShadowRZSize <= 4); Type *RZTy = Type::getIntNTy(*C, ShadowRZSize * 8); Type *RZPtrTy = PointerType::get(RZTy, 0); @@ -788,12 +1251,12 @@ void AddressSanitizer::PoisonStack(const ArrayRef &AllocaVec, IRB.CreateStore(PoisonLeft, IRB.CreateIntToPtr(ShadowBase, RZPtrTy)); // poison all other red zones. 
- uint64_t Pos = RedzoneSize; + uint64_t Pos = RedzoneSize(); for (size_t i = 0, n = AllocaVec.size(); i < n; i++) { AllocaInst *AI = AllocaVec[i]; uint64_t SizeInBytes = getAllocaSizeInBytes(AI); uint64_t AlignedSize = getAlignedAllocaSize(AI); - assert(AlignedSize - SizeInBytes < RedzoneSize); + assert(AlignedSize - SizeInBytes < RedzoneSize()); Value *Ptr = NULL; Pos += AlignedSize; @@ -803,14 +1266,18 @@ void AddressSanitizer::PoisonStack(const ArrayRef &AllocaVec, // Poison the partial redzone at right Ptr = IRB.CreateAdd( ShadowBase, ConstantInt::get(IntptrTy, - (Pos >> MappingScale) - ShadowRZSize)); - size_t AddressableBytes = RedzoneSize - (AlignedSize - SizeInBytes); + (Pos >> Mapping.Scale) - ShadowRZSize)); + size_t AddressableBytes = RedzoneSize() - (AlignedSize - SizeInBytes); uint32_t Poison = 0; if (DoPoison) { PoisonShadowPartialRightRedzone((uint8_t*)&Poison, AddressableBytes, - RedzoneSize, - 1ULL << MappingScale, + RedzoneSize(), + 1ULL << Mapping.Scale, kAsanStackPartialRedzoneMagic); + Poison = + ASan.TD->isLittleEndian() + ? support::endian::byte_swap(Poison) + : support::endian::byte_swap(Poison); } Value *PartialPoison = ConstantInt::get(RZTy, Poison); IRB.CreateStore(PartialPoison, IRB.CreateIntToPtr(Ptr, RZPtrTy)); @@ -818,76 +1285,23 @@ void AddressSanitizer::PoisonStack(const ArrayRef &AllocaVec, // Poison the full redzone at right. Ptr = IRB.CreateAdd(ShadowBase, - ConstantInt::get(IntptrTy, Pos >> MappingScale)); - Value *Poison = i == AllocaVec.size() - 1 ? PoisonRight : PoisonMid; + ConstantInt::get(IntptrTy, Pos >> Mapping.Scale)); + bool LastAlloca = (i == AllocaVec.size() - 1); + Value *Poison = LastAlloca ? PoisonRight : PoisonMid; IRB.CreateStore(Poison, IRB.CreateIntToPtr(Ptr, RZPtrTy)); - Pos += RedzoneSize; + Pos += RedzoneSize(); } } -// Workaround for bug 11395: we don't want to instrument stack in functions -// with large assembly blobs (32-bit only), otherwise reg alloc may crash. -// FIXME: remove once the bug 11395 is fixed. -bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) { - if (LongSize != 32) return false; - CallInst *CI = dyn_cast(I); - if (!CI || !CI->isInlineAsm()) return false; - if (CI->getNumArgOperands() <= 5) return false; - // We have inline assembly with quite a few arguments. - return true; -} - -// Find all static Alloca instructions and put -// poisoned red zones around all of them. -// Then unpoison everything back before the function returns. -// -// Stack poisoning does not play well with exception handling. -// When an exception is thrown, we essentially bypass the code -// that unpoisones the stack. This is why the run-time library has -// to intercept __cxa_throw (as well as longjmp, etc) and unpoison the entire -// stack in the interceptor. This however does not work inside the -// actual function which catches the exception. Most likely because the -// compiler hoists the load of the shadow value somewhere too high. -// This causes asan to report a non-existing bug on 453.povray. -// It sounds like an LLVM bug. -bool AddressSanitizer::poisonStackInFunction(Module &M, Function &F) { - if (!ClStack) return false; - SmallVector AllocaVec; - SmallVector RetVec; - uint64_t TotalSize = 0; - - // Filter out Alloca instructions we want (and can) handle. - // Collect Ret instructions. 
-  for (Function::iterator FI = F.begin(), FE = F.end();
-       FI != FE; ++FI) {
-    BasicBlock &BB = *FI;
-    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end();
-         BI != BE; ++BI) {
-      if (isa<ReturnInst>(BI)) {
-        RetVec.push_back(BI);
-        continue;
-      }
-
-      AllocaInst *AI = dyn_cast<AllocaInst>(BI);
-      if (!AI) continue;
-      if (AI->isArrayAllocation()) continue;
-      if (!AI->isStaticAlloca()) continue;
-      if (!AI->getAllocatedType()->isSized()) continue;
-      if (AI->getAlignment() > RedzoneSize) continue;
-      AllocaVec.push_back(AI);
-      uint64_t AlignedSize = getAlignedAllocaSize(AI);
-      TotalSize += AlignedSize;
-    }
-  }
-
-  if (AllocaVec.empty()) return false;
-
-  uint64_t LocalStackSize = TotalSize + (AllocaVec.size() + 1) * RedzoneSize;
+void FunctionStackPoisoner::poisonStack() {
+  uint64_t LocalStackSize = TotalStackSize +
+                            (AllocaVec.size() + 1) * RedzoneSize();
 
-  bool DoStackMalloc = ClUseAfterReturn
+  bool DoStackMalloc = ASan.CheckUseAfterReturn
       && LocalStackSize <= kMaxStackMallocSize;
 
+  assert(AllocaVec.size() > 0);
   Instruction *InsBefore = AllocaVec[0];
   IRBuilder<> IRB(InsBefore);
@@ -895,24 +1309,36 @@ bool AddressSanitizer::poisonStackInFunction(Module &M, Function &F) {
   Type *ByteArrayTy = ArrayType::get(IRB.getInt8Ty(), LocalStackSize);
   AllocaInst *MyAlloca =
       new AllocaInst(ByteArrayTy, "MyAlloca", InsBefore);
-  MyAlloca->setAlignment(RedzoneSize);
+  if (ClRealignStack && StackAlignment < RedzoneSize())
+    StackAlignment = RedzoneSize();
+  MyAlloca->setAlignment(StackAlignment);
   assert(MyAlloca->isStaticAlloca());
   Value *OrigStackBase = IRB.CreatePointerCast(MyAlloca, IntptrTy);
   Value *LocalStackBase = OrigStackBase;
 
   if (DoStackMalloc) {
-    Value *AsanStackMallocFunc = M.getOrInsertFunction(
-        kAsanStackMallocName, IntptrTy, IntptrTy, IntptrTy, NULL);
     LocalStackBase = IRB.CreateCall2(AsanStackMallocFunc,
         ConstantInt::get(IntptrTy, LocalStackSize), OrigStackBase);
   }
 
-  // This string will be parsed by the run-time (DescribeStackAddress).
+  // This string will be parsed by the run-time (DescribeAddressIfStack).
   SmallString<2048> StackDescriptionStorage;
   raw_svector_ostream StackDescription(StackDescriptionStorage);
-  StackDescription << F.getName() << " " << AllocaVec.size() << " ";
+  StackDescription << AllocaVec.size() << " ";
+
+  // Insert poison calls for lifetime intrinsics for alloca.
+  bool HavePoisonedAllocas = false;
+  for (size_t i = 0, n = AllocaPoisonCallVec.size(); i < n; i++) {
+    const AllocaPoisonCall &APC = AllocaPoisonCallVec[i];
+    IntrinsicInst *II = APC.InsBefore;
+    AllocaInst *AI = findAllocaForValue(II->getArgOperand(1));
+    assert(AI);
+    IRBuilder<> IRB(II);
+    poisonAlloca(AI, APC.Size, IRB, APC.DoPoison);
+    HavePoisonedAllocas |= APC.DoPoison;
+  }
 
-  uint64_t Pos = RedzoneSize;
+  uint64_t Pos = RedzoneSize();
   // Replace Alloca instructions with base+offset.
   for (size_t i = 0, n = AllocaVec.size(); i < n; i++) {
     AllocaInst *AI = AllocaVec[i];
@@ -921,59 +1347,115 @@ bool AddressSanitizer::poisonStackInFunction(Module &M, Function &F) {
     StackDescription << Pos << " " << SizeInBytes << " "
                      << Name.size() << " " << Name << " ";
     uint64_t AlignedSize = getAlignedAllocaSize(AI);
-    assert((AlignedSize % RedzoneSize) == 0);
-    AI->replaceAllUsesWith(
-        IRB.CreateIntToPtr(
+    assert((AlignedSize % RedzoneSize()) == 0);
+    Value *NewAllocaPtr = IRB.CreateIntToPtr(
             IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, Pos)),
-            AI->getType()));
-    Pos += AlignedSize + RedzoneSize;
+            AI->getType());
+    replaceDbgDeclareForAlloca(AI, NewAllocaPtr, DIB);
+    AI->replaceAllUsesWith(NewAllocaPtr);
+    Pos += AlignedSize + RedzoneSize();
   }
   assert(Pos == LocalStackSize);
 
-  // Write the Magic value and the frame description constant to the redzone.
+  // The left-most redzone has enough space for at least 4 pointers.
+  // Write the Magic value to redzone[0].
   Value *BasePlus0 = IRB.CreateIntToPtr(LocalStackBase, IntptrPtrTy);
   IRB.CreateStore(ConstantInt::get(IntptrTy, kCurrentStackFrameMagic),
                   BasePlus0);
-  Value *BasePlus1 = IRB.CreateAdd(LocalStackBase,
-                                   ConstantInt::get(IntptrTy, LongSize/8));
-  BasePlus1 = IRB.CreateIntToPtr(BasePlus1, IntptrPtrTy);
-  Value *Description = IRB.CreatePointerCast(
-      createPrivateGlobalForString(M, StackDescription.str()),
-      IntptrTy);
+  // Write the frame description constant to redzone[1].
+  Value *BasePlus1 = IRB.CreateIntToPtr(
+    IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, ASan.LongSize/8)),
+    IntptrPtrTy);
+  GlobalVariable *StackDescriptionGlobal =
+      createPrivateGlobalForString(*F.getParent(), StackDescription.str());
+  Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal,
+                                             IntptrTy);
   IRB.CreateStore(Description, BasePlus1);
+  // Write the PC to redzone[2].
+  Value *BasePlus2 = IRB.CreateIntToPtr(
+    IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy,
+                                                   2 * ASan.LongSize/8)),
+    IntptrPtrTy);
+  IRB.CreateStore(IRB.CreatePointerCast(&F, IntptrTy), BasePlus2);
 
   // Poison the stack redzones at the entry.
-  Value *ShadowBase = memToShadow(LocalStackBase, IRB);
-  PoisonStack(ArrayRef<AllocaInst*>(AllocaVec), IRB, ShadowBase, true);
-
-  Value *AsanStackFreeFunc = NULL;
-  if (DoStackMalloc) {
-    AsanStackFreeFunc = M.getOrInsertFunction(
-        kAsanStackFreeName, IRB.getVoidTy(),
-        IntptrTy, IntptrTy, IntptrTy, NULL);
-  }
+  Value *ShadowBase = ASan.memToShadow(LocalStackBase, IRB);
+  poisonRedZones(AllocaVec, IRB, ShadowBase, true);
 
   // Unpoison the stack before all ret instructions.
   for (size_t i = 0, n = RetVec.size(); i < n; i++) {
     Instruction *Ret = RetVec[i];
     IRBuilder<> IRBRet(Ret);
-
     // Mark the current frame as retired.
     IRBRet.CreateStore(ConstantInt::get(IntptrTy, kRetiredStackFrameMagic),
                        BasePlus0);
     // Unpoison the stack.
-    PoisonStack(ArrayRef<AllocaInst*>(AllocaVec), IRBRet, ShadowBase, false);
-
+    poisonRedZones(AllocaVec, IRBRet, ShadowBase, false);
     if (DoStackMalloc) {
+      // In use-after-return mode, mark the whole stack frame unaddressable.
      IRBRet.CreateCall3(AsanStackFreeFunc, LocalStackBase,
                         ConstantInt::get(IntptrTy, LocalStackSize),
                         OrigStackBase);
+    } else if (HavePoisonedAllocas) {
+      // If we poisoned some allocas in llvm.lifetime analysis,
+      // unpoison whole stack frame now.
+      assert(LocalStackBase == OrigStackBase);
+      poisonAlloca(LocalStackBase, LocalStackSize, IRBRet, false);
     }
   }
 
-  if (ClDebugStack) {
-    DEBUG(dbgs() << F);
-  }
 
+  // We are done. Remove the old unused alloca instructions.
+  for (size_t i = 0, n = AllocaVec.size(); i < n; i++)
+    AllocaVec[i]->eraseFromParent();
+}
 
-  return true;
+void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
+                                         IRBuilder<> IRB, bool DoPoison) {
+  // For now just insert the call to ASan runtime.
+  Value *AddrArg = IRB.CreatePointerCast(V, IntptrTy);
+  Value *SizeArg = ConstantInt::get(IntptrTy, Size);
+  IRB.CreateCall2(DoPoison ? AsanPoisonStackMemoryFunc
+                           : AsanUnpoisonStackMemoryFunc,
+                  AddrArg, SizeArg);
+}
+
+// Handling llvm.lifetime intrinsics for a given %alloca:
+// (1) collect all llvm.lifetime.xxx(%size, %value) describing the alloca.
+// (2) if %size is constant, poison memory for llvm.lifetime.end (to detect
+//     invalid accesses) and unpoison it for llvm.lifetime.start (the memory
+//     could be poisoned by a previous llvm.lifetime.end instruction, as the
+//     variable may go in and out of scope several times, e.g. in loops).
+// (3) if we poisoned at least one %alloca in a function,
+//     unpoison the whole stack frame at function exit.
+
+AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
+  if (AllocaInst *AI = dyn_cast<AllocaInst>(V))
+    // We're interested only in allocas we can handle.
+    return isInterestingAlloca(*AI) ? AI : 0;
+  // See if we've already calculated (or started to calculate) alloca for a
+  // given value.
+  AllocaForValueMapTy::iterator I = AllocaForValue.find(V);
+  if (I != AllocaForValue.end())
+    return I->second;
+  // Store 0 while we're calculating alloca for value V to avoid
+  // infinite recursion if the value references itself.
+  AllocaForValue[V] = 0;
+  AllocaInst *Res = 0;
+  if (CastInst *CI = dyn_cast<CastInst>(V))
+    Res = findAllocaForValue(CI->getOperand(0));
+  else if (PHINode *PN = dyn_cast<PHINode>(V)) {
+    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
+      Value *IncValue = PN->getIncomingValue(i);
+      // Allow self-referencing phi-nodes.
+      if (IncValue == PN) continue;
+      AllocaInst *IncValueAI = findAllocaForValue(IncValue);
+      // AI for incoming values should exist and should all be equal.
+      if (IncValueAI == 0 || (Res != 0 && IncValueAI != Res))
+        return 0;
+      Res = IncValueAI;
+    }
+  }
+  if (Res != 0)
+    AllocaForValue[V] = Res;
+  return Res;
 }