//===-- MemorySanitizer.cpp - detector of uninitialized reads -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file is a part of MemorySanitizer, a detector of uninitialized
/// reads.
///
/// Status: early prototype.
///
/// The algorithm of the tool is similar to Memcheck
/// (http://goo.gl/QKbem). We associate a few shadow bits with every
/// byte of the application memory, poison the shadow of the malloc-ed
/// or alloca-ed memory, load the shadow bits on every memory read,
/// propagate the shadow bits through some of the arithmetic
/// instructions (including MOV), store the shadow bits on every memory
/// write, and report a bug on some other instructions (e.g. JMP) if the
/// associated shadow is poisoned.
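///
/// As a hedged illustration only (the real instrumentation is emitted as
/// LLVM IR, not C), the propagation scheme sketched above behaves roughly
/// like this source-level rewrite, where s_x denotes the shadow of x:
///
/// \code
///   s_c = s_a | s_b;        // shadow propagates through arithmetic
///   c = a + b;              // original code
///   if (s_cond != 0)
///     __msan_warning();     // report a branch on uninitialized data
///   if (cond) { ... }       // original code
/// \endcode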
///
/// But there are differences too. The first and the major one:
/// compiler instrumentation instead of binary instrumentation. This
/// gives us much better register allocation, possible compiler
/// optimizations and a fast start-up. But this brings the major issue
/// as well: msan needs to see all program events, including system
/// calls and reads/writes in system libraries, so we either need to
/// compile *everything* with msan or use a binary translation
/// component (e.g. DynamoRIO) to instrument pre-built libraries.
/// Another difference from Memcheck is that we use 8 shadow bits per
/// byte of application memory and use a direct shadow mapping. This
/// greatly simplifies the instrumentation code and avoids races on
/// shadow updates (Memcheck is single-threaded so races are not a
/// concern there. Memcheck uses 2 shadow bits per byte with a slow
/// path storage that uses 8 bits per byte).
///
/// The default value of shadow is 0, which means "clean" (not poisoned).
///
/// Every module initializer should call __msan_init to ensure that the
/// shadow memory is ready. On error, __msan_warning is called. Since
/// parameters and return values may be passed via registers, we have a
/// specialized thread-local shadow for return values
/// (__msan_retval_tls) and parameters (__msan_param_tls).
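///
/// As a rough sketch (assuming the TLS slots declared later in this file),
/// a call site and the callee prologue cooperate conceptually like this:
///
/// \code
///   // caller, before the call:
///   __msan_param_tls[0] = s_arg0;
///   // callee, on entry:
///   s_param0 = __msan_param_tls[0];
///   // callee, before returning:
///   __msan_retval_tls[0] = s_retval;
///   // caller, after the call:
///   s_result = __msan_retval_tls[0];
/// \endcode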
///
/// Origin tracking:
///
/// MemorySanitizer can track origins (allocation points) of all uninitialized
/// values. This behavior is controlled with a flag (msan-track-origins) and is
/// disabled by default.
///
/// Origins are 4-byte values created and interpreted by the runtime library.
/// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
/// of application memory. Propagation of origins is basically a bunch of
/// "select" instructions that pick the origin of a dirty argument, if an
/// instruction has one.
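///
/// For instance (a sketch of the intent, not the generated IR), for
/// `c = a + b` origin propagation conceptually selects the origin of the
/// rightmost dirty operand:
///
/// \code
///   o_c = (s_b != 0) ? o_b : o_a;
/// \endcode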
///
/// Every 4 aligned, consecutive bytes of application memory have one origin
/// value associated with them. If these bytes contain uninitialized data
/// coming from 2 different allocations, the last store wins. Because of this,
/// MemorySanitizer reports can show unrelated origins, but this is unlikely in
/// practice.
///
/// Origins are meaningless for fully initialized values, so MemorySanitizer
/// avoids storing origin to memory when a fully initialized value is stored.
/// This way it avoids needlessly overwriting the origin of the 4-byte region
/// on a short (e.g. 1-byte) clean store, and it is also good for performance.
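///
/// A small illustration (a sketch of the intent, not generated code): after
///
/// \code
///   char buf[4];        // origin of buf's 4-byte region: O1
///   buf[0] = 0;         // 1-byte store of a fully initialized value
/// \endcode
///
/// the remaining 3 uninitialized bytes still report origin O1, because the
/// clean 1-byte store skipped the origin update entirely.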
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "msan"

#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/ValueMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/InstVisitor.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BlackList.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
using namespace llvm;

static const uint64_t kShadowMask32 = 1ULL << 31;
static const uint64_t kShadowMask64 = 1ULL << 46;
static const uint64_t kOriginOffset32 = 1ULL << 30;
static const uint64_t kOriginOffset64 = 1ULL << 45;
static const unsigned kMinOriginAlignment = 4;
static const unsigned kShadowTLSAlignment = 8;
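// Worked example of the direct mapping on x86-64 (a hand-computed sketch
// using the constants above): for an application address Addr,
//   Shadow = Addr & ~kShadowMask64;           // clear bit 46
//   Origin = (Shadow + kOriginOffset64) & ~3  // shift by 2^45, 4-align
// e.g. Addr = 0x7f1200001234 -> Shadow = 0x3f1200001234 and
// Origin = (0x3f1200001234 + 0x200000000000) & ~3 = 0x5f1200001234.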
/// \brief Track origins of uninitialized values.
///
/// Adds a section to MemorySanitizer report that points to the allocation
/// (stack or heap) the uninitialized bits came from originally.
static cl::opt<bool> ClTrackOrigins("msan-track-origins",
       cl::desc("Track origins (allocation sites) of poisoned memory"),
       cl::Hidden, cl::init(false));
static cl::opt<bool> ClKeepGoing("msan-keep-going",
       cl::desc("keep going after reporting a UMR"),
       cl::Hidden, cl::init(false));
static cl::opt<bool> ClPoisonStack("msan-poison-stack",
       cl::desc("poison uninitialized stack variables"),
       cl::Hidden, cl::init(true));
static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
       cl::desc("poison uninitialized stack variables with a call"),
       cl::Hidden, cl::init(false));
static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
       cl::desc("poison uninitialized stack variables with the given pattern"),
       cl::Hidden, cl::init(0xff));

static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
       cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClStoreCleanOrigin("msan-store-clean-origin",
       cl::desc("store origin for clean (fully initialized) values"),
       cl::Hidden, cl::init(false));

// This flag controls whether we check the shadow of the address operand of a
// load or store. Such bugs are rare, since a load from a garbage address
// typically results in SEGV, but they still happen (e.g. when only the lower
// bits of the address are garbage, or when the access happens early at
// program startup, where malloc-ed memory is more likely to be zeroed).
// As of 2012-08-28 this flag adds a 20% slowdown.
static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
       cl::desc("report accesses through a pointer which has poisoned shadow"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
       cl::desc("print out instructions with default strict semantics"),
       cl::Hidden, cl::init(false));

static cl::opt<std::string> ClBlacklistFile("msan-blacklist",
       cl::desc("File containing the list of functions where MemorySanitizer "
                "should not report bugs"), cl::Hidden);
/// \brief An instrumentation pass implementing detection of uninitialized
/// reads.
///
/// MemorySanitizer: instrument the code in a module to find
/// uninitialized reads.
class MemorySanitizer : public FunctionPass {
 public:
  MemorySanitizer(bool TrackOrigins = false,
                  StringRef BlacklistFile = StringRef())
    : FunctionPass(ID),
      TrackOrigins(TrackOrigins || ClTrackOrigins),
      BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile
                                          : BlacklistFile) { }
  const char *getPassName() const { return "MemorySanitizer"; }
  bool runOnFunction(Function &F);
  bool doInitialization(Module &M);
  static char ID;  // Pass identification, replacement for typeid.

 private:
  void initializeCallbacks(Module &M);

  /// \brief Track origins (allocation points) of uninitialized values.
  bool TrackOrigins;

  DataLayout *TD;
  LLVMContext *C;
  Type *IntptrTy;
  Type *OriginTy;
  /// \brief Thread-local shadow storage for function parameters.
  GlobalVariable *ParamTLS;
  /// \brief Thread-local origin storage for function parameters.
  GlobalVariable *ParamOriginTLS;
  /// \brief Thread-local shadow storage for function return value.
  GlobalVariable *RetvalTLS;
  /// \brief Thread-local origin storage for function return value.
  GlobalVariable *RetvalOriginTLS;
  /// \brief Thread-local shadow storage for in-register va_arg function
  /// parameters (x86_64-specific).
  GlobalVariable *VAArgTLS;
  /// \brief Thread-local shadow storage for va_arg overflow area
  /// (x86_64-specific).
  GlobalVariable *VAArgOverflowSizeTLS;
  /// \brief Thread-local space used to pass origin value to the UMR reporting
  /// function.
  GlobalVariable *OriginTLS;

  /// \brief The run-time callback to print a warning.
  Value *WarningFn;
  /// \brief Run-time helper that copies origin info for a memory range.
  Value *MsanCopyOriginFn;
  /// \brief Run-time helper that generates a new origin value for a stack
  /// allocation.
  Value *MsanSetAllocaOriginFn;
  /// \brief Run-time helper that poisons stack on function entry.
  Value *MsanPoisonStackFn;
  /// \brief MSan runtime replacements for memmove, memcpy and memset.
  Value *MemmoveFn, *MemcpyFn, *MemsetFn;

  /// \brief Address mask used in application-to-shadow address calculation.
  /// ShadowAddr is computed as ApplicationAddr & ~ShadowMask.
  uint64_t ShadowMask;
  /// \brief Offset of the origin shadow from the "normal" shadow.
  /// OriginAddr is computed as (ShadowAddr + OriginOffset) & ~3ULL
  uint64_t OriginOffset;
  /// \brief Branch weights for error reporting.
  MDNode *ColdCallWeights;
  /// \brief Branch weights for origin store.
  MDNode *OriginStoreWeights;
  /// \brief Path to the blacklist file.
  SmallString<64> BlacklistFile;
  /// \brief The blacklist.
  OwningPtr<BlackList> BL;
  /// \brief An empty volatile inline asm that prevents callback merge.
  InlineAsm *EmptyAsm;

  friend struct MemorySanitizerVisitor;
  friend struct VarArgAMD64Helper;
};
char MemorySanitizer::ID = 0;
INITIALIZE_PASS(MemorySanitizer, "msan",
                "MemorySanitizer: detects uninitialized reads.",
                false, false)

FunctionPass *llvm::createMemorySanitizerPass(bool TrackOrigins,
                                              StringRef BlacklistFile) {
  return new MemorySanitizer(TrackOrigins, BlacklistFile);
}
/// \brief Create a non-const global initialized with the given string.
///
/// Creates a writable global for Str so that we can pass it to the
/// run-time lib. The runtime uses the first 4 bytes of the string to store
/// the frame ID, so the string needs to be mutable.
static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
                                                            StringRef Str) {
  Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
  return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
                            GlobalValue::PrivateLinkage, StrConst, "");
}
/// \brief Insert extern declaration of runtime-provided functions and globals.
void MemorySanitizer::initializeCallbacks(Module &M) {
  // Only do this once.
  if (WarningFn)
    return;

  IRBuilder<> IRB(*C);
  // Create the callback.
  // FIXME: this function should have "Cold" calling conv,
  // which is not yet implemented.
  StringRef WarningFnName = ClKeepGoing ? "__msan_warning"
                                        : "__msan_warning_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), NULL);

  MsanCopyOriginFn = M.getOrInsertFunction(
    "__msan_copy_origin", IRB.getVoidTy(), IRB.getInt8PtrTy(),
    IRB.getInt8PtrTy(), IntptrTy, NULL);
  MsanSetAllocaOriginFn = M.getOrInsertFunction(
    "__msan_set_alloca_origin", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
    IRB.getInt8PtrTy(), NULL);
  MsanPoisonStackFn = M.getOrInsertFunction(
    "__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, NULL);
  MemmoveFn = M.getOrInsertFunction(
    "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
    IRB.getInt8PtrTy(), IntptrTy, NULL);
  MemcpyFn = M.getOrInsertFunction(
    "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
    IntptrTy, NULL);
  MemsetFn = M.getOrInsertFunction(
    "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
    IntptrTy, NULL);

  // Create TLS globals used to pass shadow and origins between functions.
  RetvalTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 8), false,
    GlobalVariable::ExternalLinkage, 0, "__msan_retval_tls", 0,
    GlobalVariable::GeneralDynamicTLSModel);
  RetvalOriginTLS = new GlobalVariable(
    M, OriginTy, false, GlobalVariable::ExternalLinkage, 0,
    "__msan_retval_origin_tls", 0, GlobalVariable::GeneralDynamicTLSModel);

  ParamTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
    GlobalVariable::ExternalLinkage, 0, "__msan_param_tls", 0,
    GlobalVariable::GeneralDynamicTLSModel);
  ParamOriginTLS = new GlobalVariable(
    M, ArrayType::get(OriginTy, 1000), false, GlobalVariable::ExternalLinkage,
    0, "__msan_param_origin_tls", 0, GlobalVariable::GeneralDynamicTLSModel);

  VAArgTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
    GlobalVariable::ExternalLinkage, 0, "__msan_va_arg_tls", 0,
    GlobalVariable::GeneralDynamicTLSModel);
  VAArgOverflowSizeTLS = new GlobalVariable(
    M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, 0,
    "__msan_va_arg_overflow_size_tls", 0,
    GlobalVariable::GeneralDynamicTLSModel);
  OriginTLS = new GlobalVariable(
    M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, 0,
    "__msan_origin_tls", 0, GlobalVariable::GeneralDynamicTLSModel);

  // We insert an empty inline asm after __msan_report* to avoid callback
  // merge.
  EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
                            StringRef(""), StringRef(""),
                            /*hasSideEffects=*/true);
}
/// \brief Module-level initialization.
///
/// Inserts a call to __msan_init into the module's constructor list.
bool MemorySanitizer::doInitialization(Module &M) {
  TD = getAnalysisIfAvailable<DataLayout>();
  if (!TD)
    return false;
  BL.reset(new BlackList(BlacklistFile));
  C = &(M.getContext());
  unsigned PtrSize = TD->getPointerSizeInBits(/* AddressSpace */0);
  switch (PtrSize) {
    case 64:
      ShadowMask = kShadowMask64;
      OriginOffset = kOriginOffset64;
      break;
    case 32:
      ShadowMask = kShadowMask32;
      OriginOffset = kOriginOffset32;
      break;
    default:
      report_fatal_error("unsupported pointer size");
      break;
  }

  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(TD);
  OriginTy = IRB.getInt32Ty();

  ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
  OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);

  // Insert a call to __msan_init/__msan_track_origins into the module's CTORs.
  appendToGlobalCtors(M, cast<Function>(M.getOrInsertFunction(
                      "__msan_init", IRB.getVoidTy(), NULL)), 0);

  if (TrackOrigins)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(TrackOrigins), "__msan_track_origins");

  if (ClKeepGoing)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(ClKeepGoing), "__msan_keep_going");

  return true;
}
/// \brief A helper class that handles instrumentation of VarArg
/// functions on a particular platform.
///
/// Implementations are expected to insert the instrumentation
/// necessary to propagate argument shadow through VarArg function
/// calls. Visit* methods are called during an InstVisitor pass over
/// the function, and should avoid creating new basic blocks. A new
/// instance of this class is created for each instrumented function.
struct VarArgHelper {
  /// \brief Visit a CallSite.
  virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;

  /// \brief Visit a va_start call.
  virtual void visitVAStartInst(VAStartInst &I) = 0;

  /// \brief Visit a va_copy call.
  virtual void visitVACopyInst(VACopyInst &I) = 0;

  /// \brief Finalize function instrumentation.
  ///
  /// This method is called after visiting all interesting (see above)
  /// instructions in a function.
  virtual void finalizeInstrumentation() = 0;

  virtual ~VarArgHelper() {}
};

struct MemorySanitizerVisitor;

VarArgHelper*
CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                   MemorySanitizerVisitor &Visitor);
/// This class does all the work for a given function. Store and Load
/// instructions store and load corresponding shadow and origin
/// values. Most instructions propagate shadow from arguments to their
/// return values. Certain instructions (most importantly, BranchInst)
/// test their argument shadow and print reports (with a runtime call) if it's
/// non-zero.
struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  Function &F;
  MemorySanitizer &MS;
  SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
  ValueMap<Value*, Value*> ShadowMap, OriginMap;
  bool InsertChecks;
  OwningPtr<VarArgHelper> VAHelper;

  // An unfortunate workaround for asymmetric lowering of va_arg stuff.
  // See a comment in visitCallSite for more details.
  static const unsigned AMD64GpEndOffset = 48;  // AMD64 ABI Draft 0.99.6 p3.5.7
  static const unsigned AMD64FpEndOffset = 176;

  struct ShadowOriginAndInsertPoint {
    Instruction *Shadow;
    Instruction *Origin;
    Instruction *OrigIns;
    ShadowOriginAndInsertPoint(Instruction *S, Instruction *O, Instruction *I)
      : Shadow(S), Origin(O), OrigIns(I) { }
    ShadowOriginAndInsertPoint() : Shadow(0), Origin(0), OrigIns(0) { }
  };
  SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
  SmallVector<Instruction*, 16> StoreList;

  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
    : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
    InsertChecks = !MS.BL->isIn(F);
    DEBUG(if (!InsertChecks)
            dbgs() << "MemorySanitizer is not inserting checks into '"
                   << F.getName() << "'\n");
  }
  void materializeStores() {
    for (size_t i = 0, n = StoreList.size(); i < n; i++) {
      StoreInst& I = *dyn_cast<StoreInst>(StoreList[i]);

      IRBuilder<> IRB(&I);
      Value *Val = I.getValueOperand();
      Value *Addr = I.getPointerOperand();
      Value *Shadow = getShadow(Val);
      Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

      StoreInst *NewSI =
        IRB.CreateAlignedStore(Shadow, ShadowPtr, I.getAlignment());
      DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
      (void)NewSI;

      if (ClCheckAccessAddress)
        insertCheck(Addr, &I);

      if (MS.TrackOrigins) {
        unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
        if (ClStoreCleanOrigin || isa<StructType>(Shadow->getType())) {
          IRB.CreateAlignedStore(getOrigin(Val), getOriginPtr(Addr, IRB),
                                 Alignment);
        } else {
          Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);

          Constant *Cst = dyn_cast_or_null<Constant>(ConvertedShadow);
          // TODO(eugenis): handle non-zero constant shadow by inserting an
          // unconditional check (can not simply fail compilation as this could
          // be in the dead code).
          if (!Cst) {
            Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
                getCleanShadow(ConvertedShadow), "_mscmp");
            Instruction *CheckTerm =
              SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), false,
                                        MS.OriginStoreWeights);
            IRBuilder<> IRBNew(CheckTerm);
            IRBNew.CreateAlignedStore(getOrigin(Val), getOriginPtr(Addr, IRBNew),
                                      Alignment);
          }
        }
      }
    }
  }
  void materializeChecks() {
    for (size_t i = 0, n = InstrumentationList.size(); i < n; i++) {
      Instruction *Shadow = InstrumentationList[i].Shadow;
      Instruction *OrigIns = InstrumentationList[i].OrigIns;
      IRBuilder<> IRB(OrigIns);
      DEBUG(dbgs() << "  SHAD0 : " << *Shadow << "\n");
      Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
      DEBUG(dbgs() << "  SHAD1 : " << *ConvertedShadow << "\n");
      Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
                                    getCleanShadow(ConvertedShadow), "_mscmp");
      Instruction *CheckTerm =
        SplitBlockAndInsertIfThen(cast<Instruction>(Cmp),
                                  /* Unreachable */ !ClKeepGoing,
                                  MS.ColdCallWeights);

      IRB.SetInsertPoint(CheckTerm);
      if (MS.TrackOrigins) {
        Instruction *Origin = InstrumentationList[i].Origin;
        IRB.CreateStore(Origin ? (Value*)Origin : (Value*)IRB.getInt32(0),
                        MS.OriginTLS);
      }
      CallInst *Call = IRB.CreateCall(MS.WarningFn);
      Call->setDebugLoc(OrigIns->getDebugLoc());
      IRB.CreateCall(MS.EmptyAsm);
      DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
    }
    DEBUG(dbgs() << "DONE:\n" << F);
  }
  /// \brief Add MemorySanitizer instrumentation to a function.
  bool runOnFunction() {
    MS.initializeCallbacks(*F.getParent());
    if (!MS.TD) return false;

    // In the presence of unreachable blocks, we may see Phi nodes with
    // incoming nodes from such blocks. Since InstVisitor skips unreachable
    // blocks, such nodes will not have any shadow value associated with them.
    // It's easier to remove unreachable blocks than deal with missing shadow.
    removeUnreachableBlocks(F);

    // Iterate all BBs in depth-first order and create shadow instructions
    // for all instructions (where applicable).
    // For PHI nodes we create dummy shadow PHIs which will be finalized later.
    for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
         DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
      BasicBlock *BB = *DI;
      visit(*BB);
    }

    // Finalize PHI nodes.
    for (size_t i = 0, n = ShadowPHINodes.size(); i < n; i++) {
      PHINode *PN = ShadowPHINodes[i];
      PHINode *PNS = cast<PHINode>(getShadow(PN));
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : 0;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
        if (PNO)
          PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
      }
    }

    VAHelper->finalizeInstrumentation();

    // Delayed instrumentation of StoreInst.
    // This may add new checks to be inserted later.
    materializeStores();

    // Insert shadow value checks.
    materializeChecks();

    return true;
  }
  /// \brief Compute the shadow type that corresponds to a given Value.
  Type *getShadowTy(Value *V) {
    return getShadowTy(V->getType());
  }

  /// \brief Compute the shadow type that corresponds to a given Type.
  Type *getShadowTy(Type *OrigTy) {
    if (!OrigTy->isSized()) {
      return 0;
    }
    // For integer type, shadow is the same as the original type.
    // This may return weird-sized types like i1.
    if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
      return IT;
    if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
      uint32_t EltSize = MS.TD->getTypeSizeInBits(VT->getElementType());
      return VectorType::get(IntegerType::get(*MS.C, EltSize),
                             VT->getNumElements());
    }
    if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
      SmallVector<Type*, 4> Elements;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
      DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
      return Res;
    }
    uint32_t TypeSize = MS.TD->getTypeSizeInBits(OrigTy);
    return IntegerType::get(*MS.C, TypeSize);
  }
  /// \brief Flatten a vector type.
  Type *getShadowTyNoVec(Type *ty) {
    if (VectorType *vt = dyn_cast<VectorType>(ty))
      return IntegerType::get(*MS.C, vt->getBitWidth());
    return ty;
  }

  /// \brief Convert a shadow value to its flattened variant.
  Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
    Type *Ty = V->getType();
    Type *NoVecTy = getShadowTyNoVec(Ty);
    if (Ty == NoVecTy) return V;
    return IRB.CreateBitCast(V, NoVecTy);
  }
  /// \brief Compute the shadow address that corresponds to a given
  /// application address.
  ///
  /// Shadow = Addr & ~ShadowMask.
  Value *getShadowPtr(Value *Addr, Type *ShadowTy,
                      IRBuilder<> &IRB) {
    Value *ShadowLong =
      IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                    ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
    return IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
  }

  /// \brief Compute the origin address that corresponds to a given
  /// application address.
  ///
  /// OriginAddr = (ShadowAddr + OriginOffset) & ~3ULL
  Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB) {
    Value *ShadowLong =
      IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                    ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
    Value *Add =
      IRB.CreateAdd(ShadowLong,
                    ConstantInt::get(MS.IntptrTy, MS.OriginOffset));
    Value *SecondAnd =
      IRB.CreateAnd(Add, ConstantInt::get(MS.IntptrTy, ~3ULL));
    return IRB.CreateIntToPtr(SecondAnd, PointerType::get(IRB.getInt32Ty(), 0));
  }
  /// \brief Compute the shadow address for a given function argument.
  ///
  /// Shadow = ParamTLS+ArgOffset.
  Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msarg");
  }

  /// \brief Compute the origin address for a given function argument.
  Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    if (!MS.TrackOrigins) return 0;
    Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
                              "_msarg_o");
  }

  /// \brief Compute the shadow address for a retval.
  Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
    Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy);
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msret");
  }

  /// \brief Compute the origin address for a retval.
  Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
    // We keep a single origin for the entire retval. Might be too optimistic.
    return MS.RetvalOriginTLS;
  }
  /// \brief Set SV to be the shadow value for V.
  void setShadow(Value *V, Value *SV) {
    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = SV;
  }

  /// \brief Set Origin to be the origin value for V.
  void setOrigin(Value *V, Value *Origin) {
    if (!MS.TrackOrigins) return;
    assert(!OriginMap.count(V) && "Values may only have one origin");
    DEBUG(dbgs() << "ORIGIN: " << *V << "  ==> " << *Origin << "\n");
    OriginMap[V] = Origin;
  }

  /// \brief Create a clean shadow value for a given value.
  ///
  /// Clean shadow (all zeroes) means all bits of the value are defined
  /// (initialized).
  Value *getCleanShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return 0;
    return Constant::getNullValue(ShadowTy);
  }

  /// \brief Create a dirty shadow of a given shadow type.
  Constant *getPoisonedShadow(Type *ShadowTy) {
    assert(ShadowTy);
    if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
      return Constant::getAllOnesValue(ShadowTy);
    StructType *ST = cast<StructType>(ShadowTy);
    SmallVector<Constant *, 4> Vals;
    for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
      Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
    return ConstantStruct::get(ST, Vals);
  }

  /// \brief Create a clean (zero) origin.
  Value *getCleanOrigin() {
    return Constant::getNullValue(MS.OriginTy);
  }
  /// \brief Get the shadow value for a given Value.
  ///
  /// This function either returns the value set earlier with setShadow,
  /// or extracts it from ParamTLS (for function arguments).
  Value *getShadow(Value *V) {
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      // For instructions the shadow is already stored in the map.
      Value *Shadow = ShadowMap[V];
      if (!Shadow) {
        DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
        (void)I;
        assert(Shadow && "No shadow for a value");
      }
      return Shadow;
    }
    if (UndefValue *U = dyn_cast<UndefValue>(V)) {
      Value *AllOnes = getPoisonedShadow(getShadowTy(V));
      DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
      (void)U;
      return AllOnes;
    }
    if (Argument *A = dyn_cast<Argument>(V)) {
      // For arguments we compute the shadow on demand and store it in the map.
      Value **ShadowPtr = &ShadowMap[V];
      if (*ShadowPtr)
        return *ShadowPtr;
      Function *F = A->getParent();
      IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
      unsigned ArgOffset = 0;
      for (Function::arg_iterator AI = F->arg_begin(), AE = F->arg_end();
           AI != AE; ++AI) {
        if (!AI->getType()->isSized()) {
          DEBUG(dbgs() << "Arg is not sized\n");
          continue;
        }
        unsigned Size = AI->hasByValAttr()
          ? MS.TD->getTypeAllocSize(AI->getType()->getPointerElementType())
          : MS.TD->getTypeAllocSize(AI->getType());
        if (A == AI) {
          Value *Base = getShadowPtrForArgument(AI, EntryIRB, ArgOffset);
          if (AI->hasByValAttr()) {
            // ByVal pointer itself has clean shadow. We copy the actual
            // argument shadow to the underlying memory.
            Value *Cpy = EntryIRB.CreateMemCpy(
              getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB),
              Base, Size, AI->getParamAlignment());
            DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
            (void)Cpy;
            *ShadowPtr = getCleanShadow(V);
          } else {
            *ShadowPtr = EntryIRB.CreateLoad(Base);
          }
          DEBUG(dbgs() << "  ARG:  " << *AI << " ==> " <<
                **ShadowPtr << "\n");
          if (MS.TrackOrigins) {
            Value* OriginPtr = getOriginPtrForArgument(AI, EntryIRB, ArgOffset);
            setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
          }
        }
        ArgOffset += DataLayout::RoundUpAlignment(Size, 8);
      }
      assert(*ShadowPtr && "Could not find shadow for an argument");
      return *ShadowPtr;
    }
    // For everything else the shadow is zero.
    return getCleanShadow(V);
  }

  /// \brief Get the shadow for i-th argument of the instruction I.
  Value *getShadow(Instruction *I, int i) {
    return getShadow(I->getOperand(i));
  }
  /// \brief Get the origin for a value.
  Value *getOrigin(Value *V) {
    if (!MS.TrackOrigins) return 0;
    if (isa<Instruction>(V) || isa<Argument>(V)) {
      Value *Origin = OriginMap[V];
      if (!Origin) {
        DEBUG(dbgs() << "NO ORIGIN: " << *V << "\n");
        Origin = getCleanOrigin();
      }
      return Origin;
    }
    return getCleanOrigin();
  }

  /// \brief Get the origin for i-th argument of the instruction I.
  Value *getOrigin(Instruction *I, int i) {
    return getOrigin(I->getOperand(i));
  }

  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning in runtime if the value is not fully defined.
  void insertCheck(Value *Val, Instruction *OrigIns) {
    assert(Val);
    if (!InsertChecks) return;
    Instruction *Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
    if (!Shadow) return;
    Type *ShadowTy = Shadow->getType();
    assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
           "Can only insert checks for integer and vector shadow types");
    Instruction *Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
    InstrumentationList.push_back(
      ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
  }
  // ------------------- Visitors.

  /// \brief Instrument LoadInst
  ///
  /// Loads the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the load address is fully defined.
  void visitLoadInst(LoadInst &I) {
    assert(I.getType()->isSized() && "Load type must have size");
    IRBuilder<> IRB(&I);
    Type *ShadowTy = getShadowTy(&I);
    Value *Addr = I.getPointerOperand();
    Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
    setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"));

    if (ClCheckAccessAddress)
      insertCheck(I.getPointerOperand(), &I);

    if (MS.TrackOrigins) {
      unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
      setOrigin(&I, IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB), Alignment));
    }
  }

  /// \brief Instrument StoreInst
  ///
  /// Stores the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the store address is fully defined.
  void visitStoreInst(StoreInst &I) {
    StoreList.push_back(&I);
  }
  // Vector manipulation.
  void visitExtractElementInst(ExtractElementInst &I) {
    insertCheck(I.getOperand(1), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
              "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitInsertElementInst(InsertElementInst &I) {
    insertCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
              I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  void visitShuffleVectorInst(ShuffleVectorInst &I) {
    insertCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
              I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }
  // Casts.
  void visitSExtInst(SExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitZExtInst(ZExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitTruncInst(TruncInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitBitCastInst(BitCastInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
              "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitIntToPtrInst(IntToPtrInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
              "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }
  /// \brief Propagate shadow for bitwise AND.
  ///
  /// This code is exact, i.e. if, for example, a bit in the left argument
  /// is defined and 0, then neither the value nor the definedness of the
  /// corresponding bit in B affects the resulting shadow.
  void visitAnd(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    //  "And" of 0 and a poisoned value results in unpoisoned value.
    //  1&1 => 1;  0&1 => 0;  p&1 => p;
    //  1&0 => 0;  0&0 => 0;  p&0 => 0;
    //  1&p => p;  0&p => 0;  p&p => p;
    //  S = (S1 & S2) | (V1 & S2) | (S1 & V2)
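    //
    // To sanity-check the formula on one bit (a hand-worked example): if V1
    // is a defined 0 (S1 = 0, V1 = 0), then S1&S2 = 0, V1&S2 = 0 and
    // S1&V2 = 0, so the result bit is defined no matter what operand 2 is,
    // matching the "0&1 => 0; 0&0 => 0; 0&p => 0" column above.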
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }

  void visitOr(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    //  "Or" of 1 and a poisoned value results in unpoisoned value.
    //  1|1 => 1;  0|1 => 1;  p|1 => 1;
    //  1|0 => 1;  0|0 => 0;  p|0 => p;
    //  1|p => 1;  0|p => p;  p|p => p;
    //  S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = IRB.CreateNot(I.getOperand(0));
    Value *V2 = IRB.CreateNot(I.getOperand(1));
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }
  /// \brief Default propagation of shadow and/or origin.
  ///
  /// This class implements the general case of shadow propagation, used in all
  /// cases where we don't know and/or don't care about what the operation
  /// actually does. It converts all input shadow values to a common type
  /// (extending or truncating as necessary), and bitwise OR's them.
  ///
  /// This is much cheaper than inserting checks (i.e. requiring inputs to be
  /// fully initialized), and less prone to false positives.
  ///
  /// This class also implements the general case of origin propagation. For a
  /// Nary operation, result origin is set to the origin of an argument that is
  /// not entirely initialized. If there is more than one such argument, the
  /// rightmost of them is picked. It does not matter which one is picked if all
  /// arguments are initialized.
  template <bool CombineShadow>
  class Combiner {
    Value *Shadow;
    Value *Origin;
    IRBuilder<> &IRB;
    MemorySanitizerVisitor *MSV;

  public:
    Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
      Shadow(0), Origin(0), IRB(IRB), MSV(MSV) {}

    /// \brief Add a pair of shadow and origin values to the mix.
    Combiner &Add(Value *OpShadow, Value *OpOrigin) {
      if (CombineShadow) {
        assert(OpShadow);
        if (!Shadow) {
          Shadow = OpShadow;
        } else {
          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
        }
      }

      if (MSV->MS.TrackOrigins) {
        assert(OpOrigin);
        if (!Origin) {
          Origin = OpOrigin;
        } else {
          Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
          Value *Cond = IRB.CreateICmpNE(FlatShadow,
                                         MSV->getCleanShadow(FlatShadow));
          Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
        }
      }
      return *this;
    }

    /// \brief Add an application value to the mix.
    Combiner &Add(Value *V) {
      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : 0;
      return Add(OpShadow, OpOrigin);
    }

    /// \brief Set the current combined values as the given instruction's
    /// shadow and origin.
    void Done(Instruction *I) {
      if (CombineShadow) {
        assert(Shadow);
        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);
      }
      if (MSV->MS.TrackOrigins) {
        assert(Origin);
        MSV->setOrigin(I, Origin);
      }
    }
  };

  typedef Combiner<true> ShadowAndOriginCombiner;
  typedef Combiner<false> OriginCombiner;
  /// \brief Propagate origin for arbitrary operation.
  void setOriginForNaryOp(Instruction &I) {
    if (!MS.TrackOrigins) return;
    IRBuilder<> IRB(&I);
    OriginCombiner OC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      OC.Add(OI->get());
    OC.Done(&I);
  }

  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
    assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
           "Vector of pointers is not a valid shadow type");
    return Ty->isVectorTy() ?
      Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
      Ty->getPrimitiveSizeInBits();
  }

  /// \brief Cast between two shadow types, extending or truncating as
  /// necessary.
  Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy) {
    Type *srcTy = V->getType();
    if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
      return IRB.CreateIntCast(V, dstTy, false);
    if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
        dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
      return IRB.CreateIntCast(V, dstTy, false);
    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
    Value *V2 =
      IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), false);
    return IRB.CreateBitCast(V2, dstTy);
    // TODO: handle struct types.
  }

  /// \brief Propagate shadow for arbitrary operation.
  void handleShadowOr(Instruction &I) {
    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      SC.Add(OI->get());
    SC.Done(&I);
  }
  void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
  void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitXor(BinaryOperator &I) { handleShadowOr(I); }
  void visitMul(BinaryOperator &I) { handleShadowOr(I); }

  void handleDiv(Instruction &I) {
    IRBuilder<> IRB(&I);
    // Strict on the second argument.
    insertCheck(I.getOperand(1), &I);
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitUDiv(BinaryOperator &I) { handleDiv(I); }
  void visitSDiv(BinaryOperator &I) { handleDiv(I); }
  void visitFDiv(BinaryOperator &I) { handleDiv(I); }
  void visitURem(BinaryOperator &I) { handleDiv(I); }
  void visitSRem(BinaryOperator &I) { handleDiv(I); }
  void visitFRem(BinaryOperator &I) { handleDiv(I); }
  /// \brief Instrument == and != comparisons.
  ///
  /// Sometimes the comparison result is known even if some of the bits of the
  /// arguments are not.
  void handleEqualityComparison(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers.
    // For ints (and vectors of ints), types of A and Sa match,
    // and this is a no-op.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // A == B  <==>  (C = A^B) == 0
    // A != B  <==>  (C = A^B) != 0
    Value *C = IRB.CreateXor(A, B);
    Value *Sc = IRB.CreateOr(Sa, Sb);
    // Now dealing with the i = (C == 0) comparison (or C != 0, it does not
    // matter which). The result is defined if one of the following is true:
    //   * there is a defined 1 bit in C, or
    //   * C is fully defined.
    // Si = !(C & ~Sc) && Sc
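    //
    // A hand-worked example: comparing A = 0b1? (low bit undefined) with
    // B = 0b00 gives C = 0b1? and Sc = 0b01. C has a defined 1 in bit 1
    // (C & ~Sc = 0b10 != 0), so A != B holds whatever the undefined bit is,
    // and Si = 0 (the comparison result is fully defined).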
    Value *Zero = Constant::getNullValue(Sc->getType());
    Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
    Value *Si =
      IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
                    IRB.CreateICmpEQ(
                      IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
    Si->setName("_msprop_icmp");
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }
  /// \brief Instrument signed relational comparisons.
  ///
  /// Handle (x<0) and (x>=0) comparisons (essentially, sign bit tests) by
  /// propagating the highest bit of the shadow. Everything else is delegated
  /// to handleShadowOr().
  void handleSignedRelationalComparison(ICmpInst &I) {
    Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
    Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
    Value *op = NULL;
    CmpInst::Predicate pre = I.getPredicate();
    if (constOp0 && constOp0->isNullValue() &&
        (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE)) {
      op = I.getOperand(1);
    } else if (constOp1 && constOp1->isNullValue() &&
               (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) {
      op = I.getOperand(0);
    }
    if (op) {
      IRBuilder<> IRB(&I);
      Value *Shadow =
        IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op), "_msprop_icmpslt");
      setShadow(&I, Shadow);
      setOrigin(&I, getOrigin(op));
    } else {
      handleShadowOr(I);
    }
  }

  void visitICmpInst(ICmpInst &I) {
    if (ClHandleICmp && I.isEquality())
      handleEqualityComparison(I);
    else if (ClHandleICmp && I.isSigned() && I.isRelational())
      handleSignedRelationalComparison(I);
    else
      handleShadowOr(I);
  }

  void visitFCmpInst(FCmpInst &I) {
    handleShadowOr(I);
  }
  void handleShift(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
                                   S2->getType());
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }

  void visitShl(BinaryOperator &I) { handleShift(I); }
  void visitAShr(BinaryOperator &I) { handleShift(I); }
  void visitLShr(BinaryOperator &I) { handleShift(I); }
  /// \brief Instrument llvm.memmove
  ///
  /// At this point we don't know if llvm.memmove will be inlined or not.
  /// If we don't instrument it and it gets inlined,
  /// our interceptor will not kick in and we will lose the memmove.
  /// If we instrument the call here, but it does not get inlined,
  /// we will memmove the shadow twice: which is bad in case
  /// of overlapping regions. So, we simply lower the intrinsic to a call.
  ///
  /// Similar situation exists for memcpy and memset.
  void visitMemMoveInst(MemMoveInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemmoveFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  // Similar to memmove: avoid copying shadow twice.
  // This is somewhat unfortunate as it may slow down small constant memcpys.
  // FIXME: consider doing manual inline for small constant sizes and proper
  // alignment.
  void visitMemCpyInst(MemCpyInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemcpyFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  // Same as memcpy.
  void visitMemSetInst(MemSetInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemsetFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  void visitVAStartInst(VAStartInst &I) {
    VAHelper->visitVAStartInst(I);
  }

  void visitVACopyInst(VACopyInst &I) {
    VAHelper->visitVACopyInst(I);
  }
  enum IntrinsicKind {
    IK_DoesNotAccessMemory,
    IK_OnlyReadsMemory,
    IK_WritesMemory
  };

  static IntrinsicKind getIntrinsicKind(Intrinsic::ID iid) {
    const int DoesNotAccessMemory = IK_DoesNotAccessMemory;
    const int OnlyReadsArgumentPointees = IK_OnlyReadsMemory;
    const int OnlyReadsMemory = IK_OnlyReadsMemory;
    const int OnlyAccessesArgumentPointees = IK_WritesMemory;
    const int UnknownModRefBehavior = IK_WritesMemory;
#define GET_INTRINSIC_MODREF_BEHAVIOR
#define ModRefBehavior IntrinsicKind
#include "llvm/IR/Intrinsics.gen"
#undef ModRefBehavior
#undef GET_INTRINSIC_MODREF_BEHAVIOR
  }
  /// \brief Handle vector store-like intrinsics.
  ///
  /// Instrument intrinsics that look like a simple SIMD store: writes memory,
  /// has 1 pointer argument and 1 vector argument, returns void.
  bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value* Addr = I.getArgOperand(0);
    Value *Shadow = getShadow(&I, 1);
    Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

    // We don't know the pointer alignment (could be unaligned SSE store!).
    // Have to assume the worst case.
    IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);

    if (ClCheckAccessAddress)
      insertCheck(Addr, &I);

    // FIXME: use ClStoreCleanOrigin
    // FIXME: factor out common code from materializeStores
    if (MS.TrackOrigins)
      IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB));
    return true;
  }

  /// \brief Handle vector load-like intrinsics.
  ///
  /// Instrument intrinsics that look like a simple SIMD load: reads memory,
  /// has 1 pointer argument, returns a vector.
  bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);

    Type *ShadowTy = getShadowTy(&I);
    Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
    // We don't know the pointer alignment (could be unaligned SSE load!).
    // Have to assume the worst case.
    setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, 1, "_msld"));

    if (ClCheckAccessAddress)
      insertCheck(Addr, &I);

    if (MS.TrackOrigins)
      setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB)));
    return true;
  }
  /// \brief Handle (SIMD arithmetic)-like intrinsics.
  ///
  /// Instrument intrinsics with any number of arguments of the same type,
  /// equal to the return type. The type should be simple (no aggregates or
  /// pointers; vectors are fine).
  /// Caller guarantees that this intrinsic does not access memory.
  bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
    Type *RetTy = I.getType();
    if (!(RetTy->isIntOrIntVectorTy() ||
          RetTy->isFPOrFPVectorTy() ||
          RetTy->isX86_MMXTy()))
      return false;

    unsigned NumArgOperands = I.getNumArgOperands();

    for (unsigned i = 0; i < NumArgOperands; ++i) {
      Type *Ty = I.getArgOperand(i)->getType();
      if (Ty != RetTy)
        return false;
    }

    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (unsigned i = 0; i < NumArgOperands; ++i)
      SC.Add(I.getArgOperand(i));
    SC.Done(&I);

    return true;
  }
  /// \brief Heuristically instrument unknown intrinsics.
  ///
  /// The main purpose of this code is to do something reasonable with all
  /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
  /// We recognize several classes of intrinsics by their argument types and
  /// ModRefBehavior and apply special instrumentation when we are reasonably
  /// sure that we know what the intrinsic does.
  ///
  /// We special-case intrinsics where this approach fails. See llvm.bswap
  /// handling as an example of that.
  bool handleUnknownIntrinsic(IntrinsicInst &I) {
    unsigned NumArgOperands = I.getNumArgOperands();
    if (NumArgOperands == 0)
      return false;

    Intrinsic::ID iid = I.getIntrinsicID();
    IntrinsicKind IK = getIntrinsicKind(iid);
    bool OnlyReadsMemory = IK == IK_OnlyReadsMemory;
    bool WritesMemory = IK == IK_WritesMemory;
    assert(!(OnlyReadsMemory && WritesMemory));

    if (NumArgOperands == 2 &&
        I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getArgOperand(1)->getType()->isVectorTy() &&
        I.getType()->isVoidTy() &&
        WritesMemory) {
      // This looks like a vector store.
      return handleVectorStoreIntrinsic(I);
    }

    if (NumArgOperands == 1 &&
        I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getType()->isVectorTy() &&
        OnlyReadsMemory) {
      // This looks like a vector load.
      return handleVectorLoadIntrinsic(I);
    }

    if (!OnlyReadsMemory && !WritesMemory)
      if (maybeHandleSimpleNomemIntrinsic(I))
        return true;

    // FIXME: detect and handle SSE maskstore/maskload
    return false;
  }

  void handleBswap(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Op = I.getArgOperand(0);
    Type *OpType = Op->getType();
    Function *BswapFunc = Intrinsic::getDeclaration(
      F.getParent(), Intrinsic::bswap, ArrayRef<Type*>(&OpType, 1));
    setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
    setOrigin(&I, getOrigin(Op));
  }

  void visitIntrinsicInst(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case llvm::Intrinsic::bswap:
      handleBswap(I);
      break;
    default:
      if (!handleUnknownIntrinsic(I))
        visitInstruction(I);
      break;
    }
  }
  void visitCallSite(CallSite CS) {
    Instruction &I = *CS.getInstruction();
    assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite");
    if (CS.isCall()) {
      CallInst *Call = cast<CallInst>(&I);

      // For inline asm, do the usual thing: check argument shadow and mark all
      // outputs as clean. Note that any side effects of the inline asm that
      // are not immediately visible in its constraints are not handled.
      if (Call->isInlineAsm()) {
        visitInstruction(I);
        return;
      }

      // Allow only tail calls with the same types, otherwise
      // we may have a false positive: shadow for a non-void RetVal
      // will get propagated to a void RetVal.
      if (Call->isTailCall() && Call->getType() != Call->getParent()->getType())
        Call->setTailCall(false);

      assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");

      // We are going to insert code that relies on the fact that the callee
      // will become a non-readonly function after it is instrumented by us. To
      // prevent this code from being optimized out, mark that function
      // non-readonly in advance.
      if (Function *Func = Call->getCalledFunction()) {
        // Clear out readonly/readnone attributes.
        AttrBuilder B;
        B.addAttribute(Attribute::ReadOnly)
          .addAttribute(Attribute::ReadNone);
        Func->removeAttributes(AttributeSet::FunctionIndex,
                               AttributeSet::get(Func->getContext(),
                                                 AttributeSet::FunctionIndex,
                                                 B));
      }
    }
    IRBuilder<> IRB(&I);
    unsigned ArgOffset = 0;
    DEBUG(dbgs() << "  CallSite: " << I << "\n");
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      unsigned i = ArgIt - CS.arg_begin();
      if (!A->getType()->isSized()) {
        DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
        continue;
      }
      unsigned Size = 0;
      Value *Store = 0;
      // Compute the Shadow for arg even if it is ByVal, because
      // in that case getShadow() will copy the actual arg shadow to
      // __msan_param_tls.
      Value *ArgShadow = getShadow(A);
      Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
      DEBUG(dbgs() << "  Arg#" << i << ": " << *A <<
            " Shadow: " << *ArgShadow << "\n");
      if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
        assert(A->getType()->isPointerTy() &&
               "ByVal argument is not a pointer!");
        Size = MS.TD->getTypeAllocSize(A->getType()->getPointerElementType());
        unsigned Alignment = CS.getParamAlignment(i + 1);
        Store = IRB.CreateMemCpy(ArgShadowBase,
                                 getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
                                 Size, Alignment);
      } else {
        Size = MS.TD->getTypeAllocSize(A->getType());
        Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
                                       kShadowTLSAlignment);
      }
      if (MS.TrackOrigins)
        IRB.CreateStore(getOrigin(A),
                        getOriginPtrForArgument(A, IRB, ArgOffset));
      assert(Size != 0 && Store != 0);
      DEBUG(dbgs() << "  Param:" << *Store << "\n");
      ArgOffset += DataLayout::RoundUpAlignment(Size, 8);
    }
    DEBUG(dbgs() << "  done with call args\n");

    FunctionType *FT =
      cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0));
    if (FT->isVarArg()) {
      VAHelper->visitCallSite(CS, IRB);
    }

    // Now, get the shadow for the RetVal.
    if (!I.getType()->isSized()) return;
    IRBuilder<> IRBBefore(&I);
    // Until we have full dynamic coverage, make sure the retval shadow is 0.
    Value *Base = getShadowPtrForRetval(&I, IRBBefore);
    IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
    Instruction *NextInsn = 0;
    if (CS.isCall()) {
      NextInsn = I.getNextNode();
    } else {
      BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
      if (!NormalDest->getSinglePredecessor()) {
        // FIXME: this case is tricky, so we are just conservative here.
        // Perhaps we need to split the edge between this BB and NormalDest,
        // but a naive attempt to use SplitEdge leads to a crash.
        setShadow(&I, getCleanShadow(&I));
        setOrigin(&I, getCleanOrigin());
        return;
      }
      NextInsn = NormalDest->getFirstInsertionPt();
      assert(NextInsn &&
             "Could not find insertion point for retval shadow load");
    }
    IRBuilder<> IRBAfter(NextInsn);
    Value *RetvalShadow =
      IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
                                 kShadowTLSAlignment, "_msret");
    setShadow(&I, RetvalShadow);
    if (MS.TrackOrigins)
      setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
  }
  void visitReturnInst(ReturnInst &I) {
    IRBuilder<> IRB(&I);
    if (Value *RetVal = I.getReturnValue()) {
      // Set the shadow for the RetVal.
      Value *Shadow = getShadow(RetVal);
      Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
      DEBUG(dbgs() << "Return: " << *Shadow << "\n" << *ShadowPtr << "\n");
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
      if (MS.TrackOrigins)
        IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
    }
  }

  void visitPHINode(PHINode &I) {
    IRBuilder<> IRB(&I);
    ShadowPHINodes.push_back(&I);
    setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
                                "_msphi_s"));
    if (MS.TrackOrigins)
      setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
                                  "_msphi_o"));
  }
  void visitAllocaInst(AllocaInst &I) {
    setShadow(&I, getCleanShadow(&I));
    if (!ClPoisonStack) return;
    IRBuilder<> IRB(I.getNextNode());
    uint64_t Size = MS.TD->getTypeAllocSize(I.getAllocatedType());
    if (ClPoisonStackWithCall) {
      IRB.CreateCall2(MS.MsanPoisonStackFn,
                      IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
                      ConstantInt::get(MS.IntptrTy, Size));
    } else {
      Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB);
      IRB.CreateMemSet(ShadowBase, IRB.getInt8(ClPoisonStackPattern),
                       Size, I.getAlignment());
    }

    if (MS.TrackOrigins) {
      setOrigin(&I, getCleanOrigin());
      SmallString<2048> StackDescriptionStorage;
      raw_svector_ostream StackDescription(StackDescriptionStorage);
      // We create a string with a description of the stack allocation and
      // pass it into __msan_set_alloca_origin.
      // It will be printed by the run-time if stack-originated UMR is found.
      // The first 4 bytes of the string are set to '----' and will be replaced
      // with the stack frame ID by the runtime at the first call.
      StackDescription << "----" << I.getName() << "@" << F.getName();
      Value *Descr =
        createPrivateNonConstGlobalForString(*F.getParent(),
                                             StackDescription.str());
      IRB.CreateCall3(MS.MsanSetAllocaOriginFn,
                      IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
                      ConstantInt::get(MS.IntptrTy, Size),
                      IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()));
    }
  }

  void visitSelectInst(SelectInst& I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateSelect(I.getCondition(),
              getShadow(I.getTrueValue()), getShadow(I.getFalseValue()),
              "_msprop"));
    if (MS.TrackOrigins) {
      // Origins are always i32, so any vector conditions must be flattened.
      // FIXME: consider tracking vector origins for app vectors?
      Value *Cond = I.getCondition();
      if (Cond->getType()->isVectorTy()) {
        Value *ConvertedShadow = convertToShadowTyNoVec(Cond, IRB);
        Cond = IRB.CreateICmpNE(ConvertedShadow,
                                getCleanShadow(ConvertedShadow), "_mso_select");
      }
      setOrigin(&I, IRB.CreateSelect(Cond,
                getOrigin(I.getTrueValue()), getOrigin(I.getFalseValue())));
    }
  }
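  // Illustrative sketch (not part of the original pass, simplified): for
  //   %r = select <2 x i1> %c, <2 x i32> %a, <2 x i32> %b
  // the origin select needs a scalar condition, so the vector condition is
  // flattened, roughly:
  //   %flat = <non-vector integer view of %c>
  //   %cond = icmp ne %flat, 0                   ; "_mso_select"
  //   %orig = select i1 %cond, i32 %oa, i32 %ob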

  void visitLandingPadInst(LandingPadInst &I) {
    // Do nothing.
    // See http://code.google.com/p/memory-sanitizer/issues/detail?id=1
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitGetElementPtrInst(GetElementPtrInst &I) {
    handleShadowOr(I);
  }

  void visitExtractValueInst(ExtractValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *Agg = I.getAggregateOperand();
    DEBUG(dbgs() << "ExtractValue: " << I << "\n");
    Value *AggShadow = getShadow(Agg);
    DEBUG(dbgs() << "   AggShadow: " << *AggShadow << "\n");
    Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
    DEBUG(dbgs() << "   ResShadow: " << *ResShadow << "\n");
    setShadow(&I, ResShadow);
    setOrigin(&I, getCleanOrigin());
  }

  void visitInsertValueInst(InsertValueInst &I) {
    IRBuilder<> IRB(&I);
    DEBUG(dbgs() << "InsertValue: " << I << "\n");
    Value *AggShadow = getShadow(I.getAggregateOperand());
    Value *InsShadow = getShadow(I.getInsertedValueOperand());
    DEBUG(dbgs() << "   AggShadow: " << *AggShadow << "\n");
    DEBUG(dbgs() << "   InsShadow: " << *InsShadow << "\n");
    Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
    DEBUG(dbgs() << "   Res:       " << *Res << "\n");
    setShadow(&I, Res);
    setOrigin(&I, getCleanOrigin());
  }

  void dumpInst(Instruction &I) {
    if (CallInst *CI = dyn_cast<CallInst>(&I)) {
      // Indirect calls have no called Function; print a placeholder instead
      // of dereferencing a null pointer.
      Function *Callee = CI->getCalledFunction();
      errs() << "ZZZ call "
             << (Callee ? Callee->getName() : StringRef("<indirect>")) << "\n";
    } else {
      errs() << "ZZZ " << I.getOpcodeName() << "\n";
    }
    errs() << "QQQ " << I << "\n";
  }

  void visitResumeInst(ResumeInst &I) {
    DEBUG(dbgs() << "Resume: " << I << "\n");
    // Nothing to do here.
  }

  void visitInstruction(Instruction &I) {
    // Everything else: stop propagating and check for poisoned shadow.
    if (ClDumpStrictInstructions)
      dumpInst(I);
    DEBUG(dbgs() << "DEFAULT: " << I << "\n");
    for (size_t i = 0, n = I.getNumOperands(); i < n; i++)
      insertCheck(I.getOperand(i), &I);
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
};

/// \brief AMD64-specific implementation of VarArgHelper.
struct VarArgAMD64Helper : public VarArgHelper {
  // An unfortunate workaround for asymmetric lowering of va_arg stuff.
  // See a comment in visitCallSite for more details.
  static const unsigned AMD64GpEndOffset = 48;  // AMD64 ABI Draft 0.99.6 p3.5.7
  static const unsigned AMD64FpEndOffset = 176;
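  // These bounds follow the System V AMD64 calling convention: the register
  // save area holds six 8-byte general-purpose argument registers
  // (6 * 8 == 48) followed by eight 16-byte XMM registers
  // (48 + 8 * 16 == 176).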

  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  Value *VAArgTLSCopy;
  Value *VAArgOverflowSize;

  SmallVector<CallInst*, 16> VAStartInstrumentationList;

  VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
                    MemorySanitizerVisitor &MSV)
    : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(0), VAArgOverflowSize(0) { }

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  ArgKind classifyArgument(Value* arg) {
    // A very rough approximation of X86_64 argument classification rules.
    Type *T = arg->getType();
    if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
      return AK_FloatingPoint;
    if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
      return AK_GeneralPurpose;
    if (T->isPointerTy())
      return AK_GeneralPurpose;
    return AK_Memory;
  }
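  // Illustrative examples: an i32 or an i8* classifies as AK_GeneralPurpose,
  // a double or a <4 x float> as AK_FloatingPoint, and anything else (e.g.
  // an i128, or a first-class struct) falls through to AK_Memory.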

  // For VarArg functions, store the argument shadow in an ABI-specific format
  // that corresponds to va_list layout.
  // We do this because Clang lowers va_arg in the frontend, and this pass
  // only sees the low level code that deals with va_list internals.
  // A much easier alternative (provided that Clang emits va_arg instructions)
  // would have been to associate each live instance of va_list with a copy of
  // MSanParamTLS, and extract shadow on va_arg() call in the argument list
  // order.
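  // For reference, the x86_64 __va_list_tag layout that the magic offsets
  // below refer to (sketch per the System V AMD64 ABI; not used by the code):
  //   struct __va_list_tag {
  //     unsigned int gp_offset;       // offset 0
  //     unsigned int fp_offset;       // offset 4
  //     void *overflow_arg_area;      // offset 8
  //     void *reg_save_area;          // offset 16
  //   };                              // sizeof == 24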
  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) {
    unsigned GpOffset = 0;
    unsigned FpOffset = AMD64GpEndOffset;
    unsigned OverflowOffset = AMD64FpEndOffset;
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      ArgKind AK = classifyArgument(A);
      if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
        AK = AK_Memory;
      if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
        AK = AK_Memory;
      Value *Base;
      switch (AK) {
      case AK_GeneralPurpose:
        Base = getShadowPtrForVAArgument(A, IRB, GpOffset);
        GpOffset += 8;
        break;
      case AK_FloatingPoint:
        Base = getShadowPtrForVAArgument(A, IRB, FpOffset);
        FpOffset += 16;
        break;
      case AK_Memory:
        uint64_t ArgSize = MS.TD->getTypeAllocSize(A->getType());
        Base = getShadowPtrForVAArgument(A, IRB, OverflowOffset);
        OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8);
      }
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }
    Constant *OverflowSize =
      ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }
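  // The VAArgTLS image built above mirrors the register save area layout:
  // bytes [0, 48) hold shadow for general-purpose register arguments, bytes
  // [48, 176) hold shadow for floating-point register arguments, and bytes
  // from 176 up hold shadow for arguments passed in memory.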

  /// \brief Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(Value *A, IRBuilder<> &IRB,
                                   int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(A), 0),
                              "_msarg");
  }

  void visitVAStartInst(VAStartInst &I) {
    IRBuilder<> IRB(&I);
    VAStartInstrumentationList.push_back(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);

    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */ 24, /* alignment */ 8, false);
  }
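  // The magic 24 above is sizeof(__va_list_tag) on x86_64: two 4-byte
  // offsets plus two 8-byte pointers (see the layout sketch before
  // visitCallSite).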

  void visitVACopyInst(VACopyInst &I) {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);

    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */ 24, /* alignment */ 8, false);
  }

  void finalizeInstrumentation() {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
      VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
      Value *CopySize =
        IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
                      VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
    }
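
    // The backup is taken in the entry block because VAArgTLS is per-thread
    // scratch space: any varargs call made later in this function would
    // overwrite it before the va_start instrumentation below reads it.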

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
      CallInst *OrigInst = VAStartInstrumentationList[i];
      IRBuilder<> IRB(OrigInst->getNextNode());
      Value *VAListTag = OrigInst->getArgOperand(0);

      Value *RegSaveAreaPtrPtr =
        IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 16)),
          Type::getInt64PtrTy(*MS.C));
      Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr =
        MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy,
                       AMD64FpEndOffset, 16);

      Value *OverflowArgAreaPtrPtr =
        IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 8)),
          Type::getInt64PtrTy(*MS.C));
      Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
      Value *OverflowArgAreaShadowPtr =
        MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
      Value *SrcPtr =
        getShadowPtrForVAArgument(VAArgTLSCopy, IRB, AMD64FpEndOffset);
      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16);
    }
  }
};

VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                 MemorySanitizerVisitor &Visitor) {
  return new VarArgAMD64Helper(Func, Msan, Visitor);
}

}  // namespace

bool MemorySanitizer::runOnFunction(Function &F) {
  MemorySanitizerVisitor Visitor(F, *this);

  // Clear out readonly/readnone attributes: instrumented code reads and
  // writes shadow memory, so these attributes no longer hold.
  AttrBuilder B;
  B.addAttribute(Attribute::ReadOnly)
    .addAttribute(Attribute::ReadNone);
  F.removeAttributes(AttributeSet::FunctionIndex,
                     AttributeSet::get(F.getContext(),
                                       AttributeSet::FunctionIndex, B));

  return Visitor.runOnFunction();
}