//===-- MemorySanitizer.cpp - detector of uninitialized reads -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file is a part of MemorySanitizer, a detector of uninitialized
/// reads.
///
/// Status: early prototype.
///
/// The algorithm of the tool is similar to Memcheck
/// (http://goo.gl/QKbem). We associate a few shadow bits with every
/// byte of the application memory, poison the shadow of the malloc-ed
/// or alloca-ed memory, load the shadow bits on every memory read,
/// propagate the shadow bits through some of the arithmetic
/// instructions (including MOV), store the shadow bits on every memory
/// write, report a bug on some other instructions (e.g. JMP) if the
/// associated shadow is poisoned.
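///
/// For example (an illustrative sketch, not literal compiler output):
///
///   int x;          // shadow(x) is poisoned (all ones) by the alloca
///   int y = x + 1;  // shadow(y) = shadow(x): the add propagates shadow
///                   // (the constant 1 has a clean shadow)
///   if (y) ...      // branching on a poisoned value triggers a report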
///
/// But there are differences too. The first and the major one:
/// compiler instrumentation instead of binary instrumentation. This
/// gives us much better register allocation, possible compiler
/// optimizations and a fast start-up. But this brings the major issue
/// as well: msan needs to see all program events, including system
/// calls and reads/writes in system libraries, so we either need to
/// compile *everything* with msan or use a binary translation
/// component (e.g. DynamoRIO) to instrument pre-built libraries.
/// Another difference from Memcheck is that we use 8 shadow bits per
/// byte of application memory and use a direct shadow mapping. This
/// greatly simplifies the instrumentation code and avoids races on
/// shadow updates (Memcheck is single-threaded so races are not a
/// concern there. Memcheck uses 2 shadow bits per byte with a slow
/// path storage that uses 8 bits per byte).
///
/// The default value of shadow is 0, which means "clean" (not poisoned).
///
/// Every module initializer should call __msan_init to ensure that the
/// shadow memory is ready. On error, __msan_warning is called. Since
/// parameters and return values may be passed via registers, we have a
/// specialized thread-local shadow for return values
/// (__msan_retval_tls) and parameters (__msan_param_tls).
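///
/// For example, for a call to "int f(int x)" (a sketch of the intended
/// protocol): the caller stores shadow(x) into __msan_param_tls before
/// the call; the callee loads it from the same slot on entry, stores the
/// shadow of its return value into __msan_retval_tls before returning,
/// and the caller loads it back right after the call.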
///
///                           Origin tracking.
///
/// MemorySanitizer can track origins (allocation points) of all uninitialized
/// values. This behavior is controlled with a flag (msan-track-origins) and is
/// disabled by default.
///
/// Origins are 4-byte values created and interpreted by the runtime library.
/// They are stored in a second shadow mapping, one 4-byte value for every 4
/// bytes of application memory. Propagation of origins is basically a bunch of
/// "select" instructions that pick the origin of a dirty argument, if an
/// instruction has one.
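///
/// E.g. for "c = a + b" with origin tracking enabled (a sketch):
///   origin(c) = select(shadow(b) != 0, origin(b), origin(a))
/// i.e. the origin of the rightmost poisoned argument wins.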
///
/// Every 4 aligned, consecutive bytes of application memory have one origin
/// value associated with them. If these bytes contain uninitialized data
/// coming from 2 different allocations, the last store wins. Because of this,
/// MemorySanitizer reports can show unrelated origins, but this is unlikely
/// in practice.
///
/// Origins are meaningless for fully initialized values, so MemorySanitizer
/// avoids storing origin to memory when a fully initialized value is stored.
/// This way it avoids needless overwriting of the origin of the 4-byte region
/// on a short (i.e. 1 byte) clean store, and it is also good for performance.
///
///                            Atomic handling.
///
/// Ideally, every atomic store of application value should update the
/// corresponding shadow location in an atomic way. Unfortunately, atomic store
/// of two disjoint locations cannot be done without severe slowdown.
///
/// Therefore, we implement an approximation that may err on the safe side.
/// In this implementation, every atomically accessed location in the program
/// may only change from (partially) uninitialized to fully initialized, but
/// not the other way around. We load the shadow _after_ the application load,
/// and we store the shadow _before_ the app store. Also, we always store clean
/// shadow (if the application store is atomic). This way, if the store-load
/// pair constitutes a happens-before arc, shadow store and load are correctly
/// ordered such that the load will get either the value that was stored, or
/// some later value (which is always clean).
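///
/// For example, the emitted sequences look roughly like this (a sketch):
///
///   atomic store:                      atomic load:
///     store %clean_shadow, %sptr         %v = load atomic %ptr acquire
///     store atomic %v, %ptr release      %s = load %sptr
///
/// i.e. the shadow store precedes the app store, and the shadow load
/// follows the app load.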
///
/// This does not work very well with Compare-And-Swap (CAS) and
/// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
/// must store the new shadow before the app operation, and load the shadow
/// after the app operation. Computers don't work this way. Current
/// implementation ignores the load aspect of CAS/RMW, always returning a clean
/// value. It implements the store part as a simple atomic store by storing a
/// clean shadow.
///
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "msan"

#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/SpecialCaseList.h"

using namespace llvm;
static const uint64_t kShadowMask32 = 1ULL << 31;
static const uint64_t kShadowMask64 = 1ULL << 46;
static const uint64_t kOriginOffset32 = 1ULL << 30;
static const uint64_t kOriginOffset64 = 1ULL << 45;
static const unsigned kMinOriginAlignment = 4;
static const unsigned kShadowTLSAlignment = 8;
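// For illustration (x86_64, the values above): an application address such
// as 0x7f1234005678 maps to
//   Shadow = Addr & ~kShadowMask64            = 0x3f1234005678
//   Origin = (Shadow + kOriginOffset64) & ~3  (a 4-byte aligned slot)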
/// \brief Track origins of uninitialized values.
///
/// Adds a section to MemorySanitizer report that points to the allocation
/// (stack or heap) the uninitialized bits came from originally.
static cl::opt<bool> ClTrackOrigins("msan-track-origins",
       cl::desc("Track origins (allocation sites) of poisoned memory"),
       cl::Hidden, cl::init(false));
static cl::opt<bool> ClKeepGoing("msan-keep-going",
       cl::desc("keep going after reporting a UMR"),
       cl::Hidden, cl::init(false));
static cl::opt<bool> ClPoisonStack("msan-poison-stack",
       cl::desc("poison uninitialized stack variables"),
       cl::Hidden, cl::init(true));
static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
       cl::desc("poison uninitialized stack variables with a call"),
       cl::Hidden, cl::init(false));
static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
       cl::desc("poison uninitialized stack variables with the given pattern"),
       cl::Hidden, cl::init(0xff));
static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
       cl::desc("poison undef temps"),
       cl::Hidden, cl::init(true));
static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
       cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
       cl::desc("exact handling of relational integer ICmp"),
       cl::Hidden, cl::init(false));

static cl::opt<bool> ClStoreCleanOrigin("msan-store-clean-origin",
       cl::desc("store origin for clean (fully initialized) values"),
       cl::Hidden, cl::init(false));
// This flag controls whether we check the shadow of the address
// operand of load or store. Such bugs are very rare, since load from
// a garbage address typically results in SEGV, but still happen
// (e.g. only lower bits of address are garbage, or the access happens
// early at program startup where malloc-ed memory is more likely to
// be zeroed). As of 2012-08-28 this flag adds 20% slowdown.
static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
       cl::desc("report accesses through a pointer which has poisoned shadow"),
       cl::Hidden, cl::init(true));
static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
       cl::desc("print out instructions with default strict semantics"),
       cl::Hidden, cl::init(false));

static cl::opt<std::string> ClBlacklistFile("msan-blacklist",
       cl::desc("File containing the list of functions where MemorySanitizer "
                "should not report bugs"), cl::Hidden);
// Experimental. Wraps all indirect calls in the instrumented code with
// a call to the given function. This is needed to assist the dynamic
// helper tool (MSanDR) to regain control on transition between instrumented
// and non-instrumented code.
static cl::opt<std::string> ClWrapIndirectCalls("msan-wrap-indirect-calls",
       cl::desc("Wrap indirect calls with a given function"),
       cl::Hidden);

static cl::opt<bool> ClWrapIndirectCallsFast("msan-wrap-indirect-calls-fast",
       cl::desc("Do not wrap indirect calls with target in the same module"),
       cl::Hidden, cl::init(true));
namespace {

/// \brief An instrumentation pass implementing detection of uninitialized
/// reads.
///
/// MemorySanitizer: instrument the code in module to find
/// uninitialized reads.
class MemorySanitizer : public FunctionPass {
 public:
  MemorySanitizer(bool TrackOrigins = false,
                  StringRef BlacklistFile = StringRef())
      : FunctionPass(ID),
        TrackOrigins(TrackOrigins || ClTrackOrigins),
        BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile : BlacklistFile),
        WrapIndirectCalls(!ClWrapIndirectCalls.empty()) {}
  const char *getPassName() const override { return "MemorySanitizer"; }
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid.

 private:
  void initializeCallbacks(Module &M);

  /// \brief Track origins (allocation points) of uninitialized values.
  bool TrackOrigins;

  const DataLayout *DL;
  LLVMContext *C;
  Type *IntptrTy;
  Type *OriginTy;
  /// \brief Thread-local shadow storage for function parameters.
  GlobalVariable *ParamTLS;
  /// \brief Thread-local origin storage for function parameters.
  GlobalVariable *ParamOriginTLS;
  /// \brief Thread-local shadow storage for function return value.
  GlobalVariable *RetvalTLS;
  /// \brief Thread-local origin storage for function return value.
  GlobalVariable *RetvalOriginTLS;
  /// \brief Thread-local shadow storage for in-register va_arg function
  /// parameters (x86_64-specific).
  GlobalVariable *VAArgTLS;
  /// \brief Thread-local shadow storage for va_arg overflow area
  /// (x86_64-specific).
  GlobalVariable *VAArgOverflowSizeTLS;
  /// \brief Thread-local space used to pass origin value to the UMR reporting
  /// function.
  GlobalVariable *OriginTLS;

  GlobalVariable *MsandrModuleStart;
  GlobalVariable *MsandrModuleEnd;

  /// \brief The run-time callback to print a warning.
  Value *WarningFn;
  /// \brief Run-time helper that copies origin info for a memory range.
  Value *MsanCopyOriginFn;
  /// \brief Run-time helper that generates a new origin value for a stack
  /// allocation.
  Value *MsanSetAllocaOrigin4Fn;
  /// \brief Run-time helper that poisons stack on function entry.
  Value *MsanPoisonStackFn;
  /// \brief MSan runtime replacements for memmove, memcpy and memset.
  Value *MemmoveFn, *MemcpyFn, *MemsetFn;

  /// \brief Address mask used in application-to-shadow address calculation.
  /// ShadowAddr is computed as ApplicationAddr & ~ShadowMask.
  uint64_t ShadowMask;
  /// \brief Offset of the origin shadow from the "normal" shadow.
  /// OriginAddr is computed as (ShadowAddr + OriginOffset) & ~3ULL
  uint64_t OriginOffset;
  /// \brief Branch weights for error reporting.
  MDNode *ColdCallWeights;
  /// \brief Branch weights for origin store.
  MDNode *OriginStoreWeights;
  /// \brief Path to blacklist file.
  SmallString<64> BlacklistFile;
  /// \brief The blacklist.
  OwningPtr<SpecialCaseList> BL;
  /// \brief An empty volatile inline asm that prevents callback merge.
  InlineAsm *EmptyAsm;

  bool WrapIndirectCalls;
  /// \brief Run-time wrapper for indirect calls.
  Value *IndirectCallWrapperFn;
  // Argument and return type of IndirectCallWrapperFn: void (*f)(void).
  Type *AnyFunctionPtrTy;

  friend struct MemorySanitizerVisitor;
  friend struct VarArgAMD64Helper;
};

}  // namespace
char MemorySanitizer::ID = 0;
INITIALIZE_PASS(MemorySanitizer, "msan",
                "MemorySanitizer: detects uninitialized reads.",
                false, false)

FunctionPass *llvm::createMemorySanitizerPass(bool TrackOrigins,
                                              StringRef BlacklistFile) {
  return new MemorySanitizer(TrackOrigins, BlacklistFile);
}
/// \brief Create a non-const global initialized with the given string.
///
/// Creates a writable global for Str so that we can pass it to the
/// run-time lib. Runtime uses first 4 bytes of the string to store the
/// frame ID, so the string needs to be mutable.
static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
                                                            StringRef Str) {
  Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
  return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
                            GlobalValue::PrivateLinkage, StrConst, "");
}
/// \brief Insert extern declaration of runtime-provided functions and globals.
void MemorySanitizer::initializeCallbacks(Module &M) {
  // Only do this once.
  if (WarningFn)
    return;

  IRBuilder<> IRB(*C);
  // Create the callback.
  // FIXME: this function should have "Cold" calling conv,
  // which is not yet implemented.
  StringRef WarningFnName = ClKeepGoing ? "__msan_warning"
                                        : "__msan_warning_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), NULL);

  MsanCopyOriginFn = M.getOrInsertFunction(
    "__msan_copy_origin", IRB.getVoidTy(), IRB.getInt8PtrTy(),
    IRB.getInt8PtrTy(), IntptrTy, NULL);
  MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
    "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
    IRB.getInt8PtrTy(), IntptrTy, NULL);
  MsanPoisonStackFn = M.getOrInsertFunction(
    "__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, NULL);
  MemmoveFn = M.getOrInsertFunction(
    "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
    IRB.getInt8PtrTy(), IntptrTy, NULL);
  MemcpyFn = M.getOrInsertFunction(
    "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
    IntptrTy, NULL);
  MemsetFn = M.getOrInsertFunction(
    "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
    IntptrTy, NULL);
  RetvalTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 8), false,
    GlobalVariable::ExternalLinkage, 0, "__msan_retval_tls", 0,
    GlobalVariable::InitialExecTLSModel);
  RetvalOriginTLS = new GlobalVariable(
    M, OriginTy, false, GlobalVariable::ExternalLinkage, 0,
    "__msan_retval_origin_tls", 0, GlobalVariable::InitialExecTLSModel);

  ParamTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
    GlobalVariable::ExternalLinkage, 0, "__msan_param_tls", 0,
    GlobalVariable::InitialExecTLSModel);
  ParamOriginTLS = new GlobalVariable(
    M, ArrayType::get(OriginTy, 1000), false, GlobalVariable::ExternalLinkage,
    0, "__msan_param_origin_tls", 0, GlobalVariable::InitialExecTLSModel);

  VAArgTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
    GlobalVariable::ExternalLinkage, 0, "__msan_va_arg_tls", 0,
    GlobalVariable::InitialExecTLSModel);
  VAArgOverflowSizeTLS = new GlobalVariable(
    M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, 0,
    "__msan_va_arg_overflow_size_tls", 0,
    GlobalVariable::InitialExecTLSModel);
  OriginTLS = new GlobalVariable(
    M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, 0,
    "__msan_origin_tls", 0, GlobalVariable::InitialExecTLSModel);

  // We insert an empty inline asm after __msan_report* to avoid callback merge.
  EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
                            StringRef(""), StringRef(""),
                            /*hasSideEffects=*/true);
  if (WrapIndirectCalls) {
    AnyFunctionPtrTy =
        PointerType::getUnqual(FunctionType::get(IRB.getVoidTy(), false));
    IndirectCallWrapperFn = M.getOrInsertFunction(
        ClWrapIndirectCalls, AnyFunctionPtrTy, AnyFunctionPtrTy, NULL);
  }

  if (ClWrapIndirectCallsFast) {
    MsandrModuleStart = new GlobalVariable(
        M, IRB.getInt32Ty(), false, GlobalValue::ExternalLinkage,
        0, "__executable_start");
    MsandrModuleStart->setVisibility(GlobalVariable::HiddenVisibility);
    MsandrModuleEnd = new GlobalVariable(
        M, IRB.getInt32Ty(), false, GlobalValue::ExternalLinkage,
        0, "_end");
    MsandrModuleEnd->setVisibility(GlobalVariable::HiddenVisibility);
  }
}
/// \brief Module-level initialization.
///
/// Inserts a call to __msan_init to the module's constructor list.
bool MemorySanitizer::doInitialization(Module &M) {
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  if (!DLP)
    return false;
  DL = &DLP->getDataLayout();

  BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
  C = &(M.getContext());
  unsigned PtrSize = DL->getPointerSizeInBits(/* AddressSpace */0);
  switch (PtrSize) {
    case 64:
      ShadowMask = kShadowMask64;
      OriginOffset = kOriginOffset64;
      break;
    case 32:
      ShadowMask = kShadowMask32;
      OriginOffset = kOriginOffset32;
      break;
    default:
      report_fatal_error("unsupported pointer size");
  }

  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(DL);
  OriginTy = IRB.getInt32Ty();

  ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
  OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);

  // Insert a call to __msan_init/__msan_track_origins into the module's CTORs.
  appendToGlobalCtors(M, cast<Function>(M.getOrInsertFunction(
                      "__msan_init", IRB.getVoidTy(), NULL)), 0);

  if (TrackOrigins)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(TrackOrigins), "__msan_track_origins");

  if (ClKeepGoing)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(ClKeepGoing), "__msan_keep_going");

  return true;
}
namespace {

/// \brief A helper class that handles instrumentation of VarArg
/// functions on a particular platform.
///
/// Implementations are expected to insert the instrumentation
/// necessary to propagate argument shadow through VarArg function
/// calls. Visit* methods are called during an InstVisitor pass over
/// the function, and should avoid creating new basic blocks. A new
/// instance of this class is created for each instrumented function.
struct VarArgHelper {
  /// \brief Visit a CallSite.
  virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;

  /// \brief Visit a va_start call.
  virtual void visitVAStartInst(VAStartInst &I) = 0;

  /// \brief Visit a va_copy call.
  virtual void visitVACopyInst(VACopyInst &I) = 0;

  /// \brief Finalize function instrumentation.
  ///
  /// This method is called after visiting all interesting (see above)
  /// instructions in a function.
  virtual void finalizeInstrumentation() = 0;

  virtual ~VarArgHelper() {}
};

struct MemorySanitizerVisitor;

VarArgHelper*
CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                   MemorySanitizerVisitor &Visitor);
/// This class does all the work for a given function. Store and Load
/// instructions store and load corresponding shadow and origin
/// values. Most instructions propagate shadow from arguments to their
/// return values. Certain instructions (most importantly, BranchInst)
/// test their argument shadow and print reports (with a runtime call) if it's
/// non-zero.
struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  Function &F;
  MemorySanitizer &MS;
  SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
  ValueMap<Value*, Value*> ShadowMap, OriginMap;
  OwningPtr<VarArgHelper> VAHelper;

  // The following flags disable parts of MSan instrumentation based on
  // blacklist contents and command-line options.
  bool InsertChecks;
  bool LoadShadow;
  bool PoisonStack;
  bool PoisonUndef;
  bool CheckReturnValue;

  struct ShadowOriginAndInsertPoint {
    Value *Shadow;
    Value *Origin;
    Instruction *OrigIns;
    ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
      : Shadow(S), Origin(O), OrigIns(I) { }
    ShadowOriginAndInsertPoint() : Shadow(0), Origin(0), OrigIns(0) { }
  };
  SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
  SmallVector<Instruction*, 16> StoreList;
  SmallVector<CallSite, 16> IndirectCallList;

  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
      : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
    bool SanitizeFunction = !MS.BL->isIn(F) && F.getAttributes().hasAttribute(
        AttributeSet::FunctionIndex,
        Attribute::SanitizeMemory);
    InsertChecks = SanitizeFunction;
    LoadShadow = SanitizeFunction;
    PoisonStack = SanitizeFunction && ClPoisonStack;
    PoisonUndef = SanitizeFunction && ClPoisonUndef;
    // FIXME: Consider using SpecialCaseList to specify a list of functions
    // that must always return fully initialized values. For now, we hardcode
    // "main".
    CheckReturnValue = SanitizeFunction && (F.getName() == "main");

    DEBUG(if (!InsertChecks)
          dbgs() << "MemorySanitizer is not inserting checks into '"
                 << F.getName() << "'\n");
  }
  void materializeStores() {
    for (size_t i = 0, n = StoreList.size(); i < n; i++) {
      StoreInst &I = *dyn_cast<StoreInst>(StoreList[i]);

      IRBuilder<> IRB(&I);
      Value *Val = I.getValueOperand();
      Value *Addr = I.getPointerOperand();
      Value *Shadow = I.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
      Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

      StoreInst *NewSI =
        IRB.CreateAlignedStore(Shadow, ShadowPtr, I.getAlignment());
      DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
      (void)NewSI;

      if (ClCheckAccessAddress)
        insertShadowCheck(Addr, &I);

      if (I.isAtomic())
        I.setOrdering(addReleaseOrdering(I.getOrdering()));

      if (MS.TrackOrigins) {
        unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
        if (ClStoreCleanOrigin || isa<StructType>(Shadow->getType())) {
          IRB.CreateAlignedStore(getOrigin(Val), getOriginPtr(Addr, IRB),
                                 Alignment);
        } else {
          Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);

          // TODO(eugenis): handle non-zero constant shadow by inserting an
          // unconditional check (can not simply fail compilation as this could
          // be in the dead code).
          if (isa<Constant>(ConvertedShadow))
            continue;

          Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
              getCleanShadow(ConvertedShadow), "_mscmp");
          Instruction *CheckTerm =
            SplitBlockAndInsertIfThen(Cmp, &I, false, MS.OriginStoreWeights);
          IRBuilder<> IRBNew(CheckTerm);
          IRBNew.CreateAlignedStore(getOrigin(Val), getOriginPtr(Addr, IRBNew),
                                    Alignment);
        }
      }
    }
  }
  void materializeChecks() {
    for (size_t i = 0, n = InstrumentationList.size(); i < n; i++) {
      Value *Shadow = InstrumentationList[i].Shadow;
      Instruction *OrigIns = InstrumentationList[i].OrigIns;
      IRBuilder<> IRB(OrigIns);
      DEBUG(dbgs() << "  SHAD0 : " << *Shadow << "\n");
      Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
      DEBUG(dbgs() << "  SHAD1 : " << *ConvertedShadow << "\n");
      // See the comment in materializeStores().
      if (isa<Constant>(ConvertedShadow))
        continue;
      Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
          getCleanShadow(ConvertedShadow), "_mscmp");
      Instruction *CheckTerm = SplitBlockAndInsertIfThen(
          Cmp, OrigIns,
          /* Unreachable */ !ClKeepGoing, MS.ColdCallWeights);

      IRB.SetInsertPoint(CheckTerm);
      if (MS.TrackOrigins) {
        Value *Origin = InstrumentationList[i].Origin;
        IRB.CreateStore(Origin ? (Value*)Origin : (Value*)IRB.getInt32(0),
                        MS.OriginTLS);
      }
      CallInst *Call = IRB.CreateCall(MS.WarningFn);
      Call->setDebugLoc(OrigIns->getDebugLoc());
      IRB.CreateCall(MS.EmptyAsm);
      DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
    }
    DEBUG(dbgs() << "DONE:\n" << F);
  }
  void materializeIndirectCalls() {
    for (size_t i = 0, n = IndirectCallList.size(); i < n; i++) {
      CallSite CS = IndirectCallList[i];
      Instruction *I = CS.getInstruction();
      BasicBlock *B = I->getParent();
      IRBuilder<> IRB(I);
      Value *Fn0 = CS.getCalledValue();
      Value *Fn = IRB.CreateBitCast(Fn0, MS.AnyFunctionPtrTy);

      if (ClWrapIndirectCallsFast) {
        // Check that call target is inside this module limits.
        Value *Start =
          IRB.CreateBitCast(MS.MsandrModuleStart, MS.AnyFunctionPtrTy);
        Value *End = IRB.CreateBitCast(MS.MsandrModuleEnd, MS.AnyFunctionPtrTy);

        Value *NotInThisModule = IRB.CreateOr(IRB.CreateICmpULT(Fn, Start),
                                              IRB.CreateICmpUGE(Fn, End));

        PHINode *NewFnPhi =
          IRB.CreatePHI(Fn0->getType(), 2, "msandr.indirect_target");

        Instruction *CheckTerm = SplitBlockAndInsertIfThen(
            NotInThisModule, NewFnPhi,
            /* Unreachable */ false, MS.ColdCallWeights);

        IRB.SetInsertPoint(CheckTerm);
        // Slow path: call wrapper function to possibly transform the call
        // target.
        Value *NewFn = IRB.CreateBitCast(
            IRB.CreateCall(MS.IndirectCallWrapperFn, Fn), Fn0->getType());

        NewFnPhi->addIncoming(Fn0, B);
        NewFnPhi->addIncoming(NewFn, dyn_cast<Instruction>(NewFn)->getParent());
        CS.setCalledFunction(NewFnPhi);
      } else {
        Value *NewFn = IRB.CreateBitCast(
            IRB.CreateCall(MS.IndirectCallWrapperFn, Fn), Fn0->getType());
        CS.setCalledFunction(NewFn);
      }
    }
  }
  /// \brief Add MemorySanitizer instrumentation to a function.
  bool runOnFunction() {
    MS.initializeCallbacks(*F.getParent());
    if (!MS.DL) return false;

    // In the presence of unreachable blocks, we may see Phi nodes with
    // incoming nodes from such blocks. Since InstVisitor skips unreachable
    // blocks, such nodes will not have any shadow value associated with them.
    // It's easier to remove unreachable blocks than deal with missing shadow.
    removeUnreachableBlocks(F);

    // Iterate all BBs in depth-first order and create shadow instructions
    // for all instructions (where applicable).
    // For PHI nodes we create dummy shadow PHIs which will be finalized later.
    for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
         DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
      BasicBlock *BB = *DI;
      visit(*BB);
    }

    // Finalize PHI nodes.
    for (size_t i = 0, n = ShadowPHINodes.size(); i < n; i++) {
      PHINode *PN = ShadowPHINodes[i];
      PHINode *PNS = cast<PHINode>(getShadow(PN));
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : 0;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
        if (PNO)
          PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
      }
    }

    VAHelper->finalizeInstrumentation();

    // Delayed instrumentation of StoreInst.
    // This may add new checks to be inserted later.
    materializeStores();

    // Insert shadow value checks.
    materializeChecks();

    // Wrap indirect calls.
    materializeIndirectCalls();

    return true;
  }
  /// \brief Compute the shadow type that corresponds to a given Value.
  Type *getShadowTy(Value *V) {
    return getShadowTy(V->getType());
  }

  /// \brief Compute the shadow type that corresponds to a given Type.
  Type *getShadowTy(Type *OrigTy) {
    if (!OrigTy->isSized()) {
      return 0;
    }
    // For integer type, shadow is the same as the original type.
    // This may return weird-sized types like i1.
    if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
      return IT;
    if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
      uint32_t EltSize = MS.DL->getTypeSizeInBits(VT->getElementType());
      return VectorType::get(IntegerType::get(*MS.C, EltSize),
                             VT->getNumElements());
    }
    if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
      SmallVector<Type*, 4> Elements;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
      DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
      return Res;
    }
    uint32_t TypeSize = MS.DL->getTypeSizeInBits(OrigTy);
    return IntegerType::get(*MS.C, TypeSize);
  }
  /// \brief Flatten a vector type.
  Type *getShadowTyNoVec(Type *ty) {
    if (VectorType *vt = dyn_cast<VectorType>(ty))
      return IntegerType::get(*MS.C, vt->getBitWidth());
    return ty;
  }

  /// \brief Convert a shadow value to its flattened variant.
  Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
    Type *Ty = V->getType();
    Type *NoVecTy = getShadowTyNoVec(Ty);
    if (Ty == NoVecTy) return V;
    return IRB.CreateBitCast(V, NoVecTy);
  }
  /// \brief Compute the shadow address that corresponds to a given
  /// application address.
  ///
  /// Shadow = Addr & ~ShadowMask.
  Value *getShadowPtr(Value *Addr, Type *ShadowTy,
                      IRBuilder<> &IRB) {
    Value *ShadowLong =
      IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                    ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
    return IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
  }

  /// \brief Compute the origin address that corresponds to a given
  /// application address.
  ///
  /// OriginAddr = (ShadowAddr + OriginOffset) & ~3ULL
  Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB) {
    Value *ShadowLong =
      IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                    ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
    Value *Add =
      IRB.CreateAdd(ShadowLong,
                    ConstantInt::get(MS.IntptrTy, MS.OriginOffset));
    Value *SecondAnd =
      IRB.CreateAnd(Add, ConstantInt::get(MS.IntptrTy, ~3ULL));
    return IRB.CreateIntToPtr(SecondAnd, PointerType::get(IRB.getInt32Ty(), 0));
  }
  /// \brief Compute the shadow address for a given function argument.
  ///
  /// Shadow = ParamTLS+ArgOffset.
  Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msarg");
  }

  /// \brief Compute the origin address for a given function argument.
  Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    if (!MS.TrackOrigins) return 0;
    Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
                              "_msarg_o");
  }

  /// \brief Compute the shadow address for a retval.
  Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
    Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy);
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msret");
  }

  /// \brief Compute the origin address for a retval.
  Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
    // We keep a single origin for the entire retval. Might be too optimistic.
    return MS.RetvalOriginTLS;
  }
  /// \brief Set SV to be the shadow value for V.
  void setShadow(Value *V, Value *SV) {
    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = SV;
  }

  /// \brief Set Origin to be the origin value for V.
  void setOrigin(Value *V, Value *Origin) {
    if (!MS.TrackOrigins) return;
    assert(!OriginMap.count(V) && "Values may only have one origin");
    DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n");
    OriginMap[V] = Origin;
  }

  /// \brief Create a clean shadow value for a given value.
  ///
  /// Clean shadow (all zeroes) means all bits of the value are defined
  /// (initialized).
  Constant *getCleanShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return 0;
    return Constant::getNullValue(ShadowTy);
  }

  /// \brief Create a dirty shadow of a given shadow type.
  Constant *getPoisonedShadow(Type *ShadowTy) {
    assert(ShadowTy);
    if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
      return Constant::getAllOnesValue(ShadowTy);
    StructType *ST = cast<StructType>(ShadowTy);
    SmallVector<Constant *, 4> Vals;
    for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
      Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
    return ConstantStruct::get(ST, Vals);
  }

  /// \brief Create a dirty shadow for a given value.
  Constant *getPoisonedShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return 0;
    return getPoisonedShadow(ShadowTy);
  }

  /// \brief Create a clean (zero) origin.
  Value *getCleanOrigin() {
    return Constant::getNullValue(MS.OriginTy);
  }
  /// \brief Get the shadow value for a given Value.
  ///
  /// This function either returns the value set earlier with setShadow,
  /// or extracts it from ParamTLS (for function arguments).
  Value *getShadow(Value *V) {
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      // For instructions the shadow is already stored in the map.
      Value *Shadow = ShadowMap[V];
      if (!Shadow) {
        DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
        (void)I;
        assert(Shadow && "No shadow for a value");
      }
      return Shadow;
    }
    if (UndefValue *U = dyn_cast<UndefValue>(V)) {
      Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
      DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
      (void)U;
      return AllOnes;
    }
    if (Argument *A = dyn_cast<Argument>(V)) {
      // For arguments we compute the shadow on demand and store it in the map.
      Value **ShadowPtr = &ShadowMap[V];
      if (*ShadowPtr)
        return *ShadowPtr;
      Function *F = A->getParent();
      IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
      unsigned ArgOffset = 0;
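      // Slot layout illustration (not from the original source): for
      // f(i8, i32, <4 x i32>) the shadow slots in __msan_param_tls start at
      // offsets 0, 8 and 16 -- each slot is the argument's alloc size rounded
      // up to kShadowTLSAlignment (8) bytes.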
      for (Function::arg_iterator AI = F->arg_begin(), AE = F->arg_end();
           AI != AE; ++AI) {
        if (!AI->getType()->isSized()) {
          DEBUG(dbgs() << "Arg is not sized\n");
          continue;
        }
        unsigned Size = AI->hasByValAttr()
          ? MS.DL->getTypeAllocSize(AI->getType()->getPointerElementType())
          : MS.DL->getTypeAllocSize(AI->getType());
        if (A == AI) {
          Value *Base = getShadowPtrForArgument(AI, EntryIRB, ArgOffset);
          if (AI->hasByValAttr()) {
            // ByVal pointer itself has clean shadow. We copy the actual
            // argument shadow to the underlying memory.
            // Figure out maximal valid memcpy alignment.
            unsigned ArgAlign = AI->getParamAlignment();
            if (ArgAlign == 0) {
              Type *EltType = A->getType()->getPointerElementType();
              ArgAlign = MS.DL->getABITypeAlignment(EltType);
            }
            unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
            Value *Cpy = EntryIRB.CreateMemCpy(
              getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size,
              CopyAlign);
            DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
            (void)Cpy;
            *ShadowPtr = getCleanShadow(V);
          } else {
            *ShadowPtr = EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
          }
          DEBUG(dbgs() << "  ARG: " << *AI << " ==> " <<
                **ShadowPtr << "\n");
          if (MS.TrackOrigins) {
            Value *OriginPtr = getOriginPtrForArgument(AI, EntryIRB, ArgOffset);
            setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
          }
        }
        ArgOffset += DataLayout::RoundUpAlignment(Size, kShadowTLSAlignment);
      }
      assert(*ShadowPtr && "Could not find shadow for an argument");
      return *ShadowPtr;
    }
    // For everything else the shadow is zero.
    return getCleanShadow(V);
  }
  /// \brief Get the shadow for i-th argument of the instruction I.
  Value *getShadow(Instruction *I, int i) {
    return getShadow(I->getOperand(i));
  }

  /// \brief Get the origin for a value.
  Value *getOrigin(Value *V) {
    if (!MS.TrackOrigins) return 0;
    if (isa<Instruction>(V) || isa<Argument>(V)) {
      Value *Origin = OriginMap[V];
      if (!Origin) {
        DEBUG(dbgs() << "NO ORIGIN: " << *V << "\n");
        Origin = getCleanOrigin();
      }
      return Origin;
    }
    return getCleanOrigin();
  }

  /// \brief Get the origin for i-th argument of the instruction I.
  Value *getOrigin(Instruction *I, int i) {
    return getOrigin(I->getOperand(i));
  }
  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning at runtime if the shadow value is not 0.
  void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
    assert(Shadow);
    if (!InsertChecks) return;
#ifndef NDEBUG
    Type *ShadowTy = Shadow->getType();
    assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
           "Can only insert checks for integer and vector shadow types");
#endif
    InstrumentationList.push_back(
      ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
  }

  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning at runtime if the value is not fully defined.
  void insertShadowCheck(Value *Val, Instruction *OrigIns) {
    assert(Val);
    Instruction *Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
    if (!Shadow) return;
    Instruction *Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
    insertShadowCheck(Shadow, Origin, OrigIns);
  }
  AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
    switch (a) {
      case NotAtomic:
        return NotAtomic;
      case Unordered:
      case Monotonic:
      case Release:
        return Release;
      case Acquire:
      case AcquireRelease:
        return AcquireRelease;
      case SequentiallyConsistent:
        return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }

  AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
    switch (a) {
      case NotAtomic:
        return NotAtomic;
      case Unordered:
      case Monotonic:
      case Acquire:
        return Acquire;
      case Release:
      case AcquireRelease:
        return AcquireRelease;
      case SequentiallyConsistent:
        return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }
  // ------------------- Visitors.

  /// \brief Instrument LoadInst
  ///
  /// Loads the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the load address is fully defined.
  void visitLoadInst(LoadInst &I) {
    assert(I.getType()->isSized() && "Load type must have size");
    IRBuilder<> IRB(I.getNextNode());
    Type *ShadowTy = getShadowTy(&I);
    Value *Addr = I.getPointerOperand();
    if (LoadShadow) {
      Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
      setShadow(&I,
                IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertShadowCheck(I.getPointerOperand(), &I);

    if (I.isAtomic())
      I.setOrdering(addAcquireOrdering(I.getOrdering()));

    if (MS.TrackOrigins) {
      if (LoadShadow) {
        unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
        setOrigin(&I,
                  IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB), Alignment));
      } else {
        setOrigin(&I, getCleanOrigin());
      }
    }
  }

  /// \brief Instrument StoreInst
  ///
  /// Stores the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the store address is fully defined.
  void visitStoreInst(StoreInst &I) {
    StoreList.push_back(&I);
  }
  void handleCASOrRMW(Instruction &I) {
    assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));

    IRBuilder<> IRB(&I);
    Value *Addr = I.getOperand(0);
    Value *ShadowPtr = getShadowPtr(Addr, I.getType(), IRB);

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    // Only test the conditional argument of cmpxchg instruction.
    // The other argument can potentially be uninitialized, but we can not
    // detect this situation reliably without possible false positives.
    if (isa<AtomicCmpXchgInst>(I))
      insertShadowCheck(I.getOperand(1), &I);

    IRB.CreateStore(getCleanShadow(&I), ShadowPtr);

    setShadow(&I, getCleanShadow(&I));
  }

  void visitAtomicRMWInst(AtomicRMWInst &I) {
    handleCASOrRMW(I);
    I.setOrdering(addReleaseOrdering(I.getOrdering()));
  }

  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
    handleCASOrRMW(I);
    I.setOrdering(addReleaseOrdering(I.getOrdering()));
  }
  // Vector manipulation.
  void visitExtractElementInst(ExtractElementInst &I) {
    insertShadowCheck(I.getOperand(1), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
              "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitInsertElementInst(InsertElementInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
              I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  void visitShuffleVectorInst(ShuffleVectorInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
              I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }
  // Casts.
  void visitSExtInst(SExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitZExtInst(ZExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitTruncInst(TruncInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitBitCastInst(BitCastInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
              "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitIntToPtrInst(IntToPtrInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
              "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }
  /// \brief Propagate shadow for bitwise AND.
  ///
  /// This code is exact, i.e. if, for example, a bit in the left argument
  /// is defined and 0, then neither the value nor the definedness of the
  /// corresponding bit in B affects the resulting shadow.
  void visitAnd(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    //  "And" of 0 and a poisoned value results in unpoisoned value.
    //  1&1 => 1;  0&1 => 0;  p&1 => p;
    //  1&0 => 0;  0&0 => 0;  p&0 => 0;
    //  1&p => p;  0&p => 0;  p&p => p;
    //  S = (S1 & S2) | (V1 & S2) | (S1 & V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }
  void visitOr(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    //  "Or" of 1 and a poisoned value results in unpoisoned value.
    //  1|1 => 1;  0|1 => 1;  p|1 => 1;
    //  1|0 => 1;  0|0 => 0;  p|0 => p;
    //  1|p => 1;  0|p => p;  p|p => p;
    //  S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = IRB.CreateNot(I.getOperand(0));
    Value *V2 = IRB.CreateNot(I.getOperand(1));
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }
  /// \brief Default propagation of shadow and/or origin.
  ///
  /// This class implements the general case of shadow propagation, used in all
  /// cases where we don't know and/or don't care about what the operation
  /// actually does. It converts all input shadow values to a common type
  /// (extending or truncating as necessary), and bitwise OR's them.
  ///
  /// This is much cheaper than inserting checks (i.e. requiring inputs to be
  /// fully initialized), and less prone to false positives.
  ///
  /// This class also implements the general case of origin propagation. For a
  /// Nary operation, result origin is set to the origin of an argument that is
  /// not entirely initialized. If there is more than one such argument, the
  /// rightmost of them is picked. It does not matter which one is picked if
  /// all arguments are initialized.
  template <bool CombineShadow>
  class Combiner {
    Value *Shadow;
    Value *Origin;
    IRBuilder<> &IRB;
    MemorySanitizerVisitor *MSV;

  public:
    Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
      Shadow(0), Origin(0), IRB(IRB), MSV(MSV) {}

    /// \brief Add a pair of shadow and origin values to the mix.
    Combiner &Add(Value *OpShadow, Value *OpOrigin) {
      if (CombineShadow) {
        assert(OpShadow);
        if (!Shadow)
          Shadow = OpShadow;
        else {
          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
        }
      }

      if (MSV->MS.TrackOrigins) {
        assert(OpOrigin);
        if (!Origin) {
          Origin = OpOrigin;
        } else {
          Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
          Value *Cond = IRB.CreateICmpNE(FlatShadow,
                                         MSV->getCleanShadow(FlatShadow));
          Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
        }
      }
      return *this;
    }

    /// \brief Add an application value to the mix.
    Combiner &Add(Value *V) {
      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : 0;
      return Add(OpShadow, OpOrigin);
    }

    /// \brief Set the current combined values as the given instruction's
    /// shadow and origin.
    void Done(Instruction *I) {
      if (CombineShadow) {
        assert(Shadow);
        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);
      }
      if (MSV->MS.TrackOrigins) {
        assert(Origin);
        MSV->setOrigin(I, Origin);
      }
    }
  };

  typedef Combiner<true> ShadowAndOriginCombiner;
  typedef Combiner<false> OriginCombiner;
  /// \brief Propagate origin for arbitrary operation.
  void setOriginForNaryOp(Instruction &I) {
    if (!MS.TrackOrigins) return;
    IRBuilder<> IRB(&I);
    OriginCombiner OC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      OC.Add(OI->get());
    OC.Done(&I);
  }

  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
    assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
           "Vector of pointers is not a valid shadow type");
    return Ty->isVectorTy() ?
      Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
      Ty->getPrimitiveSizeInBits();
  }
  /// \brief Cast between two shadow types, extending or truncating as
  /// necessary.
  Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
                          bool Signed = false) {
    Type *srcTy = V->getType();
    if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
      return IRB.CreateIntCast(V, dstTy, Signed);
    if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
        dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
      return IRB.CreateIntCast(V, dstTy, Signed);
    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
    Value *V2 =
      IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
    return IRB.CreateBitCast(V2, dstTy);
    // TODO: handle struct types.
  }
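  // Illustration (not from the original source): casting a <4 x i32> shadow
  // to an i64 shadow first bitcasts to i128 (the matching bit width), then
  // truncates to i64; same-rank int<->int and vector<->vector casts take the
  // early CreateIntCast paths above.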
  /// \brief Propagate shadow for arbitrary operation.
  void handleShadowOr(Instruction &I) {
    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      SC.Add(OI->get());
    SC.Done(&I);
  }

  void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
  void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitXor(BinaryOperator &I) { handleShadowOr(I); }
  void visitMul(BinaryOperator &I) { handleShadowOr(I); }

  void handleDiv(Instruction &I) {
    IRBuilder<> IRB(&I);
    // Strict on the second argument.
    insertShadowCheck(I.getOperand(1), &I);
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitUDiv(BinaryOperator &I) { handleDiv(I); }
  void visitSDiv(BinaryOperator &I) { handleDiv(I); }
  void visitFDiv(BinaryOperator &I) { handleDiv(I); }
  void visitURem(BinaryOperator &I) { handleDiv(I); }
  void visitSRem(BinaryOperator &I) { handleDiv(I); }
  void visitFRem(BinaryOperator &I) { handleDiv(I); }
  /// \brief Instrument == and != comparisons.
  ///
  /// Sometimes the comparison result is known even if some of the bits of the
  /// arguments are not.
  void handleEqualityComparison(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers.
    // For ints (and vectors of ints), types of A and Sa match,
    // and this is a no-op.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // A == B  <==>  (C = A^B) == 0
    // A != B  <==>  (C = A^B) != 0
    // Sc = Sa | Sb
    Value *C = IRB.CreateXor(A, B);
    Value *Sc = IRB.CreateOr(Sa, Sb);
    // Now dealing with i = (C == 0) comparison (or C != 0, does not matter now)
    // Result is defined if one of the following is true
    // * there is a defined 1 bit in C
    // * C is fully defined
    // Si = !(C & ~Sc) && Sc
    Value *Zero = Constant::getNullValue(Sc->getType());
    Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
    Value *Si =
      IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
                    IRB.CreateICmpEQ(
                      IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
    Si->setName("_msprop_icmp");
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }
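  // Worked example (illustration only, 4-bit values): A = 0b1010 with bit 1
  // poisoned (Sa = 0b0010), B = 0b0000 (Sb = 0). Then C = A^B has a defined
  // 1 in bit 3, so C & ~Sc != 0 and Si = 0: A != B holds no matter what the
  // poisoned bit is, and the comparison result is fully defined.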
  /// \brief Build the lowest possible value of V, taking into account V's
  /// uninitialized bits.
  Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
                                bool isSigned) {
    if (isSigned) {
      // Split shadow into sign bit and other bits.
      Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
      Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Maximise the undefined shadow bit, minimize other undefined bits.
      return
        IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit);
    } else {
      // Minimize undefined bits.
      return IRB.CreateAnd(A, IRB.CreateNot(Sa));
    }
  }

  /// \brief Build the highest possible value of V, taking into account V's
  /// uninitialized bits.
  Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
                                 bool isSigned) {
    if (isSigned) {
      // Split shadow into sign bit and other bits.
      Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
      Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Minimise the undefined shadow bit, maximise other undefined bits.
      return
        IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits);
    } else {
      // Maximize undefined bits.
      return IRB.CreateOr(A, Sa);
    }
  }
  /// \brief Instrument relational comparisons.
  ///
  /// This function does exact shadow propagation for all relational
  /// comparisons of integers, pointers and vectors of those.
  /// FIXME: output seems suboptimal when one of the operands is a constant
  void handleRelationalComparisonExact(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers.
    // For ints (and vectors of ints), types of A and Sa match,
    // and this is a no-op.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // Let [a0, a1] be the interval of possible values of A, taking into
    // account its undefined bits. Let [b0, b1] be the interval of possible
    // values of B. Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
    bool IsSigned = I.isSigned();
    Value *S1 = IRB.CreateICmp(I.getPredicate(),
                               getLowestPossibleValue(IRB, A, Sa, IsSigned),
                               getHighestPossibleValue(IRB, B, Sb, IsSigned));
    Value *S2 = IRB.CreateICmp(I.getPredicate(),
                               getHighestPossibleValue(IRB, A, Sa, IsSigned),
                               getLowestPossibleValue(IRB, B, Sb, IsSigned));
    Value *Si = IRB.CreateXor(S1, S2);
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }
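  // Worked example (illustration only, unsigned 4-bit): A = 0b10?? with the
  // two low bits poisoned (Sa = 0b0011) spans [0b1000, 0b1011]; for constant
  // B = 0b0100, both interval ends satisfy A > B, so S1 == S2 and the result
  // of (A > B) is fully defined (shadow 0).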
  /// \brief Instrument signed relational comparisons.
  ///
  /// Handle (x<0) and (x>=0) comparisons (essentially, sign bit tests) by
  /// propagating the highest bit of the shadow. Everything else is delegated
  /// to handleShadowOr().
  void handleSignedRelationalComparison(ICmpInst &I) {
    Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
    Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
    Value *op = NULL;
    CmpInst::Predicate pre = I.getPredicate();
    if (constOp0 && constOp0->isNullValue() &&
        (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE)) {
      op = I.getOperand(1);
    } else if (constOp1 && constOp1->isNullValue() &&
               (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) {
      op = I.getOperand(0);
    }
    if (op) {
      IRBuilder<> IRB(&I);
      Value *Shadow =
        IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op), "_msprop_icmpslt");
      setShadow(&I, Shadow);
      setOrigin(&I, getOrigin(op));
    } else {
      handleShadowOr(I);
    }
  }

  void visitICmpInst(ICmpInst &I) {
    if (!ClHandleICmp) {
      handleShadowOr(I);
      return;
    }
    if (I.isEquality()) {
      handleEqualityComparison(I);
      return;
    }

    assert(I.isRelational());
    if (ClHandleICmpExact) {
      handleRelationalComparisonExact(I);
      return;
    }
    if (I.isSigned()) {
      handleSignedRelationalComparison(I);
      return;
    }

    assert(I.isUnsigned());
    if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
      handleRelationalComparisonExact(I);
      return;
    }

    handleShadowOr(I);
  }

  void visitFCmpInst(FCmpInst &I) {
    handleShadowOr(I);
  }
  void handleShift(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
                                   S2->getType());
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }
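  // E.g. (illustration): for "x << n" with n poisoned, S2Conv is all-ones and
  // the whole result is poisoned regardless of shadow(x); with n fully
  // defined, the result shadow is just shadow(x) << n.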
  void visitShl(BinaryOperator &I) { handleShift(I); }
  void visitAShr(BinaryOperator &I) { handleShift(I); }
  void visitLShr(BinaryOperator &I) { handleShift(I); }
  /// \brief Instrument llvm.memmove
  ///
  /// At this point we don't know if llvm.memmove will be inlined or not.
  /// If we don't instrument it and it gets inlined,
  /// our interceptor will not kick in and we will lose the memmove.
  /// If we instrument the call here, but it does not get inlined,
  /// we will memmove the shadow twice: which is bad in case
  /// of overlapping regions. So, we simply lower the intrinsic to a call.
  ///
  /// Similar situation exists for memcpy and memset.
  void visitMemMoveInst(MemMoveInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemmoveFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }
  // Similar to memmove: avoid copying shadow twice.
  // This is somewhat unfortunate as it may slow down small constant memcpys.
  // FIXME: consider doing manual inline for small constant sizes and proper
  // alignment.
  void visitMemCpyInst(MemCpyInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemcpyFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }
  void visitMemSetInst(MemSetInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemsetFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  void visitVAStartInst(VAStartInst &I) {
    VAHelper->visitVAStartInst(I);
  }

  void visitVACopyInst(VACopyInst &I) {
    VAHelper->visitVACopyInst(I);
  }
  enum IntrinsicKind {
    IK_DoesNotAccessMemory,
    IK_OnlyReadsMemory,
    IK_WritesMemory
  };

  static IntrinsicKind getIntrinsicKind(Intrinsic::ID iid) {
    const int DoesNotAccessMemory = IK_DoesNotAccessMemory;
    const int OnlyReadsArgumentPointees = IK_OnlyReadsMemory;
    const int OnlyReadsMemory = IK_OnlyReadsMemory;
    const int OnlyAccessesArgumentPointees = IK_WritesMemory;
    const int UnknownModRefBehavior = IK_WritesMemory;
#define GET_INTRINSIC_MODREF_BEHAVIOR
#define ModRefBehavior IntrinsicKind
#include "llvm/IR/Intrinsics.gen"
#undef ModRefBehavior
#undef GET_INTRINSIC_MODREF_BEHAVIOR
  }
1616 /// \brief Handle vector store-like intrinsics.
1618 /// Instrument intrinsics that look like a simple SIMD store: writes memory,
1619 /// has 1 pointer argument and 1 vector argument, returns void.
  bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);
    Value *Shadow = getShadow(&I, 1);
    Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

    // We don't know the pointer alignment (could be unaligned SSE store!).
    // Have to assume the worst case.
    IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    // FIXME: use ClStoreCleanOrigin
    // FIXME: factor out common code from materializeStores
    if (MS.TrackOrigins)
      IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB));
    return true;
  }
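
  // For illustration (a sketch; the intrinsic is just an example of the
  // store-like shape this matches):
  //   call void @llvm.x86.sse.storeu.ps(i8* %p, <4 x float> %v)
  // gets an extra store of %v's shadow to the shadow of %p, alignment 1.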

  /// \brief Handle vector load-like intrinsics.
  ///
  /// Instrument intrinsics that look like a simple SIMD load: reads memory,
  /// has 1 pointer argument, returns a vector.
  bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);

    Type *ShadowTy = getShadowTy(&I);
    if (LoadShadow) {
      Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
      // We don't know the pointer alignment (could be unaligned SSE load!).
      // Have to assume the worst case.
      setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, 1, "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    if (MS.TrackOrigins) {
      if (LoadShadow)
        setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB)));
      else
        setOrigin(&I, getCleanOrigin());
    }
    return true;
  }
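
  // This is the mirror image of the store case above: the result shadow is
  // loaded from the shadow of *Addr with alignment 1, and (if tracked) the
  // origin is loaded from the corresponding origin slot.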

  /// \brief Handle (SIMD arithmetic)-like intrinsics.
  ///
  /// Instrument intrinsics with any number of arguments of the same type,
  /// equal to the return type. The type should be simple (no aggregates or
  /// pointers; vectors are fine).
  /// Caller guarantees that this intrinsic does not access memory.
  bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
    Type *RetTy = I.getType();
    if (!(RetTy->isIntOrIntVectorTy() ||
          RetTy->isFPOrFPVectorTy() ||
          RetTy->isX86_MMXTy()))
      return false;

    unsigned NumArgOperands = I.getNumArgOperands();

    for (unsigned i = 0; i < NumArgOperands; ++i) {
      Type *Ty = I.getArgOperand(i)->getType();
      if (Ty != RetTy)
        return false;
    }

    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (unsigned i = 0; i < NumArgOperands; ++i)
      SC.Add(I.getArgOperand(i));
    SC.Done(&I);

    return true;
  }
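
  // E.g. a two-argument SIMD arithmetic intrinsic reduces to plain
  // OR-propagation (a sketch, illustrative names):
  //   %c = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b)
  //   ; shadow: %Sc = or <8 x i16> %Sa, %Sb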

  /// \brief Heuristically instrument unknown intrinsics.
  ///
  /// The main purpose of this code is to do something reasonable with all
  /// random intrinsics we might encounter, most importantly SIMD intrinsics.
  /// We recognize several classes of intrinsics by their argument types and
  /// ModRefBehavior and apply special instrumentation when we are reasonably
  /// sure that we know what the intrinsic does.
  ///
  /// We special-case intrinsics where this approach fails. See llvm.bswap
  /// handling as an example of that.
  bool handleUnknownIntrinsic(IntrinsicInst &I) {
    unsigned NumArgOperands = I.getNumArgOperands();
    if (NumArgOperands == 0)
      return false;

    Intrinsic::ID iid = I.getIntrinsicID();
    IntrinsicKind IK = getIntrinsicKind(iid);
    bool OnlyReadsMemory = IK == IK_OnlyReadsMemory;
    bool WritesMemory = IK == IK_WritesMemory;
    assert(!(OnlyReadsMemory && WritesMemory));

    if (NumArgOperands == 2 &&
        I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getArgOperand(1)->getType()->isVectorTy() &&
        I.getType()->isVoidTy() &&
        WritesMemory) {
      // This looks like a vector store.
      return handleVectorStoreIntrinsic(I);
    }

    if (NumArgOperands == 1 &&
        I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getType()->isVectorTy() &&
        OnlyReadsMemory) {
      // This looks like a vector load.
      return handleVectorLoadIntrinsic(I);
    }

    if (!OnlyReadsMemory && !WritesMemory)
      if (maybeHandleSimpleNomemIntrinsic(I))
        return true;

    // FIXME: detect and handle SSE maskstore/maskload
    return false;
  }

  void handleBswap(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Op = I.getArgOperand(0);
    Type *OpType = Op->getType();
    Function *BswapFunc = Intrinsic::getDeclaration(
        F.getParent(), Intrinsic::bswap, ArrayRef<Type*>(&OpType, 1));
    setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
    setOrigin(&I, getOrigin(Op));
  }
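
  // bswap permutes bytes, so the shadow must be permuted the same way
  // (a sketch):
  //   %b = call i32 @llvm.bswap.i32(i32 %a)
  //   ; shadow: %Sb = call i32 @llvm.bswap.i32(i32 %Sa)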

  // \brief Instrument vector convert intrinsic.
  //
  // This function instruments intrinsics like cvtsi2ss:
  // %Out = int_xxx_cvtyyy(%ConvertOp)
  // or
  // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
  // Intrinsic converts \p NumUsedElements elements of \p ConvertOp to the same
  // number of \p Out elements, and (if it has 2 arguments) copies the rest of
  // the elements from \p CopyOp.
  // In most cases the conversion involves a floating-point value, which may
  // trigger a hardware exception when not fully initialized. For this reason
  // we require \p ConvertOp[0:NumUsedElements] to be fully initialized and
  // report an error otherwise.
  // We copy the shadow of \p CopyOp[NumUsedElements:] to \p
  // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp always
  // return a fully initialized value.
  void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements) {
    IRBuilder<> IRB(&I);
    Value *CopyOp, *ConvertOp;

    switch (I.getNumArgOperands()) {
    case 2:
      CopyOp = I.getArgOperand(0);
      ConvertOp = I.getArgOperand(1);
      break;
    case 1:
      ConvertOp = I.getArgOperand(0);
      CopyOp = NULL;
      break;
    default:
      llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
    }

    // The first *NumUsedElements* elements of ConvertOp are converted to the
    // same number of output elements. The rest of the output is copied from
    // CopyOp, or (if not available) filled with zeroes.
    // Combine shadow for elements of ConvertOp that are used in this operation,
    // and insert a check.
    // FIXME: consider propagating shadow of ConvertOp, at least in the case of
    // int->any conversion.
    Value *ConvertShadow = getShadow(ConvertOp);
    Value *AggShadow = 0;
    if (ConvertOp->getType()->isVectorTy()) {
      AggShadow = IRB.CreateExtractElement(
          ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
      for (int i = 1; i < NumUsedElements; ++i) {
        Value *MoreShadow = IRB.CreateExtractElement(
            ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
        AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
      }
    } else {
      AggShadow = ConvertShadow;
    }
    assert(AggShadow->getType()->isIntegerTy());
    insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);

    // Build result shadow by zero-filling parts of CopyOp shadow that come from
    // ConvertOp.
    if (CopyOp) {
      assert(CopyOp->getType() == I.getType());
      assert(CopyOp->getType()->isVectorTy());
      Value *ResultShadow = getShadow(CopyOp);
      Type *EltTy = ResultShadow->getType()->getVectorElementType();
      for (int i = 0; i < NumUsedElements; ++i) {
        ResultShadow = IRB.CreateInsertElement(
            ResultShadow, ConstantInt::getNullValue(EltTy),
            ConstantInt::get(IRB.getInt32Ty(), i));
      }
      setShadow(&I, ResultShadow);
      setOrigin(&I, getOrigin(CopyOp));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }
  }
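
  // Example (a sketch, illustrative values): for
  //   %r = call <4 x float> @llvm.x86.sse.cvtsi2ss(<4 x float> %a, i32 %n)
  // element 0 of %r comes from converting %n, so %n's shadow is checked and
  // must be clean; elements 1..3 inherit the corresponding shadow of %a.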

  // Given a scalar or vector, extract the lower 64 bits (or less), and return
  // all zeroes if they are zero, and all ones otherwise.
  Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
    if (S->getType()->isVectorTy())
      S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
    assert(S->getType()->getPrimitiveSizeInBits() <= 64);
    Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
    return CreateShadowCast(IRB, S2, T, /* Signed */ true);
  }

  Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
    Type *T = S->getType();
    assert(T->isVectorTy());
    Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
    return IRB.CreateSExt(S2, T);
  }

  // \brief Instrument vector shift intrinsic.
  //
  // This function instruments intrinsics like int_x86_avx2_psll_w.
  // Intrinsic shifts %In by %ShiftSize bits.
  // %ShiftSize may be a vector. In that case the lower 64 bits determine shift
  // size, and the rest is ignored. Behavior is defined even if shift size is
  // greater than register (or field) width.
  void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
    assert(I.getNumArgOperands() == 2);
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
                             : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateCall2(I.getCalledValue(),
                                   IRB.CreateBitCast(S1, V1->getType()), V2);
    Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }
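
  // Note the trick: the first operand's shadow is shifted by re-invoking the
  // original shift intrinsic on it (a sketch, illustrative names):
  //   %r = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %a, <8 x i16> %n)
  //   ; shadow: %t  = call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> %Sa,
  //   ;                                                    <8 x i16> %n)
  //   ;         %Sr = or %t, (all ones if the low 64 bits of %Sn are nonzero)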

  void visitIntrinsicInst(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case llvm::Intrinsic::bswap:
      handleBswap(I);
      break;
    case llvm::Intrinsic::x86_avx512_cvtsd2usi64:
    case llvm::Intrinsic::x86_avx512_cvtsd2usi:
    case llvm::Intrinsic::x86_avx512_cvtss2usi64:
    case llvm::Intrinsic::x86_avx512_cvtss2usi:
    case llvm::Intrinsic::x86_avx512_cvttss2usi64:
    case llvm::Intrinsic::x86_avx512_cvttss2usi:
    case llvm::Intrinsic::x86_avx512_cvttsd2usi64:
    case llvm::Intrinsic::x86_avx512_cvttsd2usi:
    case llvm::Intrinsic::x86_avx512_cvtusi2sd:
    case llvm::Intrinsic::x86_avx512_cvtusi2ss:
    case llvm::Intrinsic::x86_avx512_cvtusi642sd:
    case llvm::Intrinsic::x86_avx512_cvtusi642ss:
    case llvm::Intrinsic::x86_sse2_cvtsd2si64:
    case llvm::Intrinsic::x86_sse2_cvtsd2si:
    case llvm::Intrinsic::x86_sse2_cvtsd2ss:
    case llvm::Intrinsic::x86_sse2_cvtsi2sd:
    case llvm::Intrinsic::x86_sse2_cvtsi642sd:
    case llvm::Intrinsic::x86_sse2_cvtss2sd:
    case llvm::Intrinsic::x86_sse2_cvttsd2si64:
    case llvm::Intrinsic::x86_sse2_cvttsd2si:
    case llvm::Intrinsic::x86_sse_cvtsi2ss:
    case llvm::Intrinsic::x86_sse_cvtsi642ss:
    case llvm::Intrinsic::x86_sse_cvtss2si64:
    case llvm::Intrinsic::x86_sse_cvtss2si:
    case llvm::Intrinsic::x86_sse_cvttss2si64:
    case llvm::Intrinsic::x86_sse_cvttss2si:
      handleVectorConvertIntrinsic(I, 1);
      break;
    case llvm::Intrinsic::x86_sse2_cvtdq2pd:
    case llvm::Intrinsic::x86_sse2_cvtps2pd:
    case llvm::Intrinsic::x86_sse_cvtps2pi:
    case llvm::Intrinsic::x86_sse_cvttps2pi:
      handleVectorConvertIntrinsic(I, 2);
      break;
    case llvm::Intrinsic::x86_avx512_psll_dq:
    case llvm::Intrinsic::x86_avx512_psrl_dq:
    case llvm::Intrinsic::x86_avx2_psll_w:
    case llvm::Intrinsic::x86_avx2_psll_d:
    case llvm::Intrinsic::x86_avx2_psll_q:
    case llvm::Intrinsic::x86_avx2_pslli_w:
    case llvm::Intrinsic::x86_avx2_pslli_d:
    case llvm::Intrinsic::x86_avx2_pslli_q:
    case llvm::Intrinsic::x86_avx2_psll_dq:
    case llvm::Intrinsic::x86_avx2_psrl_w:
    case llvm::Intrinsic::x86_avx2_psrl_d:
    case llvm::Intrinsic::x86_avx2_psrl_q:
    case llvm::Intrinsic::x86_avx2_psra_w:
    case llvm::Intrinsic::x86_avx2_psra_d:
    case llvm::Intrinsic::x86_avx2_psrli_w:
    case llvm::Intrinsic::x86_avx2_psrli_d:
    case llvm::Intrinsic::x86_avx2_psrli_q:
    case llvm::Intrinsic::x86_avx2_psrai_w:
    case llvm::Intrinsic::x86_avx2_psrai_d:
    case llvm::Intrinsic::x86_avx2_psrl_dq:
    case llvm::Intrinsic::x86_sse2_psll_w:
    case llvm::Intrinsic::x86_sse2_psll_d:
    case llvm::Intrinsic::x86_sse2_psll_q:
    case llvm::Intrinsic::x86_sse2_pslli_w:
    case llvm::Intrinsic::x86_sse2_pslli_d:
    case llvm::Intrinsic::x86_sse2_pslli_q:
    case llvm::Intrinsic::x86_sse2_psll_dq:
    case llvm::Intrinsic::x86_sse2_psrl_w:
    case llvm::Intrinsic::x86_sse2_psrl_d:
    case llvm::Intrinsic::x86_sse2_psrl_q:
    case llvm::Intrinsic::x86_sse2_psra_w:
    case llvm::Intrinsic::x86_sse2_psra_d:
    case llvm::Intrinsic::x86_sse2_psrli_w:
    case llvm::Intrinsic::x86_sse2_psrli_d:
    case llvm::Intrinsic::x86_sse2_psrli_q:
    case llvm::Intrinsic::x86_sse2_psrai_w:
    case llvm::Intrinsic::x86_sse2_psrai_d:
    case llvm::Intrinsic::x86_sse2_psrl_dq:
    case llvm::Intrinsic::x86_mmx_psll_w:
    case llvm::Intrinsic::x86_mmx_psll_d:
    case llvm::Intrinsic::x86_mmx_psll_q:
    case llvm::Intrinsic::x86_mmx_pslli_w:
    case llvm::Intrinsic::x86_mmx_pslli_d:
    case llvm::Intrinsic::x86_mmx_pslli_q:
    case llvm::Intrinsic::x86_mmx_psrl_w:
    case llvm::Intrinsic::x86_mmx_psrl_d:
    case llvm::Intrinsic::x86_mmx_psrl_q:
    case llvm::Intrinsic::x86_mmx_psra_w:
    case llvm::Intrinsic::x86_mmx_psra_d:
    case llvm::Intrinsic::x86_mmx_psrli_w:
    case llvm::Intrinsic::x86_mmx_psrli_d:
    case llvm::Intrinsic::x86_mmx_psrli_q:
    case llvm::Intrinsic::x86_mmx_psrai_w:
    case llvm::Intrinsic::x86_mmx_psrai_d:
      handleVectorShiftIntrinsic(I, /* Variable */ false);
      break;
    case llvm::Intrinsic::x86_avx2_psllv_d:
    case llvm::Intrinsic::x86_avx2_psllv_d_256:
    case llvm::Intrinsic::x86_avx2_psllv_q:
    case llvm::Intrinsic::x86_avx2_psllv_q_256:
    case llvm::Intrinsic::x86_avx2_psrlv_d:
    case llvm::Intrinsic::x86_avx2_psrlv_d_256:
    case llvm::Intrinsic::x86_avx2_psrlv_q:
    case llvm::Intrinsic::x86_avx2_psrlv_q_256:
    case llvm::Intrinsic::x86_avx2_psrav_d:
    case llvm::Intrinsic::x86_avx2_psrav_d_256:
      handleVectorShiftIntrinsic(I, /* Variable */ true);
      break;

    // Byte shifts are not implemented.
    // case llvm::Intrinsic::x86_avx512_psll_dq_bs:
    // case llvm::Intrinsic::x86_avx512_psrl_dq_bs:
    // case llvm::Intrinsic::x86_avx2_psll_dq_bs:
    // case llvm::Intrinsic::x86_avx2_psrl_dq_bs:
    // case llvm::Intrinsic::x86_sse2_psll_dq_bs:
    // case llvm::Intrinsic::x86_sse2_psrl_dq_bs:

    default:
      if (!handleUnknownIntrinsic(I))
        visitInstruction(I);
      break;
    }
  }

  void visitCallSite(CallSite CS) {
    Instruction &I = *CS.getInstruction();
    assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite");
    if (CS.isCall()) {
      CallInst *Call = cast<CallInst>(&I);

      // For inline asm, do the usual thing: check argument shadow and mark all
      // outputs as clean. Note that any side effects of the inline asm that
      // are not immediately visible in its constraints are not handled.
      if (Call->isInlineAsm()) {
        visitInstruction(I);
        return;
      }

      // Allow only tail calls with the same types, otherwise
      // we may have a false positive: shadow for a non-void RetVal
      // will get propagated to a void RetVal.
      if (Call->isTailCall() && Call->getType() != Call->getParent()->getType())
        Call->setTailCall(false);

      assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");

      // We are going to insert code that relies on the fact that the callee
      // will become a non-readonly function after it is instrumented by us. To
      // prevent this code from being optimized out, mark that function
      // non-readonly in advance.
      if (Function *Func = Call->getCalledFunction()) {
        // Clear out readonly/readnone attributes.
        AttrBuilder B;
        B.addAttribute(Attribute::ReadOnly)
          .addAttribute(Attribute::ReadNone);
        Func->removeAttributes(AttributeSet::FunctionIndex,
                               AttributeSet::get(Func->getContext(),
                                                 AttributeSet::FunctionIndex,
                                                 B));
      }
    }
    IRBuilder<> IRB(&I);

    if (MS.WrapIndirectCalls && !CS.getCalledFunction())
      IndirectCallList.push_back(CS);

    unsigned ArgOffset = 0;
    DEBUG(dbgs() << "  CallSite: " << I << "\n");
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      unsigned i = ArgIt - CS.arg_begin();
      if (!A->getType()->isSized()) {
        DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
        continue;
      }
      unsigned Size = 0;
      Value *Store = 0;
      // Compute the Shadow for arg even if it is ByVal, because
      // in that case getShadow() will copy the actual arg shadow to
      // __msan_param_tls.
      Value *ArgShadow = getShadow(A);
      Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
      DEBUG(dbgs() << "  Arg#" << i << ": " << *A <<
            " Shadow: " << *ArgShadow << "\n");
      if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
        assert(A->getType()->isPointerTy() &&
               "ByVal argument is not a pointer!");
        Size = MS.DL->getTypeAllocSize(A->getType()->getPointerElementType());
        unsigned Alignment = CS.getParamAlignment(i + 1);
        Store = IRB.CreateMemCpy(ArgShadowBase,
                                 getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
                                 Size, Alignment);
      } else {
        Size = MS.DL->getTypeAllocSize(A->getType());
        Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
                                       kShadowTLSAlignment);
      }
      if (MS.TrackOrigins)
        IRB.CreateStore(getOrigin(A),
                        getOriginPtrForArgument(A, IRB, ArgOffset));
      (void)Store;
      assert(Size != 0 && Store != 0);
      DEBUG(dbgs() << "  Param:" << *Store << "\n");
      ArgOffset += DataLayout::RoundUpAlignment(Size, 8);
    }
    DEBUG(dbgs() << "  done with call args\n");
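
    // Contract (a sketch): each argument's shadow was written above to
    // __msan_param_tls at an 8-byte-aligned offset, and the instrumented
    // callee reads its parameter shadow back from the same offsets.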

    FunctionType *FT =
      cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0));
    if (FT->isVarArg()) {
      VAHelper->visitCallSite(CS, IRB);
    }

    // Now, get the shadow for the RetVal.
    if (!I.getType()->isSized()) return;
    IRBuilder<> IRBBefore(&I);
    // Until we have full dynamic coverage, make sure the retval shadow is 0.
    Value *Base = getShadowPtrForRetval(&I, IRBBefore);
    IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
    Instruction *NextInsn = 0;
    if (CS.isCall()) {
      NextInsn = I.getNextNode();
    } else {
      BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
      if (!NormalDest->getSinglePredecessor()) {
        // FIXME: this case is tricky, so we are just conservative here.
        // Perhaps we need to split the edge between this BB and NormalDest,
        // but a naive attempt to use SplitEdge leads to a crash.
        setShadow(&I, getCleanShadow(&I));
        setOrigin(&I, getCleanOrigin());
        return;
      }
      NextInsn = NormalDest->getFirstInsertionPt();
      assert(NextInsn &&
             "Could not find insertion point for retval shadow load");
    }
    IRBuilder<> IRBAfter(NextInsn);
    Value *RetvalShadow =
      IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
                                 kShadowTLSAlignment, "_msret");
    setShadow(&I, RetvalShadow);
    if (MS.TrackOrigins)
      setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
  }

  void visitReturnInst(ReturnInst &I) {
    IRBuilder<> IRB(&I);
    Value *RetVal = I.getReturnValue();
    if (!RetVal) return;
    Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
    if (CheckReturnValue) {
      insertShadowCheck(RetVal, &I);
      Value *Shadow = getCleanShadow(RetVal);
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
    } else {
      Value *Shadow = getShadow(RetVal);
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
      // FIXME: make it conditional if ClStoreCleanOrigin==0
      if (MS.TrackOrigins)
        IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
    }
  }
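
  // Return values travel through __msan_retval_tls: the callee stores the
  // return value's shadow there (above), and visitCallSite in the caller
  // loads it back right after the call instruction.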

  void visitPHINode(PHINode &I) {
    IRBuilder<> IRB(&I);
    ShadowPHINodes.push_back(&I);
    setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
                                "_msphi_s"));
    if (MS.TrackOrigins)
      setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
                                  "_msphi_o"));
  }

  void visitAllocaInst(AllocaInst &I) {
    setShadow(&I, getCleanShadow(&I));
    IRBuilder<> IRB(I.getNextNode());
    uint64_t Size = MS.DL->getTypeAllocSize(I.getAllocatedType());
    if (PoisonStack && ClPoisonStackWithCall) {
      IRB.CreateCall2(MS.MsanPoisonStackFn,
                      IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
                      ConstantInt::get(MS.IntptrTy, Size));
    } else {
      Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB);
      Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
      IRB.CreateMemSet(ShadowBase, PoisonValue, Size, I.getAlignment());
    }

    if (PoisonStack && MS.TrackOrigins) {
      setOrigin(&I, getCleanOrigin());
      SmallString<2048> StackDescriptionStorage;
      raw_svector_ostream StackDescription(StackDescriptionStorage);
      // We create a string with a description of the stack allocation and
      // pass it into __msan_set_alloca_origin.
      // It will be printed by the run-time if stack-originated UMR is found.
      // The first 4 bytes of the string are set to '----' and will be replaced
      // by the stack origin ID the first time the run-time sees this
      // allocation.
      StackDescription << "----" << I.getName() << "@" << F.getName();
      Value *Descr =
          createPrivateNonConstGlobalForString(*F.getParent(),
                                               StackDescription.str());

      IRB.CreateCall4(MS.MsanSetAllocaOrigin4Fn,
                      IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
                      ConstantInt::get(MS.IntptrTy, Size),
                      IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
                      IRB.CreatePointerCast(&F, MS.IntptrTy));
    }
  }
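
  // E.g. for `int x;` in function f (a sketch): the else-branch above emits
  // roughly memset(shadowFor(&x), ClPoisonStackPattern, 4); with origin
  // tracking on, it also emits
  //   __msan_set_alloca_origin4(&x, 4, "----x@f", (uptr)&f).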

  void visitSelectInst(SelectInst& I) {
    IRBuilder<> IRB(&I);
    // a = select b, c, d
    Value *S = IRB.CreateSelect(I.getCondition(), getShadow(I.getTrueValue()),
                                getShadow(I.getFalseValue()));
    if (I.getType()->isAggregateType()) {
      // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do
      // an extra "select". This results in much more compact IR.
      // Sa = select Sb, poisoned, (select b, Sc, Sd)
      S = IRB.CreateSelect(getShadow(I.getCondition()),
                           getPoisonedShadow(getShadowTy(I.getType())), S,
                           "_msprop_select_agg");
    } else {
      // Sa = (sext Sb) | (select b, Sc, Sd)
      S = IRB.CreateOr(S, CreateShadowCast(IRB, getShadow(I.getCondition()),
                                           S->getType(), true),
                       "_msprop_select");
    }
    setShadow(&I, S);
    if (MS.TrackOrigins) {
      // Origins are always i32, so any vector conditions must be flattened.
      // FIXME: consider tracking vector origins for app vectors?
      Value *Cond = I.getCondition();
      Value *CondShadow = getShadow(Cond);
      if (Cond->getType()->isVectorTy()) {
        Type *FlatTy = getShadowTyNoVec(Cond->getType());
        Cond = IRB.CreateICmpNE(IRB.CreateBitCast(Cond, FlatTy),
                                ConstantInt::getNullValue(FlatTy));
        CondShadow = IRB.CreateICmpNE(IRB.CreateBitCast(CondShadow, FlatTy),
                                      ConstantInt::getNullValue(FlatTy));
      }
      // a = select b, c, d
      // Oa = Sb ? Ob : (b ? Oc : Od)
      setOrigin(&I, IRB.CreateSelect(
          CondShadow, getOrigin(I.getCondition()),
          IRB.CreateSelect(Cond, getOrigin(I.getTrueValue()),
                           getOrigin(I.getFalseValue()))));
    }
  }

  void visitLandingPadInst(LandingPadInst &I) {
    // Do nothing.
    // See http://code.google.com/p/memory-sanitizer/issues/detail?id=1
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }

  void visitGetElementPtrInst(GetElementPtrInst &I) {
    handleShadowOr(I);
  }

  void visitExtractValueInst(ExtractValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *Agg = I.getAggregateOperand();
    DEBUG(dbgs() << "ExtractValue: " << I << "\n");
    Value *AggShadow = getShadow(Agg);
    DEBUG(dbgs() << "   AggShadow:  " << *AggShadow << "\n");
    Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
    DEBUG(dbgs() << "   ResShadow:  " << *ResShadow << "\n");
    setShadow(&I, ResShadow);
    setOriginForNaryOp(I);
  }

  void visitInsertValueInst(InsertValueInst &I) {
    IRBuilder<> IRB(&I);
    DEBUG(dbgs() << "InsertValue: " << I << "\n");
    Value *AggShadow = getShadow(I.getAggregateOperand());
    Value *InsShadow = getShadow(I.getInsertedValueOperand());
    DEBUG(dbgs() << "   AggShadow:  " << *AggShadow << "\n");
    DEBUG(dbgs() << "   InsShadow:  " << *InsShadow << "\n");
    Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
    DEBUG(dbgs() << "   Res:        " << *Res << "\n");
    setShadow(&I, Res);
    setOriginForNaryOp(I);
  }

  void dumpInst(Instruction &I) {
    if (CallInst *CI = dyn_cast<CallInst>(&I)) {
      errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
    } else {
      errs() << "ZZZ " << I.getOpcodeName() << "\n";
    }
    errs() << "QQQ " << I << "\n";
  }

  void visitResumeInst(ResumeInst &I) {
    DEBUG(dbgs() << "Resume: " << I << "\n");
    // Nothing to do here.
  }

  void visitInstruction(Instruction &I) {
    // Everything else: stop propagating and check for poisoned shadow.
    if (ClDumpStrictInstructions)
      dumpInst(I);
    DEBUG(dbgs() << "DEFAULT: " << I << "\n");
    for (size_t i = 0, n = I.getNumOperands(); i < n; i++)
      insertShadowCheck(I.getOperand(i), &I);
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
};

/// \brief AMD64-specific implementation of VarArgHelper.
struct VarArgAMD64Helper : public VarArgHelper {
  // An unfortunate workaround for asymmetric lowering of va_arg stuff.
  // See a comment in visitCallSite for more details.
  static const unsigned AMD64GpEndOffset = 48;  // AMD64 ABI Draft 0.99.6 p3.5.7
  static const unsigned AMD64FpEndOffset = 176;

  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  Value *VAArgTLSCopy;
  Value *VAArgOverflowSize;

  SmallVector<CallInst*, 16> VAStartInstrumentationList;

  VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
                    MemorySanitizerVisitor &MSV)
    : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(0), VAArgOverflowSize(0) { }

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  ArgKind classifyArgument(Value* arg) {
    // A very rough approximation of X86_64 argument classification rules.
    Type *T = arg->getType();
    if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
      return AK_FloatingPoint;
    if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
      return AK_GeneralPurpose;
    if (T->isPointerTy())
      return AK_GeneralPurpose;
    return AK_Memory;
  }
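
  // E.g. (a sketch): i32 and i8* classify as AK_GeneralPurpose (six 8-byte
  // register slots, offsets 0..48), double as AK_FloatingPoint (eight
  // 16-byte XMM slots, offsets 48..176), and anything else (i128, large
  // structs, ...) falls through to AK_Memory, i.e. the overflow area.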

  // For VarArg functions, store the argument shadow in an ABI-specific format
  // that corresponds to va_list layout.
  // We do this because Clang lowers va_arg in the frontend, and this pass
  // only sees the low level code that deals with va_list internals.
  // A much easier alternative (provided that Clang emits va_arg instructions)
  // would have been to associate each live instance of va_list with a copy of
  // MSanParamTLS, and extract shadow on va_arg() call in the argument list
  // order.
  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
    unsigned GpOffset = 0;
    unsigned FpOffset = AMD64GpEndOffset;
    unsigned OverflowOffset = AMD64FpEndOffset;
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      ArgKind AK = classifyArgument(A);
      if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
        AK = AK_Memory;
      if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
        AK = AK_Memory;
      Value *Base;
      switch (AK) {
      case AK_GeneralPurpose:
        Base = getShadowPtrForVAArgument(A, IRB, GpOffset);
        GpOffset += 8;
        break;
      case AK_FloatingPoint:
        Base = getShadowPtrForVAArgument(A, IRB, FpOffset);
        FpOffset += 16;
        break;
      case AK_Memory:
        uint64_t ArgSize = MS.DL->getTypeAllocSize(A->getType());
        Base = getShadowPtrForVAArgument(A, IRB, OverflowOffset);
        OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8);
      }
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }
    Constant *OverflowSize =
      ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }
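
  // Worked example (a sketch): for printf("%d %f", i, d), the format pointer
  // and i are general-purpose (shadow at va_arg_tls offsets 0 and 8), d is
  // floating-point (offset 48, the first XMM slot), and the stored overflow
  // size is 0.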

  /// \brief Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(Value *A, IRBuilder<> &IRB,
                                   int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(A), 0),
                              "_msarg");
  }

  void visitVAStartInst(VAStartInst &I) override {
    IRBuilder<> IRB(&I);
    VAStartInstrumentationList.push_back(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);

    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */24, /* alignment */8, false);
  }
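
  // (The magic 24 is sizeof(__va_list_tag) on x86_64: two 32-bit offsets,
  // gp_offset and fp_offset, followed by the overflow_arg_area and
  // reg_save_area pointers.)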

  void visitVACopyInst(VACopyInst &I) override {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);

    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */24, /* alignment */8, false);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
      VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
      Value *CopySize =
        IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
                      VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
      CallInst *OrigInst = VAStartInstrumentationList[i];
      IRBuilder<> IRB(OrigInst->getNextNode());
      Value *VAListTag = OrigInst->getArgOperand(0);

      Value *RegSaveAreaPtrPtr =
        IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 16)),
          Type::getInt64PtrTy(*MS.C));
      Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr =
        MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy,
                       AMD64FpEndOffset, 16);

      Value *OverflowArgAreaPtrPtr =
        IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 8)),
          Type::getInt64PtrTy(*MS.C));
      Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
      Value *OverflowArgAreaShadowPtr =
        MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
      Value *SrcPtr = IRB.CreateConstGEP1_32(VAArgTLSCopy, AMD64FpEndOffset);
      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16);
    }
  }
};
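
// In other words (a sketch): reg_save_area lives at va_list+16 and
// overflow_arg_area at va_list+8, so the first AMD64FpEndOffset (176) bytes
// of the va_arg_tls backup become the register save area shadow, and the
// remaining VAArgOverflowSize bytes become the overflow area shadow.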

/// \brief A no-op implementation of VarArgHelper.
struct VarArgNoOpHelper : public VarArgHelper {
  VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV) {}

  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {}

  void visitVAStartInst(VAStartInst &I) override {}

  void visitVACopyInst(VACopyInst &I) override {}

  void finalizeInstrumentation() override {}
};

VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                 MemorySanitizerVisitor &Visitor) {
  // VarArg handling is only implemented on AMD64. False positives are possible
  // on other platforms.
  llvm::Triple TargetTriple(Func.getParent()->getTargetTriple());
  if (TargetTriple.getArch() == llvm::Triple::x86_64)
    return new VarArgAMD64Helper(Func, Msan, Visitor);
  else
    return new VarArgNoOpHelper(Func, Msan, Visitor);
}

}  // namespace

bool MemorySanitizer::runOnFunction(Function &F) {
  MemorySanitizerVisitor Visitor(F, *this);

  // Clear out readonly/readnone attributes.
  AttrBuilder B;
  B.addAttribute(Attribute::ReadOnly)
    .addAttribute(Attribute::ReadNone);
  F.removeAttributes(AttributeSet::FunctionIndex,
                     AttributeSet::get(F.getContext(),
                                       AttributeSet::FunctionIndex, B));

  return Visitor.runOnFunction();
}