//===-- MemorySanitizer.cpp - detector of uninitialized reads -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file is a part of MemorySanitizer, a detector of uninitialized
/// reads.
///
/// The algorithm of the tool is similar to Memcheck
/// (http://goo.gl/QKbem). We associate a few shadow bits with every
/// byte of the application memory, poison the shadow of the malloc-ed
/// or alloca-ed memory, load the shadow bits on every memory read,
/// propagate the shadow bits through some of the arithmetic
/// instructions (including MOV), store the shadow bits on every memory
/// write, report a bug on some other instructions (e.g. JMP) if the
/// associated shadow is poisoned.
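///
/// As an illustrative sketch (not the exact IR the pass emits), shadow for
/// an addition propagates as a bitwise OR of the operand shadows:
///
///   %c        = add i32 %a, %b
///   %c_shadow = or  i32 %a_shadow, %b_shadow   ; approximate propagation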
///
/// But there are differences too. The first and the major one:
/// compiler instrumentation instead of binary instrumentation. This
/// gives us much better register allocation, possible compiler
/// optimizations and a fast start-up. But this also brings a major
/// issue: msan needs to see all program events, including system
/// calls and reads/writes in system libraries, so we either need to
/// compile *everything* with msan or use a binary translation
/// component (e.g. DynamoRIO) to instrument pre-built libraries.
/// Another difference from Memcheck is that we use 8 shadow bits per
/// byte of application memory and use a direct shadow mapping. This
/// greatly simplifies the instrumentation code and avoids races on
/// shadow updates (Memcheck is single-threaded so races are not a
/// concern there. Memcheck uses 2 shadow bits per byte with a slow
/// path storage that uses 8 bits per byte).
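///
/// As a sketch of the direct mapping (the exact mask constants are defined
/// later in this file):
///
///   ShadowAddr = AppAddr & ~ShadowMask   // one shadow byte per app byte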
///
/// The default value of shadow is 0, which means "clean" (not poisoned).
///
/// Every module initializer should call __msan_init to ensure that the
/// shadow memory is ready. On error, __msan_warning is called. Since
/// parameters and return values may be passed via registers, we have a
/// specialized thread-local shadow for return values
/// (__msan_retval_tls) and parameters (__msan_param_tls).
///
///                           Origin tracking:
///
/// MemorySanitizer can track origins (allocation points) of all uninitialized
/// values. This behavior is controlled with a flag (msan-track-origins) and is
/// disabled by default.
///
/// Origins are 4-byte values created and interpreted by the runtime library.
/// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
/// of application memory. Propagation of origins is basically a bunch of
/// "select" instructions that pick the origin of a dirty argument, if an
/// instruction has one.
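///
/// For example (an illustrative sketch, with %sb denoting the shadow of %b):
///
///   %c        = add i32 %a, %b
///   %c_origin = select i1 (%sb != 0), i32 %b_origin, i32 %a_origin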
///
/// Every 4 aligned, consecutive bytes of application memory have one origin
/// value associated with them. If these bytes contain uninitialized data
/// coming from 2 different allocations, the last store wins. Because of this,
/// MemorySanitizer reports can show unrelated origins, but this is unlikely in
/// practice.
///
/// Origins are meaningless for fully initialized values, so MemorySanitizer
/// avoids storing origin to memory when a fully initialized value is stored.
/// This way it avoids needless overwriting of the origin of the 4-byte region
/// on a short (i.e. 1 byte) clean store, and it is also good for performance.
///
///                            Atomic handling:
///
/// Ideally, every atomic store of application value should update the
/// corresponding shadow location in an atomic way. Unfortunately, atomic store
/// of two disjoint locations cannot be done without severe slowdown.
///
/// Therefore, we implement an approximation that may err on the safe side.
/// In this implementation, every atomically accessed location in the program
/// may only change from (partially) uninitialized to fully initialized, but
/// not the other way around. We load the shadow _after_ the application load,
/// and we store the shadow _before_ the app store. Also, we always store clean
/// shadow (if the application store is atomic). This way, if the store-load
/// pair constitutes a happens-before arc, shadow store and load are correctly
/// ordered such that the load will get either the value that was stored, or
/// some later value (which is always clean).
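///
/// Sketch of the resulting order for a release store and an acquire load
/// under this scheme (shadow accesses are plain, app accesses are atomic):
///
///   store %shadow        ; clean shadow, before the app store
///   store atomic %app    ; release
///   ...
///   load atomic %app     ; acquire
///   load %shadow         ; after the app load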
///
/// This does not work very well with Compare-And-Swap (CAS) and
/// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
/// must store the new shadow before the app operation, and load the shadow
/// after the app operation. Computers don't work this way. Current
/// implementation ignores the load aspect of CAS/RMW, always returning a clean
/// value. It implements the store part as a simple atomic store by storing a
/// clean shadow.
///
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

#define DEBUG_TYPE "msan"

static const uint64_t kShadowMask32 = 1ULL << 31;
static const uint64_t kShadowMask64 = 1ULL << 46;
static const uint64_t kOriginOffset32 = 1ULL << 30;
static const uint64_t kOriginOffset64 = 1ULL << 45;
static const unsigned kMinOriginAlignment = 4;
static const unsigned kShadowTLSAlignment = 8;
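
// A worked example of the mapping defined by these constants (64-bit case;
// the address value is illustrative only):
//   Shadow = Addr & ~kShadowMask64            // clears bit 46
//   Origin = (Shadow + kOriginOffset64) & ~3  // 4-byte aligned origin cell
// e.g. Addr 0x7f0000001234 maps to Shadow 0x3f0000001234.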

// Access sizes are powers of two: 1, 2, 4, 8.
static const size_t kNumberOfAccessSizes = 4;

/// \brief Track origins of uninitialized values.
///
/// Adds a section to MemorySanitizer report that points to the allocation
/// (stack or heap) the uninitialized bits came from originally.
static cl::opt<int> ClTrackOrigins("msan-track-origins",
       cl::desc("Track origins (allocation sites) of poisoned memory"),
       cl::Hidden, cl::init(0));
static cl::opt<bool> ClKeepGoing("msan-keep-going",
       cl::desc("keep going after reporting a UMR"),
       cl::Hidden, cl::init(false));
static cl::opt<bool> ClPoisonStack("msan-poison-stack",
       cl::desc("poison uninitialized stack variables"),
       cl::Hidden, cl::init(true));
static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
       cl::desc("poison uninitialized stack variables with a call"),
       cl::Hidden, cl::init(false));
static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
       cl::desc("poison uninitialized stack variables with the given pattern"),
       cl::Hidden, cl::init(0xff));
static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
       cl::desc("poison undef temps"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
       cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
       cl::desc("exact handling of relational integer ICmp"),
       cl::Hidden, cl::init(false));

// This flag controls whether we check the shadow of the address
// operand of load or store. Such bugs are very rare, since load from
// a garbage address typically results in SEGV, but they still happen
// (e.g. only the lower bits of the address are garbage, or the access
// happens early at program startup where malloc-ed memory is more likely
// to be zeroed). As of 2012-08-28 this flag adds 20% slowdown.
static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
       cl::desc("report accesses through a pointer which has poisoned shadow"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
       cl::desc("print out instructions with default strict semantics"),
       cl::Hidden, cl::init(false));

static cl::opt<int> ClInstrumentationWithCallThreshold(
    "msan-instrumentation-with-call-threshold",
    cl::desc(
        "If the function being instrumented requires more than "
        "this number of checks and origin stores, use callbacks instead of "
        "inline checks (-1 means never use callbacks)."),
    cl::Hidden, cl::init(3500));

// Experimental. Wraps all indirect calls in the instrumented code with
// a call to the given function. This is needed to assist the dynamic
// helper tool (MSanDR) to regain control on transition between instrumented
// and non-instrumented code.
static cl::opt<std::string> ClWrapIndirectCalls("msan-wrap-indirect-calls",
       cl::desc("Wrap indirect calls with a given function"),
       cl::Hidden);

static cl::opt<bool> ClWrapIndirectCallsFast("msan-wrap-indirect-calls-fast",
       cl::desc("Do not wrap indirect calls with target in the same module"),
       cl::Hidden, cl::init(true));

namespace {

/// \brief An instrumentation pass implementing detection of uninitialized
/// reads.
///
/// MemorySanitizer: instrument the code in module to find
/// uninitialized reads.
class MemorySanitizer : public FunctionPass {
 public:
  MemorySanitizer(int TrackOrigins = 0)
      : FunctionPass(ID),
        TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
        DL(nullptr),
        WarningFn(nullptr),
        WrapIndirectCalls(!ClWrapIndirectCalls.empty()) {}
  const char *getPassName() const override { return "MemorySanitizer"; }
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid.

 private:
  void initializeCallbacks(Module &M);

  /// \brief Track origins (allocation points) of uninitialized values.
  int TrackOrigins;

  const DataLayout *DL;
  LLVMContext *C;
  Type *IntptrTy;
  Type *OriginTy;
  /// \brief Thread-local shadow storage for function parameters.
  GlobalVariable *ParamTLS;
  /// \brief Thread-local origin storage for function parameters.
  GlobalVariable *ParamOriginTLS;
  /// \brief Thread-local shadow storage for function return value.
  GlobalVariable *RetvalTLS;
  /// \brief Thread-local origin storage for function return value.
  GlobalVariable *RetvalOriginTLS;
  /// \brief Thread-local shadow storage for in-register va_arg function
  /// parameters (x86_64-specific).
  GlobalVariable *VAArgTLS;
  /// \brief Thread-local shadow storage for va_arg overflow area
  /// (x86_64-specific).
  GlobalVariable *VAArgOverflowSizeTLS;
  /// \brief Thread-local space used to pass origin value to the UMR reporting
  /// function.
  GlobalVariable *OriginTLS;

  GlobalVariable *MsandrModuleStart;
  GlobalVariable *MsandrModuleEnd;

  /// \brief The run-time callback to print a warning.
  Value *WarningFn;
  // These arrays are indexed by log2(AccessSize).
  Value *MaybeWarningFn[kNumberOfAccessSizes];
  Value *MaybeStoreOriginFn[kNumberOfAccessSizes];

  /// \brief Run-time helper that generates a new origin value for a stack
  /// allocation.
  Value *MsanSetAllocaOrigin4Fn;
  /// \brief Run-time helper that poisons stack on function entry.
  Value *MsanPoisonStackFn;
  /// \brief Run-time helper that records a store (or any event) of an
  /// uninitialized value and returns an updated origin id encoding this info.
  Value *MsanChainOriginFn;
  /// \brief MSan runtime replacements for memmove, memcpy and memset.
  Value *MemmoveFn, *MemcpyFn, *MemsetFn;

  /// \brief Address mask used in application-to-shadow address calculation.
  /// ShadowAddr is computed as ApplicationAddr & ~ShadowMask.
  uint64_t ShadowMask;
  /// \brief Offset of the origin shadow from the "normal" shadow.
  /// OriginAddr is computed as (ShadowAddr + OriginOffset) & ~3ULL.
  uint64_t OriginOffset;
  /// \brief Branch weights for error reporting.
  MDNode *ColdCallWeights;
  /// \brief Branch weights for origin store.
  MDNode *OriginStoreWeights;
  /// \brief An empty volatile inline asm that prevents callback merge.
  InlineAsm *EmptyAsm;

  bool WrapIndirectCalls;
  /// \brief Run-time wrapper for indirect calls.
  Value *IndirectCallWrapperFn;
  // Argument and return type of IndirectCallWrapperFn: void (*f)(void).
  Type *AnyFunctionPtrTy;

  friend struct MemorySanitizerVisitor;
  friend struct VarArgAMD64Helper;
};
}  // namespace

char MemorySanitizer::ID = 0;
INITIALIZE_PASS(MemorySanitizer, "msan",
                "MemorySanitizer: detects uninitialized reads.",
                false, false)

FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins) {
  return new MemorySanitizer(TrackOrigins);
}

/// \brief Create a non-const global initialized with the given string.
///
/// Creates a writable global for Str so that we can pass it to the
/// run-time lib. Runtime uses first 4 bytes of the string to store the
/// frame ID, so the string needs to be mutable.
static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
                                                            StringRef Str) {
  Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
  return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
                            GlobalValue::PrivateLinkage, StrConst, "");
}

/// \brief Insert extern declaration of runtime-provided functions and globals.
void MemorySanitizer::initializeCallbacks(Module &M) {
  // Only do this once.
  if (WarningFn)
    return;

  IRBuilder<> IRB(*C);
  // Create the callback.
  // FIXME: this function should have "Cold" calling conv,
  // which is not yet implemented.
  StringRef WarningFnName = ClKeepGoing ? "__msan_warning"
                                        : "__msan_warning_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), NULL);

  for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
       AccessSizeIndex++) {
    unsigned AccessSize = 1 << AccessSizeIndex;
    std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
    MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
        IRB.getInt32Ty(), NULL);

    FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
    MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
        IRB.getInt8PtrTy(), IRB.getInt32Ty(), NULL);
  }

  MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
    "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
    IRB.getInt8PtrTy(), IntptrTy, NULL);
  MsanPoisonStackFn = M.getOrInsertFunction(
    "__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, NULL);
  MsanChainOriginFn = M.getOrInsertFunction(
    "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty(), NULL);
  MemmoveFn = M.getOrInsertFunction(
    "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
    IRB.getInt8PtrTy(), IntptrTy, NULL);
  MemcpyFn = M.getOrInsertFunction(
    "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
    IntptrTy, NULL);
  MemsetFn = M.getOrInsertFunction(
    "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
    IntptrTy, NULL);

  // Create thread-local globals for shadow/origin transfer.
  RetvalTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 8), false,
    GlobalVariable::ExternalLinkage, nullptr, "__msan_retval_tls", nullptr,
    GlobalVariable::InitialExecTLSModel);
  RetvalOriginTLS = new GlobalVariable(
    M, OriginTy, false, GlobalVariable::ExternalLinkage, nullptr,
    "__msan_retval_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);

  ParamTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
    GlobalVariable::ExternalLinkage, nullptr, "__msan_param_tls", nullptr,
    GlobalVariable::InitialExecTLSModel);
  ParamOriginTLS = new GlobalVariable(
    M, ArrayType::get(OriginTy, 1000), false, GlobalVariable::ExternalLinkage,
    nullptr, "__msan_param_origin_tls", nullptr,
    GlobalVariable::InitialExecTLSModel);

  VAArgTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
    GlobalVariable::ExternalLinkage, nullptr, "__msan_va_arg_tls", nullptr,
    GlobalVariable::InitialExecTLSModel);
  VAArgOverflowSizeTLS = new GlobalVariable(
    M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
    "__msan_va_arg_overflow_size_tls", nullptr,
    GlobalVariable::InitialExecTLSModel);
  OriginTLS = new GlobalVariable(
    M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
    "__msan_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);

  // We insert an empty inline asm after __msan_report* to avoid callback
  // merge.
  EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
                            StringRef(""), StringRef(""),
                            /*hasSideEffects=*/true);

  if (WrapIndirectCalls) {
    AnyFunctionPtrTy =
        PointerType::getUnqual(FunctionType::get(IRB.getVoidTy(), false));
    IndirectCallWrapperFn = M.getOrInsertFunction(
        ClWrapIndirectCalls, AnyFunctionPtrTy, AnyFunctionPtrTy, NULL);
  }

  if (WrapIndirectCalls && ClWrapIndirectCallsFast) {
    MsandrModuleStart = new GlobalVariable(
        M, IRB.getInt32Ty(), false, GlobalValue::ExternalLinkage,
        nullptr, "__executable_start");
    MsandrModuleStart->setVisibility(GlobalVariable::HiddenVisibility);
    MsandrModuleEnd = new GlobalVariable(
        M, IRB.getInt32Ty(), false, GlobalValue::ExternalLinkage,
        nullptr, "_end");
    MsandrModuleEnd->setVisibility(GlobalVariable::HiddenVisibility);
  }
}

/// \brief Module-level initialization.
///
/// inserts a call to __msan_init to the module's constructor list.
bool MemorySanitizer::doInitialization(Module &M) {
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  if (!DLP)
    report_fatal_error("data layout missing");
  DL = &DLP->getDataLayout();

  C = &(M.getContext());
  unsigned PtrSize = DL->getPointerSizeInBits(/* AddressSpace */0);
  switch (PtrSize) {
    case 64:
      ShadowMask = kShadowMask64;
      OriginOffset = kOriginOffset64;
      break;
    case 32:
      ShadowMask = kShadowMask32;
      OriginOffset = kOriginOffset32;
      break;
    default:
      report_fatal_error("unsupported pointer size");
  }

  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(DL);
  OriginTy = IRB.getInt32Ty();

  ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
  OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);

  // Insert a call to __msan_init/__msan_track_origins into the module's CTORs.
  appendToGlobalCtors(M, cast<Function>(M.getOrInsertFunction(
                      "__msan_init", IRB.getVoidTy(), NULL)), 0);

  if (TrackOrigins)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(TrackOrigins), "__msan_track_origins");

  if (ClKeepGoing)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(ClKeepGoing), "__msan_keep_going");

  return true;
}

namespace {

/// \brief A helper class that handles instrumentation of VarArg
/// functions on a particular platform.
///
/// Implementations are expected to insert the instrumentation
/// necessary to propagate argument shadow through VarArg function
/// calls. Visit* methods are called during an InstVisitor pass over
/// the function, and should avoid creating new basic blocks. A new
/// instance of this class is created for each instrumented function.
struct VarArgHelper {
  /// \brief Visit a CallSite.
  virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;

  /// \brief Visit a va_start call.
  virtual void visitVAStartInst(VAStartInst &I) = 0;

  /// \brief Visit a va_copy call.
  virtual void visitVACopyInst(VACopyInst &I) = 0;

  /// \brief Finalize function instrumentation.
  ///
  /// This method is called after visiting all interesting (see above)
  /// instructions in a function.
  virtual void finalizeInstrumentation() = 0;

  virtual ~VarArgHelper() {}
};

struct MemorySanitizerVisitor;

VarArgHelper*
CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                   MemorySanitizerVisitor &Visitor);

unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
  if (TypeSize <= 8) return 0;
  return Log2_32_Ceil(TypeSize / 8);
}
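
// For example: TypeSizeToSizeIndex(8) == 0, TypeSizeToSizeIndex(32) == 2 and
// TypeSizeToSizeIndex(64) == 3, i.e. the index into the MaybeWarningFn and
// MaybeStoreOriginFn arrays for the corresponding access size.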

/// This class does all the work for a given function. Store and Load
/// instructions store and load corresponding shadow and origin
/// values. Most instructions propagate shadow from arguments to their
/// return values. Certain instructions (most importantly, BranchInst)
/// test their argument shadow and print reports (with a runtime call) if it's
/// non-zero.
struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  Function &F;
  MemorySanitizer &MS;
  SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
  ValueMap<Value*, Value*> ShadowMap, OriginMap;
  std::unique_ptr<VarArgHelper> VAHelper;

  // The following flags disable parts of MSan instrumentation based on
  // blacklist contents and command-line options.
  bool InsertChecks;
  bool LoadShadow;
  bool PoisonStack;
  bool PoisonUndef;
  bool CheckReturnValue;

  struct ShadowOriginAndInsertPoint {
    Value *Shadow;
    Value *Origin;
    Instruction *OrigIns;
    ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
      : Shadow(S), Origin(O), OrigIns(I) { }
  };
  SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
  SmallVector<Instruction*, 16> StoreList;
  SmallVector<CallSite, 16> IndirectCallList;

  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
      : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
    bool SanitizeFunction = F.getAttributes().hasAttribute(
        AttributeSet::FunctionIndex, Attribute::SanitizeMemory);
    InsertChecks = SanitizeFunction;
    LoadShadow = SanitizeFunction;
    PoisonStack = SanitizeFunction && ClPoisonStack;
    PoisonUndef = SanitizeFunction && ClPoisonUndef;
    // FIXME: Consider using SpecialCaseList to specify a list of functions
    // that must always return fully initialized values. For now, we hardcode
    // "main".
    CheckReturnValue = SanitizeFunction && (F.getName() == "main");

    DEBUG(if (!InsertChecks)
          dbgs() << "MemorySanitizer is not inserting checks into '"
                 << F.getName() << "'\n");
  }

  Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
    if (MS.TrackOrigins <= 1) return V;
    return IRB.CreateCall(MS.MsanChainOriginFn, V);
  }

  void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
                   unsigned Alignment, bool AsCall) {
    if (isa<StructType>(Shadow->getType())) {
      IRB.CreateAlignedStore(updateOrigin(Origin, IRB), getOriginPtr(Addr, IRB),
                             Alignment);
    } else {
      Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
      // TODO(eugenis): handle non-zero constant shadow by inserting an
      // unconditional check (cannot simply fail compilation as this could
      // be in the dead code).
      if (isa<Constant>(ConvertedShadow)) return;
      unsigned TypeSizeInBits =
          MS.DL->getTypeSizeInBits(ConvertedShadow->getType());
      unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
      if (AsCall && SizeIndex < kNumberOfAccessSizes) {
        Value *Fn = MS.MaybeStoreOriginFn[SizeIndex];
        Value *ConvertedShadow2 = IRB.CreateZExt(
            ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
        IRB.CreateCall3(Fn, ConvertedShadow2,
                        IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                        updateOrigin(Origin, IRB));
      } else {
        Value *Cmp = IRB.CreateICmpNE(
            ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp");
        Instruction *CheckTerm = SplitBlockAndInsertIfThen(
            Cmp, IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
        IRBuilder<> IRBNew(CheckTerm);
        IRBNew.CreateAlignedStore(updateOrigin(Origin, IRBNew),
                                  getOriginPtr(Addr, IRBNew), Alignment);
      }
    }
  }

  void materializeStores(bool InstrumentWithCalls) {
    for (auto Inst : StoreList) {
      StoreInst &SI = *dyn_cast<StoreInst>(Inst);

      IRBuilder<> IRB(&SI);
      Value *Val = SI.getValueOperand();
      Value *Addr = SI.getPointerOperand();
      Value *Shadow = SI.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
      Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

      StoreInst *NewSI =
          IRB.CreateAlignedStore(Shadow, ShadowPtr, SI.getAlignment());
      DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
      (void)NewSI;

      if (ClCheckAccessAddress) insertShadowCheck(Addr, &SI);

      if (SI.isAtomic()) SI.setOrdering(addReleaseOrdering(SI.getOrdering()));

      if (MS.TrackOrigins) {
        unsigned Alignment = std::max(kMinOriginAlignment, SI.getAlignment());
        storeOrigin(IRB, Addr, Shadow, getOrigin(Val), Alignment,
                    InstrumentWithCalls);
      }
    }
  }

  void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin,
                           bool AsCall) {
    IRBuilder<> IRB(OrigIns);
    DEBUG(dbgs() << "  SHAD0 : " << *Shadow << "\n");
    Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
    DEBUG(dbgs() << "  SHAD1 : " << *ConvertedShadow << "\n");
    // See the comment in materializeStores().
    if (isa<Constant>(ConvertedShadow)) return;
    unsigned TypeSizeInBits =
        MS.DL->getTypeSizeInBits(ConvertedShadow->getType());
    unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
    if (AsCall && SizeIndex < kNumberOfAccessSizes) {
      Value *Fn = MS.MaybeWarningFn[SizeIndex];
      Value *ConvertedShadow2 =
          IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
      IRB.CreateCall2(Fn, ConvertedShadow2, MS.TrackOrigins && Origin
                                                ? Origin
                                                : (Value *)IRB.getInt32(0));
    } else {
      Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
                                    getCleanShadow(ConvertedShadow), "_mscmp");
      Instruction *CheckTerm = SplitBlockAndInsertIfThen(
          Cmp, OrigIns,
          /* Unreachable */ !ClKeepGoing, MS.ColdCallWeights);

      IRB.SetInsertPoint(CheckTerm);
      if (MS.TrackOrigins) {
        IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
                        MS.OriginTLS);
      }
      IRB.CreateCall(MS.WarningFn);
      IRB.CreateCall(MS.EmptyAsm);
      DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
    }
  }

  void materializeChecks(bool InstrumentWithCalls) {
    for (const auto &ShadowData : InstrumentationList) {
      Instruction *OrigIns = ShadowData.OrigIns;
      Value *Shadow = ShadowData.Shadow;
      Value *Origin = ShadowData.Origin;
      materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
    }
    DEBUG(dbgs() << "DONE:\n" << F);
  }

  void materializeIndirectCalls() {
    for (auto &CS : IndirectCallList) {
      Instruction *I = CS.getInstruction();
      BasicBlock *B = I->getParent();
      IRBuilder<> IRB(I);
      Value *Fn0 = CS.getCalledValue();
      Value *Fn = IRB.CreateBitCast(Fn0, MS.AnyFunctionPtrTy);

      if (ClWrapIndirectCallsFast) {
        // Check that the call target is inside this module's limits.
        Value *Start =
            IRB.CreateBitCast(MS.MsandrModuleStart, MS.AnyFunctionPtrTy);
        Value *End = IRB.CreateBitCast(MS.MsandrModuleEnd, MS.AnyFunctionPtrTy);

        Value *NotInThisModule = IRB.CreateOr(IRB.CreateICmpULT(Fn, Start),
                                              IRB.CreateICmpUGE(Fn, End));

        PHINode *NewFnPhi =
            IRB.CreatePHI(Fn0->getType(), 2, "msandr.indirect_target");

        Instruction *CheckTerm = SplitBlockAndInsertIfThen(
            NotInThisModule, NewFnPhi,
            /* Unreachable */ false, MS.ColdCallWeights);

        IRB.SetInsertPoint(CheckTerm);
        // Slow path: call wrapper function to possibly transform the call
        // target.
        Value *NewFn = IRB.CreateBitCast(
            IRB.CreateCall(MS.IndirectCallWrapperFn, Fn), Fn0->getType());

        NewFnPhi->addIncoming(Fn0, B);
        NewFnPhi->addIncoming(NewFn, dyn_cast<Instruction>(NewFn)->getParent());
        CS.setCalledFunction(NewFnPhi);
      } else {
        Value *NewFn = IRB.CreateBitCast(
            IRB.CreateCall(MS.IndirectCallWrapperFn, Fn), Fn0->getType());
        CS.setCalledFunction(NewFn);
      }
    }
  }

  /// \brief Add MemorySanitizer instrumentation to a function.
  bool runOnFunction() {
    MS.initializeCallbacks(*F.getParent());
    if (!MS.DL) return false;

    // In the presence of unreachable blocks, we may see Phi nodes with
    // incoming nodes from such blocks. Since InstVisitor skips unreachable
    // blocks, such nodes will not have any shadow value associated with them.
    // It's easier to remove unreachable blocks than deal with missing shadow.
    removeUnreachableBlocks(F);

    // Iterate all BBs in depth-first order and create shadow instructions
    // for all instructions (where applicable).
    // For PHI nodes we create dummy shadow PHIs which will be finalized later.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
      visit(*BB);

    // Finalize PHI nodes.
    for (PHINode *PN : ShadowPHINodes) {
      PHINode *PNS = cast<PHINode>(getShadow(PN));
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
        if (PNO)
          PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
      }
    }

    VAHelper->finalizeInstrumentation();

    bool InstrumentWithCalls = ClInstrumentationWithCallThreshold >= 0 &&
                               InstrumentationList.size() + StoreList.size() >
                                   (unsigned)ClInstrumentationWithCallThreshold;

    // Delayed instrumentation of StoreInst.
    // This may add new checks to be inserted later.
    materializeStores(InstrumentWithCalls);

    // Insert shadow value checks.
    materializeChecks(InstrumentWithCalls);

    // Wrap indirect calls.
    materializeIndirectCalls();

    return true;
  }

  /// \brief Compute the shadow type that corresponds to a given Value.
  Type *getShadowTy(Value *V) {
    return getShadowTy(V->getType());
  }

  /// \brief Compute the shadow type that corresponds to a given Type.
  Type *getShadowTy(Type *OrigTy) {
    if (!OrigTy->isSized()) {
      return nullptr;
    }
    // For integer type, shadow is the same as the original type.
    // This may return weird-sized types like i1.
    if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
      return IT;
    if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
      uint32_t EltSize = MS.DL->getTypeSizeInBits(VT->getElementType());
      return VectorType::get(IntegerType::get(*MS.C, EltSize),
                             VT->getNumElements());
    }
    if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
      SmallVector<Type*, 4> Elements;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
      DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
      return Res;
    }
    uint32_t TypeSize = MS.DL->getTypeSizeInBits(OrigTy);
    return IntegerType::get(*MS.C, TypeSize);
  }

  /// \brief Flatten a vector type.
  Type *getShadowTyNoVec(Type *ty) {
    if (VectorType *vt = dyn_cast<VectorType>(ty))
      return IntegerType::get(*MS.C, vt->getBitWidth());
    return ty;
  }

  /// \brief Convert a shadow value to its flattened variant.
  Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
    Type *Ty = V->getType();
    Type *NoVecTy = getShadowTyNoVec(Ty);
    if (Ty == NoVecTy) return V;
    return IRB.CreateBitCast(V, NoVecTy);
  }

  /// \brief Compute the shadow address that corresponds to a given application
  /// address.
  ///
  /// Shadow = Addr & ~ShadowMask.
  Value *getShadowPtr(Value *Addr, Type *ShadowTy,
                      IRBuilder<> &IRB) {
    Value *ShadowLong =
        IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                      ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
    return IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
  }

  /// \brief Compute the origin address that corresponds to a given application
  /// address.
  ///
  /// OriginAddr = (ShadowAddr + OriginOffset) & ~3ULL
  Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB) {
    Value *ShadowLong =
        IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                      ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
    Value *Add =
        IRB.CreateAdd(ShadowLong,
                      ConstantInt::get(MS.IntptrTy, MS.OriginOffset));
    Value *SecondAnd =
        IRB.CreateAnd(Add, ConstantInt::get(MS.IntptrTy, ~3ULL));
    return IRB.CreateIntToPtr(SecondAnd, PointerType::get(IRB.getInt32Ty(), 0));
  }

  /// \brief Compute the shadow address for a given function argument.
  ///
  /// Shadow = ParamTLS+ArgOffset.
  Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msarg");
  }

  /// \brief Compute the origin address for a given function argument.
  Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    if (!MS.TrackOrigins) return nullptr;
    Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
                              "_msarg_o");
  }

  /// \brief Compute the shadow address for a retval.
  Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
    Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy);
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msret");
  }

  /// \brief Compute the origin address for a retval.
  Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
    // We keep a single origin for the entire retval. Might be too optimistic.
    return MS.RetvalOriginTLS;
  }

  /// \brief Set SV to be the shadow value for V.
  void setShadow(Value *V, Value *SV) {
    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = SV;
  }

  /// \brief Set Origin to be the origin value for V.
  void setOrigin(Value *V, Value *Origin) {
    if (!MS.TrackOrigins) return;
    assert(!OriginMap.count(V) && "Values may only have one origin");
    DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n");
    OriginMap[V] = Origin;
  }

  /// \brief Create a clean shadow value for a given value.
  ///
  /// Clean shadow (all zeroes) means all bits of the value are defined
  /// (initialized).
  Constant *getCleanShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return nullptr;
    return Constant::getNullValue(ShadowTy);
  }

  /// \brief Create a dirty shadow of a given shadow type.
  Constant *getPoisonedShadow(Type *ShadowTy) {
    assert(ShadowTy);
    if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
      return Constant::getAllOnesValue(ShadowTy);
    StructType *ST = cast<StructType>(ShadowTy);
    SmallVector<Constant *, 4> Vals;
    for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
      Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
    return ConstantStruct::get(ST, Vals);
  }

  /// \brief Create a dirty shadow for a given value.
  Constant *getPoisonedShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return nullptr;
    return getPoisonedShadow(ShadowTy);
  }

  /// \brief Create a clean (zero) origin.
  Value *getCleanOrigin() {
    return Constant::getNullValue(MS.OriginTy);
  }

  /// \brief Get the shadow value for a given Value.
  ///
  /// This function either returns the value set earlier with setShadow,
  /// or extracts it from ParamTLS (for function arguments).
  Value *getShadow(Value *V) {
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      // For instructions the shadow is already stored in the map.
      Value *Shadow = ShadowMap[V];
      if (!Shadow) {
        DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
        (void)I;
        assert(Shadow && "No shadow for a value");
      }
      return Shadow;
    }
    if (UndefValue *U = dyn_cast<UndefValue>(V)) {
      Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
      DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
      (void)U;
      return AllOnes;
    }
    if (Argument *A = dyn_cast<Argument>(V)) {
      // For arguments we compute the shadow on demand and store it in the map.
      Value **ShadowPtr = &ShadowMap[V];
      if (*ShadowPtr)
        return *ShadowPtr;
      Function *F = A->getParent();
      IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
      unsigned ArgOffset = 0;
      for (auto &FArg : F->args()) {
        if (!FArg.getType()->isSized()) {
          DEBUG(dbgs() << "Arg is not sized\n");
          continue;
        }
        unsigned Size = FArg.hasByValAttr()
            ? MS.DL->getTypeAllocSize(FArg.getType()->getPointerElementType())
            : MS.DL->getTypeAllocSize(FArg.getType());
        if (A == &FArg) {
          Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
          if (FArg.hasByValAttr()) {
            // ByVal pointer itself has clean shadow. We copy the actual
            // argument shadow to the underlying memory.
            // Figure out maximal valid memcpy alignment.
            unsigned ArgAlign = FArg.getParamAlignment();
            if (ArgAlign == 0) {
              Type *EltType = A->getType()->getPointerElementType();
              ArgAlign = MS.DL->getABITypeAlignment(EltType);
            }
            unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
            Value *Cpy = EntryIRB.CreateMemCpy(
                getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size,
                CopyAlign);
            DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
            (void)Cpy;
            *ShadowPtr = getCleanShadow(V);
          } else {
            *ShadowPtr = EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
          }
          DEBUG(dbgs() << "  ARG: " << FArg << " ==> " <<
                **ShadowPtr << "\n");
          if (MS.TrackOrigins) {
            Value *OriginPtr =
                getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
            setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
          }
        }
        ArgOffset += DataLayout::RoundUpAlignment(Size, kShadowTLSAlignment);
      }
      assert(*ShadowPtr && "Could not find shadow for an argument");
      return *ShadowPtr;
    }
    // For everything else the shadow is zero.
    return getCleanShadow(V);
  }

  /// \brief Get the shadow for i-th argument of the instruction I.
  Value *getShadow(Instruction *I, int i) {
    return getShadow(I->getOperand(i));
  }

  /// \brief Get the origin for a value.
  Value *getOrigin(Value *V) {
    if (!MS.TrackOrigins) return nullptr;
    if (isa<Instruction>(V) || isa<Argument>(V)) {
      Value *Origin = OriginMap[V];
      if (!Origin) {
        DEBUG(dbgs() << "NO ORIGIN: " << *V << "\n");
        Origin = getCleanOrigin();
      }
      return Origin;
    }
    return getCleanOrigin();
  }

  /// \brief Get the origin for i-th argument of the instruction I.
  Value *getOrigin(Instruction *I, int i) {
    return getOrigin(I->getOperand(i));
  }

  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning at runtime if the shadow value is not 0.
  void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
    assert(Shadow);
    if (!InsertChecks) return;
#ifndef NDEBUG
    Type *ShadowTy = Shadow->getType();
    assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
           "Can only insert checks for integer and vector shadow types");
#endif
    InstrumentationList.push_back(
        ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
  }

  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning at runtime if the value is not fully defined.
  void insertShadowCheck(Value *Val, Instruction *OrigIns) {
    assert(Val);
    Instruction *Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
    if (!Shadow) return;
    Instruction *Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
    insertShadowCheck(Shadow, Origin, OrigIns);
  }

  AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
    switch (a) {
      case NotAtomic:
        return NotAtomic;
      case Unordered:
      case Monotonic:
      case Release:
        return Release;
      case Acquire:
      case AcquireRelease:
        return AcquireRelease;
      case SequentiallyConsistent:
        return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }

  AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
    switch (a) {
      case NotAtomic:
        return NotAtomic;
      case Unordered:
      case Monotonic:
      case Acquire:
        return Acquire;
      case Release:
      case AcquireRelease:
        return AcquireRelease;
      case SequentiallyConsistent:
        return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }

  // ------------------- Visitors.

  /// \brief Instrument LoadInst
  ///
  /// Loads the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the load address is fully defined.
  void visitLoadInst(LoadInst &I) {
    assert(I.getType()->isSized() && "Load type must have size");
    IRBuilder<> IRB(I.getNextNode());
    Type *ShadowTy = getShadowTy(&I);
    Value *Addr = I.getPointerOperand();
    if (LoadShadow) {
      Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
      setShadow(&I,
                IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertShadowCheck(I.getPointerOperand(), &I);

    if (I.isAtomic())
      I.setOrdering(addAcquireOrdering(I.getOrdering()));

    if (MS.TrackOrigins) {
      if (LoadShadow) {
        unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
        setOrigin(&I,
                  IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB), Alignment));
      } else {
        setOrigin(&I, getCleanOrigin());
      }
    }
  }

  /// \brief Instrument StoreInst
  ///
  /// Stores the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the store address is fully defined.
  void visitStoreInst(StoreInst &I) {
    StoreList.push_back(&I);
  }

  void handleCASOrRMW(Instruction &I) {
    assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));

    IRBuilder<> IRB(&I);
    Value *Addr = I.getOperand(0);
    Value *ShadowPtr = getShadowPtr(Addr, I.getType(), IRB);

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    // Only test the conditional argument of cmpxchg instruction.
    // The other argument can potentially be uninitialized, but we cannot
    // detect this situation reliably without possible false positives.
    if (isa<AtomicCmpXchgInst>(I))
      insertShadowCheck(I.getOperand(1), &I);

    IRB.CreateStore(getCleanShadow(&I), ShadowPtr);

    setShadow(&I, getCleanShadow(&I));
  }

  void visitAtomicRMWInst(AtomicRMWInst &I) {
    handleCASOrRMW(I);
    I.setOrdering(addReleaseOrdering(I.getOrdering()));
  }

  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
    handleCASOrRMW(I);
    I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
  }

  // Vector manipulation.
  void visitExtractElementInst(ExtractElementInst &I) {
    insertShadowCheck(I.getOperand(1), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
              "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitInsertElementInst(InsertElementInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
              I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  void visitShuffleVectorInst(ShuffleVectorInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
              I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  // Casts.
  void visitSExtInst(SExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitZExtInst(ZExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitTruncInst(TruncInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitBitCastInst(BitCastInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
             "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitIntToPtrInst(IntToPtrInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
             "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }

  /// \brief Propagate shadow for bitwise AND.
  ///
  /// This code is exact, i.e. if, for example, a bit in the left argument
  /// is defined and 0, then neither the value nor the definedness of the
  /// corresponding bit in B affects the resulting shadow.
  void visitAnd(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    //  "And" of 0 and a poisoned value results in unpoisoned value.
    //  1&1 => 1;  0&1 => 0;  p&1 => p;
    //  1&0 => 0;  0&0 => 0;  p&0 => 0;
    //  1&p => p;  0&p => 0;  p&p => p;
    //  S = (S1 & S2) | (V1 & S2) | (S1 & V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }

  void visitOr(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    //  "Or" of 1 and a poisoned value results in unpoisoned value.
    //  1|1 => 1;  0|1 => 1;  p|1 => 1;
    //  1|0 => 1;  0|0 => 0;  p|0 => p;
    //  1|p => 1;  0|p => p;  p|p => p;
    //  S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = IRB.CreateNot(I.getOperand(0));
    Value *V2 = IRB.CreateNot(I.getOperand(1));
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }

  /// \brief Default propagation of shadow and/or origin.
  ///
  /// This class implements the general case of shadow propagation, used in all
  /// cases where we don't know and/or don't care about what the operation
  /// actually does. It converts all input shadow values to a common type
  /// (extending or truncating as necessary), and bitwise OR's them.
  ///
  /// This is much cheaper than inserting checks (i.e. requiring inputs to be
  /// fully initialized), and less prone to false positives.
  ///
  /// This class also implements the general case of origin propagation. For a
  /// Nary operation, result origin is set to the origin of an argument that is
  /// not entirely initialized. If there is more than one such argument, the
  /// rightmost of them is picked. It does not matter which one is picked if
  /// all arguments are initialized.
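  ///
  /// For example, for an instruction with operands (V1, V2, V3), shadows
  /// (S1, S2, S3) and origins (O1, O2, O3), the combined origin is in effect:
  ///   Origin = (S3 != 0) ? O3 : ((S2 != 0) ? O2 : O1)
  /// (a sketch; Add() below emits the selects pairwise).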
  template <bool CombineShadow>
  class Combiner {
    Value *Shadow;
    Value *Origin;
    IRBuilder<> &IRB;
    MemorySanitizerVisitor *MSV;

   public:
    Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
      Shadow(nullptr), Origin(nullptr), IRB(IRB), MSV(MSV) {}

    /// \brief Add a pair of shadow and origin values to the mix.
    Combiner &Add(Value *OpShadow, Value *OpOrigin) {
      if (CombineShadow) {
        assert(OpShadow);
        if (!Shadow)
          Shadow = OpShadow;
        else {
          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
        }
      }

      if (MSV->MS.TrackOrigins) {
        assert(OpOrigin);
        if (!Origin) {
          Origin = OpOrigin;
        } else {
          Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
          Value *Cond = IRB.CreateICmpNE(FlatShadow,
                                         MSV->getCleanShadow(FlatShadow));
          Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
        }
      }
      return *this;
    }

    /// \brief Add an application value to the mix.
    Combiner &Add(Value *V) {
      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
      return Add(OpShadow, OpOrigin);
    }

    /// \brief Set the current combined values as the given instruction's
    /// shadow and origin.
    void Done(Instruction *I) {
      if (CombineShadow) {
        assert(Shadow);
        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);
      }
      if (MSV->MS.TrackOrigins) {
        assert(Origin);
        MSV->setOrigin(I, Origin);
      }
    }
  };

  typedef Combiner<true> ShadowAndOriginCombiner;
  typedef Combiner<false> OriginCombiner;

  /// \brief Propagate origin for arbitrary operation.
  void setOriginForNaryOp(Instruction &I) {
    if (!MS.TrackOrigins) return;
    IRBuilder<> IRB(&I);
    OriginCombiner OC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      OC.Add(OI->get());
    OC.Done(&I);
  }

  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
    assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
           "Vector of pointers is not a valid shadow type");
    return Ty->isVectorTy() ?
      Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
      Ty->getPrimitiveSizeInBits();
  }

  /// \brief Cast between two shadow types, extending or truncating as
  /// necessary.
  Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
                          bool Signed = false) {
    Type *srcTy = V->getType();
    if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
      return IRB.CreateIntCast(V, dstTy, Signed);
    if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
        dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
      return IRB.CreateIntCast(V, dstTy, Signed);
    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
    Value *V2 =
        IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
    return IRB.CreateBitCast(V2, dstTy);
    // TODO: handle struct types.
  }

  /// \brief Cast an application value to the type of its own shadow.
  Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (V->getType() == ShadowTy)
      return V;
    if (V->getType()->isPtrOrPtrVectorTy())
      return IRB.CreatePtrToInt(V, ShadowTy);
    else
      return IRB.CreateBitCast(V, ShadowTy);
  }

  /// \brief Propagate shadow for arbitrary operation.
  void handleShadowOr(Instruction &I) {
    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      SC.Add(OI->get());
    SC.Done(&I);
  }

  void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
  void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitXor(BinaryOperator &I) { handleShadowOr(I); }
  void visitMul(BinaryOperator &I) { handleShadowOr(I); }

  void handleDiv(Instruction &I) {
    IRBuilder<> IRB(&I);
    // Strict on the second argument.
    insertShadowCheck(I.getOperand(1), &I);
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitUDiv(BinaryOperator &I) { handleDiv(I); }
  void visitSDiv(BinaryOperator &I) { handleDiv(I); }
  void visitFDiv(BinaryOperator &I) { handleDiv(I); }
  void visitURem(BinaryOperator &I) { handleDiv(I); }
  void visitSRem(BinaryOperator &I) { handleDiv(I); }
  void visitFRem(BinaryOperator &I) { handleDiv(I); }

  /// \brief Instrument == and != comparisons.
  ///
  /// Sometimes the comparison result is known even if some of the bits of the
  /// arguments are not.
  void handleEqualityComparison(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers.
    // For ints (and vectors of ints), types of A and Sa match,
    // and this is a no-op.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // A == B  <==>  (C = A^B) == 0
    // A != B  <==>  (C = A^B) != 0
    // Sc = Sa | Sb
    Value *C = IRB.CreateXor(A, B);
    Value *Sc = IRB.CreateOr(Sa, Sb);
    // Now dealing with i = (C == 0) comparison (or C != 0, does not matter
    // now). The result is defined if one of the following is true:
    // * there is a defined 1 bit in C
    // * C is fully defined
    // Si = !(C & ~Sc) && Sc
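    // Worked example with 4-bit values (? marks an undefined bit):
    // A = 0b1?01 (Sa = 0b0100), B = 0b0001 (Sb = 0): C = 0b1?00,
    // Sc = 0b0100. C & ~Sc = 0b1000 != 0, i.e. C has a defined 1 bit, so
    // the result of == / != is fully defined despite the undefined bit in A.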
    Value *Zero = Constant::getNullValue(Sc->getType());
    Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
    Value *Si =
      IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
                    IRB.CreateICmpEQ(
                      IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
    Si->setName("_msprop_icmp");
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }

  /// \brief Build the lowest possible value of V, taking into account V's
  /// uninitialized bits.
  Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
                                bool isSigned) {
    if (isSigned) {
      // Split shadow into sign bit and other bits.
      Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
      Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Maximise the undefined shadow bit, minimize other undefined bits.
      return
        IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit);
    } else {
      // Minimize undefined bits.
      return IRB.CreateAnd(A, IRB.CreateNot(Sa));
    }
  }

  /// \brief Build the highest possible value of V, taking into account V's
  /// uninitialized bits.
  Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
                                 bool isSigned) {
    if (isSigned) {
      // Split shadow into sign bit and other bits.
      Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
      Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Minimise the undefined shadow bit, maximise other undefined bits.
      return
        IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits);
    } else {
      // Maximize undefined bits.
      return IRB.CreateOr(A, Sa);
    }
  }

  /// \brief Instrument relational comparisons.
  ///
  /// This function does exact shadow propagation for all relational
  /// comparisons of integers, pointers and vectors of those.
  /// FIXME: output seems suboptimal when one of the operands is a constant
  void handleRelationalComparisonExact(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers.
    // For ints (and vectors of ints), types of A and Sa match,
    // and this is a no-op.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // Let [a0, a1] be the interval of possible values of A, taking into
    // account its undefined bits. Let [b0, b1] be the interval of possible
    // values of B. Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
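    // Example: for unsigned A = 0b1?0 (a0 = 0b100, a1 = 0b110) and a fully
    // defined B = 0b011, A ugt B holds at both endpoints, so (A ugt B) is
    // defined even though A has an undefined bit (illustrative values).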
    bool IsSigned = I.isSigned();
    Value *S1 = IRB.CreateICmp(I.getPredicate(),
                               getLowestPossibleValue(IRB, A, Sa, IsSigned),
                               getHighestPossibleValue(IRB, B, Sb, IsSigned));
    Value *S2 = IRB.CreateICmp(I.getPredicate(),
                               getHighestPossibleValue(IRB, A, Sa, IsSigned),
                               getLowestPossibleValue(IRB, B, Sb, IsSigned));
    Value *Si = IRB.CreateXor(S1, S2);
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }

  /// \brief Instrument signed relational comparisons.
  ///
  /// Handle (x<0) and (x>=0) comparisons (essentially, sign bit tests) by
  /// propagating the highest bit of the shadow. Everything else is delegated
  /// to handleShadowOr().
  void handleSignedRelationalComparison(ICmpInst &I) {
    Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
    Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
    Value* op = nullptr;
    CmpInst::Predicate pre = I.getPredicate();
    if (constOp0 && constOp0->isNullValue() &&
        (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE)) {
      op = I.getOperand(1);
    } else if (constOp1 && constOp1->isNullValue() &&
               (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) {
      op = I.getOperand(0);
    }
    if (op) {
      IRBuilder<> IRB(&I);
      Value *Shadow =
        IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op), "_msprop_icmpslt");
      setShadow(&I, Shadow);
      setOrigin(&I, getOrigin(op));
    } else {
      handleShadowOr(I);
    }
  }

  void visitICmpInst(ICmpInst &I) {
    if (!ClHandleICmp) {
      handleShadowOr(I);
      return;
    }
    if (I.isEquality()) {
      handleEqualityComparison(I);
      return;
    }

    assert(I.isRelational());
    if (ClHandleICmpExact) {
      handleRelationalComparisonExact(I);
      return;
    }
    if (I.isSigned()) {
      handleSignedRelationalComparison(I);
      return;
    }

    assert(I.isUnsigned());
    if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
      handleRelationalComparisonExact(I);
      return;
    }

    handleShadowOr(I);
  }

  void visitFCmpInst(FCmpInst &I) {
    handleShadowOr(I);
  }

  void handleShift(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
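    // Example: in %r = shl i32 %a, %b, a single poisoned bit in %b poisons
    // all of %r (S2Conv below becomes all-ones); otherwise the shadow of %a
    // is shifted by the same amount as the value.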
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
                                   S2->getType());
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }

  void visitShl(BinaryOperator &I) { handleShift(I); }
  void visitAShr(BinaryOperator &I) { handleShift(I); }
  void visitLShr(BinaryOperator &I) { handleShift(I); }
1603 /// \brief Instrument llvm.memmove
1605 /// At this point we don't know if llvm.memmove will be inlined or not.
1606 /// If we don't instrument it and it gets inlined,
1607 /// our interceptor will not kick in and we will lose the memmove.
1608 /// If we instrument the call here, but it does not get inlined,
1609 /// we will memmove the shadow twice, which is bad in case
1610 /// of overlapping regions. So, we simply lower the intrinsic to a call.
1612 /// Similar situation exists for memcpy and memset.
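/// A sketch of the lowering (runtime names as bound to MS.MemmoveFn and
/// friends elsewhere in this file):
///   call void @llvm.memmove.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %n, ...)
/// becomes
///   call i8* @__msan_memmove(i8* %d, i8* %s, i64 %n)
/// where the interceptor moves shadow (and origin) together with the data.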
1613 void visitMemMoveInst(MemMoveInst &I) {
1614 IRBuilder<> IRB(&I);
1615 IRB.CreateCall3(
1616 MS.MemmoveFn,
1617 IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1618 IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
1619 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
1620 I.eraseFromParent();
1621 }
1623 // Similar to memmove: avoid copying shadow twice.
1624 // This is somewhat unfortunate as it may slow down small constant memcpys.
1625 // FIXME: consider doing manual inline for small constant sizes and proper
1626 // alignment.
1627 void visitMemCpyInst(MemCpyInst &I) {
1628 IRBuilder<> IRB(&I);
1629 IRB.CreateCall3(
1630 MS.MemcpyFn,
1631 IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1632 IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
1633 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
1634 I.eraseFromParent();
1635 }
1638 void visitMemSetInst(MemSetInst &I) {
1639 IRBuilder<> IRB(&I);
1640 IRB.CreateCall3(
1641 MS.MemsetFn,
1642 IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1643 IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
1644 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
1645 I.eraseFromParent();
1646 }
1648 void visitVAStartInst(VAStartInst &I) {
1649 VAHelper->visitVAStartInst(I);
1650 }
1652 void visitVACopyInst(VACopyInst &I) {
1653 VAHelper->visitVACopyInst(I);
1654 }
1656 enum IntrinsicKind {
1657 IK_DoesNotAccessMemory,
1658 IK_OnlyReadsMemory,
1659 IK_WritesMemory
1660 };
1662 static IntrinsicKind getIntrinsicKind(Intrinsic::ID iid) {
1663 const int DoesNotAccessMemory = IK_DoesNotAccessMemory;
1664 const int OnlyReadsArgumentPointees = IK_OnlyReadsMemory;
1665 const int OnlyReadsMemory = IK_OnlyReadsMemory;
1666 const int OnlyAccessesArgumentPointees = IK_WritesMemory;
1667 const int UnknownModRefBehavior = IK_WritesMemory;
1668 #define GET_INTRINSIC_MODREF_BEHAVIOR
1669 #define ModRefBehavior IntrinsicKind
1670 #include "llvm/IR/Intrinsics.gen"
1671 #undef ModRefBehavior
1672 #undef GET_INTRINSIC_MODREF_BEHAVIOR
1673 }
1675 /// \brief Handle vector store-like intrinsics.
1677 /// Instrument intrinsics that look like a simple SIMD store: writes memory,
1678 /// has 1 pointer argument and 1 vector argument, returns void.
1679 bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
1680 IRBuilder<> IRB(&I);
1681 Value* Addr = I.getArgOperand(0);
1682 Value *Shadow = getShadow(&I, 1);
1683 Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
1685 // We don't know the pointer alignment (could be an unaligned SSE store!).
1686 // Have to assume the worst case.
1687 IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);
1689 if (ClCheckAccessAddress)
1690 insertShadowCheck(Addr, &I);
1692 // FIXME: use ClStoreCleanOrigin
1693 // FIXME: factor out common code from materializeStores
1694 if (MS.TrackOrigins)
1695 IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB));
1696 return true;
1697 }
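// An intrinsic this would match (hypothetical example):
//   call void @llvm.x86.sse.storeu.ps(i8* %p, <4 x float> %v)
// The shadow of %v is stored to the shadow of %p with alignment 1,
// mirroring the possibly-unaligned application store.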
1699 /// \brief Handle vector load-like intrinsics.
1701 /// Instrument intrinsics that look like a simple SIMD load: reads memory,
1702 /// has 1 pointer argument, returns a vector.
1703 bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
1704 IRBuilder<> IRB(&I);
1705 Value *Addr = I.getArgOperand(0);
1707 Type *ShadowTy = getShadowTy(&I);
1708 if (LoadShadow) {
1709 Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
1710 // We don't know the pointer alignment (could be an unaligned SSE load!).
1711 // Have to assume the worst case.
1712 setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, 1, "_msld"));
1713 } else {
1714 setShadow(&I, getCleanShadow(&I));
1715 }
1717 if (ClCheckAccessAddress)
1718 insertShadowCheck(Addr, &I);
1720 if (MS.TrackOrigins) {
1721 if (LoadShadow)
1722 setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB)));
1723 else
1724 setOrigin(&I, getCleanOrigin());
1725 }
1726 return true;
1727 }
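// An intrinsic this would match (hypothetical example):
//   %v = call <4 x float> @llvm.x86.sse.loadu.ps(i8* %p)
// The shadow of %v is loaded from the shadow of %p (alignment 1), and
// the origin, if tracked, from the corresponding origin mapping.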
1729 /// \brief Handle (SIMD arithmetic)-like intrinsics.
1731 /// Instrument intrinsics with any number of arguments of the same type,
1732 /// equal to the return type. The type should be simple (no aggregates or
1733 /// pointers; vectors are fine).
1734 /// Caller guarantees that this intrinsic does not access memory.
1735 bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
1736 Type *RetTy = I.getType();
1737 if (!(RetTy->isIntOrIntVectorTy() ||
1738 RetTy->isFPOrFPVectorTy() ||
1739 RetTy->isX86_MMXTy()))
1740 return false;
1742 unsigned NumArgOperands = I.getNumArgOperands();
1744 for (unsigned i = 0; i < NumArgOperands; ++i) {
1745 Type *Ty = I.getArgOperand(i)->getType();
1746 if (Ty != RetTy)
1747 return false;
1748 }
1750 IRBuilder<> IRB(&I);
1751 ShadowAndOriginCombiner SC(this, IRB);
1752 for (unsigned i = 0; i < NumArgOperands; ++i)
1753 SC.Add(I.getArgOperand(i));
1754 SC.Done(&I);
1756 return true;
1757 }
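// E.g. (illustrative): "%r = call <2 x double> @llvm.x86.sse2.min.pd(
// <2 x double> %a, <2 x double> %b)" returns the argument type and reads
// no memory, so it is handled like ordinary arithmetic: Sr = Sa | Sb,
// with origins combined the usual way.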
1759 /// \brief Heuristically instrument unknown intrinsics.
1761 /// The main purpose of this code is to do something reasonable with all
1762 /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
1763 /// We recognize several classes of intrinsics by their argument types and
1764 /// ModRefBehavior and apply special instrumentation when we are reasonably
1765 /// sure that we know what the intrinsic does.
1767 /// We special-case intrinsics where this approach fails. See llvm.bswap
1768 /// handling as an example of that.
1769 bool handleUnknownIntrinsic(IntrinsicInst &I) {
1770 unsigned NumArgOperands = I.getNumArgOperands();
1771 if (NumArgOperands == 0)
1772 return false;
1774 Intrinsic::ID iid = I.getIntrinsicID();
1775 IntrinsicKind IK = getIntrinsicKind(iid);
1776 bool OnlyReadsMemory = IK == IK_OnlyReadsMemory;
1777 bool WritesMemory = IK == IK_WritesMemory;
1778 assert(!(OnlyReadsMemory && WritesMemory));
1780 if (NumArgOperands == 2 &&
1781 I.getArgOperand(0)->getType()->isPointerTy() &&
1782 I.getArgOperand(1)->getType()->isVectorTy() &&
1783 I.getType()->isVoidTy() &&
1784 WritesMemory) {
1785 // This looks like a vector store.
1786 return handleVectorStoreIntrinsic(I);
1787 }
1789 if (NumArgOperands == 1 &&
1790 I.getArgOperand(0)->getType()->isPointerTy() &&
1791 I.getType()->isVectorTy() &&
1792 OnlyReadsMemory) {
1793 // This looks like a vector load.
1794 return handleVectorLoadIntrinsic(I);
1795 }
1797 if (!OnlyReadsMemory && !WritesMemory)
1798 if (maybeHandleSimpleNomemIntrinsic(I))
1799 return true;
1801 // FIXME: detect and handle SSE maskstore/maskload
1802 return false;
1803 }
1805 void handleBswap(IntrinsicInst &I) {
1806 IRBuilder<> IRB(&I);
1807 Value *Op = I.getArgOperand(0);
1808 Type *OpType = Op->getType();
1809 Function *BswapFunc = Intrinsic::getDeclaration(
1810 F.getParent(), Intrinsic::bswap, ArrayRef<Type*>(&OpType, 1));
1811 setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
1812 setOrigin(&I, getOrigin(Op));
1813 }
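// E.g.: for "%r = call i32 @llvm.bswap.i32(i32 %x)" the shadow is
// "call i32 @llvm.bswap.i32(i32 Sx)": byte-swapping permutes poisoned
// bytes into their new positions without creating or destroying any.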
1815 // \brief Instrument vector convert intrinsic.
1817 // This function instruments intrinsics like cvtsi2ss:
1818 // %Out = int_xxx_cvtyyy(%ConvertOp)
1819 // or
1820 // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
1821 // Intrinsic converts \p NumUsedElements elements of \p ConvertOp to the same
1822 // number of \p Out elements, and (if it has 2 arguments) copies the rest of
1823 // the elements from \p CopyOp.
1824 // In most cases conversion involves a floating-point value which may trigger a
1825 // hardware exception when not fully initialized. For this reason we require
1826 // \p ConvertOp[0:NumUsedElements] to be fully initialized and trap otherwise.
1827 // We copy the shadow of \p CopyOp[NumUsedElements:] to \p
1828 // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp always
1829 // return a fully initialized value.
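// A sketch of one matched intrinsic (SSE scalar convert):
//   %r = call <4 x float> @llvm.x86.sse.cvtsi2ss(<4 x float> %a, i32 %x)
// Here ConvertOp is %x and must be fully initialized (the inserted check
// may abort), while result elements 1..3 simply inherit the shadow of
// the CopyOp %a; element 0 of the result is considered fully initialized.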
1830 void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements) {
1831 IRBuilder<> IRB(&I);
1832 Value *CopyOp, *ConvertOp;
1834 switch (I.getNumArgOperands()) {
1835 case 2:
1836 CopyOp = I.getArgOperand(0);
1837 ConvertOp = I.getArgOperand(1);
1838 break;
1839 case 1:
1840 ConvertOp = I.getArgOperand(0);
1841 CopyOp = nullptr;
1842 break;
1843 default:
1844 llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
1845 }
1847 // The first *NumUsedElements* elements of ConvertOp are converted to the
1848 // same number of output elements. The rest of the output is copied from
1849 // CopyOp, or (if not available) filled with zeroes.
1850 // Combine shadow for elements of ConvertOp that are used in this operation,
1851 // and insert a check.
1852 // FIXME: consider propagating shadow of ConvertOp, at least in the case of
1853 // int->any conversion.
1854 Value *ConvertShadow = getShadow(ConvertOp);
1855 Value *AggShadow = nullptr;
1856 if (ConvertOp->getType()->isVectorTy()) {
1857 AggShadow = IRB.CreateExtractElement(
1858 ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
1859 for (int i = 1; i < NumUsedElements; ++i) {
1860 Value *MoreShadow = IRB.CreateExtractElement(
1861 ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
1862 AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
1863 }
1864 } else {
1865 AggShadow = ConvertShadow;
1866 }
1867 assert(AggShadow->getType()->isIntegerTy());
1868 insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);
1870 // Build result shadow by zero-filling parts of CopyOp shadow that come from
1871 // ConvertOp.
1872 if (CopyOp) {
1873 assert(CopyOp->getType() == I.getType());
1874 assert(CopyOp->getType()->isVectorTy());
1875 Value *ResultShadow = getShadow(CopyOp);
1876 Type *EltTy = ResultShadow->getType()->getVectorElementType();
1877 for (int i = 0; i < NumUsedElements; ++i) {
1878 ResultShadow = IRB.CreateInsertElement(
1879 ResultShadow, ConstantInt::getNullValue(EltTy),
1880 ConstantInt::get(IRB.getInt32Ty(), i));
1881 }
1882 setShadow(&I, ResultShadow);
1883 setOrigin(&I, getOrigin(CopyOp));
1884 } else {
1885 setShadow(&I, getCleanShadow(&I));
1886 }
1887 }
1889 // Given a scalar or vector, extract lower 64 bits (or less), and return all
1890 // zeroes if it is zero, and all ones otherwise.
1891 Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
1892 if (S->getType()->isVectorTy())
1893 S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
1894 assert(S->getType()->getPrimitiveSizeInBits() <= 64);
1895 Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
1896 return CreateShadowCast(IRB, S2, T, /* Signed */ true);
1897 }
1899 Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
1900 Type *T = S->getType();
1901 assert(T->isVectorTy());
1902 Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
1903 return IRB.CreateSExt(S2, T);
1904 }
1906 // \brief Instrument vector shift intrinsic.
1908 // This function instruments intrinsics like int_x86_avx2_psll_w.
1909 // Intrinsic shifts %In by %ShiftSize bits.
1910 // %ShiftSize may be a vector. In that case the lower 64 bits determine shift
1911 // size, and the rest is ignored. Behavior is defined even if shift size is
1912 // greater than register (or field) width.
1913 void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
1914 assert(I.getNumArgOperands() == 2);
1915 IRBuilder<> IRB(&I);
1916 // If any of the S2 bits are poisoned, the whole thing is poisoned.
1917 // Otherwise perform the same shift on S1.
1918 Value *S1 = getShadow(&I, 0);
1919 Value *S2 = getShadow(&I, 1);
1920 Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
1921 : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
1922 Value *V1 = I.getOperand(0);
1923 Value *V2 = I.getOperand(1);
1924 Value *Shift = IRB.CreateCall2(I.getCalledValue(),
1925 IRB.CreateBitCast(S1, V1->getType()), V2);
1926 Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
1927 setShadow(&I, IRB.CreateOr(Shift, S2Conv));
1928 setOriginForNaryOp(I);
1929 }
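// E.g. (illustrative):
//   %r = call <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32> %x, <4 x i32> %n)
// applies the same psll.d to Sx; if the low 64 bits of Sn are not clean,
// Lower64ShadowExtend makes S2Conv all-ones and the whole result is
// poisoned, since the hardware shift count comes from those 64 bits.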
1931 // \brief Get an X86_MMX-sized vector type.
1932 Type *getMMXVectorTy(unsigned EltSizeInBits) {
1933 const unsigned X86_MMXSizeInBits = 64;
1934 return VectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
1935 X86_MMXSizeInBits / EltSizeInBits);
1936 }
1938 // \brief Returns a signed counterpart for an (un)signed-saturate-and-pack
1939 // intrinsic.
1940 Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
1941 switch (id) {
1942 case llvm::Intrinsic::x86_sse2_packsswb_128:
1943 case llvm::Intrinsic::x86_sse2_packuswb_128:
1944 return llvm::Intrinsic::x86_sse2_packsswb_128;
1946 case llvm::Intrinsic::x86_sse2_packssdw_128:
1947 case llvm::Intrinsic::x86_sse41_packusdw:
1948 return llvm::Intrinsic::x86_sse2_packssdw_128;
1950 case llvm::Intrinsic::x86_avx2_packsswb:
1951 case llvm::Intrinsic::x86_avx2_packuswb:
1952 return llvm::Intrinsic::x86_avx2_packsswb;
1954 case llvm::Intrinsic::x86_avx2_packssdw:
1955 case llvm::Intrinsic::x86_avx2_packusdw:
1956 return llvm::Intrinsic::x86_avx2_packssdw;
1958 case llvm::Intrinsic::x86_mmx_packsswb:
1959 case llvm::Intrinsic::x86_mmx_packuswb:
1960 return llvm::Intrinsic::x86_mmx_packsswb;
1962 case llvm::Intrinsic::x86_mmx_packssdw:
1963 return llvm::Intrinsic::x86_mmx_packssdw;
1964 default:
1965 llvm_unreachable("unexpected intrinsic id");
1966 }
1967 }
1969 // \brief Instrument vector pack intrinsic.
1971 // This function instruments intrinsics like x86_mmx_packsswb, which
1972 // pack elements of 2 input vectors into half as many bits with saturation.
1973 // Shadow is propagated with the signed variant of the same intrinsic applied
1974 // to sext(Sa != zeroinitializer), sext(Sb != zeroinitializer).
1975 // EltSizeInBits is used only for x86mmx arguments.
1976 void handleVectorPackIntrinsic(IntrinsicInst &I, unsigned EltSizeInBits = 0) {
1977 assert(I.getNumArgOperands() == 2);
1978 bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
1979 IRBuilder<> IRB(&I);
1980 Value *S1 = getShadow(&I, 0);
1981 Value *S2 = getShadow(&I, 1);
1982 assert(isX86_MMX || S1->getType()->isVectorTy());
1984 // SExt and ICmpNE below must apply to individual elements of input vectors.
1985 // In case of x86mmx arguments, cast them to appropriate vector types and
1986 // back.
1987 Type *T = isX86_MMX ? getMMXVectorTy(EltSizeInBits) : S1->getType();
1988 if (isX86_MMX) {
1989 S1 = IRB.CreateBitCast(S1, T);
1990 S2 = IRB.CreateBitCast(S2, T);
1991 }
1992 Value *S1_ext = IRB.CreateSExt(
1993 IRB.CreateICmpNE(S1, llvm::Constant::getNullValue(T)), T);
1994 Value *S2_ext = IRB.CreateSExt(
1995 IRB.CreateICmpNE(S2, llvm::Constant::getNullValue(T)), T);
1996 if (isX86_MMX) {
1997 Type *X86_MMXTy = Type::getX86_MMXTy(*MS.C);
1998 S1_ext = IRB.CreateBitCast(S1_ext, X86_MMXTy);
1999 S2_ext = IRB.CreateBitCast(S2_ext, X86_MMXTy);
2000 }
2002 Function *ShadowFn = Intrinsic::getDeclaration(
2003 F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));
2005 Value *S = IRB.CreateCall2(ShadowFn, S1_ext, S2_ext, "_msprop_vector_pack");
2006 if (isX86_MMX) S = IRB.CreateBitCast(S, getShadowTy(&I));
2007 setShadow(&I, S);
2008 setOriginForNaryOp(I);
2009 }
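// E.g. (sketch): for packsswb(%a, %b) an output byte must be poisoned
// iff its source word has any poisoned bit. sext(icmp ne Sa, 0)
// saturates each element to 0 or -1, and the *signed* pack keeps that
// all-zeroes/all-ones pattern intact in the narrowed elements, which is
// why the unsigned variants are mapped to their signed counterparts.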
2011 void visitIntrinsicInst(IntrinsicInst &I) {
2012 switch (I.getIntrinsicID()) {
2013 case llvm::Intrinsic::bswap:
2014 handleBswap(I);
2015 break;
2016 case llvm::Intrinsic::x86_avx512_cvtsd2usi64:
2017 case llvm::Intrinsic::x86_avx512_cvtsd2usi:
2018 case llvm::Intrinsic::x86_avx512_cvtss2usi64:
2019 case llvm::Intrinsic::x86_avx512_cvtss2usi:
2020 case llvm::Intrinsic::x86_avx512_cvttss2usi64:
2021 case llvm::Intrinsic::x86_avx512_cvttss2usi:
2022 case llvm::Intrinsic::x86_avx512_cvttsd2usi64:
2023 case llvm::Intrinsic::x86_avx512_cvttsd2usi:
2024 case llvm::Intrinsic::x86_avx512_cvtusi2sd:
2025 case llvm::Intrinsic::x86_avx512_cvtusi2ss:
2026 case llvm::Intrinsic::x86_avx512_cvtusi642sd:
2027 case llvm::Intrinsic::x86_avx512_cvtusi642ss:
2028 case llvm::Intrinsic::x86_sse2_cvtsd2si64:
2029 case llvm::Intrinsic::x86_sse2_cvtsd2si:
2030 case llvm::Intrinsic::x86_sse2_cvtsd2ss:
2031 case llvm::Intrinsic::x86_sse2_cvtsi2sd:
2032 case llvm::Intrinsic::x86_sse2_cvtsi642sd:
2033 case llvm::Intrinsic::x86_sse2_cvtss2sd:
2034 case llvm::Intrinsic::x86_sse2_cvttsd2si64:
2035 case llvm::Intrinsic::x86_sse2_cvttsd2si:
2036 case llvm::Intrinsic::x86_sse_cvtsi2ss:
2037 case llvm::Intrinsic::x86_sse_cvtsi642ss:
2038 case llvm::Intrinsic::x86_sse_cvtss2si64:
2039 case llvm::Intrinsic::x86_sse_cvtss2si:
2040 case llvm::Intrinsic::x86_sse_cvttss2si64:
2041 case llvm::Intrinsic::x86_sse_cvttss2si:
2042 handleVectorConvertIntrinsic(I, 1);
2043 break;
2044 case llvm::Intrinsic::x86_sse2_cvtdq2pd:
2045 case llvm::Intrinsic::x86_sse2_cvtps2pd:
2046 case llvm::Intrinsic::x86_sse_cvtps2pi:
2047 case llvm::Intrinsic::x86_sse_cvttps2pi:
2048 handleVectorConvertIntrinsic(I, 2);
2049 break;
2050 case llvm::Intrinsic::x86_avx512_psll_dq:
2051 case llvm::Intrinsic::x86_avx512_psrl_dq:
2052 case llvm::Intrinsic::x86_avx2_psll_w:
2053 case llvm::Intrinsic::x86_avx2_psll_d:
2054 case llvm::Intrinsic::x86_avx2_psll_q:
2055 case llvm::Intrinsic::x86_avx2_pslli_w:
2056 case llvm::Intrinsic::x86_avx2_pslli_d:
2057 case llvm::Intrinsic::x86_avx2_pslli_q:
2058 case llvm::Intrinsic::x86_avx2_psll_dq:
2059 case llvm::Intrinsic::x86_avx2_psrl_w:
2060 case llvm::Intrinsic::x86_avx2_psrl_d:
2061 case llvm::Intrinsic::x86_avx2_psrl_q:
2062 case llvm::Intrinsic::x86_avx2_psra_w:
2063 case llvm::Intrinsic::x86_avx2_psra_d:
2064 case llvm::Intrinsic::x86_avx2_psrli_w:
2065 case llvm::Intrinsic::x86_avx2_psrli_d:
2066 case llvm::Intrinsic::x86_avx2_psrli_q:
2067 case llvm::Intrinsic::x86_avx2_psrai_w:
2068 case llvm::Intrinsic::x86_avx2_psrai_d:
2069 case llvm::Intrinsic::x86_avx2_psrl_dq:
2070 case llvm::Intrinsic::x86_sse2_psll_w:
2071 case llvm::Intrinsic::x86_sse2_psll_d:
2072 case llvm::Intrinsic::x86_sse2_psll_q:
2073 case llvm::Intrinsic::x86_sse2_pslli_w:
2074 case llvm::Intrinsic::x86_sse2_pslli_d:
2075 case llvm::Intrinsic::x86_sse2_pslli_q:
2076 case llvm::Intrinsic::x86_sse2_psll_dq:
2077 case llvm::Intrinsic::x86_sse2_psrl_w:
2078 case llvm::Intrinsic::x86_sse2_psrl_d:
2079 case llvm::Intrinsic::x86_sse2_psrl_q:
2080 case llvm::Intrinsic::x86_sse2_psra_w:
2081 case llvm::Intrinsic::x86_sse2_psra_d:
2082 case llvm::Intrinsic::x86_sse2_psrli_w:
2083 case llvm::Intrinsic::x86_sse2_psrli_d:
2084 case llvm::Intrinsic::x86_sse2_psrli_q:
2085 case llvm::Intrinsic::x86_sse2_psrai_w:
2086 case llvm::Intrinsic::x86_sse2_psrai_d:
2087 case llvm::Intrinsic::x86_sse2_psrl_dq:
2088 case llvm::Intrinsic::x86_mmx_psll_w:
2089 case llvm::Intrinsic::x86_mmx_psll_d:
2090 case llvm::Intrinsic::x86_mmx_psll_q:
2091 case llvm::Intrinsic::x86_mmx_pslli_w:
2092 case llvm::Intrinsic::x86_mmx_pslli_d:
2093 case llvm::Intrinsic::x86_mmx_pslli_q:
2094 case llvm::Intrinsic::x86_mmx_psrl_w:
2095 case llvm::Intrinsic::x86_mmx_psrl_d:
2096 case llvm::Intrinsic::x86_mmx_psrl_q:
2097 case llvm::Intrinsic::x86_mmx_psra_w:
2098 case llvm::Intrinsic::x86_mmx_psra_d:
2099 case llvm::Intrinsic::x86_mmx_psrli_w:
2100 case llvm::Intrinsic::x86_mmx_psrli_d:
2101 case llvm::Intrinsic::x86_mmx_psrli_q:
2102 case llvm::Intrinsic::x86_mmx_psrai_w:
2103 case llvm::Intrinsic::x86_mmx_psrai_d:
2104 handleVectorShiftIntrinsic(I, /* Variable */ false);
2105 break;
2106 case llvm::Intrinsic::x86_avx2_psllv_d:
2107 case llvm::Intrinsic::x86_avx2_psllv_d_256:
2108 case llvm::Intrinsic::x86_avx2_psllv_q:
2109 case llvm::Intrinsic::x86_avx2_psllv_q_256:
2110 case llvm::Intrinsic::x86_avx2_psrlv_d:
2111 case llvm::Intrinsic::x86_avx2_psrlv_d_256:
2112 case llvm::Intrinsic::x86_avx2_psrlv_q:
2113 case llvm::Intrinsic::x86_avx2_psrlv_q_256:
2114 case llvm::Intrinsic::x86_avx2_psrav_d:
2115 case llvm::Intrinsic::x86_avx2_psrav_d_256:
2116 handleVectorShiftIntrinsic(I, /* Variable */ true);
2117 break;
2119 // Byte shifts are not implemented.
2120 // case llvm::Intrinsic::x86_avx512_psll_dq_bs:
2121 // case llvm::Intrinsic::x86_avx512_psrl_dq_bs:
2122 // case llvm::Intrinsic::x86_avx2_psll_dq_bs:
2123 // case llvm::Intrinsic::x86_avx2_psrl_dq_bs:
2124 // case llvm::Intrinsic::x86_sse2_psll_dq_bs:
2125 // case llvm::Intrinsic::x86_sse2_psrl_dq_bs:
2127 case llvm::Intrinsic::x86_sse2_packsswb_128:
2128 case llvm::Intrinsic::x86_sse2_packssdw_128:
2129 case llvm::Intrinsic::x86_sse2_packuswb_128:
2130 case llvm::Intrinsic::x86_sse41_packusdw:
2131 case llvm::Intrinsic::x86_avx2_packsswb:
2132 case llvm::Intrinsic::x86_avx2_packssdw:
2133 case llvm::Intrinsic::x86_avx2_packuswb:
2134 case llvm::Intrinsic::x86_avx2_packusdw:
2135 handleVectorPackIntrinsic(I);
2136 break;
2138 case llvm::Intrinsic::x86_mmx_packsswb:
2139 case llvm::Intrinsic::x86_mmx_packuswb:
2140 handleVectorPackIntrinsic(I, 16);
2141 break;
2143 case llvm::Intrinsic::x86_mmx_packssdw:
2144 handleVectorPackIntrinsic(I, 32);
2145 break;
2147 default:
2148 if (!handleUnknownIntrinsic(I))
2149 visitInstruction(I);
2150 break;
2151 }
2152 }
2154 void visitCallSite(CallSite CS) {
2155 Instruction &I = *CS.getInstruction();
2156 assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite");
2157 if (CS.isCall()) {
2158 CallInst *Call = cast<CallInst>(&I);
2160 // For inline asm, do the usual thing: check argument shadow and mark all
2161 // outputs as clean. Note that any side effects of the inline asm that are
2162 // not immediately visible in its constraints are not handled.
2163 if (Call->isInlineAsm()) {
2164 visitInstruction(I);
2165 return;
2166 }
2168 // Allow only tail calls with the same types, otherwise
2169 // we may have a false positive: shadow for a non-void RetVal
2170 // will get propagated to a void RetVal.
2171 if (Call->isTailCall() && Call->getType() != Call->getParent()->getType())
2172 Call->setTailCall(false);
2174 assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");
2176 // We are going to insert code that relies on the fact that the callee
2177 // will become a non-readonly function after it is instrumented by us. To
2178 // prevent this code from being optimized out, mark that function
2179 // non-readonly in advance.
2180 if (Function *Func = Call->getCalledFunction()) {
2181 // Clear out readonly/readnone attributes.
2182 AttrBuilder B;
2183 B.addAttribute(Attribute::ReadOnly)
2184 .addAttribute(Attribute::ReadNone);
2185 Func->removeAttributes(AttributeSet::FunctionIndex,
2186 AttributeSet::get(Func->getContext(),
2187 AttributeSet::FunctionIndex,
2188 B));
2189 }
2190 }
2191 IRBuilder<> IRB(&I);
2193 if (MS.WrapIndirectCalls && !CS.getCalledFunction())
2194 IndirectCallList.push_back(CS);
2196 unsigned ArgOffset = 0;
2197 DEBUG(dbgs() << " CallSite: " << I << "\n");
2198 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
2199 ArgIt != End; ++ArgIt) {
2200 Value *A = *ArgIt;
2201 unsigned i = ArgIt - CS.arg_begin();
2202 if (!A->getType()->isSized()) {
2203 DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
2204 continue;
2205 }
2206 unsigned Size = 0;
2207 Value *Store = nullptr;
2208 // Compute the Shadow for arg even if it is ByVal, because
2209 // in that case getShadow() will copy the actual arg shadow to
2210 // __msan_param_tls.
2211 Value *ArgShadow = getShadow(A);
2212 Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
2213 DEBUG(dbgs() << " Arg#" << i << ": " << *A <<
2214 " Shadow: " << *ArgShadow << "\n");
2215 if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
2216 assert(A->getType()->isPointerTy() &&
2217 "ByVal argument is not a pointer!");
2218 Size = MS.DL->getTypeAllocSize(A->getType()->getPointerElementType());
2219 unsigned Alignment = CS.getParamAlignment(i + 1);
2220 Store = IRB.CreateMemCpy(ArgShadowBase,
2221 getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
2222 Size, Alignment);
2223 } else {
2224 Size = MS.DL->getTypeAllocSize(A->getType());
2225 Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
2226 kShadowTLSAlignment);
2227 }
2228 if (MS.TrackOrigins)
2229 IRB.CreateStore(getOrigin(A),
2230 getOriginPtrForArgument(A, IRB, ArgOffset));
2232 assert(Size != 0 && Store != nullptr);
2233 DEBUG(dbgs() << " Param:" << *Store << "\n");
2234 ArgOffset += DataLayout::RoundUpAlignment(Size, 8);
2235 }
2236 DEBUG(dbgs() << "  done with call args\n");
2238 FunctionType *FT =
2239 cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0));
2240 if (FT->isVarArg()) {
2241 VAHelper->visitCallSite(CS, IRB);
2242 }
2244 // Now, get the shadow for the RetVal.
2245 if (!I.getType()->isSized()) return;
2246 IRBuilder<> IRBBefore(&I);
2247 // Until we have full dynamic coverage, make sure the retval shadow is 0.
2248 Value *Base = getShadowPtrForRetval(&I, IRBBefore);
2249 IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
2250 Instruction *NextInsn = nullptr;
2251 if (CS.isCall()) {
2252 NextInsn = I.getNextNode();
2253 } else {
2254 BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
2255 if (!NormalDest->getSinglePredecessor()) {
2256 // FIXME: this case is tricky, so we are just conservative here.
2257 // Perhaps we need to split the edge between this BB and NormalDest,
2258 // but a naive attempt to use SplitEdge leads to a crash.
2259 setShadow(&I, getCleanShadow(&I));
2260 setOrigin(&I, getCleanOrigin());
2261 return;
2262 }
2263 NextInsn = NormalDest->getFirstInsertionPt();
2264 assert(NextInsn &&
2265 "Could not find insertion point for retval shadow load");
2266 }
2267 IRBuilder<> IRBAfter(NextInsn);
2268 Value *RetvalShadow =
2269 IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
2270 kShadowTLSAlignment, "_msret");
2271 setShadow(&I, RetvalShadow);
2272 if (MS.TrackOrigins)
2273 setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
2274 }
2276 void visitReturnInst(ReturnInst &I) {
2277 IRBuilder<> IRB(&I);
2278 Value *RetVal = I.getReturnValue();
2279 if (!RetVal) return;
2280 Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
2281 if (CheckReturnValue) {
2282 insertShadowCheck(RetVal, &I);
2283 Value *Shadow = getCleanShadow(RetVal);
2284 IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
2285 } else {
2286 Value *Shadow = getShadow(RetVal);
2287 IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
2288 // FIXME: make it conditional if ClStoreCleanOrigin==0
2289 if (MS.TrackOrigins)
2290 IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
2291 }
2292 }
2294 void visitPHINode(PHINode &I) {
2295 IRBuilder<> IRB(&I);
2296 ShadowPHINodes.push_back(&I);
2297 setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
2298 "_msphi_s"));
2299 if (MS.TrackOrigins)
2300 setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
2301 "_msphi_o"));
2302 }
2304 void visitAllocaInst(AllocaInst &I) {
2305 setShadow(&I, getCleanShadow(&I));
2306 IRBuilder<> IRB(I.getNextNode());
2307 uint64_t Size = MS.DL->getTypeAllocSize(I.getAllocatedType());
2308 if (PoisonStack && ClPoisonStackWithCall) {
2309 IRB.CreateCall2(MS.MsanPoisonStackFn,
2310 IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
2311 ConstantInt::get(MS.IntptrTy, Size));
2312 } else {
2313 Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB);
2314 Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
2315 IRB.CreateMemSet(ShadowBase, PoisonValue, Size, I.getAlignment());
2316 }
2318 if (PoisonStack && MS.TrackOrigins) {
2319 setOrigin(&I, getCleanOrigin());
2320 SmallString<2048> StackDescriptionStorage;
2321 raw_svector_ostream StackDescription(StackDescriptionStorage);
2322 // We create a string with a description of the stack allocation and
2323 // pass it into __msan_set_alloca_origin.
2324 // It will be printed by the run-time if stack-originated UMR is found.
2325 // The first 4 bytes of the string are set to '----' and will be replaced
2326 // by __msan_va_arg_overflow_size_tls at the first call.
2327 StackDescription << "----" << I.getName() << "@" << F.getName();
2328 Value *Descr =
2329 createPrivateNonConstGlobalForString(*F.getParent(),
2330 StackDescription.str());
2332 IRB.CreateCall4(MS.MsanSetAllocaOrigin4Fn,
2333 IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
2334 ConstantInt::get(MS.IntptrTy, Size),
2335 IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
2336 IRB.CreatePointerCast(&F, MS.IntptrTy));
2337 }
2338 }
2340 void visitSelectInst(SelectInst& I) {
2341 IRBuilder<> IRB(&I);
2342 // a = select b, c, d
2343 Value *B = I.getCondition();
2344 Value *C = I.getTrueValue();
2345 Value *D = I.getFalseValue();
2346 Value *Sb = getShadow(B);
2347 Value *Sc = getShadow(C);
2348 Value *Sd = getShadow(D);
2350 // Result shadow if condition shadow is 0.
2351 Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
2352 Value *Sa1;
2353 if (I.getType()->isAggregateType()) {
2354 // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do
2355 // an extra "select". This results in much more compact IR.
2356 // Sa = select Sb, poisoned, (select b, Sc, Sd)
2357 Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
2358 } else {
2359 // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
2360 // If Sb (condition is poisoned), look for bits in c and d that are equal
2361 // and both unpoisoned.
2362 // If !Sb (condition is unpoisoned), simply pick one of Sc and Sd.
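// Worked bit example (made-up values): with b poisoned, c = 0b1100
// (Sc = 0) and d = 0b1010 (Sd = 0), (c^d)|Sc|Sd = 0b0110: the two bits
// where c and d agree are defined no matter which way the select would
// have gone; the differing bits are poisoned.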
2364 // Cast arguments to shadow-compatible type.
2365 C = CreateAppToShadowCast(IRB, C);
2366 D = CreateAppToShadowCast(IRB, D);
2368 // Result shadow if condition shadow is 1.
2369 Sa1 = IRB.CreateOr(IRB.CreateXor(C, D), IRB.CreateOr(Sc, Sd));
2370 }
2371 Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
2372 setShadow(&I, Sa);
2373 if (MS.TrackOrigins) {
2374 // Origins are always i32, so any vector conditions must be flattened.
2375 // FIXME: consider tracking vector origins for app vectors?
2376 if (B->getType()->isVectorTy()) {
2377 Type *FlatTy = getShadowTyNoVec(B->getType());
2378 B = IRB.CreateICmpNE(IRB.CreateBitCast(B, FlatTy),
2379 ConstantInt::getNullValue(FlatTy));
2380 Sb = IRB.CreateICmpNE(IRB.CreateBitCast(Sb, FlatTy),
2381 ConstantInt::getNullValue(FlatTy));
2382 }
2383 // a = select b, c, d
2384 // Oa = Sb ? Ob : (b ? Oc : Od)
2385 setOrigin(&I, IRB.CreateSelect(
2386 Sb, getOrigin(I.getCondition()),
2387 IRB.CreateSelect(B, getOrigin(C), getOrigin(D))));
2388 }
2389 }
2391 void visitLandingPadInst(LandingPadInst &I) {
2392 // Do nothing.
2393 // See http://code.google.com/p/memory-sanitizer/issues/detail?id=1
2394 setShadow(&I, getCleanShadow(&I));
2395 setOrigin(&I, getCleanOrigin());
2396 }
2398 void visitGetElementPtrInst(GetElementPtrInst &I) {
2399 handleShadowOr(I);
2400 }
2402 void visitExtractValueInst(ExtractValueInst &I) {
2403 IRBuilder<> IRB(&I);
2404 Value *Agg = I.getAggregateOperand();
2405 DEBUG(dbgs() << "ExtractValue: " << I << "\n");
2406 Value *AggShadow = getShadow(Agg);
2407 DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n");
2408 Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
2409 DEBUG(dbgs() << " ResShadow: " << *ResShadow << "\n");
2410 setShadow(&I, ResShadow);
2411 setOriginForNaryOp(I);
2412 }
2414 void visitInsertValueInst(InsertValueInst &I) {
2415 IRBuilder<> IRB(&I);
2416 DEBUG(dbgs() << "InsertValue: " << I << "\n");
2417 Value *AggShadow = getShadow(I.getAggregateOperand());
2418 Value *InsShadow = getShadow(I.getInsertedValueOperand());
2419 DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n");
2420 DEBUG(dbgs() << " InsShadow: " << *InsShadow << "\n");
2421 Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
2422 DEBUG(dbgs() << "   Res:       " << *Res << "\n");
2423 setShadow(&I, Res);
2424 setOriginForNaryOp(I);
2425 }
2427 void dumpInst(Instruction &I) {
2428 if (CallInst *CI = dyn_cast<CallInst>(&I)) {
2429 errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
2430 } else {
2431 errs() << "ZZZ " << I.getOpcodeName() << "\n";
2432 }
2433 errs() << "QQQ " << I << "\n";
2434 }
2436 void visitResumeInst(ResumeInst &I) {
2437 DEBUG(dbgs() << "Resume: " << I << "\n");
2438 // Nothing to do here.
2439 }
2441 void visitInstruction(Instruction &I) {
2442 // Everything else: stop propagating and check for poisoned shadow.
2443 if (ClDumpStrictInstructions)
2444 dumpInst(I);
2445 DEBUG(dbgs() << "DEFAULT: " << I << "\n");
2446 for (size_t i = 0, n = I.getNumOperands(); i < n; i++)
2447 insertShadowCheck(I.getOperand(i), &I);
2448 setShadow(&I, getCleanShadow(&I));
2449 setOrigin(&I, getCleanOrigin());
2450 }
2451 };
2453 /// \brief AMD64-specific implementation of VarArgHelper.
2454 struct VarArgAMD64Helper : public VarArgHelper {
2455 // An unfortunate workaround for asymmetric lowering of va_arg stuff.
2456 // See a comment in visitCallSite for more details.
2457 static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7
2458 static const unsigned AMD64FpEndOffset = 176;
2460 Function &F;
2461 MemorySanitizer &MS;
2462 MemorySanitizerVisitor &MSV;
2463 Value *VAArgTLSCopy;
2464 Value *VAArgOverflowSize;
2466 SmallVector<CallInst*, 16> VAStartInstrumentationList;
2468 VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
2469 MemorySanitizerVisitor &MSV)
2470 : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
2471 VAArgOverflowSize(nullptr) {}
2473 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
2475 ArgKind classifyArgument(Value* arg) {
2476 // A very rough approximation of X86_64 argument classification rules.
2477 Type *T = arg->getType();
2478 if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
2479 return AK_FloatingPoint;
2480 if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
2481 return AK_GeneralPurpose;
2482 if (T->isPointerTy())
2483 return AK_GeneralPurpose;
2484 return AK_Memory;
2485 }
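// E.g.: i32 and i8* arguments classify as AK_GeneralPurpose (integer
// registers), double as AK_FloatingPoint (SSE registers), while an i128
// falls through to AK_Memory and is passed via the overflow area. This
// is only a rough subset of the real AMD64 classification rules.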
2487 // For VarArg functions, store the argument shadow in an ABI-specific format
2488 // that corresponds to va_list layout.
2489 // We do this because Clang lowers va_arg in the frontend, and this pass
2490 // only sees the low level code that deals with va_list internals.
2491 // A much easier alternative (provided that Clang emits va_arg instructions)
2492 // would have been to associate each live instance of va_list with a copy of
2493 // MSanParamTLS, and extract shadow on va_arg() call in the argument list
2494 // order.
2495 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
2496 unsigned GpOffset = 0;
2497 unsigned FpOffset = AMD64GpEndOffset;
2498 unsigned OverflowOffset = AMD64FpEndOffset;
2499 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
2500 ArgIt != End; ++ArgIt) {
2501 Value *A = *ArgIt;
2502 unsigned ArgNo = CS.getArgumentNo(ArgIt);
2503 bool IsByVal = CS.paramHasAttr(ArgNo + 1, Attribute::ByVal);
2504 if (IsByVal) {
2505 // ByVal arguments always go to the overflow area.
2506 assert(A->getType()->isPointerTy());
2507 Type *RealTy = A->getType()->getPointerElementType();
2508 uint64_t ArgSize = MS.DL->getTypeAllocSize(RealTy);
2509 Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
2510 OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8);
2511 IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
2512 ArgSize, kShadowTLSAlignment);
2513 } else {
2514 ArgKind AK = classifyArgument(A);
2515 if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
2516 AK = AK_Memory;
2517 if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
2518 AK = AK_Memory;
2519 Value *Base;
2520 switch (AK) {
2521 case AK_GeneralPurpose:
2522 Base = getShadowPtrForVAArgument(A->getType(), IRB, GpOffset);
2523 GpOffset += 8;
2524 break;
2525 case AK_FloatingPoint:
2526 Base = getShadowPtrForVAArgument(A->getType(), IRB, FpOffset);
2527 FpOffset += 16;
2528 break;
2529 case AK_Memory:
2530 uint64_t ArgSize = MS.DL->getTypeAllocSize(A->getType());
2531 Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
2532 OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8);
2533 }
2534 IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
2535 }
2536 }
2537 Constant *OverflowSize =
2538 ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
2539 IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
2540 }
2542 /// \brief Compute the shadow address for a given va_arg.
2543 Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
2544 int ArgOffset) {
2545 Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
2546 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
2547 return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
2548 "_msarg");
2549 }
2551 void visitVAStartInst(VAStartInst &I) override {
2552 IRBuilder<> IRB(&I);
2553 VAStartInstrumentationList.push_back(&I);
2554 Value *VAListTag = I.getArgOperand(0);
2555 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
2557 // Unpoison the whole __va_list_tag.
2558 // FIXME: magic ABI constants.
2559 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
2560 /* size */24, /* alignment */8, false);
2561 }
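// For reference, the 24 bytes above cover the x86_64 __va_list_tag
// layout assumed here (per the SysV AMD64 ABI):
//   struct __va_list_tag {
//     unsigned gp_offset;       // bytes  0..3
//     unsigned fp_offset;       // bytes  4..7
//     void *overflow_arg_area;  // bytes  8..15
//     void *reg_save_area;      // bytes 16..23
//   };
// The constants 8 and 16 in finalizeInstrumentation() below index the
// two pointer fields of this struct.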
2563 void visitVACopyInst(VACopyInst &I) override {
2564 IRBuilder<> IRB(&I);
2565 Value *VAListTag = I.getArgOperand(0);
2566 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
2568 // Unpoison the whole __va_list_tag.
2569 // FIXME: magic ABI constants.
2570 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
2571 /* size */24, /* alignment */8, false);
2572 }
2574 void finalizeInstrumentation() override {
2575 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
2576 "finalizeInstrumentation called twice");
2577 if (!VAStartInstrumentationList.empty()) {
2578 // If there is a va_start in this function, make a backup copy of
2579 // va_arg_tls somewhere in the function entry block.
2580 IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
2581 VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
2582 Value *CopySize =
2583 IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
2584 VAArgOverflowSize);
2585 VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
2586 IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
2587 }
2589 // Instrument va_start.
2590 // Copy va_list shadow from the backup copy of the TLS contents.
2591 for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
2592 CallInst *OrigInst = VAStartInstrumentationList[i];
2593 IRBuilder<> IRB(OrigInst->getNextNode());
2594 Value *VAListTag = OrigInst->getArgOperand(0);
2596 Value *RegSaveAreaPtrPtr =
2597 IRB.CreateIntToPtr(
2598 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
2599 ConstantInt::get(MS.IntptrTy, 16)),
2600 Type::getInt64PtrTy(*MS.C));
2601 Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
2602 Value *RegSaveAreaShadowPtr =
2603 MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
2604 IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy,
2605 AMD64FpEndOffset, 16);
2607 Value *OverflowArgAreaPtrPtr =
2608 IRB.CreateIntToPtr(
2609 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
2610 ConstantInt::get(MS.IntptrTy, 8)),
2611 Type::getInt64PtrTy(*MS.C));
2612 Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
2613 Value *OverflowArgAreaShadowPtr =
2614 MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
2615 Value *SrcPtr = IRB.CreateConstGEP1_32(VAArgTLSCopy, AMD64FpEndOffset);
2616 IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16);
2617 }
2618 }
2619 };
2621 /// \brief A no-op implementation of VarArgHelper.
2622 struct VarArgNoOpHelper : public VarArgHelper {
2623 VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
2624 MemorySanitizerVisitor &MSV) {}
2626 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {}
2628 void visitVAStartInst(VAStartInst &I) override {}
2630 void visitVACopyInst(VACopyInst &I) override {}
2632 void finalizeInstrumentation() override {}
2633 };
2635 VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
2636 MemorySanitizerVisitor &Visitor) {
2637 // VarArg handling is only implemented on AMD64. False positives are possible
2638 // on other platforms.
2639 llvm::Triple TargetTriple(Func.getParent()->getTargetTriple());
2640 if (TargetTriple.getArch() == llvm::Triple::x86_64)
2641 return new VarArgAMD64Helper(Func, Msan, Visitor);
2642 else
2643 return new VarArgNoOpHelper(Func, Msan, Visitor);
2644 }
2646 }  // namespace
2648 bool MemorySanitizer::runOnFunction(Function &F) {
2649 MemorySanitizerVisitor Visitor(F, *this);
2651 // Clear out readonly/readnone attributes.
2652 AttrBuilder B;
2653 B.addAttribute(Attribute::ReadOnly)
2654 .addAttribute(Attribute::ReadNone);
2655 F.removeAttributes(AttributeSet::FunctionIndex,
2656 AttributeSet::get(F.getContext(),
2657 AttributeSet::FunctionIndex, B));
2659 return Visitor.runOnFunction();
2660 }
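// Typical usage (illustrative): the pass is scheduled by the frontend via
//   clang -fsanitize=memory [-fsanitize-memory-track-origins] ...
// or run directly on IR with "opt -msan", tuned by the cl::opts defined
// near the top of this file (e.g. -msan-track-origins,
// -msan-check-access-address).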