//===-- MemorySanitizer.cpp - detector of uninitialized reads -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file is a part of MemorySanitizer, a detector of uninitialized
/// reads.
///
/// The algorithm of the tool is similar to Memcheck
/// (http://goo.gl/QKbem). We associate a few shadow bits with every
/// byte of the application memory, poison the shadow of the malloc-ed
/// or alloca-ed memory, load the shadow bits on every memory read,
/// propagate the shadow bits through some of the arithmetic
/// instructions (including MOV), store the shadow bits on every memory
/// write, report a bug on some other instructions (e.g. JMP) if the
/// associated shadow is poisoned.
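///
/// For example (an illustrative sketch; the %shadow_* names are ours, the
/// actual instrumentation names its values "_msprop"), for
///   %c = add i32 %a, %b
/// the shadow of the result is approximated as the bitwise OR of the
/// argument shadows (see handleShadowOr below):
///   %shadow_c = or i32 %shadow_a, %shadow_b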
///
/// But there are differences too. The first and the major one:
/// compiler instrumentation instead of binary instrumentation. This
/// gives us much better register allocation, possible compiler
/// optimizations and a fast start-up. But this brings the major issue
/// as well: msan needs to see all program events, including system
/// calls and reads/writes in system libraries, so we either need to
/// compile *everything* with msan or use a binary translation
/// component (e.g. DynamoRIO) to instrument pre-built libraries.
/// Another difference from Memcheck is that we use 8 shadow bits per
/// byte of application memory and use a direct shadow mapping. This
/// greatly simplifies the instrumentation code and avoids races on
/// shadow updates (Memcheck is single-threaded so races are not a
/// concern there. Memcheck uses 2 shadow bits per byte with a slow
/// path storage that uses 8 bits per byte).
///
/// The default value of shadow is 0, which means "clean" (not poisoned).
///
/// Every module initializer should call __msan_init to ensure that the
/// shadow memory is ready. On error, __msan_warning is called. Since
/// parameters and return values may be passed via registers, we have a
/// specialized thread-local shadow for return values
/// (__msan_retval_tls) and parameters (__msan_param_tls).
///
///                           Origins
///
/// MemorySanitizer can track origins (allocation points) of all uninitialized
/// values. This behavior is controlled with a flag (msan-track-origins) and is
/// disabled by default.
///
/// Origins are 4-byte values created and interpreted by the runtime library.
/// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
/// of application memory. Propagation of origins is basically a bunch of
/// "select" instructions that pick the origin of a dirty argument, if an
/// instruction has one.
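///
/// For example (illustrative sketch; register names are ours), for
/// "%c = add i32 %a, %b" the Combiner class below picks the origin of the
/// rightmost argument whose shadow is non-zero using a select:
///   %nonzero_b = icmp ne i32 %shadow_b, 0
///   %origin_c  = select i1 %nonzero_b, i32 %origin_b, i32 %origin_a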
///
/// Every 4 aligned, consecutive bytes of application memory have one origin
/// value associated with them. If these bytes contain uninitialized data
/// coming from 2 different allocations, the last store wins. Because of this,
/// MemorySanitizer reports can show unrelated origins, but this is unlikely in
/// practice.
///
/// Origins are meaningless for fully initialized values, so MemorySanitizer
/// avoids storing origin to memory when a fully initialized value is stored.
/// This way it avoids needless overwriting of the origin of the 4-byte region
/// on a short (i.e. 1-byte) clean store, and it is also good for performance.
///
///                            Atomic handling.
///
/// Ideally, every atomic store of application value should update the
/// corresponding shadow location in an atomic way. Unfortunately, atomic store
/// of two disjoint locations can not be done without severe slowdown.
///
/// Therefore, we implement an approximation that may err on the safe side.
/// In this implementation, every atomically accessed location in the program
/// may only change from (partially) uninitialized to fully initialized, but
/// not the other way around. We load the shadow _after_ the application load,
/// and we store the shadow _before_ the app store. Also, we always store clean
/// shadow (if the application store is atomic). This way, if the store-load
/// pair constitutes a happens-before arc, shadow store and load are correctly
/// ordered such that the load will get either the value that was stored, or
/// some later value (which is always clean).
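///
/// For example (illustrative sketch), an atomic release store
///   store atomic i32 %v, i32* %p release
/// is instrumented roughly as (see materializeStores below):
///   store i32 0, i32* %shadow_p        ; always-clean shadow, stored first
///   store atomic i32 %v, i32* %p release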
///
/// This does not work very well with Compare-And-Swap (CAS) and
/// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
/// must store the new shadow before the app operation, and load the shadow
/// after the app operation. Computers don't work this way. Current
/// implementation ignores the load aspect of CAS/RMW, always returning a clean
/// value. It implements the store part as a simple atomic store by storing a
/// clean shadow.

//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

#define DEBUG_TYPE "msan"
static const unsigned kOriginSize = 4;
static const unsigned kMinOriginAlignment = 4;
static const unsigned kShadowTLSAlignment = 8;

// These constants must be kept in sync with the ones in msan.h.
static const unsigned kParamTLSSize = 800;
static const unsigned kRetvalTLSSize = 800;

// Accesses sizes are powers of two: 1, 2, 4, 8.
static const size_t kNumberOfAccessSizes = 4;
/// \brief Track origins of uninitialized values.
///
/// Adds a section to MemorySanitizer report that points to the allocation
/// (stack or heap) the uninitialized bits came from originally.
static cl::opt<int> ClTrackOrigins("msan-track-origins",
       cl::desc("Track origins (allocation sites) of poisoned memory"),
       cl::Hidden, cl::init(0));
static cl::opt<bool> ClKeepGoing("msan-keep-going",
       cl::desc("keep going after reporting a UMR"),
       cl::Hidden, cl::init(false));
static cl::opt<bool> ClPoisonStack("msan-poison-stack",
       cl::desc("poison uninitialized stack variables"),
       cl::Hidden, cl::init(true));
static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
       cl::desc("poison uninitialized stack variables with a call"),
       cl::Hidden, cl::init(false));
static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
       cl::desc("poison uninitialized stack variables with the given pattern"),
       cl::Hidden, cl::init(0xff));
static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
       cl::desc("poison undef temps"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
       cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
       cl::desc("exact handling of relational integer ICmp"),
       cl::Hidden, cl::init(false));

// This flag controls whether we check the shadow of the address
// operand of load or store. Such bugs are very rare, since load from
// a garbage address typically results in SEGV, but still happen
// (e.g. only lower bits of address are garbage, or the access happens
// early at program startup where malloc-ed memory is more likely to
// be zeroed). As of 2012-08-28 this flag adds 20% slowdown.
static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
       cl::desc("report accesses through a pointer which has poisoned shadow"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
       cl::desc("print out instructions with default strict semantics"),
       cl::Hidden, cl::init(false));
static cl::opt<int> ClInstrumentationWithCallThreshold(
    "msan-instrumentation-with-call-threshold",
    cl::desc(
        "If the function being instrumented requires more than "
        "this number of checks and origin stores, use callbacks instead of "
        "inline checks (-1 means never use callbacks)."),
    cl::Hidden, cl::init(3500));
// This is an experiment to enable handling of cases where shadow is a non-zero
// compile-time constant. For some unexplainable reason they were silently
// ignored in the instrumentation.
static cl::opt<bool> ClCheckConstantShadow("msan-check-constant-shadow",
       cl::desc("Insert checks for constant shadow values"),
       cl::Hidden, cl::init(false));

static const char *const kMsanModuleCtorName = "msan.module_ctor";
static const char *const kMsanInitName = "__msan_init";

namespace {
// Memory map parameters used in application-to-shadow address calculation.
// Offset = (Addr & ~AndMask) ^ XorMask
// Shadow = ShadowBase + Offset
// Origin = OriginBase + Offset
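//
// For example, with the default x86_64 Linux parameters below (AndMask = 0,
// XorMask = 0x500000000000, ShadowBase = 0, OriginBase = 0x100000000000)
// an application address Addr = 0x600000000100 maps to
//   Shadow = Addr ^ 0x500000000000                    = 0x300000000100
//   Origin = 0x100000000000 + (Addr ^ 0x500000000000) = 0x400000000100
// (the origin address is additionally aligned down to kMinOriginAlignment,
// see getOriginPtr below).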
struct MemoryMapParams {
  uint64_t AndMask;
  uint64_t XorMask;
  uint64_t ShadowBase;
  uint64_t OriginBase;
};

struct PlatformMemoryMapParams {
  const MemoryMapParams *bits32;
  const MemoryMapParams *bits64;
};
// i386 Linux
static const MemoryMapParams Linux_I386_MemoryMapParams = {
  0x000080000000,  // AndMask
  0,               // XorMask (not used)
  0,               // ShadowBase (not used)
  0x000040000000,  // OriginBase
};

// x86_64 Linux
static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
#ifdef MSAN_LINUX_X86_64_OLD_MAPPING
  0x400000000000,  // AndMask
  0,               // XorMask (not used)
  0,               // ShadowBase (not used)
  0x200000000000,  // OriginBase
#else
  0,               // AndMask (not used)
  0x500000000000,  // XorMask
  0,               // ShadowBase (not used)
  0x100000000000,  // OriginBase
#endif
};

// mips64 Linux
static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
  0x004000000000,  // AndMask
  0,               // XorMask (not used)
  0,               // ShadowBase (not used)
  0x002000000000,  // OriginBase
};

// ppc64 Linux
static const MemoryMapParams Linux_PowerPC64_MemoryMapParams = {
  0x200000000000,  // AndMask
  0x100000000000,  // XorMask
  0x080000000000,  // ShadowBase
  0x1C0000000000,  // OriginBase
};

// aarch64 Linux
static const MemoryMapParams Linux_AArch64_MemoryMapParams = {
  0,               // AndMask (not used)
  0x06000000000,   // XorMask
  0,               // ShadowBase (not used)
  0x01000000000,   // OriginBase
};

// i386 FreeBSD
static const MemoryMapParams FreeBSD_I386_MemoryMapParams = {
  0x000180000000,  // AndMask
  0x000040000000,  // XorMask
  0x000020000000,  // ShadowBase
  0x000700000000,  // OriginBase
};

// x86_64 FreeBSD
static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams = {
  0xc00000000000,  // AndMask
  0x200000000000,  // XorMask
  0x100000000000,  // ShadowBase
  0x380000000000,  // OriginBase
};
static const PlatformMemoryMapParams Linux_X86_MemoryMapParams = {
  &Linux_I386_MemoryMapParams,
  &Linux_X86_64_MemoryMapParams,
};

static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = {
  nullptr,
  &Linux_MIPS64_MemoryMapParams,
};

static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams = {
  nullptr,
  &Linux_PowerPC64_MemoryMapParams,
};

static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams = {
  nullptr,
  &Linux_AArch64_MemoryMapParams,
};

static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
  &FreeBSD_I386_MemoryMapParams,
  &FreeBSD_X86_64_MemoryMapParams,
};
/// \brief An instrumentation pass implementing detection of uninitialized
/// reads.
///
/// MemorySanitizer: instrument the code in module to find
/// uninitialized reads.
class MemorySanitizer : public FunctionPass {
 public:
  MemorySanitizer(int TrackOrigins = 0)
      : FunctionPass(ID),
        TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
        WarningFn(nullptr) {}
  const char *getPassName() const override { return "MemorySanitizer"; }
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  static char ID;  // Pass identification, replacement for typeid.
 private:
  void initializeCallbacks(Module &M);

  /// \brief Track origins (allocation points) of uninitialized values.
  int TrackOrigins;

  LLVMContext *C;
  Type *IntptrTy;
  Type *OriginTy;
  /// \brief Thread-local shadow storage for function parameters.
  GlobalVariable *ParamTLS;
  /// \brief Thread-local origin storage for function parameters.
  GlobalVariable *ParamOriginTLS;
  /// \brief Thread-local shadow storage for function return value.
  GlobalVariable *RetvalTLS;
  /// \brief Thread-local origin storage for function return value.
  GlobalVariable *RetvalOriginTLS;
  /// \brief Thread-local shadow storage for in-register va_arg function
  /// parameters (x86_64-specific).
  GlobalVariable *VAArgTLS;
  /// \brief Thread-local shadow storage for va_arg overflow area
  /// (x86_64-specific).
  GlobalVariable *VAArgOverflowSizeTLS;
  /// \brief Thread-local space used to pass origin value to the UMR reporting
  /// function.
  GlobalVariable *OriginTLS;
  /// \brief The run-time callback to print a warning.
  Value *WarningFn;
  // These arrays are indexed by log2(AccessSize).
  Value *MaybeWarningFn[kNumberOfAccessSizes];
  Value *MaybeStoreOriginFn[kNumberOfAccessSizes];

  /// \brief Run-time helper that generates a new origin value for a stack
  /// allocation.
  Value *MsanSetAllocaOrigin4Fn;
  /// \brief Run-time helper that poisons stack on function entry.
  Value *MsanPoisonStackFn;
  /// \brief Run-time helper that records a store (or any event) of an
  /// uninitialized value and returns an updated origin id encoding this info.
  Value *MsanChainOriginFn;
  /// \brief MSan runtime replacements for memmove, memcpy and memset.
  Value *MemmoveFn, *MemcpyFn, *MemsetFn;

  /// \brief Memory map parameters used in application-to-shadow calculation.
  const MemoryMapParams *MapParams;

  /// \brief Branch weights for error reporting.
  MDNode *ColdCallWeights;
  /// \brief Branch weights for origin store.
  MDNode *OriginStoreWeights;
  /// \brief An empty volatile inline asm that prevents callback merge.
  InlineAsm *EmptyAsm;
  Function *MsanCtorFunction;

  friend struct MemorySanitizerVisitor;
  friend struct VarArgAMD64Helper;
  friend struct VarArgMIPS64Helper;
  friend struct VarArgAArch64Helper;
};
} // anonymous namespace

char MemorySanitizer::ID = 0;
INITIALIZE_PASS(MemorySanitizer, "msan",
                "MemorySanitizer: detects uninitialized reads.",
                false, false)

FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins) {
  return new MemorySanitizer(TrackOrigins);
}
/// \brief Create a non-const global initialized with the given string.
///
/// Creates a writable global for Str so that we can pass it to the
/// run-time lib. Runtime uses first 4 bytes of the string to store the
/// frame ID, so the string needs to be mutable.
static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
                                                            StringRef Str) {
  Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
  return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
                            GlobalValue::PrivateLinkage, StrConst, "");
}
/// \brief Insert extern declaration of runtime-provided functions and globals.
void MemorySanitizer::initializeCallbacks(Module &M) {
  // Only do this once.
  if (WarningFn)
    return;

  IRBuilder<> IRB(*C);
  // Create the callback.
  // FIXME: this function should have "Cold" calling conv,
  // which is not yet implemented.
  StringRef WarningFnName = ClKeepGoing ? "__msan_warning"
                                        : "__msan_warning_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), nullptr);

  for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
       AccessSizeIndex++) {
    unsigned AccessSize = 1 << AccessSizeIndex;
    std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
    MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
        IRB.getInt32Ty(), nullptr);

    FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
    MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
        IRB.getInt8PtrTy(), IRB.getInt32Ty(), nullptr);
  }
  MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
    "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
    IRB.getInt8PtrTy(), IntptrTy, nullptr);
  MsanPoisonStackFn =
      M.getOrInsertFunction("__msan_poison_stack", IRB.getVoidTy(),
                            IRB.getInt8PtrTy(), IntptrTy, nullptr);
  MsanChainOriginFn = M.getOrInsertFunction(
    "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty(), nullptr);
  MemmoveFn = M.getOrInsertFunction(
    "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
    IRB.getInt8PtrTy(), IntptrTy, nullptr);
  MemcpyFn = M.getOrInsertFunction(
    "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
    IntptrTy, nullptr);
  MemsetFn = M.getOrInsertFunction(
    "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
    IntptrTy, nullptr);
  // Create globals.
  RetvalTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8), false,
    GlobalVariable::ExternalLinkage, nullptr, "__msan_retval_tls", nullptr,
    GlobalVariable::InitialExecTLSModel);
  RetvalOriginTLS = new GlobalVariable(
    M, OriginTy, false, GlobalVariable::ExternalLinkage, nullptr,
    "__msan_retval_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);

  ParamTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false,
    GlobalVariable::ExternalLinkage, nullptr, "__msan_param_tls", nullptr,
    GlobalVariable::InitialExecTLSModel);
  ParamOriginTLS = new GlobalVariable(
    M, ArrayType::get(OriginTy, kParamTLSSize / 4), false,
    GlobalVariable::ExternalLinkage, nullptr, "__msan_param_origin_tls",
    nullptr, GlobalVariable::InitialExecTLSModel);

  VAArgTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false,
    GlobalVariable::ExternalLinkage, nullptr, "__msan_va_arg_tls", nullptr,
    GlobalVariable::InitialExecTLSModel);
  VAArgOverflowSizeTLS = new GlobalVariable(
    M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
    "__msan_va_arg_overflow_size_tls", nullptr,
    GlobalVariable::InitialExecTLSModel);
  OriginTLS = new GlobalVariable(
    M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
    "__msan_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);

  // We insert an empty inline asm after __msan_report* to avoid callback merge.
  EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
                            StringRef(""), StringRef(""),
                            /*hasSideEffects=*/true);
}
/// \brief Module-level initialization.
///
/// inserts a call to __msan_init to the module's constructor list.
bool MemorySanitizer::doInitialization(Module &M) {
  auto &DL = M.getDataLayout();

  Triple TargetTriple(M.getTargetTriple());
  switch (TargetTriple.getOS()) {
    case Triple::FreeBSD:
      switch (TargetTriple.getArch()) {
        case Triple::x86_64:
          MapParams = FreeBSD_X86_MemoryMapParams.bits64;
          break;
        case Triple::x86:
          MapParams = FreeBSD_X86_MemoryMapParams.bits32;
          break;
        default:
          report_fatal_error("unsupported architecture");
      }
      break;
    case Triple::Linux:
      switch (TargetTriple.getArch()) {
        case Triple::x86_64:
          MapParams = Linux_X86_MemoryMapParams.bits64;
          break;
        case Triple::x86:
          MapParams = Linux_X86_MemoryMapParams.bits32;
          break;
        case Triple::mips64:
        case Triple::mips64el:
          MapParams = Linux_MIPS_MemoryMapParams.bits64;
          break;
        case Triple::ppc64:
        case Triple::ppc64le:
          MapParams = Linux_PowerPC_MemoryMapParams.bits64;
          break;
        case Triple::aarch64:
        case Triple::aarch64_be:
          MapParams = Linux_ARM_MemoryMapParams.bits64;
          break;
        default:
          report_fatal_error("unsupported architecture");
      }
      break;
    default:
      report_fatal_error("unsupported operating system");
  }

  C = &(M.getContext());
  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(DL);
  OriginTy = IRB.getInt32Ty();

  ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
  OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);

  std::tie(MsanCtorFunction, std::ignore) =
      createSanitizerCtorAndInitFunctions(M, kMsanModuleCtorName, kMsanInitName,
                                          /*InitArgTypes=*/{},
                                          /*InitArgs=*/{});

  appendToGlobalCtors(M, MsanCtorFunction, 0);

  if (TrackOrigins)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(TrackOrigins), "__msan_track_origins");

  if (ClKeepGoing)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(ClKeepGoing), "__msan_keep_going");

  return true;
}
namespace {

/// \brief A helper class that handles instrumentation of VarArg
/// functions on a particular platform.
///
/// Implementations are expected to insert the instrumentation
/// necessary to propagate argument shadow through VarArg function
/// calls. Visit* methods are called during an InstVisitor pass over
/// the function, and should avoid creating new basic blocks. A new
/// instance of this class is created for each instrumented function.
struct VarArgHelper {
  /// \brief Visit a CallSite.
  virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;

  /// \brief Visit a va_start call.
  virtual void visitVAStartInst(VAStartInst &I) = 0;

  /// \brief Visit a va_copy call.
  virtual void visitVACopyInst(VACopyInst &I) = 0;

  /// \brief Finalize function instrumentation.
  ///
  /// This method is called after visiting all interesting (see above)
  /// instructions in a function.
  virtual void finalizeInstrumentation() = 0;

  virtual ~VarArgHelper() {}
};
struct MemorySanitizerVisitor;

VarArgHelper*
CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                   MemorySanitizerVisitor &Visitor);

unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
  if (TypeSize <= 8) return 0;
  return Log2_32_Ceil(TypeSize / 8);
}
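// For example: TypeSizeToSizeIndex(8) == 0 (1-byte access),
// TypeSizeToSizeIndex(32) == 2 (4-byte access) and TypeSizeToSizeIndex(64)
// == 3 (8-byte access). Callers guard the result with
// "SizeIndex < kNumberOfAccessSizes", so wider types (e.g. a 128-bit shadow,
// index 4) fall back to inline instrumentation instead of callbacks.
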
/// This class does all the work for a given function. Store and Load
/// instructions store and load corresponding shadow and origin
/// values. Most instructions propagate shadow from arguments to their
/// return values. Certain instructions (most importantly, BranchInst)
/// test their argument shadow and print reports (with a runtime call) if it's
/// non-zero.
struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  Function &F;
  MemorySanitizer &MS;
  SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
  ValueMap<Value*, Value*> ShadowMap, OriginMap;
  std::unique_ptr<VarArgHelper> VAHelper;

  // The following flags disable parts of MSan instrumentation based on
  // blacklist contents and command-line options.
  bool InsertChecks;
  bool PropagateShadow;
  bool PoisonStack;
  bool PoisonUndef;
  bool CheckReturnValue;

  struct ShadowOriginAndInsertPoint {
    Value *Shadow;
    Value *Origin;
    Instruction *OrigIns;
    ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
      : Shadow(S), Origin(O), OrigIns(I) { }
  };
  SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
  SmallVector<Instruction*, 16> StoreList;
  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
      : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
    bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeMemory);
    InsertChecks = SanitizeFunction;
    PropagateShadow = SanitizeFunction;
    PoisonStack = SanitizeFunction && ClPoisonStack;
    PoisonUndef = SanitizeFunction && ClPoisonUndef;
    // FIXME: Consider using SpecialCaseList to specify a list of functions that
    // must always return fully initialized values. For now, we hardcode "main".
    CheckReturnValue = SanitizeFunction && (F.getName() == "main");

    DEBUG(if (!InsertChecks)
              dbgs() << "MemorySanitizer is not inserting checks into '"
                     << F.getName() << "'\n");
  }
  Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
    if (MS.TrackOrigins <= 1) return V;
    return IRB.CreateCall(MS.MsanChainOriginFn, V);
  }

  Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) {
    const DataLayout &DL = F.getParent()->getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    if (IntptrSize == kOriginSize) return Origin;
    assert(IntptrSize == kOriginSize * 2);
    Origin = IRB.CreateIntCast(Origin, MS.IntptrTy, /* isSigned */ false);
    return IRB.CreateOr(Origin, IRB.CreateShl(Origin, kOriginSize * 8));
  }
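  // For example, on a 64-bit target an origin id 0x0000ABCD is widened to
  // 0x0000ABCD0000ABCD, so a single intptr-sized store paints two
  // consecutive 4-byte origin slots (see paintOrigin below).
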
  /// \brief Fill memory range with the given origin value.
  void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
                   unsigned Size, unsigned Alignment) {
    const DataLayout &DL = F.getParent()->getDataLayout();
    unsigned IntptrAlignment = DL.getABITypeAlignment(MS.IntptrTy);
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    assert(IntptrAlignment >= kMinOriginAlignment);
    assert(IntptrSize >= kOriginSize);

    unsigned Ofs = 0;
    unsigned CurrentAlignment = Alignment;
    if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
      Value *IntptrOrigin = originToIntptr(IRB, Origin);
      Value *IntptrOriginPtr =
          IRB.CreatePointerCast(OriginPtr, PointerType::get(MS.IntptrTy, 0));
      for (unsigned i = 0; i < Size / IntptrSize; ++i) {
        Value *Ptr = i ? IRB.CreateConstGEP1_32(MS.IntptrTy, IntptrOriginPtr, i)
                       : IntptrOriginPtr;
        IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment);
        Ofs += IntptrSize / kOriginSize;
        CurrentAlignment = IntptrAlignment;
      }
    }

    for (unsigned i = Ofs; i < (Size + kOriginSize - 1) / kOriginSize; ++i) {
      Value *GEP =
          i ? IRB.CreateConstGEP1_32(nullptr, OriginPtr, i) : OriginPtr;
      IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment);
      CurrentAlignment = kMinOriginAlignment;
    }
  }
  void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
                   unsigned Alignment, bool AsCall) {
    const DataLayout &DL = F.getParent()->getDataLayout();
    unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
    unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
    if (isa<StructType>(Shadow->getType())) {
      paintOrigin(IRB, updateOrigin(Origin, IRB),
                  getOriginPtr(Addr, IRB, Alignment), StoreSize,
                  OriginAlignment);
    } else {
      Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
      Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
      if (ConstantShadow) {
        if (ClCheckConstantShadow && !ConstantShadow->isZeroValue())
          paintOrigin(IRB, updateOrigin(Origin, IRB),
                      getOriginPtr(Addr, IRB, Alignment), StoreSize,
                      OriginAlignment);
        return;
      }

      unsigned TypeSizeInBits =
          DL.getTypeSizeInBits(ConvertedShadow->getType());
      unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
      if (AsCall && SizeIndex < kNumberOfAccessSizes) {
        Value *Fn = MS.MaybeStoreOriginFn[SizeIndex];
        Value *ConvertedShadow2 = IRB.CreateZExt(
            ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
        IRB.CreateCall(Fn, {ConvertedShadow2,
                            IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                            Origin});
      } else {
        Value *Cmp = IRB.CreateICmpNE(
            ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp");
        Instruction *CheckTerm = SplitBlockAndInsertIfThen(
            Cmp, &*IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
        IRBuilder<> IRBNew(CheckTerm);
        paintOrigin(IRBNew, updateOrigin(Origin, IRBNew),
                    getOriginPtr(Addr, IRBNew, Alignment), StoreSize,
                    OriginAlignment);
      }
    }
  }
  void materializeStores(bool InstrumentWithCalls) {
    for (auto Inst : StoreList) {
      StoreInst &SI = *dyn_cast<StoreInst>(Inst);

      IRBuilder<> IRB(&SI);
      Value *Val = SI.getValueOperand();
      Value *Addr = SI.getPointerOperand();
      Value *Shadow = SI.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
      Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

      StoreInst *NewSI =
          IRB.CreateAlignedStore(Shadow, ShadowPtr, SI.getAlignment());
      DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
      (void)NewSI;

      if (ClCheckAccessAddress) insertShadowCheck(Addr, &SI);

      if (SI.isAtomic()) SI.setOrdering(addReleaseOrdering(SI.getOrdering()));

      if (MS.TrackOrigins && !SI.isAtomic())
        storeOrigin(IRB, Addr, Shadow, getOrigin(Val), SI.getAlignment(),
                    InstrumentWithCalls);
    }
  }
  void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin,
                           bool AsCall) {
    IRBuilder<> IRB(OrigIns);
    DEBUG(dbgs() << "  SHAD0 : " << *Shadow << "\n");
    Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
    DEBUG(dbgs() << "  SHAD1 : " << *ConvertedShadow << "\n");

    Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow);
    if (ConstantShadow) {
      if (ClCheckConstantShadow && !ConstantShadow->isZeroValue()) {
        if (MS.TrackOrigins) {
          IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
                          MS.OriginTLS);
        }
        IRB.CreateCall(MS.WarningFn, {});
        IRB.CreateCall(MS.EmptyAsm, {});
        // FIXME: Insert UnreachableInst if !ClKeepGoing?
        // This may invalidate some of the following checks and needs to be done
        // at the very end.
      }
      return;
    }

    const DataLayout &DL = OrigIns->getModule()->getDataLayout();

    unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
    unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
    if (AsCall && SizeIndex < kNumberOfAccessSizes) {
      Value *Fn = MS.MaybeWarningFn[SizeIndex];
      Value *ConvertedShadow2 =
          IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
      IRB.CreateCall(Fn, {ConvertedShadow2, MS.TrackOrigins && Origin
                                                ? Origin
                                                : (Value *)IRB.getInt32(0)});
    } else {
      Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
                                    getCleanShadow(ConvertedShadow), "_mscmp");
      Instruction *CheckTerm = SplitBlockAndInsertIfThen(
          Cmp, OrigIns,
          /* Unreachable */ !ClKeepGoing, MS.ColdCallWeights);

      IRB.SetInsertPoint(CheckTerm);
      if (MS.TrackOrigins) {
        IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0),
                        MS.OriginTLS);
      }
      IRB.CreateCall(MS.WarningFn, {});
      IRB.CreateCall(MS.EmptyAsm, {});
      DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
    }
  }
  void materializeChecks(bool InstrumentWithCalls) {
    for (const auto &ShadowData : InstrumentationList) {
      Instruction *OrigIns = ShadowData.OrigIns;
      Value *Shadow = ShadowData.Shadow;
      Value *Origin = ShadowData.Origin;
      materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
    }
    DEBUG(dbgs() << "DONE:\n" << F);
  }
  /// \brief Add MemorySanitizer instrumentation to a function.
  bool runOnFunction() {
    MS.initializeCallbacks(*F.getParent());

    // In the presence of unreachable blocks, we may see Phi nodes with
    // incoming nodes from such blocks. Since InstVisitor skips unreachable
    // blocks, such nodes will not have any shadow value associated with them.
    // It's easier to remove unreachable blocks than deal with missing shadow.
    removeUnreachableBlocks(F);

    // Iterate all BBs in depth-first order and create shadow instructions
    // for all instructions (where applicable).
    // For PHI nodes we create dummy shadow PHIs which will be finalized later.
    for (BasicBlock *BB : depth_first(&F.getEntryBlock()))
      visit(*BB);

    // Finalize PHI nodes.
    for (PHINode *PN : ShadowPHINodes) {
      PHINode *PNS = cast<PHINode>(getShadow(PN));
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
        if (PNO) PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
      }
    }

    VAHelper->finalizeInstrumentation();

    bool InstrumentWithCalls = ClInstrumentationWithCallThreshold >= 0 &&
                               InstrumentationList.size() + StoreList.size() >
                                   (unsigned)ClInstrumentationWithCallThreshold;

    // Delayed instrumentation of StoreInst.
    // This may add new checks to be inserted later.
    materializeStores(InstrumentWithCalls);

    // Insert shadow value checks.
    materializeChecks(InstrumentWithCalls);

    return true;
  }
  /// \brief Compute the shadow type that corresponds to a given Value.
  Type *getShadowTy(Value *V) {
    return getShadowTy(V->getType());
  }

  /// \brief Compute the shadow type that corresponds to a given Type.
  Type *getShadowTy(Type *OrigTy) {
    if (!OrigTy->isSized()) {
      return nullptr;
    }
    // For integer type, shadow is the same as the original type.
    // This may return weird-sized types like i1.
    if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
      return IT;
    const DataLayout &DL = F.getParent()->getDataLayout();
    if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
      uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
      return VectorType::get(IntegerType::get(*MS.C, EltSize),
                             VT->getNumElements());
    }
    if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
      return ArrayType::get(getShadowTy(AT->getElementType()),
                            AT->getNumElements());
    }
    if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
      SmallVector<Type*, 4> Elements;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
      DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
      return Res;
    }
    uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
    return IntegerType::get(*MS.C, TypeSize);
  }
  /// \brief Flatten a vector type.
  Type *getShadowTyNoVec(Type *ty) {
    if (VectorType *vt = dyn_cast<VectorType>(ty))
      return IntegerType::get(*MS.C, vt->getBitWidth());
    return ty;
  }

  /// \brief Convert a shadow value to its flattened variant.
  Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
    Type *Ty = V->getType();
    Type *NoVecTy = getShadowTyNoVec(Ty);
    if (Ty == NoVecTy) return V;
    return IRB.CreateBitCast(V, NoVecTy);
  }
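  // For example, a <4 x i32> shadow is flattened to an i128 via bitcast.
  // This lets later code compare a whole shadow against zero with a single
  // scalar icmp; comparing the vector directly would yield a <4 x i1>
  // rather than a single condition bit.
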
  /// \brief Compute the integer shadow offset that corresponds to a given
  /// application address.
  ///
  /// Offset = (Addr & ~AndMask) ^ XorMask
  Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) {
    Value *OffsetLong = IRB.CreatePointerCast(Addr, MS.IntptrTy);

    uint64_t AndMask = MS.MapParams->AndMask;
    if (AndMask)
      OffsetLong =
          IRB.CreateAnd(OffsetLong, ConstantInt::get(MS.IntptrTy, ~AndMask));

    uint64_t XorMask = MS.MapParams->XorMask;
    if (XorMask)
      OffsetLong =
          IRB.CreateXor(OffsetLong, ConstantInt::get(MS.IntptrTy, XorMask));
    return OffsetLong;
  }
  /// \brief Compute the shadow address that corresponds to a given application
  /// address.
  ///
  /// Shadow = ShadowBase + Offset
  Value *getShadowPtr(Value *Addr, Type *ShadowTy,
                      IRBuilder<> &IRB) {
    Value *ShadowLong = getShadowPtrOffset(Addr, IRB);
    uint64_t ShadowBase = MS.MapParams->ShadowBase;
    if (ShadowBase != 0)
      ShadowLong =
        IRB.CreateAdd(ShadowLong,
                      ConstantInt::get(MS.IntptrTy, ShadowBase));
    return IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
  }
  /// \brief Compute the origin address that corresponds to a given application
  /// address.
  ///
  /// OriginAddr = (OriginBase + Offset) & ~3ULL
  Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB, unsigned Alignment) {
    Value *OriginLong = getShadowPtrOffset(Addr, IRB);
    uint64_t OriginBase = MS.MapParams->OriginBase;
    if (OriginBase != 0)
      OriginLong =
        IRB.CreateAdd(OriginLong,
                      ConstantInt::get(MS.IntptrTy, OriginBase));
    if (Alignment < kMinOriginAlignment) {
      uint64_t Mask = kMinOriginAlignment - 1;
      OriginLong = IRB.CreateAnd(OriginLong,
                                 ConstantInt::get(MS.IntptrTy, ~Mask));
    }
    return IRB.CreateIntToPtr(OriginLong,
                              PointerType::get(IRB.getInt32Ty(), 0));
  }
  /// \brief Compute the shadow address for a given function argument.
  ///
  /// Shadow = ParamTLS+ArgOffset.
  Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msarg");
  }

  /// \brief Compute the origin address for a given function argument.
  Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    if (!MS.TrackOrigins) return nullptr;
    Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
                              "_msarg_o");
  }

  /// \brief Compute the shadow address for a retval.
  Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
    Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy);
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msret");
  }

  /// \brief Compute the origin address for a retval.
  Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
    // We keep a single origin for the entire retval. Might be too optimistic.
    return MS.RetvalOriginTLS;
  }
  /// \brief Set SV to be the shadow value for V.
  void setShadow(Value *V, Value *SV) {
    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
  }

  /// \brief Set Origin to be the origin value for V.
  void setOrigin(Value *V, Value *Origin) {
    if (!MS.TrackOrigins) return;
    assert(!OriginMap.count(V) && "Values may only have one origin");
    DEBUG(dbgs() << "ORIGIN: " << *V << "  ==> " << *Origin << "\n");
    OriginMap[V] = Origin;
  }
  /// \brief Create a clean shadow value for a given value.
  ///
  /// Clean shadow (all zeroes) means all bits of the value are defined
  /// (initialized).
  Constant *getCleanShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return nullptr;
    return Constant::getNullValue(ShadowTy);
  }

  /// \brief Create a dirty shadow of a given shadow type.
  Constant *getPoisonedShadow(Type *ShadowTy) {
    assert(ShadowTy);
    if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
      return Constant::getAllOnesValue(ShadowTy);
    if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
      SmallVector<Constant *, 4> Vals(AT->getNumElements(),
                                      getPoisonedShadow(AT->getElementType()));
      return ConstantArray::get(AT, Vals);
    }
    if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {
      SmallVector<Constant *, 4> Vals;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
      return ConstantStruct::get(ST, Vals);
    }
    llvm_unreachable("Unexpected shadow type");
  }

  /// \brief Create a dirty shadow for a given value.
  Constant *getPoisonedShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return nullptr;
    return getPoisonedShadow(ShadowTy);
  }

  /// \brief Create a clean (zero) origin.
  Value *getCleanOrigin() {
    return Constant::getNullValue(MS.OriginTy);
  }
  /// \brief Get the shadow value for a given Value.
  ///
  /// This function either returns the value set earlier with setShadow,
  /// or extracts it from ParamTLS (for function arguments).
  Value *getShadow(Value *V) {
    if (!PropagateShadow) return getCleanShadow(V);
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      // For instructions the shadow is already stored in the map.
      Value *Shadow = ShadowMap[V];
      if (!Shadow) {
        DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
        (void)I;
        assert(Shadow && "No shadow for a value");
      }
      return Shadow;
    }
    if (UndefValue *U = dyn_cast<UndefValue>(V)) {
      Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
      DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
      (void)U;
      return AllOnes;
    }
    if (Argument *A = dyn_cast<Argument>(V)) {
      // For arguments we compute the shadow on demand and store it in the map.
      Value **ShadowPtr = &ShadowMap[V];
      if (*ShadowPtr)
        return *ShadowPtr;
      Function *F = A->getParent();
      IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
      unsigned ArgOffset = 0;
      const DataLayout &DL = F->getParent()->getDataLayout();
      for (auto &FArg : F->args()) {
        if (!FArg.getType()->isSized()) {
          DEBUG(dbgs() << "Arg is not sized\n");
          continue;
        }
        unsigned Size =
            FArg.hasByValAttr()
                ? DL.getTypeAllocSize(FArg.getType()->getPointerElementType())
                : DL.getTypeAllocSize(FArg.getType());
        if (A == &FArg) {
          bool Overflow = ArgOffset + Size > kParamTLSSize;
          Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
          if (FArg.hasByValAttr()) {
            // ByVal pointer itself has clean shadow. We copy the actual
            // argument shadow to the underlying memory.
            // Figure out maximal valid memcpy alignment.
            unsigned ArgAlign = FArg.getParamAlignment();
            if (ArgAlign == 0) {
              Type *EltType = A->getType()->getPointerElementType();
              ArgAlign = DL.getABITypeAlignment(EltType);
            }
            if (Overflow) {
              // ParamTLS overflow.
              EntryIRB.CreateMemSet(
                  getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB),
                  Constant::getNullValue(EntryIRB.getInt8Ty()), Size, ArgAlign);
            } else {
              unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
              Value *Cpy = EntryIRB.CreateMemCpy(
                  getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size,
                  CopyAlign);
              DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
              (void)Cpy;
            }
            *ShadowPtr = getCleanShadow(V);
          } else {
            if (Overflow) {
              // ParamTLS overflow.
              *ShadowPtr = getCleanShadow(V);
            } else {
              *ShadowPtr =
                  EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
            }
          }
          DEBUG(dbgs() << "  ARG:    " << FArg << " ==> " <<
                **ShadowPtr << "\n");
          if (MS.TrackOrigins && !Overflow) {
            Value *OriginPtr =
                getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
            setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
          } else {
            setOrigin(A, getCleanOrigin());
          }
        }
        ArgOffset += RoundUpToAlignment(Size, kShadowTLSAlignment);
      }
      assert(*ShadowPtr && "Could not find shadow for an argument");
      return *ShadowPtr;
    }
    // For everything else the shadow is zero.
    return getCleanShadow(V);
  }
  /// \brief Get the shadow for i-th argument of the instruction I.
  Value *getShadow(Instruction *I, int i) {
    return getShadow(I->getOperand(i));
  }

  /// \brief Get the origin for a value.
  Value *getOrigin(Value *V) {
    if (!MS.TrackOrigins) return nullptr;
    if (!PropagateShadow) return getCleanOrigin();
    if (isa<Constant>(V)) return getCleanOrigin();
    assert((isa<Instruction>(V) || isa<Argument>(V)) &&
           "Unexpected value type in getOrigin()");
    Value *Origin = OriginMap[V];
    assert(Origin && "Missing origin");
    return Origin;
  }

  /// \brief Get the origin for i-th argument of the instruction I.
  Value *getOrigin(Instruction *I, int i) {
    return getOrigin(I->getOperand(i));
  }
  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning in runtime if the shadow value is not 0.
  void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
    assert(Shadow);
    if (!InsertChecks) return;
#ifndef NDEBUG
    Type *ShadowTy = Shadow->getType();
    assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
           "Can only insert checks for integer and vector shadow types");
#endif
    InstrumentationList.push_back(
        ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
  }

  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning in runtime if the value is not fully defined.
  void insertShadowCheck(Value *Val, Instruction *OrigIns) {
    assert(Val);
    Value *Shadow, *Origin;
    if (ClCheckConstantShadow) {
      Shadow = getShadow(Val);
      if (!Shadow) return;
      Origin = getOrigin(Val);
    } else {
      Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
      if (!Shadow) return;
      Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
    }
    insertShadowCheck(Shadow, Origin, OrigIns);
  }
  AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
    switch (a) {
      case NotAtomic:
        return NotAtomic;
      case Unordered:
      case Monotonic:
      case Release:
        return Release;
      case Acquire:
      case AcquireRelease:
        return AcquireRelease;
      case SequentiallyConsistent:
        return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }

  AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
    switch (a) {
      case NotAtomic:
        return NotAtomic;
      case Unordered:
      case Monotonic:
      case Acquire:
        return Acquire;
      case Release:
      case AcquireRelease:
        return AcquireRelease;
      case SequentiallyConsistent:
        return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }
  // ------------------- Visitors.

  /// \brief Instrument LoadInst
  ///
  /// Loads the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the load address is fully defined.
  void visitLoadInst(LoadInst &I) {
    assert(I.getType()->isSized() && "Load type must have size");
    IRBuilder<> IRB(I.getNextNode());
    Type *ShadowTy = getShadowTy(&I);
    Value *Addr = I.getPointerOperand();
    if (PropagateShadow && !I.getMetadata("nosanitize")) {
      Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
      setShadow(&I,
                IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertShadowCheck(I.getPointerOperand(), &I);

    if (I.isAtomic())
      I.setOrdering(addAcquireOrdering(I.getOrdering()));

    if (MS.TrackOrigins) {
      if (PropagateShadow) {
        unsigned Alignment = I.getAlignment();
        unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment);
        setOrigin(&I, IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB, Alignment),
                                            OriginAlignment));
      } else {
        setOrigin(&I, getCleanOrigin());
      }
    }
  }
  /// \brief Instrument StoreInst
  ///
  /// Stores the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the store address is fully defined.
  void visitStoreInst(StoreInst &I) {
    StoreList.push_back(&I);
  }

  void handleCASOrRMW(Instruction &I) {
    assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));

    IRBuilder<> IRB(&I);
    Value *Addr = I.getOperand(0);
    Value *ShadowPtr = getShadowPtr(Addr, I.getType(), IRB);

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    // Only test the conditional argument of cmpxchg instruction.
    // The other argument can potentially be uninitialized, but we can not
    // detect this situation reliably without possible false positives.
    if (isa<AtomicCmpXchgInst>(I))
      insertShadowCheck(I.getOperand(1), &I);

    IRB.CreateStore(getCleanShadow(&I), ShadowPtr);

    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
  void visitAtomicRMWInst(AtomicRMWInst &I) {
    handleCASOrRMW(I);
    I.setOrdering(addReleaseOrdering(I.getOrdering()));
  }

  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
    handleCASOrRMW(I);
    I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
  }

  // Vector manipulation.
  void visitExtractElementInst(ExtractElementInst &I) {
    insertShadowCheck(I.getOperand(1), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
              "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitInsertElementInst(InsertElementInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
              I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  void visitShuffleVectorInst(ShuffleVectorInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
              I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }
  // Casts.
  void visitSExtInst(SExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitZExtInst(ZExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitTruncInst(TruncInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitBitCastInst(BitCastInst &I) {
    // Special case: if this is the bitcast (there is exactly 1 allowed) between
    // a musttail call and a ret, don't instrument. New instructions are not
    // allowed after a musttail call.
    if (auto *CI = dyn_cast<CallInst>(I.getOperand(0)))
      if (CI->isMustTailCall())
        return;
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
             "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitIntToPtrInst(IntToPtrInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
             "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));
  }
  void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }
  /// \brief Propagate shadow for bitwise AND.
  ///
  /// This code is exact, i.e. if, for example, a bit in the left argument
  /// is defined and 0, then neither the value nor the definedness of the
  /// corresponding bit in B affects the resulting shadow.
  void visitAnd(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    //  "And" of 0 and a poisoned value results in unpoisoned value.
    //  1&1 => 1;  0&1 => 0;  p&1 => p;
    //  1&0 => 0;  0&0 => 0;  p&0 => 0;
    //  1&p => p;  0&p => 0;  p&p => p;
    //  S = (S1 & S2) | (V1 & S2) | (S1 & V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }
  void visitOr(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    //  "Or" of 1 and a poisoned value results in unpoisoned value.
    //  1|1 => 1;  0|1 => 1;  p|1 => 1;
    //  1|0 => 1;  0|0 => 0;  p|0 => p;
    //  1|p => 1;  0|p => p;  p|p => p;
    //  S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = IRB.CreateNot(I.getOperand(0));
    Value *V2 = IRB.CreateNot(I.getOperand(1));
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }
  /// \brief Default propagation of shadow and/or origin.
  ///
  /// This class implements the general case of shadow propagation, used in all
  /// cases where we don't know and/or don't care about what the operation
  /// actually does. It converts all input shadow values to a common type
  /// (extending or truncating as necessary), and bitwise OR's them.
  ///
  /// This is much cheaper than inserting checks (i.e. requiring inputs to be
  /// fully initialized), and less prone to false positives.
  ///
  /// This class also implements the general case of origin propagation. For a
  /// Nary operation, result origin is set to the origin of an argument that is
  /// not entirely initialized. If there is more than one such argument, the
  /// rightmost of them is picked. It does not matter which one is picked if
  /// all arguments are initialized.
  template <bool CombineShadow>
  class Combiner {
    Value *Shadow;
    Value *Origin;
    IRBuilder<> &IRB;
    MemorySanitizerVisitor *MSV;

  public:
    Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
      Shadow(nullptr), Origin(nullptr), IRB(IRB), MSV(MSV) {}

    /// \brief Add a pair of shadow and origin values to the mix.
    Combiner &Add(Value *OpShadow, Value *OpOrigin) {
      if (CombineShadow) {
        assert(OpShadow);
        if (!Shadow)
          Shadow = OpShadow;
        else {
          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
        }
      }

      if (MSV->MS.TrackOrigins) {
        assert(OpOrigin);
        if (!Origin) {
          Origin = OpOrigin;
        } else {
          Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
          // No point in adding something that might result in 0 origin value.
          if (!ConstOrigin || !ConstOrigin->isNullValue()) {
            Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
            Value *Cond =
                IRB.CreateICmpNE(FlatShadow, MSV->getCleanShadow(FlatShadow));
            Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
          }
        }
      }
      return *this;
    }

    /// \brief Add an application value to the mix.
    Combiner &Add(Value *V) {
      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
      return Add(OpShadow, OpOrigin);
    }

    /// \brief Set the current combined values as the given instruction's
    /// shadow and origin.
    void Done(Instruction *I) {
      if (CombineShadow) {
        assert(Shadow);
        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);
      }
      if (MSV->MS.TrackOrigins) {
        assert(Origin);
        MSV->setOrigin(I, Origin);
      }
    }
  };
  typedef Combiner<true> ShadowAndOriginCombiner;
  typedef Combiner<false> OriginCombiner;

  /// \brief Propagate origin for arbitrary operation.
  void setOriginForNaryOp(Instruction &I) {
    if (!MS.TrackOrigins) return;
    IRBuilder<> IRB(&I);
    OriginCombiner OC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      OC.Add(OI->get());
    OC.Done(&I);
  }
  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
    assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
           "Vector of pointers is not a valid shadow type");
    return Ty->isVectorTy() ?
      Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
      Ty->getPrimitiveSizeInBits();
  }
  /// \brief Cast between two shadow types, extending or truncating as
  /// necessary.
  Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
                          bool Signed = false) {
    Type *srcTy = V->getType();
    if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
      return IRB.CreateIntCast(V, dstTy, Signed);
    if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
        dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
      return IRB.CreateIntCast(V, dstTy, Signed);
    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
    Value *V2 =
        IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
    return IRB.CreateBitCast(V2, dstTy);
    // TODO: handle struct types.
  }

  /// \brief Cast an application value to the type of its own shadow.
  Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (V->getType() == ShadowTy)
      return V;
    if (V->getType()->isPtrOrPtrVectorTy())
      return IRB.CreatePtrToInt(V, ShadowTy);
    else
      return IRB.CreateBitCast(V, ShadowTy);
  }
  /// \brief Propagate shadow for arbitrary operation.
  void handleShadowOr(Instruction &I) {
    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      SC.Add(OI->get());
    SC.Done(&I);
  }

  // \brief Handle multiplication by constant.
  //
  // Handle a special case of multiplication by constant that may have one or
  // more zeros in the lower bits. This makes corresponding number of lower bits
  // of the result zero as well. We model it by shifting the other operand
  // shadow left by the required number of bits. Effectively, we transform
  // (X * (A * 2**B)) to ((X << B) * A) and instrument (X << B) as (Sx << B).
  // We use multiplication by 2**N instead of shift to cover the case of
  // multiplication by 0, which may occur in some elements of a vector operand.
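  //
  // For example, multiplying by 24 (= 3 * 2**3) guarantees three trailing
  // zero bits in the result, so the shadow multiplier computed below is
  // 1 << countTrailingZeros(24) == 8, which shifts the other operand's
  // shadow left by 3 bits.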
  void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
                           Value *OtherArg) {
    Constant *ShadowMul;
    Type *Ty = ConstArg->getType();
    if (Ty->isVectorTy()) {
      unsigned NumElements = Ty->getVectorNumElements();
      Type *EltTy = Ty->getSequentialElementType();
      SmallVector<Constant *, 16> Elements;
      for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
        if (ConstantInt *Elt =
                dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx))) {
          APInt V = Elt->getValue();
          APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
          Elements.push_back(ConstantInt::get(EltTy, V2));
        } else {
          Elements.push_back(ConstantInt::get(EltTy, 1));
        }
      }
      ShadowMul = ConstantVector::get(Elements);
    } else {
      if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
        APInt V = Elt->getValue();
        APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
        ShadowMul = ConstantInt::get(Ty, V2);
      } else {
        ShadowMul = ConstantInt::get(Ty, 1);
      }
    }

    IRBuilder<> IRB(&I);
    setShadow(&I,
              IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
    setOrigin(&I, getOrigin(OtherArg));
  }
  void visitMul(BinaryOperator &I) {
    Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
    Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
    if (constOp0 && !constOp1)
      handleMulByConstant(I, constOp0, I.getOperand(1));
    else if (constOp1 && !constOp0)
      handleMulByConstant(I, constOp1, I.getOperand(0));
    else
      handleShadowOr(I);
  }

  void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
  void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitXor(BinaryOperator &I) { handleShadowOr(I); }
1648 void handleDiv(Instruction &I) {
1649 IRBuilder<> IRB(&I);
1650 // Strict on the second argument.
1651 insertShadowCheck(I.getOperand(1), &I);
1652 setShadow(&I, getShadow(&I, 0));
1653 setOrigin(&I, getOrigin(&I, 0));
1654 }
1656 void visitUDiv(BinaryOperator &I) { handleDiv(I); }
1657 void visitSDiv(BinaryOperator &I) { handleDiv(I); }
1658 void visitFDiv(BinaryOperator &I) { handleDiv(I); }
1659 void visitURem(BinaryOperator &I) { handleDiv(I); }
1660 void visitSRem(BinaryOperator &I) { handleDiv(I); }
1661 void visitFRem(BinaryOperator &I) { handleDiv(I); }
1663 /// \brief Instrument == and != comparisons.
1665 /// Sometimes the comparison result is known even if some of the bits of the
1666 /// arguments are not.
1667 void handleEqualityComparison(ICmpInst &I) {
1668 IRBuilder<> IRB(&I);
1669 Value *A = I.getOperand(0);
1670 Value *B = I.getOperand(1);
1671 Value *Sa = getShadow(A);
1672 Value *Sb = getShadow(B);
1674 // Get rid of pointers and vectors of pointers.
1675 // For ints (and vectors of ints), types of A and Sa match,
1676 // and this is a no-op.
1677 A = IRB.CreatePointerCast(A, Sa->getType());
1678 B = IRB.CreatePointerCast(B, Sb->getType());
1680 // A == B <==> (C = A^B) == 0
1681 // A != B <==> (C = A^B) != 0
1683 Value *C = IRB.CreateXor(A, B);
1684 Value *Sc = IRB.CreateOr(Sa, Sb);
1685 // Now dealing with Si = (C == 0) comparison (or C != 0; it does not matter)
1686 // Result is defined if one of the following is true
1687 // * there is a defined 1 bit in C
1688 // * C is fully defined
1689 // Si = !(C & ~Sc) && Sc
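// Worked example (illustrative): let A = 0b10?? and B = 0b0100, where '?'
// marks uninitialized bits. C = A^B has a defined 1 in its top bit, so
// A != B holds however the '?' bits resolve: (C & ~Sc) != 0 and Si == 0
// (the comparison result is fully defined). If every defined bit of C were
// 0, the outcome would depend on the poisoned bits and Si would be 1.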
1690 Value *Zero = Constant::getNullValue(Sc->getType());
1691 Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
1692 Value *Si =
1693 IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
1694 IRB.CreateICmpEQ(
1695 IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
1696 Si->setName("_msprop_icmp");
1697 setShadow(&I, Si);
1698 setOriginForNaryOp(I);
1699 }
1701 /// \brief Build the lowest possible value of V, taking into account V's
1702 /// uninitialized bits.
1703 Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
1704 bool isSigned) {
1705 if (isSigned) {
1706 // Split shadow into sign bit and other bits.
1707 Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
1708 Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
1709 // Maximize the undefined sign bit, minimize other undefined bits.
1710 return
1711 IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit);
1712 } else {
1713 // Minimize undefined bits.
1714 return IRB.CreateAnd(A, IRB.CreateNot(Sa));
1715 }
1716 }
1718 /// \brief Build the highest possible value of V, taking into account V's
1719 /// uninitialized bits.
1720 Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
1721 bool isSigned) {
1722 if (isSigned) {
1723 // Split shadow into sign bit and other bits.
1724 Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
1725 Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
1726 // Minimize the undefined sign bit, maximize other undefined bits.
1727 return
1728 IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits);
1729 } else {
1730 // Maximize undefined bits.
1731 return IRB.CreateOr(A, Sa);
1732 }
1733 }
1735 /// \brief Instrument relational comparisons.
1737 /// This function does exact shadow propagation for all relational
1738 /// comparisons of integers, pointers and vectors of those.
1739 /// FIXME: output seems suboptimal when one of the operands is a constant
1740 void handleRelationalComparisonExact(ICmpInst &I) {
1741 IRBuilder<> IRB(&I);
1742 Value *A = I.getOperand(0);
1743 Value *B = I.getOperand(1);
1744 Value *Sa = getShadow(A);
1745 Value *Sb = getShadow(B);
1747 // Get rid of pointers and vectors of pointers.
1748 // For ints (and vectors of ints), types of A and Sa match,
1749 // and this is a no-op.
1750 A = IRB.CreatePointerCast(A, Sa->getType());
1751 B = IRB.CreatePointerCast(B, Sb->getType());
1753 // Let [a0, a1] be the interval of possible values of A, taking into account
1754 // its undefined bits. Let [b0, b1] be the interval of possible values of B.
1755 // Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
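// Worked example (illustrative): for unsigned A = 0b01?? (so [a0, a1] =
// [0b0100, 0b0111]) and a fully defined B = 0b0010, both (a0 u> b1) and
// (a1 u> b0) are true, so S1 == S2 and the xor below yields a clean shadow:
// the comparison is defined despite A's poisoned low bits.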
1756 bool IsSigned = I.isSigned();
1757 Value *S1 = IRB.CreateICmp(I.getPredicate(),
1758 getLowestPossibleValue(IRB, A, Sa, IsSigned),
1759 getHighestPossibleValue(IRB, B, Sb, IsSigned));
1760 Value *S2 = IRB.CreateICmp(I.getPredicate(),
1761 getHighestPossibleValue(IRB, A, Sa, IsSigned),
1762 getLowestPossibleValue(IRB, B, Sb, IsSigned));
1763 Value *Si = IRB.CreateXor(S1, S2);
1764 setShadow(&I, Si);
1765 setOriginForNaryOp(I);
1766 }
1768 /// \brief Instrument signed relational comparisons.
1770 /// Handle sign bit tests: x<0, x>=0, x<=-1, x>-1 by propagating the highest
1771 /// bit of the shadow. Everything else is delegated to handleShadowOr().
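/// A sketch of the idea (illustrative): for (x < 0) only the sign bit of x
/// matters, so the result is uninitialized exactly when the sign bit of x's
/// shadow is set. The signed "shadow < 0" test emitted below extracts that
/// bit as the result shadow (named "_msprop_icmp_s").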
1772 void handleSignedRelationalComparison(ICmpInst &I) {
1773 Constant *constOp;
1774 Value *op = nullptr;
1775 CmpInst::Predicate pre;
1776 if ((constOp = dyn_cast<Constant>(I.getOperand(1)))) {
1777 op = I.getOperand(0);
1778 pre = I.getPredicate();
1779 } else if ((constOp = dyn_cast<Constant>(I.getOperand(0)))) {
1780 op = I.getOperand(1);
1781 pre = I.getSwappedPredicate();
1782 } else {
1783 handleShadowOr(I);
1784 return;
1785 }
1787 if ((constOp->isNullValue() &&
1788 (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) ||
1789 (constOp->isAllOnesValue() &&
1790 (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE))) {
1791 IRBuilder<> IRB(&I);
1792 Value *Shadow = IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op),
1793 "_msprop_icmp_s");
1794 setShadow(&I, Shadow);
1795 setOrigin(&I, getOrigin(op));
1796 } else {
1797 handleShadowOr(I);
1798 }
1799 }
1801 void visitICmpInst(ICmpInst &I) {
1802 if (!ClHandleICmp) {
1803 handleShadowOr(I);
1804 return;
1805 }
1806 if (I.isEquality()) {
1807 handleEqualityComparison(I);
1808 return;
1809 }
1811 assert(I.isRelational());
1812 if (ClHandleICmpExact) {
1813 handleRelationalComparisonExact(I);
1814 return;
1815 }
1816 if (I.isSigned()) {
1817 handleSignedRelationalComparison(I);
1818 return;
1819 }
1821 assert(I.isUnsigned());
1822 if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
1823 handleRelationalComparisonExact(I);
1824 return;
1825 }
1827 handleShadowOr(I);
1828 }
1830 void visitFCmpInst(FCmpInst &I) {
1831 handleShadowOr(I);
1832 }
1834 void handleShift(BinaryOperator &I) {
1835 IRBuilder<> IRB(&I);
1836 // If any of the S2 bits are poisoned, the whole thing is poisoned.
1837 // Otherwise perform the same shift on S1.
1838 Value *S1 = getShadow(&I, 0);
1839 Value *S2 = getShadow(&I, 1);
1840 Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
1841 S2->getType());
1842 Value *V2 = I.getOperand(1);
1843 Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
1844 setShadow(&I, IRB.CreateOr(Shift, S2Conv));
1845 setOriginForNaryOp(I);
1846 }
1848 void visitShl(BinaryOperator &I) { handleShift(I); }
1849 void visitAShr(BinaryOperator &I) { handleShift(I); }
1850 void visitLShr(BinaryOperator &I) { handleShift(I); }
1852 /// \brief Instrument llvm.memmove
1854 /// At this point we don't know if llvm.memmove will be inlined or not.
1855 /// If we don't instrument it and it gets inlined,
1856 /// our interceptor will not kick in and we will lose the memmove.
1857 /// If we instrument the call here, but it does not get inlined,
1858 /// we will memmove the shadow twice, which is bad in case
1859 /// of overlapping regions. So, we simply lower the intrinsic to a call.
1861 /// Similar situation exists for memcpy and memset.
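/// A minimal sketch of the resulting lowering (assuming the usual MSan
/// runtime interceptors): the intrinsic is replaced with a plain call such as
///   call i8* @__msan_memmove(i8* %dst, i8* %src, i64 %n)
/// so the runtime moves the application bytes and their shadow together.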
1862 void visitMemMoveInst(MemMoveInst &I) {
1863 IRBuilder<> IRB(&I);
1864 IRB.CreateCall(
1865 MS.MemmoveFn,
1866 {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1867 IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
1868 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
1869 I.eraseFromParent();
1870 }
1872 // Similar to memmove: avoid copying shadow twice.
1873 // This is somewhat unfortunate as it may slow down small constant memcpys.
1874 // FIXME: consider doing manual inline for small constant sizes and proper
1875 // alignment.
1876 void visitMemCpyInst(MemCpyInst &I) {
1877 IRBuilder<> IRB(&I);
1878 IRB.CreateCall(
1879 MS.MemcpyFn,
1880 {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1881 IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
1882 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
1883 I.eraseFromParent();
1884 }
1886 // Same as memcpy.
1887 void visitMemSetInst(MemSetInst &I) {
1888 IRBuilder<> IRB(&I);
1889 IRB.CreateCall(
1890 MS.MemsetFn,
1891 {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
1892 IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
1893 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
1894 I.eraseFromParent();
1895 }
1897 void visitVAStartInst(VAStartInst &I) {
1898 VAHelper->visitVAStartInst(I);
1899 }
1901 void visitVACopyInst(VACopyInst &I) {
1902 VAHelper->visitVACopyInst(I);
1903 }
1905 /// \brief Handle vector store-like intrinsics.
1907 /// Instrument intrinsics that look like a simple SIMD store: writes memory,
1908 /// has 1 pointer argument and 1 vector argument, returns void.
1909 bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
1910 IRBuilder<> IRB(&I);
1911 Value* Addr = I.getArgOperand(0);
1912 Value *Shadow = getShadow(&I, 1);
1913 Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);
1915 // We don't know the pointer alignment (could be unaligned SSE store!).
1916 // Have to assume the worst case.
1917 IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);
1919 if (ClCheckAccessAddress)
1920 insertShadowCheck(Addr, &I);
1922 // FIXME: use ClStoreCleanOrigin
1923 // FIXME: factor out common code from materializeStores
1924 if (MS.TrackOrigins)
1925 IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB, 1));
1926 return true;
1927 }
1929 /// \brief Handle vector load-like intrinsics.
1931 /// Instrument intrinsics that look like a simple SIMD load: reads memory,
1932 /// has 1 pointer argument, returns a vector.
1933 bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
1934 IRBuilder<> IRB(&I);
1935 Value *Addr = I.getArgOperand(0);
1937 Type *ShadowTy = getShadowTy(&I);
1938 if (PropagateShadow) {
1939 Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
1940 // We don't know the pointer alignment (could be unaligned SSE load!).
1941 // Have to assume the worst case.
1942 setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, 1, "_msld"));
1943 } else {
1944 setShadow(&I, getCleanShadow(&I));
1945 }
1947 if (ClCheckAccessAddress)
1948 insertShadowCheck(Addr, &I);
1950 if (MS.TrackOrigins) {
1951 if (PropagateShadow)
1952 setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB, 1)));
1953 else
1954 setOrigin(&I, getCleanOrigin());
1955 }
1956 return true;
1957 }
1959 /// \brief Handle (SIMD arithmetic)-like intrinsics.
1961 /// Instrument intrinsics with any number of arguments of the same type,
1962 /// equal to the return type. The type should be simple (no aggregates or
1963 /// pointers; vectors are fine).
1964 /// Caller guarantees that this intrinsic does not access memory.
1965 bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
1966 Type *RetTy = I.getType();
1967 if (!(RetTy->isIntOrIntVectorTy() ||
1968 RetTy->isFPOrFPVectorTy() ||
1969 RetTy->isX86_MMXTy()))
1970 return false;
1972 unsigned NumArgOperands = I.getNumArgOperands();
1974 for (unsigned i = 0; i < NumArgOperands; ++i) {
1975 Type *Ty = I.getArgOperand(i)->getType();
1976 if (Ty != RetTy)
1977 return false;
1978 }
1980 IRBuilder<> IRB(&I);
1981 ShadowAndOriginCombiner SC(this, IRB);
1982 for (unsigned i = 0; i < NumArgOperands; ++i)
1983 SC.Add(I.getArgOperand(i));
1984 SC.Done(&I);
1986 return true;
1987 }
1989 /// \brief Heuristically instrument unknown intrinsics.
1991 /// The main purpose of this code is to do something reasonable with all
1992 /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
1993 /// We recognize several classes of intrinsics by their argument types and
1994 /// ModRefBehavior and apply special instrumentation when we are reasonably
1995 /// sure that we know what the intrinsic does.
1997 /// We special-case intrinsics where this approach fails. See llvm.bswap
1998 /// handling as an example of that.
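/// For example (purely illustrative signatures): an intrinsic of the form
///   declare void @llvm.xxx.store(i8*, <4 x i32>)
/// that writes memory is treated as a SIMD store, while
///   declare <4 x i32> @llvm.xxx.load(i8*)
/// that only reads memory is treated as a SIMD load.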
1999 bool handleUnknownIntrinsic(IntrinsicInst &I) {
2000 unsigned NumArgOperands = I.getNumArgOperands();
2001 if (NumArgOperands == 0)
2002 return false;
2004 if (NumArgOperands == 2 &&
2005 I.getArgOperand(0)->getType()->isPointerTy() &&
2006 I.getArgOperand(1)->getType()->isVectorTy() &&
2007 I.getType()->isVoidTy() &&
2008 !I.onlyReadsMemory()) {
2009 // This looks like a vector store.
2010 return handleVectorStoreIntrinsic(I);
2011 }
2013 if (NumArgOperands == 1 &&
2014 I.getArgOperand(0)->getType()->isPointerTy() &&
2015 I.getType()->isVectorTy() &&
2016 I.onlyReadsMemory()) {
2017 // This looks like a vector load.
2018 return handleVectorLoadIntrinsic(I);
2019 }
2021 if (I.doesNotAccessMemory())
2022 if (maybeHandleSimpleNomemIntrinsic(I))
2023 return true;
2025 // FIXME: detect and handle SSE maskstore/maskload
2026 return false;
2027 }
2029 void handleBswap(IntrinsicInst &I) {
2030 IRBuilder<> IRB(&I);
2031 Value *Op = I.getArgOperand(0);
2032 Type *OpType = Op->getType();
2033 Function *BswapFunc = Intrinsic::getDeclaration(
2034 F.getParent(), Intrinsic::bswap, makeArrayRef(&OpType, 1));
2035 setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
2036 setOrigin(&I, getOrigin(Op));
2037 }
2039 // \brief Instrument vector convert intrinsic.
2041 // This function instruments intrinsics like cvtsi2ss:
2042 // %Out = int_xxx_cvtyyy(%ConvertOp)
2043 // or
2044 // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
2045 // Intrinsic converts \p NumUsedElements elements of \p ConvertOp to the same
2046 // number of \p Out elements, and (if it has 2 arguments) copies the rest of
2047 // the elements from \p CopyOp.
2048 // In most cases conversion involves a floating-point value which may trigger
2049 // a hardware exception when not fully initialized. For this reason we require
2050 // \p ConvertOp[0:NumUsedElements] to be fully initialized and trap otherwise.
2051 // We copy the shadow of \p CopyOp[NumUsedElements:] to \p
2052 // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp always
2053 // return a fully initialized value.
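// Worked example (illustrative): for %Out = cvtsi2ss(%CopyOp, %ConvertOp)
// with NumUsedElements == 1, element 0 of ConvertOp's shadow is checked and
// reported if poisoned, element 0 of the result shadow is set to zero, and
// the remaining result elements take their shadow from CopyOp.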
2054 void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements) {
2055 IRBuilder<> IRB(&I);
2056 Value *CopyOp, *ConvertOp;
2058 switch (I.getNumArgOperands()) {
2059 case 3:
2060 assert(isa<ConstantInt>(I.getArgOperand(2)) && "Invalid rounding mode");
2061 case 2:
2062 CopyOp = I.getArgOperand(0);
2063 ConvertOp = I.getArgOperand(1);
2064 break;
2065 case 1:
2066 ConvertOp = I.getArgOperand(0);
2067 CopyOp = nullptr;
2068 break;
2069 default:
2070 llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
2071 }
2073 // The first *NumUsedElements* elements of ConvertOp are converted to the
2074 // same number of output elements. The rest of the output is copied from
2075 // CopyOp, or (if not available) filled with zeroes.
2076 // Combine shadow for elements of ConvertOp that are used in this operation,
2077 // and insert a check.
2078 // FIXME: consider propagating shadow of ConvertOp, at least in the case of
2079 // int->any conversion.
2080 Value *ConvertShadow = getShadow(ConvertOp);
2081 Value *AggShadow = nullptr;
2082 if (ConvertOp->getType()->isVectorTy()) {
2083 AggShadow = IRB.CreateExtractElement(
2084 ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
2085 for (int i = 1; i < NumUsedElements; ++i) {
2086 Value *MoreShadow = IRB.CreateExtractElement(
2087 ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
2088 AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
2089 }
2090 } else {
2091 AggShadow = ConvertShadow;
2092 }
2093 assert(AggShadow->getType()->isIntegerTy());
2094 insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);
2096 // Build result shadow by zero-filling parts of CopyOp shadow that come from
2097 // ConvertOp.
2098 if (CopyOp) {
2099 assert(CopyOp->getType() == I.getType());
2100 assert(CopyOp->getType()->isVectorTy());
2101 Value *ResultShadow = getShadow(CopyOp);
2102 Type *EltTy = ResultShadow->getType()->getVectorElementType();
2103 for (int i = 0; i < NumUsedElements; ++i) {
2104 ResultShadow = IRB.CreateInsertElement(
2105 ResultShadow, ConstantInt::getNullValue(EltTy),
2106 ConstantInt::get(IRB.getInt32Ty(), i));
2107 }
2108 setShadow(&I, ResultShadow);
2109 setOrigin(&I, getOrigin(CopyOp));
2110 } else {
2111 setShadow(&I, getCleanShadow(&I));
2112 setOrigin(&I, getCleanOrigin());
2113 }
2114 }
2116 // Given a scalar or vector, extract lower 64 bits (or less), and return all
2117 // zeroes if it is zero, and all ones otherwise.
2118 Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
2119 if (S->getType()->isVectorTy())
2120 S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
2121 assert(S->getType()->getPrimitiveSizeInBits() <= 64);
2122 Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
2123 return CreateShadowCast(IRB, S2, T, /* Signed */ true);
2124 }
2126 Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
2127 Type *T = S->getType();
2128 assert(T->isVectorTy());
2129 Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
2130 return IRB.CreateSExt(S2, T);
2131 }
2133 // \brief Instrument vector shift intrinsic.
2135 // This function instruments intrinsics like int_x86_avx2_psll_w.
2136 // Intrinsic shifts %In by %ShiftSize bits.
2137 // %ShiftSize may be a vector. In that case the lower 64 bits determine shift
2138 // size, and the rest is ignored. Behavior is defined even if shift size is
2139 // greater than register (or field) width.
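// Sketch (illustrative): for %Out = psll.w(%In, %ShiftSize), the shadow of
// %In is shifted by the same %ShiftSize via the same intrinsic; if %ShiftSize
// itself has any poisoned bits, the OR with S2Conv below poisons the whole
// result.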
2140 void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
2141 assert(I.getNumArgOperands() == 2);
2142 IRBuilder<> IRB(&I);
2143 // If any of the S2 bits are poisoned, the whole thing is poisoned.
2144 // Otherwise perform the same shift on S1.
2145 Value *S1 = getShadow(&I, 0);
2146 Value *S2 = getShadow(&I, 1);
2147 Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
2148 : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
2149 Value *V1 = I.getOperand(0);
2150 Value *V2 = I.getOperand(1);
2151 Value *Shift = IRB.CreateCall(I.getCalledValue(),
2152 {IRB.CreateBitCast(S1, V1->getType()), V2});
2153 Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
2154 setShadow(&I, IRB.CreateOr(Shift, S2Conv));
2155 setOriginForNaryOp(I);
2156 }
2158 // \brief Get an X86_MMX-sized vector type.
2159 Type *getMMXVectorTy(unsigned EltSizeInBits) {
2160 const unsigned X86_MMXSizeInBits = 64;
2161 return VectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
2162 X86_MMXSizeInBits / EltSizeInBits);
2163 }
2165 // \brief Returns a signed counterpart for an (un)signed-saturate-and-pack
2166 // intrinsic.
2167 Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
2168 switch (id) {
2169 case llvm::Intrinsic::x86_sse2_packsswb_128:
2170 case llvm::Intrinsic::x86_sse2_packuswb_128:
2171 return llvm::Intrinsic::x86_sse2_packsswb_128;
2173 case llvm::Intrinsic::x86_sse2_packssdw_128:
2174 case llvm::Intrinsic::x86_sse41_packusdw:
2175 return llvm::Intrinsic::x86_sse2_packssdw_128;
2177 case llvm::Intrinsic::x86_avx2_packsswb:
2178 case llvm::Intrinsic::x86_avx2_packuswb:
2179 return llvm::Intrinsic::x86_avx2_packsswb;
2181 case llvm::Intrinsic::x86_avx2_packssdw:
2182 case llvm::Intrinsic::x86_avx2_packusdw:
2183 return llvm::Intrinsic::x86_avx2_packssdw;
2185 case llvm::Intrinsic::x86_mmx_packsswb:
2186 case llvm::Intrinsic::x86_mmx_packuswb:
2187 return llvm::Intrinsic::x86_mmx_packsswb;
2189 case llvm::Intrinsic::x86_mmx_packssdw:
2190 return llvm::Intrinsic::x86_mmx_packssdw;
2192 llvm_unreachable("unexpected intrinsic id");
2193 }
2194 }
2196 // \brief Instrument vector pack intrinsic.
2198 // This function instruments intrinsics like x86_mmx_packsswb, that
2199 // packs elements of 2 input vectors into half as many bits with saturation.
2200 // Shadow is propagated with the signed variant of the same intrinsic applied
2201 // to sext(Sa != zeroinitializer), sext(Sb != zeroinitializer).
2202 // EltSizeInBits is used only for x86mmx arguments.
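// Sketch (illustrative): for packsswb(%a, %b), every input lane with a
// non-zero shadow is first widened to an all-ones lane; the signed pack then
// saturates each such lane to a non-zero byte pattern, so every poisoned
// input lane produces a fully poisoned output lane.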
2203 void handleVectorPackIntrinsic(IntrinsicInst &I, unsigned EltSizeInBits = 0) {
2204 assert(I.getNumArgOperands() == 2);
2205 bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2206 IRBuilder<> IRB(&I);
2207 Value *S1 = getShadow(&I, 0);
2208 Value *S2 = getShadow(&I, 1);
2209 assert(isX86_MMX || S1->getType()->isVectorTy());
2211 // SExt and ICmpNE below must apply to individual elements of input vectors.
2212 // In case of x86mmx arguments, cast them to appropriate vector types and
2213 // back.
2214 Type *T = isX86_MMX ? getMMXVectorTy(EltSizeInBits) : S1->getType();
2215 if (isX86_MMX) {
2216 S1 = IRB.CreateBitCast(S1, T);
2217 S2 = IRB.CreateBitCast(S2, T);
2218 }
2219 Value *S1_ext = IRB.CreateSExt(
2220 IRB.CreateICmpNE(S1, llvm::Constant::getNullValue(T)), T);
2221 Value *S2_ext = IRB.CreateSExt(
2222 IRB.CreateICmpNE(S2, llvm::Constant::getNullValue(T)), T);
2223 if (isX86_MMX) {
2224 Type *X86_MMXTy = Type::getX86_MMXTy(*MS.C);
2225 S1_ext = IRB.CreateBitCast(S1_ext, X86_MMXTy);
2226 S2_ext = IRB.CreateBitCast(S2_ext, X86_MMXTy);
2227 }
2229 Function *ShadowFn = Intrinsic::getDeclaration(
2230 F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));
2232 Value *S =
2233 IRB.CreateCall(ShadowFn, {S1_ext, S2_ext}, "_msprop_vector_pack");
2234 if (isX86_MMX) S = IRB.CreateBitCast(S, getShadowTy(&I));
2235 setShadow(&I, S);
2236 setOriginForNaryOp(I);
2237 }
2239 // \brief Instrument sum-of-absolute-differences intrinsic.
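// Sketch (illustrative): psadbw sums absolute differences of 8 byte pairs,
// so each result lane is at most 8 * 255 < 2**16 and its upper bits are
// always zero, hence always initialized. The shift below clears all but the
// low 16 bits of each lane of the sign-extended shadow.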
2240 void handleVectorSadIntrinsic(IntrinsicInst &I) {
2241 const unsigned SignificantBitsPerResultElement = 16;
2242 bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2243 Type *ResTy = isX86_MMX ? IntegerType::get(*MS.C, 64) : I.getType();
2244 unsigned ZeroBitsPerResultElement =
2245 ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement;
2247 IRBuilder<> IRB(&I);
2248 Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2249 S = IRB.CreateBitCast(S, ResTy);
2250 S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
2251 ResTy);
2252 S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
2253 S = IRB.CreateBitCast(S, getShadowTy(&I));
2254 setShadow(&I, S);
2255 setOriginForNaryOp(I);
2256 }
2258 // \brief Instrument multiply-add intrinsic.
2259 void handleVectorPmaddIntrinsic(IntrinsicInst &I,
2260 unsigned EltSizeInBits = 0) {
2261 bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2262 Type *ResTy = isX86_MMX ? getMMXVectorTy(EltSizeInBits * 2) : I.getType();
2263 IRBuilder<> IRB(&I);
2264 Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2265 S = IRB.CreateBitCast(S, ResTy);
2266 S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
2267 ResTy);
2268 S = IRB.CreateBitCast(S, getShadowTy(&I));
2269 setShadow(&I, S);
2270 setOriginForNaryOp(I);
2271 }
2273 void visitIntrinsicInst(IntrinsicInst &I) {
2274 switch (I.getIntrinsicID()) {
2275 case llvm::Intrinsic::bswap:
2276 handleBswap(I);
2277 break;
2278 case llvm::Intrinsic::x86_avx512_cvtsd2usi64:
2279 case llvm::Intrinsic::x86_avx512_cvtsd2usi:
2280 case llvm::Intrinsic::x86_avx512_cvtss2usi64:
2281 case llvm::Intrinsic::x86_avx512_cvtss2usi:
2282 case llvm::Intrinsic::x86_avx512_cvttss2usi64:
2283 case llvm::Intrinsic::x86_avx512_cvttss2usi:
2284 case llvm::Intrinsic::x86_avx512_cvttsd2usi64:
2285 case llvm::Intrinsic::x86_avx512_cvttsd2usi:
2286 case llvm::Intrinsic::x86_avx512_cvtusi2sd:
2287 case llvm::Intrinsic::x86_avx512_cvtusi2ss:
2288 case llvm::Intrinsic::x86_avx512_cvtusi642sd:
2289 case llvm::Intrinsic::x86_avx512_cvtusi642ss:
2290 case llvm::Intrinsic::x86_sse2_cvtsd2si64:
2291 case llvm::Intrinsic::x86_sse2_cvtsd2si:
2292 case llvm::Intrinsic::x86_sse2_cvtsd2ss:
2293 case llvm::Intrinsic::x86_sse2_cvtsi2sd:
2294 case llvm::Intrinsic::x86_sse2_cvtsi642sd:
2295 case llvm::Intrinsic::x86_sse2_cvtss2sd:
2296 case llvm::Intrinsic::x86_sse2_cvttsd2si64:
2297 case llvm::Intrinsic::x86_sse2_cvttsd2si:
2298 case llvm::Intrinsic::x86_sse_cvtsi2ss:
2299 case llvm::Intrinsic::x86_sse_cvtsi642ss:
2300 case llvm::Intrinsic::x86_sse_cvtss2si64:
2301 case llvm::Intrinsic::x86_sse_cvtss2si:
2302 case llvm::Intrinsic::x86_sse_cvttss2si64:
2303 case llvm::Intrinsic::x86_sse_cvttss2si:
2304 handleVectorConvertIntrinsic(I, 1);
2305 break;
2306 case llvm::Intrinsic::x86_sse2_cvtdq2pd:
2307 case llvm::Intrinsic::x86_sse2_cvtps2pd:
2308 case llvm::Intrinsic::x86_sse_cvtps2pi:
2309 case llvm::Intrinsic::x86_sse_cvttps2pi:
2310 handleVectorConvertIntrinsic(I, 2);
2311 break;
2312 case llvm::Intrinsic::x86_avx2_psll_w:
2313 case llvm::Intrinsic::x86_avx2_psll_d:
2314 case llvm::Intrinsic::x86_avx2_psll_q:
2315 case llvm::Intrinsic::x86_avx2_pslli_w:
2316 case llvm::Intrinsic::x86_avx2_pslli_d:
2317 case llvm::Intrinsic::x86_avx2_pslli_q:
2318 case llvm::Intrinsic::x86_avx2_psrl_w:
2319 case llvm::Intrinsic::x86_avx2_psrl_d:
2320 case llvm::Intrinsic::x86_avx2_psrl_q:
2321 case llvm::Intrinsic::x86_avx2_psra_w:
2322 case llvm::Intrinsic::x86_avx2_psra_d:
2323 case llvm::Intrinsic::x86_avx2_psrli_w:
2324 case llvm::Intrinsic::x86_avx2_psrli_d:
2325 case llvm::Intrinsic::x86_avx2_psrli_q:
2326 case llvm::Intrinsic::x86_avx2_psrai_w:
2327 case llvm::Intrinsic::x86_avx2_psrai_d:
2328 case llvm::Intrinsic::x86_sse2_psll_w:
2329 case llvm::Intrinsic::x86_sse2_psll_d:
2330 case llvm::Intrinsic::x86_sse2_psll_q:
2331 case llvm::Intrinsic::x86_sse2_pslli_w:
2332 case llvm::Intrinsic::x86_sse2_pslli_d:
2333 case llvm::Intrinsic::x86_sse2_pslli_q:
2334 case llvm::Intrinsic::x86_sse2_psrl_w:
2335 case llvm::Intrinsic::x86_sse2_psrl_d:
2336 case llvm::Intrinsic::x86_sse2_psrl_q:
2337 case llvm::Intrinsic::x86_sse2_psra_w:
2338 case llvm::Intrinsic::x86_sse2_psra_d:
2339 case llvm::Intrinsic::x86_sse2_psrli_w:
2340 case llvm::Intrinsic::x86_sse2_psrli_d:
2341 case llvm::Intrinsic::x86_sse2_psrli_q:
2342 case llvm::Intrinsic::x86_sse2_psrai_w:
2343 case llvm::Intrinsic::x86_sse2_psrai_d:
2344 case llvm::Intrinsic::x86_mmx_psll_w:
2345 case llvm::Intrinsic::x86_mmx_psll_d:
2346 case llvm::Intrinsic::x86_mmx_psll_q:
2347 case llvm::Intrinsic::x86_mmx_pslli_w:
2348 case llvm::Intrinsic::x86_mmx_pslli_d:
2349 case llvm::Intrinsic::x86_mmx_pslli_q:
2350 case llvm::Intrinsic::x86_mmx_psrl_w:
2351 case llvm::Intrinsic::x86_mmx_psrl_d:
2352 case llvm::Intrinsic::x86_mmx_psrl_q:
2353 case llvm::Intrinsic::x86_mmx_psra_w:
2354 case llvm::Intrinsic::x86_mmx_psra_d:
2355 case llvm::Intrinsic::x86_mmx_psrli_w:
2356 case llvm::Intrinsic::x86_mmx_psrli_d:
2357 case llvm::Intrinsic::x86_mmx_psrli_q:
2358 case llvm::Intrinsic::x86_mmx_psrai_w:
2359 case llvm::Intrinsic::x86_mmx_psrai_d:
2360 handleVectorShiftIntrinsic(I, /* Variable */ false);
2361 break;
2362 case llvm::Intrinsic::x86_avx2_psllv_d:
2363 case llvm::Intrinsic::x86_avx2_psllv_d_256:
2364 case llvm::Intrinsic::x86_avx2_psllv_q:
2365 case llvm::Intrinsic::x86_avx2_psllv_q_256:
2366 case llvm::Intrinsic::x86_avx2_psrlv_d:
2367 case llvm::Intrinsic::x86_avx2_psrlv_d_256:
2368 case llvm::Intrinsic::x86_avx2_psrlv_q:
2369 case llvm::Intrinsic::x86_avx2_psrlv_q_256:
2370 case llvm::Intrinsic::x86_avx2_psrav_d:
2371 case llvm::Intrinsic::x86_avx2_psrav_d_256:
2372 handleVectorShiftIntrinsic(I, /* Variable */ true);
2373 break;
2375 case llvm::Intrinsic::x86_sse2_packsswb_128:
2376 case llvm::Intrinsic::x86_sse2_packssdw_128:
2377 case llvm::Intrinsic::x86_sse2_packuswb_128:
2378 case llvm::Intrinsic::x86_sse41_packusdw:
2379 case llvm::Intrinsic::x86_avx2_packsswb:
2380 case llvm::Intrinsic::x86_avx2_packssdw:
2381 case llvm::Intrinsic::x86_avx2_packuswb:
2382 case llvm::Intrinsic::x86_avx2_packusdw:
2383 handleVectorPackIntrinsic(I);
2384 break;
2386 case llvm::Intrinsic::x86_mmx_packsswb:
2387 case llvm::Intrinsic::x86_mmx_packuswb:
2388 handleVectorPackIntrinsic(I, 16);
2389 break;
2391 case llvm::Intrinsic::x86_mmx_packssdw:
2392 handleVectorPackIntrinsic(I, 32);
2393 break;
2395 case llvm::Intrinsic::x86_mmx_psad_bw:
2396 case llvm::Intrinsic::x86_sse2_psad_bw:
2397 case llvm::Intrinsic::x86_avx2_psad_bw:
2398 handleVectorSadIntrinsic(I);
2399 break;
2401 case llvm::Intrinsic::x86_sse2_pmadd_wd:
2402 case llvm::Intrinsic::x86_avx2_pmadd_wd:
2403 case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw_128:
2404 case llvm::Intrinsic::x86_avx2_pmadd_ub_sw:
2405 handleVectorPmaddIntrinsic(I);
2406 break;
2408 case llvm::Intrinsic::x86_ssse3_pmadd_ub_sw:
2409 handleVectorPmaddIntrinsic(I, 8);
2410 break;
2412 case llvm::Intrinsic::x86_mmx_pmadd_wd:
2413 handleVectorPmaddIntrinsic(I, 16);
2414 break;
2416 default:
2417 if (!handleUnknownIntrinsic(I))
2418 visitInstruction(I);
2419 break;
2420 }
2421 }
2423 void visitCallSite(CallSite CS) {
2424 Instruction &I = *CS.getInstruction();
2425 assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite");
2426 if (CS.isCall()) {
2427 CallInst *Call = cast<CallInst>(&I);
2429 // For inline asm, do the usual thing: check argument shadow and mark all
2430 // outputs as clean. Note that any side effects of the inline asm that are
2431 // not immediately visible in its constraints are not handled.
2432 if (Call->isInlineAsm()) {
2433 visitInstruction(I);
2434 return;
2435 }
2437 assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");
2439 // We are going to insert code that relies on the fact that the callee
2440 // will become a non-readonly function after it is instrumented by us. To
2441 // prevent this code from being optimized out, mark that function
2442 // non-readonly in advance.
2443 if (Function *Func = Call->getCalledFunction()) {
2444 // Clear out readonly/readnone attributes.
2445 AttrBuilder B;
2446 B.addAttribute(Attribute::ReadOnly)
2447 .addAttribute(Attribute::ReadNone);
2448 Func->removeAttributes(AttributeSet::FunctionIndex,
2449 AttributeSet::get(Func->getContext(),
2450 AttributeSet::FunctionIndex,
2451 B));
2452 }
2453 }
2454 IRBuilder<> IRB(&I);
2456 unsigned ArgOffset = 0;
2457 DEBUG(dbgs() << " CallSite: " << I << "\n");
2458 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
2459 ArgIt != End; ++ArgIt) {
2460 Value *A = *ArgIt;
2461 unsigned i = ArgIt - CS.arg_begin();
2462 if (!A->getType()->isSized()) {
2463 DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
2464 continue;
2465 }
2466 unsigned Size = 0;
2467 Value *Store = nullptr;
2468 // Compute the Shadow for arg even if it is ByVal, because
2469 // in that case getShadow() will copy the actual arg shadow to
2470 // __msan_param_tls.
2471 Value *ArgShadow = getShadow(A);
2472 Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
2473 DEBUG(dbgs() << " Arg#" << i << ": " << *A <<
2474 " Shadow: " << *ArgShadow << "\n");
2475 bool ArgIsInitialized = false;
2476 const DataLayout &DL = F.getParent()->getDataLayout();
2477 if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
2478 assert(A->getType()->isPointerTy() &&
2479 "ByVal argument is not a pointer!");
2480 Size = DL.getTypeAllocSize(A->getType()->getPointerElementType());
2481 if (ArgOffset + Size > kParamTLSSize) break;
2482 unsigned ParamAlignment = CS.getParamAlignment(i + 1);
2483 unsigned Alignment = std::min(ParamAlignment, kShadowTLSAlignment);
2484 Store = IRB.CreateMemCpy(ArgShadowBase,
2485 getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
2486 Size, Alignment);
2487 } else {
2488 Size = DL.getTypeAllocSize(A->getType());
2489 if (ArgOffset + Size > kParamTLSSize) break;
2490 Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
2491 kShadowTLSAlignment);
2492 Constant *Cst = dyn_cast<Constant>(ArgShadow);
2493 if (Cst && Cst->isNullValue()) ArgIsInitialized = true;
2494 }
2495 if (MS.TrackOrigins && !ArgIsInitialized)
2496 IRB.CreateStore(getOrigin(A),
2497 getOriginPtrForArgument(A, IRB, ArgOffset));
2498 (void)Store;
2499 assert(Size != 0 && Store != nullptr);
2500 DEBUG(dbgs() << " Param:" << *Store << "\n");
2501 ArgOffset += RoundUpToAlignment(Size, 8);
2502 }
2503 DEBUG(dbgs() << "  done with call args\n");
2505 FunctionType *FT =
2506 cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0));
2507 if (FT->isVarArg()) {
2508 VAHelper->visitCallSite(CS, IRB);
2509 }
2511 // Now, get the shadow for the RetVal.
2512 if (!I.getType()->isSized()) return;
2513 // Don't emit the epilogue for musttail call returns.
2514 if (CS.isCall() && cast<CallInst>(&I)->isMustTailCall()) return;
2515 IRBuilder<> IRBBefore(&I);
2516 // Until we have full dynamic coverage, make sure the retval shadow is 0.
2517 Value *Base = getShadowPtrForRetval(&I, IRBBefore);
2518 IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
2519 BasicBlock::iterator NextInsn;
2520 if (CS.isCall()) {
2521 NextInsn = ++I.getIterator();
2522 assert(NextInsn != I.getParent()->end());
2523 } else {
2524 BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
2525 if (!NormalDest->getSinglePredecessor()) {
2526 // FIXME: this case is tricky, so we are just conservative here.
2527 // Perhaps we need to split the edge between this BB and NormalDest,
2528 // but a naive attempt to use SplitEdge leads to a crash.
2529 setShadow(&I, getCleanShadow(&I));
2530 setOrigin(&I, getCleanOrigin());
2531 return;
2532 }
2533 NextInsn = NormalDest->getFirstInsertionPt();
2534 assert(NextInsn != NormalDest->end() &&
2535 "Could not find insertion point for retval shadow load");
2536 }
2537 IRBuilder<> IRBAfter(&*NextInsn);
2538 Value *RetvalShadow =
2539 IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
2540 kShadowTLSAlignment, "_msret");
2541 setShadow(&I, RetvalShadow);
2542 if (MS.TrackOrigins)
2543 setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
2544 }
2546 bool isAMustTailRetVal(Value *RetVal) {
2547 if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
2548 RetVal = I->getOperand(0);
2549 }
2550 if (auto *I = dyn_cast<CallInst>(RetVal)) {
2551 return I->isMustTailCall();
2552 }
2553 return false;
2554 }
2556 void visitReturnInst(ReturnInst &I) {
2557 IRBuilder<> IRB(&I);
2558 Value *RetVal = I.getReturnValue();
2559 if (!RetVal) return;
2560 // Don't emit the epilogue for musttail call returns.
2561 if (isAMustTailRetVal(RetVal)) return;
2562 Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
2563 if (CheckReturnValue) {
2564 insertShadowCheck(RetVal, &I);
2565 Value *Shadow = getCleanShadow(RetVal);
2566 IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
2567 } else {
2568 Value *Shadow = getShadow(RetVal);
2569 IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
2570 // FIXME: make it conditional if ClStoreCleanOrigin==0
2571 if (MS.TrackOrigins)
2572 IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
2573 }
2574 }
2576 void visitPHINode(PHINode &I) {
2577 IRBuilder<> IRB(&I);
2578 if (!PropagateShadow) {
2579 setShadow(&I, getCleanShadow(&I));
2580 setOrigin(&I, getCleanOrigin());
2581 return;
2582 }
2584 ShadowPHINodes.push_back(&I);
2585 setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
2586 "_msphi_s"));
2587 if (MS.TrackOrigins)
2588 setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
2589 "_msphi_o"));
2590 }
2592 void visitAllocaInst(AllocaInst &I) {
2593 setShadow(&I, getCleanShadow(&I));
2594 setOrigin(&I, getCleanOrigin());
2595 IRBuilder<> IRB(I.getNextNode());
2596 const DataLayout &DL = F.getParent()->getDataLayout();
2597 uint64_t Size = DL.getTypeAllocSize(I.getAllocatedType());
2598 if (PoisonStack && ClPoisonStackWithCall) {
2599 IRB.CreateCall(MS.MsanPoisonStackFn,
2600 {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
2601 ConstantInt::get(MS.IntptrTy, Size)});
2602 } else {
2603 Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB);
2604 Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
2605 IRB.CreateMemSet(ShadowBase, PoisonValue, Size, I.getAlignment());
2606 }
2608 if (PoisonStack && MS.TrackOrigins) {
2609 SmallString<2048> StackDescriptionStorage;
2610 raw_svector_ostream StackDescription(StackDescriptionStorage);
2611 // We create a string with a description of the stack allocation and
2612 // pass it into __msan_set_alloca_origin.
2613 // It will be printed by the run-time if stack-originated UMR is found.
2614 // The first 4 bytes of the string are set to '----' and will be replaced
2615 // by __msan_va_arg_overflow_size_tls at the first call.
2616 StackDescription << "----" << I.getName() << "@" << F.getName();
2617 Value *Descr =
2618 createPrivateNonConstGlobalForString(*F.getParent(),
2619 StackDescription.str());
2621 IRB.CreateCall(MS.MsanSetAllocaOrigin4Fn,
2622 {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
2623 ConstantInt::get(MS.IntptrTy, Size),
2624 IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
2625 IRB.CreatePointerCast(&F, MS.IntptrTy)});
2626 }
2627 }
2629 void visitSelectInst(SelectInst& I) {
2630 IRBuilder<> IRB(&I);
2631 // a = select b, c, d
2632 Value *B = I.getCondition();
2633 Value *C = I.getTrueValue();
2634 Value *D = I.getFalseValue();
2635 Value *Sb = getShadow(B);
2636 Value *Sc = getShadow(C);
2637 Value *Sd = getShadow(D);
2639 // Result shadow if condition shadow is 0.
2640 Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
2641 Value *Sa1;
2642 if (I.getType()->isAggregateType()) {
2643 // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do
2644 // an extra "select". This results in much more compact IR.
2645 // Sa = select Sb, poisoned, (select b, Sc, Sd)
2646 Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
2647 } else {
2648 // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
2649 // If Sb (condition is poisoned), look for bits in c and d that are equal
2650 // and both unpoisoned.
2651 // If !Sb (condition is unpoisoned), simply pick one of Sc and Sd.
2653 // Cast arguments to shadow-compatible type.
2654 C = CreateAppToShadowCast(IRB, C);
2655 D = CreateAppToShadowCast(IRB, D);
2657 // Result shadow if condition shadow is 1.
2658 Sa1 = IRB.CreateOr(IRB.CreateXor(C, D), IRB.CreateOr(Sc, Sd));
2659 }
2660 Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
2661 setShadow(&I, Sa);
2662 if (MS.TrackOrigins) {
2663 // Origins are always i32, so any vector conditions must be flattened.
2664 // FIXME: consider tracking vector origins for app vectors?
2665 if (B->getType()->isVectorTy()) {
2666 Type *FlatTy = getShadowTyNoVec(B->getType());
2667 B = IRB.CreateICmpNE(IRB.CreateBitCast(B, FlatTy),
2668 ConstantInt::getNullValue(FlatTy));
2669 Sb = IRB.CreateICmpNE(IRB.CreateBitCast(Sb, FlatTy),
2670 ConstantInt::getNullValue(FlatTy));
2671 }
2672 // a = select b, c, d
2673 // Oa = Sb ? Ob : (b ? Oc : Od)
2674 setOrigin(
2675 &I, IRB.CreateSelect(Sb, getOrigin(I.getCondition()),
2676 IRB.CreateSelect(B, getOrigin(I.getTrueValue()),
2677 getOrigin(I.getFalseValue()))));
2678 }
2679 }
2681 void visitLandingPadInst(LandingPadInst &I) {
2682 // Do nothing.
2683 // See http://code.google.com/p/memory-sanitizer/issues/detail?id=1
2684 setShadow(&I, getCleanShadow(&I));
2685 setOrigin(&I, getCleanOrigin());
2686 }
2688 void visitCatchSwitchInst(CatchSwitchInst &I) {
2689 setShadow(&I, getCleanShadow(&I));
2690 setOrigin(&I, getCleanOrigin());
2691 }
2693 void visitFuncletPadInst(FuncletPadInst &I) {
2694 setShadow(&I, getCleanShadow(&I));
2695 setOrigin(&I, getCleanOrigin());
2696 }
2698 void visitGetElementPtrInst(GetElementPtrInst &I) {
2699 handleShadowOr(I);
2700 }
2702 void visitExtractValueInst(ExtractValueInst &I) {
2703 IRBuilder<> IRB(&I);
2704 Value *Agg = I.getAggregateOperand();
2705 DEBUG(dbgs() << "ExtractValue: " << I << "\n");
2706 Value *AggShadow = getShadow(Agg);
2707 DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n");
2708 Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
2709 DEBUG(dbgs() << " ResShadow: " << *ResShadow << "\n");
2710 setShadow(&I, ResShadow);
2711 setOriginForNaryOp(I);
2712 }
2714 void visitInsertValueInst(InsertValueInst &I) {
2715 IRBuilder<> IRB(&I);
2716 DEBUG(dbgs() << "InsertValue: " << I << "\n");
2717 Value *AggShadow = getShadow(I.getAggregateOperand());
2718 Value *InsShadow = getShadow(I.getInsertedValueOperand());
2719 DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n");
2720 DEBUG(dbgs() << " InsShadow: " << *InsShadow << "\n");
2721 Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
2722 DEBUG(dbgs() << "   Res:       " << *Res << "\n");
2723 setShadow(&I, Res);
2724 setOriginForNaryOp(I);
2725 }
2727 void dumpInst(Instruction &I) {
2728 if (CallInst *CI = dyn_cast<CallInst>(&I)) {
2729 errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
2730 } else {
2731 errs() << "ZZZ " << I.getOpcodeName() << "\n";
2732 }
2733 errs() << "QQQ " << I << "\n";
2734 }
2736 void visitResumeInst(ResumeInst &I) {
2737 DEBUG(dbgs() << "Resume: " << I << "\n");
2738 // Nothing to do here.
2739 }
2741 void visitCleanupReturnInst(CleanupReturnInst &CRI) {
2742 DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n");
2743 // Nothing to do here.
2744 }
2746 void visitCatchReturnInst(CatchReturnInst &CRI) {
2747 DEBUG(dbgs() << "CatchReturn: " << CRI << "\n");
2748 // Nothing to do here.
2749 }
2751 void visitInstruction(Instruction &I) {
2752 // Everything else: stop propagating and check for poisoned shadow.
2753 if (ClDumpStrictInstructions)
2754 dumpInst(I);
2755 DEBUG(dbgs() << "DEFAULT: " << I << "\n");
2756 for (size_t i = 0, n = I.getNumOperands(); i < n; i++)
2757 insertShadowCheck(I.getOperand(i), &I);
2758 setShadow(&I, getCleanShadow(&I));
2759 setOrigin(&I, getCleanOrigin());
2760 }
2761 };
2763 /// \brief AMD64-specific implementation of VarArgHelper.
2764 struct VarArgAMD64Helper : public VarArgHelper {
2765 // An unfortunate workaround for asymmetric lowering of va_arg stuff.
2766 // See a comment in visitCallSite for more details.
2767 static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7
2768 static const unsigned AMD64FpEndOffset = 176;
2770 Function &F;
2771 MemorySanitizer &MS;
2772 MemorySanitizerVisitor &MSV;
2773 Value *VAArgTLSCopy;
2774 Value *VAArgOverflowSize;
2776 SmallVector<CallInst*, 16> VAStartInstrumentationList;
2778 VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
2779 MemorySanitizerVisitor &MSV)
2780 : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
2781 VAArgOverflowSize(nullptr) {}
2783 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
2785 ArgKind classifyArgument(Value* arg) {
2786 // A very rough approximation of X86_64 argument classification rules.
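// For example (illustrative): <2 x double> classifies as AK_FloatingPoint,
// i32 and i8* as AK_GeneralPurpose, while i128 or an aggregate passed by
// value falls through to AK_Memory.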
2787 Type *T = arg->getType();
2788 if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
2789 return AK_FloatingPoint;
2790 if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
2791 return AK_GeneralPurpose;
2792 if (T->isPointerTy())
2793 return AK_GeneralPurpose;
2794 return AK_Memory;
2795 }
2797 // For VarArg functions, store the argument shadow in an ABI-specific format
2798 // that corresponds to va_list layout.
2799 // We do this because Clang lowers va_arg in the frontend, and this pass
2800 // only sees the low level code that deals with va_list internals.
2801 // A much easier alternative (provided that Clang emits va_arg instructions)
2802 // would have been to associate each live instance of va_list with a copy of
2803 // MSanParamTLS, and extract shadow on va_arg() call in the argument list
2804 // order.
2805 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
2806 unsigned GpOffset = 0;
2807 unsigned FpOffset = AMD64GpEndOffset;
2808 unsigned OverflowOffset = AMD64FpEndOffset;
2809 const DataLayout &DL = F.getParent()->getDataLayout();
2810 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
2811 ArgIt != End; ++ArgIt) {
2812 Value *A = *ArgIt;
2813 unsigned ArgNo = CS.getArgumentNo(ArgIt);
2814 bool IsByVal = CS.paramHasAttr(ArgNo + 1, Attribute::ByVal);
2815 if (IsByVal) {
2816 // ByVal arguments always go to the overflow area.
2817 assert(A->getType()->isPointerTy());
2818 Type *RealTy = A->getType()->getPointerElementType();
2819 uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
2820 Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
2821 OverflowOffset += RoundUpToAlignment(ArgSize, 8);
2822 IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
2823 ArgSize, kShadowTLSAlignment);
2824 } else {
2825 ArgKind AK = classifyArgument(A);
2826 if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
2827 AK = AK_Memory;
2828 if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
2829 AK = AK_Memory;
2830 Value *Base;
2831 switch (AK) {
2832 case AK_GeneralPurpose:
2833 Base = getShadowPtrForVAArgument(A->getType(), IRB, GpOffset);
2834 GpOffset += 8;
2835 break;
2836 case AK_FloatingPoint:
2837 Base = getShadowPtrForVAArgument(A->getType(), IRB, FpOffset);
2838 FpOffset += 16;
2839 break;
2840 case AK_Memory:
2841 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
2842 Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
2843 OverflowOffset += RoundUpToAlignment(ArgSize, 8);
2844 }
2845 IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
2846 }
2847 }
2848 Constant *OverflowSize =
2849 ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
2850 IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
2851 }
2853 /// \brief Compute the shadow address for a given va_arg.
2854 Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
2855 int ArgOffset) {
2856 Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
2857 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
2858 return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
2859 "_msarg");
2860 }
2862 void visitVAStartInst(VAStartInst &I) override {
2863 if (F.getCallingConv() == CallingConv::X86_64_Win64)
2864 return;
2865 IRBuilder<> IRB(&I);
2866 VAStartInstrumentationList.push_back(&I);
2867 Value *VAListTag = I.getArgOperand(0);
2868 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
2870 // Unpoison the whole __va_list_tag.
2871 // FIXME: magic ABI constants.
2872 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
2873 /* size */24, /* alignment */8, false);
2874 }
2876 void visitVACopyInst(VACopyInst &I) override {
2877 if (F.getCallingConv() == CallingConv::X86_64_Win64)
2878 return;
2879 IRBuilder<> IRB(&I);
2880 Value *VAListTag = I.getArgOperand(0);
2881 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
2883 // Unpoison the whole __va_list_tag.
2884 // FIXME: magic ABI constants.
2885 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
2886 /* size */24, /* alignment */8, false);
2887 }
2889 void finalizeInstrumentation() override {
2890 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
2891 "finalizeInstrumentation called twice");
2892 if (!VAStartInstrumentationList.empty()) {
2893 // If there is a va_start in this function, make a backup copy of
2894 // va_arg_tls somewhere in the function entry block.
2895 IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
2896 VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
2897 Value *CopySize =
2898 IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
2899 VAArgOverflowSize);
2900 VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
2901 IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
2902 }
2904 // Instrument va_start.
2905 // Copy va_list shadow from the backup copy of the TLS contents.
2906 for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
2907 CallInst *OrigInst = VAStartInstrumentationList[i];
2908 IRBuilder<> IRB(OrigInst->getNextNode());
2909 Value *VAListTag = OrigInst->getArgOperand(0);
2911 Value *RegSaveAreaPtrPtr =
2912 IRB.CreateIntToPtr(
2913 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
2914 ConstantInt::get(MS.IntptrTy, 16)),
2915 Type::getInt64PtrTy(*MS.C));
2916 Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
2917 Value *RegSaveAreaShadowPtr =
2918 MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
2919 IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy,
2920 AMD64FpEndOffset, 16);
2922 Value *OverflowArgAreaPtrPtr =
2923 IRB.CreateIntToPtr(
2924 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
2925 ConstantInt::get(MS.IntptrTy, 8)),
2926 Type::getInt64PtrTy(*MS.C));
2927 Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
2928 Value *OverflowArgAreaShadowPtr =
2929 MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
2930 Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
2931 AMD64FpEndOffset);
2932 IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16);
2933 }
2934 }
2935 };
2937 /// \brief MIPS64-specific implementation of VarArgHelper.
2938 struct VarArgMIPS64Helper : public VarArgHelper {
2939 Function &F;
2940 MemorySanitizer &MS;
2941 MemorySanitizerVisitor &MSV;
2942 Value *VAArgTLSCopy;
2943 Value *VAArgSize;
2945 SmallVector<CallInst*, 16> VAStartInstrumentationList;
2947 VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
2948 MemorySanitizerVisitor &MSV)
2949 : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
2950 VAArgSize(nullptr) {}
2952 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
2953 unsigned VAArgOffset = 0;
2954 const DataLayout &DL = F.getParent()->getDataLayout();
2955 for (CallSite::arg_iterator ArgIt = CS.arg_begin() + 1, End = CS.arg_end();
2956 ArgIt != End; ++ArgIt) {
2957 Value *A = *ArgIt;
2958 Value *Base;
2959 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
2960 #if defined(__MIPSEB__) || defined(MIPSEB)
2961 // Adjust the shadow for arguments with size < 8 to match the placement
2962 // of bits in a big-endian system.
2963 if (ArgSize < 8)
2964 VAArgOffset += (8 - ArgSize);
2965 #endif
2966 Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset);
2967 VAArgOffset += ArgSize;
2968 VAArgOffset = RoundUpToAlignment(VAArgOffset, 8);
2969 IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
2970 }
2972 Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(), VAArgOffset);
2973 // Here we reuse VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating
2974 // a new class member, i.e. it holds the total size of all VarArgs.
2975 IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
2976 }
2978 /// \brief Compute the shadow address for a given va_arg.
2979 Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
2980 int ArgOffset) {
2981 Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
2982 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
2983 return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
2984 "_msarg");
2985 }
2987 void visitVAStartInst(VAStartInst &I) override {
2988 IRBuilder<> IRB(&I);
2989 VAStartInstrumentationList.push_back(&I);
2990 Value *VAListTag = I.getArgOperand(0);
2991 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
2992 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
2993 /* size */8, /* alignment */8, false);
2994 }
2996 void visitVACopyInst(VACopyInst &I) override {
2997 IRBuilder<> IRB(&I);
2998 Value *VAListTag = I.getArgOperand(0);
2999 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3000 // Unpoison the whole __va_list_tag.
3001 // FIXME: magic ABI constants.
3002 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3003 /* size */8, /* alignment */8, false);
3004 }
3006 void finalizeInstrumentation() override {
3007 assert(!VAArgSize && !VAArgTLSCopy &&
3008 "finalizeInstrumentation called twice");
3009 IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
3010 VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
3011 Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
3012 VAArgSize);
3014 if (!VAStartInstrumentationList.empty()) {
3015 // If there is a va_start in this function, make a backup copy of
3016 // va_arg_tls somewhere in the function entry block.
3017 VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
3018 IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
3019 }
3021 // Instrument va_start.
3022 // Copy va_list shadow from the backup copy of the TLS contents.
3023 for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
3024 CallInst *OrigInst = VAStartInstrumentationList[i];
3025 IRBuilder<> IRB(OrigInst->getNextNode());
3026 Value *VAListTag = OrigInst->getArgOperand(0);
3027 Value *RegSaveAreaPtrPtr =
3028 IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3029 Type::getInt64PtrTy(*MS.C));
3030 Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
3031 Value *RegSaveAreaShadowPtr =
3032 MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
3033 IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, CopySize, 8);
3034 }
3035 }
3036 };
3039 /// \brief AArch64-specific implementation of VarArgHelper.
3040 struct VarArgAArch64Helper : public VarArgHelper {
3041 static const unsigned kAArch64GrArgSize = 56;
3042 static const unsigned kAArch64VrArgSize = 128;
3044 static const unsigned AArch64GrBegOffset = 0;
3045 static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
3046 // Make VR space aligned to 16 bytes.
3047 static const unsigned AArch64VrBegOffset = AArch64GrEndOffset + 8;
3048 static const unsigned AArch64VrEndOffset = AArch64VrBegOffset
3049 + kAArch64VrArgSize;
3050 static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;
3052 Function &F;
3053 MemorySanitizer &MS;
3054 MemorySanitizerVisitor &MSV;
3055 Value *VAArgTLSCopy;
3056 Value *VAArgOverflowSize;
3058 SmallVector<CallInst*, 16> VAStartInstrumentationList;
3060 VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
3061 MemorySanitizerVisitor &MSV)
3062 : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
3063 VAArgOverflowSize(nullptr) {}
3065 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
3067 ArgKind classifyArgument(Value* arg) {
3068 Type *T = arg->getType();
3069 if (T->isFPOrFPVectorTy())
3070 return AK_FloatingPoint;
3071 if ((T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
3072 || (T->isPointerTy()))
3073 return AK_GeneralPurpose;
3074 return AK_Memory;
3075 }
3077 // The instrumentation stores the argument shadow in a non ABI-specific
3078 // format because it does not know which argument is named (since Clang,
3079 // as in the x86_64 case, lowers the va_args in the frontend and this pass
3080 // only
3080 // sees the low level code that deals with va_list internals).
3081 // The first seven GR registers are saved in the first 56 bytes of the
3082 // va_arg TLS array, followed by the first 8 FP/SIMD registers, and then
3083 // the remaining arguments.
3084 // Using a constant offset within the va_arg TLS array allows a fast copy
3085 // in finalizeInstrumentation.
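// Resulting va_arg TLS layout (derived from the constants above):
//   [0, 56)    shadow for GR argument registers
//   [64, 192)  shadow for FP/SIMD argument registers (16-byte aligned start)
//   [192, ...) shadow for arguments passed in memory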
3086 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
3087 unsigned GrOffset = AArch64GrBegOffset;
3088 unsigned VrOffset = AArch64VrBegOffset;
3089 unsigned OverflowOffset = AArch64VAEndOffset;
3091 const DataLayout &DL = F.getParent()->getDataLayout();
3092 for (CallSite::arg_iterator ArgIt = CS.arg_begin() + 1, End = CS.arg_end();
3093 ArgIt != End; ++ArgIt) {
3094 Value *A = *ArgIt;
3095 ArgKind AK = classifyArgument(A);
3096 if (AK == AK_GeneralPurpose && GrOffset >= AArch64GrEndOffset)
3097 AK = AK_Memory;
3098 if (AK == AK_FloatingPoint && VrOffset >= AArch64VrEndOffset)
3099 AK = AK_Memory;
3100 Value *Base;
3101 switch (AK) {
3102 case AK_GeneralPurpose:
3103 Base = getShadowPtrForVAArgument(A->getType(), IRB, GrOffset);
3104 GrOffset += 8;
3105 break;
3106 case AK_FloatingPoint:
3107 Base = getShadowPtrForVAArgument(A->getType(), IRB, VrOffset);
3108 VrOffset += 16;
3109 break;
3110 case AK_Memory:
3111 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
3112 Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset);
3113 OverflowOffset += RoundUpToAlignment(ArgSize, 8);
3114 break;
3115 }
3116 IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
3117 }
3118 Constant *OverflowSize =
3119 ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
3120 IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
3121 }
3123 /// Compute the shadow address for a given va_arg.
3124 Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
3125 int ArgOffset) {
3126 Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
3127 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
3128 return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
3129 "_msarg");
3130 }
3132 void visitVAStartInst(VAStartInst &I) override {
3133 IRBuilder<> IRB(&I);
3134 VAStartInstrumentationList.push_back(&I);
3135 Value *VAListTag = I.getArgOperand(0);
3136 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3137 // Unpoison the whole __va_list_tag.
3138 // FIXME: magic ABI constants (size of va_list).
3139 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3140 /* size */32, /* alignment */8, false);
3141 }
3143 void visitVACopyInst(VACopyInst &I) override {
3144 IRBuilder<> IRB(&I);
3145 Value *VAListTag = I.getArgOperand(0);
3146 Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);
3147 // Unpoison the whole __va_list_tag.
3148 // FIXME: magic ABI constants (size of va_list).
3149 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3150 /* size */32, /* alignment */8, false);
3151 }
3153 // Retrieve a va_list field of 'void*' size.
3154 Value* getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
3155 Value *SaveAreaPtrPtr =
3156 IRB.CreateIntToPtr(
3157 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3158 ConstantInt::get(MS.IntptrTy, offset)),
3159 Type::getInt64PtrTy(*MS.C));
3160 return IRB.CreateLoad(SaveAreaPtrPtr);
3161 }
3163 // Retrieve a va_list field of 'int' size.
3164 Value* getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
3165 Value *SaveAreaPtr =
3166 IRB.CreateIntToPtr(
3167 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
3168 ConstantInt::get(MS.IntptrTy, offset)),
3169 Type::getInt32PtrTy(*MS.C));
3170 Value *SaveArea32 = IRB.CreateLoad(SaveAreaPtr);
3171 return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
3172 }
3174 void finalizeInstrumentation() override {
3175 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
3176 "finalizeInstrumentation called twice");
3177 if (!VAStartInstrumentationList.empty()) {
3178 // If there is a va_start in this function, make a backup copy of
3179 // va_arg_tls somewhere in the function entry block.
3180 IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
3181 VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
3182 Value *CopySize =
3183 IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset),
3184 VAArgOverflowSize);
3185 VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
3186 IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
3187 }
3189 Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
3190 Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);
3192 // Instrument va_start, copy va_list shadow from the backup copy of
3193 // the TLS contents.
3194 for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
3195 CallInst *OrigInst = VAStartInstrumentationList[i];
3196 IRBuilder<> IRB(OrigInst->getNextNode());
3198 Value *VAListTag = OrigInst->getArgOperand(0);

      // The variadic ABI for AArch64 creates two areas in which to save the
      // incoming argument registers: one for the 64-bit general-purpose
      // registers x0-x7 and another for the 128-bit FP/SIMD registers v0-v7.
      // We therefore need to propagate argument shadow into both regions,
      // 'va::__gr_top + va::__gr_offs' and 'va::__vr_top + va::__vr_offs'.
      // The remaining arguments get their shadow from the copy made for
      // 'va::__stack'.
      // One caveat: only the unnamed (variadic) arguments need to be
      // propagated, but the call site instrumentation saves shadow for *all*
      // the arguments. So when copying shadow values out of the va_arg TLS
      // array we must adjust the offsets of both the GR and VR fields by the
      // corresponding __{gr,vr}_offs value, since those values depend on how
      // many named arguments arrived in registers.
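      //
      // A hypothetical example, for illustration only: for a callee
      // 'int f(int a, ...)' there is one named GR argument and no named
      // VR arguments, so at va_start the AAPCS64 sets
      //   __gr_offs = 0 - ((8 - 1) * 8)  = -56
      //   __vr_offs = 0 - ((8 - 0) * 16) = -128
      // and __gr_top/__vr_top point just past their save areas.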

      // Read the stack pointer from the va_list.
      Value *StackSaveAreaPtr = getVAField64(IRB, VAListTag, 0);

      // Read both __gr_top and __gr_offs and add them up.
      Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
      Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);

      Value *GrRegSaveAreaPtr = IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea);

      // Read both __vr_top and __vr_offs and add them up.
      Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
      Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);

      Value *VrRegSaveAreaPtr = IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea);

      // The instrumentation does not know how many named arguments the
      // callee actually uses, and at the call site shadow was saved for all
      // of the arguments. Since __gr_offs is defined as
      // '0 - ((8 - named_gr) * 8)', the idea is to propagate only the
      // variadic arguments' shadow by skipping the bytes of shadow that
      // belong to named arguments.
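      //
      // Continuing the hypothetical example above, and assuming
      // kAArch64GrArgSize covers the eight 8-byte GR save slots: with
      //   __gr_offs = -56
      // the copy below starts at
      //   GrRegSaveAreaShadowPtrOff = kAArch64GrArgSize + (-56),
      // i.e. just past the single named argument's shadow, and copies
      //   GrCopySize = kAArch64GrArgSize - GrRegSaveAreaShadowPtrOff
      // bytes of shadow, covering exactly the variadic GR arguments.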
      Value *GrRegSaveAreaShadowPtrOff =
        IRB.CreateAdd(GrArgSize, GrOffSaveArea);

      Value *GrRegSaveAreaShadowPtr =
        MSV.getShadowPtr(GrRegSaveAreaPtr, IRB.getInt8Ty(), IRB);

      Value *GrSrcPtr = IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
                                              GrRegSaveAreaShadowPtrOff);
      Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);

      IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, GrSrcPtr, GrCopySize, 8);

      // Again, but for FP/SIMD values.
      Value *VrRegSaveAreaShadowPtrOff =
        IRB.CreateAdd(VrArgSize, VrOffSaveArea);

      Value *VrRegSaveAreaShadowPtr =
        MSV.getShadowPtr(VrRegSaveAreaPtr, IRB.getInt8Ty(), IRB);

      Value *VrSrcPtr = IRB.CreateInBoundsGEP(
        IRB.getInt8Ty(),
        IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
                              IRB.getInt32(AArch64VrBegOffset)),
        VrRegSaveAreaShadowPtrOff);
      Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);

      IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, VrSrcPtr, VrCopySize, 8);

      // And finally for the remaining (stack-passed) arguments.
      Value *StackSaveAreaShadowPtr =
        MSV.getShadowPtr(StackSaveAreaPtr, IRB.getInt8Ty(), IRB);

      Value *StackSrcPtr =
        IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
                              IRB.getInt32(AArch64VAEndOffset));

      IRB.CreateMemCpy(StackSaveAreaShadowPtr, StackSrcPtr,
                       VAArgOverflowSize, 16);
    }
  }
};

/// \brief A no-op implementation of VarArgHelper.
struct VarArgNoOpHelper : public VarArgHelper {
  VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV) {}

  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {}

  void visitVAStartInst(VAStartInst &I) override {}

  void visitVACopyInst(VACopyInst &I) override {}

  void finalizeInstrumentation() override {}
};

VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                 MemorySanitizerVisitor &Visitor) {
  // Per-platform va_arg handling is implemented for AMD64, MIPS64 and
  // AArch64 only; on other targets the no-op helper is used, and false
  // positives on va_arg values are possible there.
  llvm::Triple TargetTriple(Func.getParent()->getTargetTriple());
  if (TargetTriple.getArch() == llvm::Triple::x86_64)
    return new VarArgAMD64Helper(Func, Msan, Visitor);
  else if (TargetTriple.getArch() == llvm::Triple::mips64 ||
           TargetTriple.getArch() == llvm::Triple::mips64el)
    return new VarArgMIPS64Helper(Func, Msan, Visitor);
  else if (TargetTriple.getArch() == llvm::Triple::aarch64)
    return new VarArgAArch64Helper(Func, Msan, Visitor);
  else
    return new VarArgNoOpHelper(Func, Msan, Visitor);
}
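
// (Usage note: the MemorySanitizerVisitor constructed for each function is
// expected to call CreateVarArgHelper() and own the returned helper; see
// the visitor's constructor earlier in this file.)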

} // anonymous namespace

bool MemorySanitizer::runOnFunction(Function &F) {
  if (&F == MsanCtorFunction)
    return false;
  MemorySanitizerVisitor Visitor(F, *this);

  // Clear out the readonly/readnone attributes: the instrumented function
  // now reads and writes shadow memory, and keeping these attributes would
  // let later passes drop or reorder the instrumentation.
  AttrBuilder B;
  B.addAttribute(Attribute::ReadOnly)
    .addAttribute(Attribute::ReadNone);
  F.removeAttributes(AttributeSet::FunctionIndex,
                     AttributeSet::get(F.getContext(),
                                       AttributeSet::FunctionIndex, B));

  return Visitor.runOnFunction();
}