//===-- MemorySanitizer.cpp - detector of uninitialized reads ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file is a part of MemorySanitizer, a detector of uninitialized
/// reads.
///
/// Status: early prototype.
///
/// The algorithm of the tool is similar to Memcheck
/// (http://goo.gl/QKbem). We associate a few shadow bits with every
/// byte of the application memory, poison the shadow of the malloc-ed
/// or alloca-ed memory, load the shadow bits on every memory read,
/// propagate the shadow bits through some of the arithmetic
/// instructions (including MOV), store the shadow bits on every memory
/// write, report a bug on some other instructions (e.g. JMP) if the
/// associated shadow is poisoned.
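///
/// For example, the shadow propagation for a 32-bit addition looks,
/// schematically, like this (%sa and %sb are the previously computed
/// shadows of %a and %b; the names are illustrative only):
///
///   %c  = add i32 %a, %b      ; application instruction
///   %sc = or  i32 %sa, %sb    ; shadow of %c: the OR of the operand shadows
///
/// A later "strict" use of %c (e.g. a conditional branch) is preceded by a
/// check that %sc is zero, and __msan_warning is called otherwise.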
///
/// But there are differences too. The first and the major one:
/// compiler instrumentation instead of binary instrumentation. This
/// gives us much better register allocation, possible compiler
/// optimizations and a fast start-up. But this brings the major issue
/// as well: msan needs to see all program events, including system
/// calls and reads/writes in system libraries, so we either need to
/// compile *everything* with msan or use a binary translation
/// component (e.g. DynamoRIO) to instrument pre-built libraries.
/// Another difference from Memcheck is that we use 8 shadow bits per
/// byte of application memory and use a direct shadow mapping. This
/// greatly simplifies the instrumentation code and avoids races on
/// shadow updates (Memcheck is single-threaded so races are not a
/// concern there. Memcheck uses 2 shadow bits per byte with a slow
/// path storage that uses 8 bits per byte).
///
/// The default value of shadow is 0, which means "clean" (not poisoned).
///
/// Every module initializer should call __msan_init to ensure that the
/// shadow memory is ready. On error, __msan_warning is called. Since
/// parameters and return values may be passed via registers, we have a
/// specialized thread-local shadow for return values
/// (__msan_retval_tls) and parameters (__msan_param_tls).
///
/// MemorySanitizer can track origins (allocation points) of all uninitialized
/// values. This behavior is controlled with a flag (msan-track-origins) and is
/// disabled by default.
///
/// Origins are 4-byte values created and interpreted by the runtime library.
/// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
/// of application memory. Propagation of origins is basically a bunch of
/// "select" instructions that pick the origin of a dirty argument, if an
/// instruction has one.
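///
/// Schematically, origin propagation for a two-operand instruction looks
/// like this (%oa/%ob are the operand origins, %sb is the shadow of the
/// second operand; the names are illustrative only):
///
///   %nz = icmp ne i32 %sb, 0
///   %oc = select i1 %nz, i32 %ob, i32 %oa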
///
/// Every 4 aligned, consecutive bytes of application memory have one origin
/// value associated with them. If these bytes contain uninitialized data
/// coming from 2 different allocations, the last store wins. Because of this,
/// MemorySanitizer reports can show unrelated origins, but this is unlikely
/// in practice.
///
/// Origins are meaningless for fully initialized values, so MemorySanitizer
/// avoids storing origin to memory when a fully initialized value is stored.
/// This way it avoids needlessly overwriting the origin of the 4-byte region
/// on a short (i.e. 1 byte) clean store, and it is also good for performance.
///
/// Ideally, every atomic store of application value should update the
/// corresponding shadow location in an atomic way. Unfortunately, atomic store
/// of two disjoint locations can not be done without severe slowdown.
///
/// Therefore, we implement an approximation that may err on the safe side.
/// In this implementation, every atomically accessed location in the program
/// may only change from (partially) uninitialized to fully initialized, but
/// not the other way around. We load the shadow _after_ the application load,
/// and we store the shadow _before_ the app store. Also, we always store clean
/// shadow (if the application store is atomic). This way, if the store-load
/// pair constitutes a happens-before arc, shadow store and load are correctly
/// ordered such that the load will get either the value that was stored, or
/// some later value (which is always clean).
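///
/// Schematically, for a release store and an acquire load of the same
/// location %p this gives:
///
///   store i32 0, i32* %shadow_of_p          ; clean shadow is stored first
///   store atomic i32 %v, i32* %p release
///   ...
///   %v2 = load atomic i32* %p acquire
///   %s2 = load i32* %shadow_of_p            ; shadow loaded after the app load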
///
/// This does not work very well with Compare-And-Swap (CAS) and
/// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
/// must store the new shadow before the app operation, and load the shadow
/// after the app operation. Computers don't work this way. Current
/// implementation ignores the load aspect of CAS/RMW, always returning a clean
/// value. It implements the store part as a simple atomic store by storing a
/// clean shadow.
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "msan"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/ValueMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/InstVisitor.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/SpecialCaseList.h"

using namespace llvm;
static const uint64_t kShadowMask32 = 1ULL << 31;
static const uint64_t kShadowMask64 = 1ULL << 46;
static const uint64_t kOriginOffset32 = 1ULL << 30;
static const uint64_t kOriginOffset64 = 1ULL << 45;
static const unsigned kMinOriginAlignment = 4;
static const unsigned kShadowTLSAlignment = 8;
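// As an illustration of the mapping below (see getShadowPtr/getOriginPtr),
// with the 64-bit constants above the application address 0x7f1234567890
// maps to the shadow address
//   0x7f1234567890 & ~(1ULL << 46) = 0x3f1234567890
// and its origin slot to
//   (0x3f1234567890 + (1ULL << 45)) & ~3ULL = 0x5f1234567890.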
/// \brief Track origins of uninitialized values.
///
/// Adds a section to MemorySanitizer report that points to the allocation
/// (stack or heap) the uninitialized bits came from originally.
static cl::opt<bool> ClTrackOrigins("msan-track-origins",
       cl::desc("Track origins (allocation sites) of poisoned memory"),
       cl::Hidden, cl::init(false));
static cl::opt<bool> ClKeepGoing("msan-keep-going",
       cl::desc("keep going after reporting a UMR"),
       cl::Hidden, cl::init(false));
static cl::opt<bool> ClPoisonStack("msan-poison-stack",
       cl::desc("poison uninitialized stack variables"),
       cl::Hidden, cl::init(true));
static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
       cl::desc("poison uninitialized stack variables with a call"),
       cl::Hidden, cl::init(false));
static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
       cl::desc("poison uninitialized stack variables with the given pattern"),
       cl::Hidden, cl::init(0xff));
static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
       cl::desc("poison undef temps"),
       cl::Hidden, cl::init(true));
static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
       cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
       cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
       cl::desc("exact handling of relational integer ICmp"),
       cl::Hidden, cl::init(false));

static cl::opt<bool> ClStoreCleanOrigin("msan-store-clean-origin",
       cl::desc("store origin for clean (fully initialized) values"),
       cl::Hidden, cl::init(false));
// This flag controls whether we check the shadow of the address
// operand of load or store. Such bugs are very rare, since load from
// a garbage address typically results in SEGV, but still happen
// (e.g. only lower bits of address are garbage, or the access happens
// early at program startup where malloc-ed memory is more likely to
// be zeroed). As of 2012-08-28 this flag adds 20% slowdown.
static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
       cl::desc("report accesses through a pointer which has poisoned shadow"),
       cl::Hidden, cl::init(true));
static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
       cl::desc("print out instructions with default strict semantics"),
       cl::Hidden, cl::init(false));

static cl::opt<std::string> ClBlacklistFile("msan-blacklist",
       cl::desc("File containing the list of functions where MemorySanitizer "
                "should not report bugs"), cl::Hidden);
// Experimental. Wraps all indirect calls in the instrumented code with
// a call to the given function. This is needed to assist the dynamic
// helper tool (MSanDR) to regain control on transitions between instrumented
// and non-instrumented code.
static cl::opt<std::string> ClWrapIndirectCalls("msan-wrap-indirect-calls",
       cl::desc("Wrap indirect calls with a given function"),
       cl::Hidden);

namespace {
/// \brief An instrumentation pass implementing detection of uninitialized
/// reads.
///
/// MemorySanitizer: instrument the code in module to find
/// uninitialized reads.
class MemorySanitizer : public FunctionPass {
 public:
  MemorySanitizer(bool TrackOrigins = false,
                  StringRef BlacklistFile = StringRef())
      : FunctionPass(ID),
        TrackOrigins(TrackOrigins || ClTrackOrigins),
        BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile : BlacklistFile),
        WrapIndirectCalls(!ClWrapIndirectCalls.empty()) {}
  const char *getPassName() const { return "MemorySanitizer"; }
  bool runOnFunction(Function &F);
  bool doInitialization(Module &M);
  static char ID;  // Pass identification, replacement for typeid.

 private:
  void initializeCallbacks(Module &M);

  /// \brief Track origins (allocation points) of uninitialized values.
  bool TrackOrigins;

  DataLayout *TD;
  LLVMContext *C;
  Type *IntptrTy;
  Type *OriginTy;
  /// \brief Thread-local shadow storage for function parameters.
  GlobalVariable *ParamTLS;
  /// \brief Thread-local origin storage for function parameters.
  GlobalVariable *ParamOriginTLS;
  /// \brief Thread-local shadow storage for function return value.
  GlobalVariable *RetvalTLS;
  /// \brief Thread-local origin storage for function return value.
  GlobalVariable *RetvalOriginTLS;
  /// \brief Thread-local shadow storage for in-register va_arg function
  /// parameters (x86_64-specific).
  GlobalVariable *VAArgTLS;
  /// \brief Thread-local shadow storage for va_arg overflow area
  /// (x86_64-specific).
  GlobalVariable *VAArgOverflowSizeTLS;
  /// \brief Thread-local space used to pass origin value to the UMR reporting
  /// function.
  GlobalVariable *OriginTLS;
  /// \brief The run-time callback to print a warning.
  Value *WarningFn;
  /// \brief Run-time helper that copies origin info for a memory range.
  Value *MsanCopyOriginFn;
  /// \brief Run-time helper that generates a new origin value for a stack
  /// allocation.
  Value *MsanSetAllocaOrigin4Fn;
  /// \brief Run-time helper that poisons stack on function entry.
  Value *MsanPoisonStackFn;
  /// \brief MSan runtime replacements for memmove, memcpy and memset.
  Value *MemmoveFn, *MemcpyFn, *MemsetFn;
  /// \brief Address mask used in application-to-shadow address calculation.
  /// ShadowAddr is computed as ApplicationAddr & ~ShadowMask.
  uint64_t ShadowMask;
  /// \brief Offset of the origin shadow from the "normal" shadow.
  /// OriginAddr is computed as (ShadowAddr + OriginOffset) & ~3ULL
  uint64_t OriginOffset;
  /// \brief Branch weights for error reporting.
  MDNode *ColdCallWeights;
  /// \brief Branch weights for origin store.
  MDNode *OriginStoreWeights;
  /// \brief Path to blacklist file.
  SmallString<64> BlacklistFile;
  /// \brief The blacklist.
  OwningPtr<SpecialCaseList> BL;
  /// \brief An empty volatile inline asm that prevents callback merge.
  InlineAsm *EmptyAsm;

  bool WrapIndirectCalls;
  /// \brief Run-time wrapper for indirect calls.
  Value *IndirectCallWrapperFn;
  // Argument and return type of IndirectCallWrapperFn: void (*f)(void).
  Type *AnyFunctionPtrTy;

  friend struct MemorySanitizerVisitor;
  friend struct VarArgAMD64Helper;
};

}  // namespace
char MemorySanitizer::ID = 0;
INITIALIZE_PASS(MemorySanitizer, "msan",
                "MemorySanitizer: detects uninitialized reads.",
                false, false)

FunctionPass *llvm::createMemorySanitizerPass(bool TrackOrigins,
                                              StringRef BlacklistFile) {
  return new MemorySanitizer(TrackOrigins, BlacklistFile);
}
/// \brief Create a non-const global initialized with the given string.
///
/// Creates a writable global for Str so that we can pass it to the
/// run-time lib. Runtime uses first 4 bytes of the string to store the
/// frame ID, so the string needs to be mutable.
static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
                                                            StringRef Str) {
  Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
  return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
                            GlobalValue::PrivateLinkage, StrConst, "");
}
/// \brief Insert extern declaration of runtime-provided functions and globals.
void MemorySanitizer::initializeCallbacks(Module &M) {
  // Only do this once.
  if (WarningFn)
    return;

  IRBuilder<> IRB(*C);
  // Create the callback.
  // FIXME: this function should have "Cold" calling conv,
  // which is not yet implemented.
  StringRef WarningFnName = ClKeepGoing ? "__msan_warning"
                                        : "__msan_warning_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy(), NULL);

  MsanCopyOriginFn = M.getOrInsertFunction(
    "__msan_copy_origin", IRB.getVoidTy(), IRB.getInt8PtrTy(),
    IRB.getInt8PtrTy(), IntptrTy, NULL);
  MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
    "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
    IRB.getInt8PtrTy(), IntptrTy, NULL);
  MsanPoisonStackFn = M.getOrInsertFunction(
    "__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy, NULL);
  MemmoveFn = M.getOrInsertFunction(
    "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
    IRB.getInt8PtrTy(), IntptrTy, NULL);
  MemcpyFn = M.getOrInsertFunction(
    "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
    IntptrTy, NULL);
  MemsetFn = M.getOrInsertFunction(
    "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
    IntptrTy, NULL);
  RetvalTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 8), false,
    GlobalVariable::ExternalLinkage, 0, "__msan_retval_tls", 0,
    GlobalVariable::InitialExecTLSModel);
  RetvalOriginTLS = new GlobalVariable(
    M, OriginTy, false, GlobalVariable::ExternalLinkage, 0,
    "__msan_retval_origin_tls", 0, GlobalVariable::InitialExecTLSModel);

  ParamTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
    GlobalVariable::ExternalLinkage, 0, "__msan_param_tls", 0,
    GlobalVariable::InitialExecTLSModel);
  ParamOriginTLS = new GlobalVariable(
    M, ArrayType::get(OriginTy, 1000), false, GlobalVariable::ExternalLinkage,
    0, "__msan_param_origin_tls", 0, GlobalVariable::InitialExecTLSModel);

  VAArgTLS = new GlobalVariable(
    M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
    GlobalVariable::ExternalLinkage, 0, "__msan_va_arg_tls", 0,
    GlobalVariable::InitialExecTLSModel);
  VAArgOverflowSizeTLS = new GlobalVariable(
    M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, 0,
    "__msan_va_arg_overflow_size_tls", 0,
    GlobalVariable::InitialExecTLSModel);
  OriginTLS = new GlobalVariable(
    M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, 0,
    "__msan_origin_tls", 0, GlobalVariable::InitialExecTLSModel);
  // We insert an empty inline asm after __msan_report* to avoid callback
  // merge.
  EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
                            StringRef(""), StringRef(""),
                            /*hasSideEffects=*/true);

  if (WrapIndirectCalls) {
    AnyFunctionPtrTy =
        PointerType::getUnqual(FunctionType::get(IRB.getVoidTy(), false));
    IndirectCallWrapperFn = M.getOrInsertFunction(
        ClWrapIndirectCalls, AnyFunctionPtrTy, AnyFunctionPtrTy, NULL);
  }
}
/// \brief Module-level initialization.
///
/// Inserts a call to __msan_init to the module's constructor list.
bool MemorySanitizer::doInitialization(Module &M) {
  TD = getAnalysisIfAvailable<DataLayout>();
  if (!TD)
    return false;
  BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
  C = &(M.getContext());
  unsigned PtrSize = TD->getPointerSizeInBits(/* AddressSpace */0);
  switch (PtrSize) {
    case 64:
      ShadowMask = kShadowMask64;
      OriginOffset = kOriginOffset64;
      break;
    case 32:
      ShadowMask = kShadowMask32;
      OriginOffset = kOriginOffset32;
      break;
    default:
      report_fatal_error("unsupported pointer size");
  }

  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(TD);
  OriginTy = IRB.getInt32Ty();

  ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
  OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);

  // Insert a call to __msan_init/__msan_track_origins into the module's CTORs.
  appendToGlobalCtors(M, cast<Function>(M.getOrInsertFunction(
                      "__msan_init", IRB.getVoidTy(), NULL)), 0);

  if (TrackOrigins)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(TrackOrigins), "__msan_track_origins");

  if (ClKeepGoing)
    new GlobalVariable(M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
                       IRB.getInt32(ClKeepGoing), "__msan_keep_going");

  return true;
}
/// \brief A helper class that handles instrumentation of VarArg
/// functions on a particular platform.
///
/// Implementations are expected to insert the instrumentation
/// necessary to propagate argument shadow through VarArg function
/// calls. Visit* methods are called during an InstVisitor pass over
/// the function, and should avoid creating new basic blocks. A new
/// instance of this class is created for each instrumented function.
struct VarArgHelper {
  /// \brief Visit a CallSite.
  virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;

  /// \brief Visit a va_start call.
  virtual void visitVAStartInst(VAStartInst &I) = 0;

  /// \brief Visit a va_copy call.
  virtual void visitVACopyInst(VACopyInst &I) = 0;

  /// \brief Finalize function instrumentation.
  ///
  /// This method is called after visiting all interesting (see above)
  /// instructions in a function.
  virtual void finalizeInstrumentation() = 0;

  virtual ~VarArgHelper() {}
};
struct MemorySanitizerVisitor;

VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                 MemorySanitizerVisitor &Visitor);
/// This class does all the work for a given function. Store and Load
/// instructions store and load corresponding shadow and origin
/// values. Most instructions propagate shadow from arguments to their
/// return values. Certain instructions (most importantly, BranchInst)
/// test their argument shadow and print reports (with a runtime call) if it's
/// non-zero.
struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  Function &F;
  MemorySanitizer &MS;
  SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
  ValueMap<Value*, Value*> ShadowMap, OriginMap;
  OwningPtr<VarArgHelper> VAHelper;

  // The following flags disable parts of MSan instrumentation based on
  // blacklist contents and command-line options.
  bool InsertChecks;
  bool LoadShadow;
  bool PoisonStack;
  bool PoisonUndef;
  bool CheckReturnValue;
  struct ShadowOriginAndInsertPoint {
    Value *Shadow;
    Value *Origin;
    Instruction *OrigIns;
    ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
      : Shadow(S), Origin(O), OrigIns(I) { }
    ShadowOriginAndInsertPoint() : Shadow(0), Origin(0), OrigIns(0) { }
  };
  SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
  SmallVector<Instruction*, 16> StoreList;
  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS)
      : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)) {
    bool SanitizeFunction = !MS.BL->isIn(F) && F.getAttributes().hasAttribute(
        AttributeSet::FunctionIndex, Attribute::SanitizeMemory);
    InsertChecks = SanitizeFunction;
    LoadShadow = SanitizeFunction;
    PoisonStack = SanitizeFunction && ClPoisonStack;
    PoisonUndef = SanitizeFunction && ClPoisonUndef;
    // FIXME: Consider using SpecialCaseList to specify a list of functions
    // that must always return fully initialized values. For now, we hardcode
    // "main".
    CheckReturnValue = SanitizeFunction && (F.getName() == "main");

    DEBUG(if (!InsertChecks)
          dbgs() << "MemorySanitizer is not inserting checks into '"
                 << F.getName() << "'\n");
  }
  void materializeStores() {
    for (size_t i = 0, n = StoreList.size(); i < n; i++) {
      StoreInst &I = *cast<StoreInst>(StoreList[i]);

      IRBuilder<> IRB(&I);
      Value *Val = I.getValueOperand();
      Value *Addr = I.getPointerOperand();
      Value *Shadow = I.isAtomic() ? getCleanShadow(Val) : getShadow(Val);
      Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

      StoreInst *NewSI =
          IRB.CreateAlignedStore(Shadow, ShadowPtr, I.getAlignment());
      DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
      (void)NewSI;

      if (ClCheckAccessAddress)
        insertShadowCheck(Addr, &I);

      if (I.isAtomic())
        I.setOrdering(addReleaseOrdering(I.getOrdering()));

      if (MS.TrackOrigins) {
        unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
        if (ClStoreCleanOrigin || isa<StructType>(Shadow->getType())) {
          IRB.CreateAlignedStore(getOrigin(Val), getOriginPtr(Addr, IRB),
                                 Alignment);
        } else {
          Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);

          // TODO(eugenis): handle non-zero constant shadow by inserting an
          // unconditional check (can not simply fail compilation as this could
          // be in the dead code).
          if (isa<Constant>(ConvertedShadow))
            continue;

          Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
                                        getCleanShadow(ConvertedShadow),
                                        "_mscmp");
          Instruction *CheckTerm =
              SplitBlockAndInsertIfThen(cast<Instruction>(Cmp), false,
                                        MS.OriginStoreWeights);
          IRBuilder<> IRBNew(CheckTerm);
          IRBNew.CreateAlignedStore(getOrigin(Val), getOriginPtr(Addr, IRBNew),
                                    Alignment);
        }
      }
    }
  }
  void materializeChecks() {
    for (size_t i = 0, n = InstrumentationList.size(); i < n; i++) {
      Value *Shadow = InstrumentationList[i].Shadow;
      Instruction *OrigIns = InstrumentationList[i].OrigIns;
      IRBuilder<> IRB(OrigIns);
      DEBUG(dbgs() << "  SHAD0 : " << *Shadow << "\n");
      Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
      DEBUG(dbgs() << "  SHAD1 : " << *ConvertedShadow << "\n");
      // See the comment in materializeStores().
      if (isa<Constant>(ConvertedShadow))
        continue;
      Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
                                    getCleanShadow(ConvertedShadow), "_mscmp");
      Instruction *CheckTerm =
          SplitBlockAndInsertIfThen(cast<Instruction>(Cmp),
                                    /* Unreachable */ !ClKeepGoing,
                                    MS.ColdCallWeights);

      IRB.SetInsertPoint(CheckTerm);
      if (MS.TrackOrigins) {
        Value *Origin = InstrumentationList[i].Origin;
        IRB.CreateStore(Origin ? (Value*)Origin : (Value*)IRB.getInt32(0),
                        MS.OriginTLS);
      }
      CallInst *Call = IRB.CreateCall(MS.WarningFn);
      Call->setDebugLoc(OrigIns->getDebugLoc());
      IRB.CreateCall(MS.EmptyAsm);
      DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
    }
    DEBUG(dbgs() << "DONE:\n" << F);
  }
  /// \brief Add MemorySanitizer instrumentation to a function.
  bool runOnFunction() {
    MS.initializeCallbacks(*F.getParent());
    if (!MS.TD) return false;

    // In the presence of unreachable blocks, we may see Phi nodes with
    // incoming nodes from such blocks. Since InstVisitor skips unreachable
    // blocks, such nodes will not have any shadow value associated with them.
    // It's easier to remove unreachable blocks than deal with missing shadow.
    removeUnreachableBlocks(F);

    // Iterate all BBs in depth-first order and create shadow instructions
    // for all instructions (where applicable).
    // For PHI nodes we create dummy shadow PHIs which will be finalized later.
    for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
         DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
      BasicBlock *BB = *DI;
      visit(*BB);
    }

    // Finalize PHI nodes.
    for (size_t i = 0, n = ShadowPHINodes.size(); i < n; i++) {
      PHINode *PN = ShadowPHINodes[i];
      PHINode *PNS = cast<PHINode>(getShadow(PN));
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : 0;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
        if (PNO)
          PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
      }
    }

    VAHelper->finalizeInstrumentation();

    // Delayed instrumentation of StoreInst.
    // This may add new checks to be inserted later.
    materializeStores();

    // Insert shadow value checks.
    materializeChecks();

    return true;
  }
  /// \brief Compute the shadow type that corresponds to a given Value.
  Type *getShadowTy(Value *V) {
    return getShadowTy(V->getType());
  }

  /// \brief Compute the shadow type that corresponds to a given Type.
  Type *getShadowTy(Type *OrigTy) {
    if (!OrigTy->isSized()) {
      return 0;
    }
    // For integer type, shadow is the same as the original type.
    // This may return weird-sized types like i1.
    if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
      return IT;
    if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
      uint32_t EltSize = MS.TD->getTypeSizeInBits(VT->getElementType());
      return VectorType::get(IntegerType::get(*MS.C, EltSize),
                             VT->getNumElements());
    }
    if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
      SmallVector<Type*, 4> Elements;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
      DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
      return Res;
    }
    uint32_t TypeSize = MS.TD->getTypeSizeInBits(OrigTy);
    return IntegerType::get(*MS.C, TypeSize);
  }
  /// \brief Flatten a vector type.
  Type *getShadowTyNoVec(Type *ty) {
    if (VectorType *vt = dyn_cast<VectorType>(ty))
      return IntegerType::get(*MS.C, vt->getBitWidth());
    return ty;
  }

  /// \brief Convert a shadow value to its flattened variant.
  Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
    Type *Ty = V->getType();
    Type *NoVecTy = getShadowTyNoVec(Ty);
    if (Ty == NoVecTy) return V;
    return IRB.CreateBitCast(V, NoVecTy);
  }
  /// \brief Compute the shadow address that corresponds to a given
  /// application address.
  ///
  /// Shadow = Addr & ~ShadowMask.
  Value *getShadowPtr(Value *Addr, Type *ShadowTy,
                      IRBuilder<> &IRB) {
    Value *ShadowLong =
        IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                      ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
    return IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
  }

  /// \brief Compute the origin address that corresponds to a given
  /// application address.
  ///
  /// OriginAddr = (ShadowAddr + OriginOffset) & ~3ULL
  Value *getOriginPtr(Value *Addr, IRBuilder<> &IRB) {
    Value *ShadowLong =
        IRB.CreateAnd(IRB.CreatePointerCast(Addr, MS.IntptrTy),
                      ConstantInt::get(MS.IntptrTy, ~MS.ShadowMask));
    Value *Add =
        IRB.CreateAdd(ShadowLong,
                      ConstantInt::get(MS.IntptrTy, MS.OriginOffset));
    Value *SecondAnd =
        IRB.CreateAnd(Add, ConstantInt::get(MS.IntptrTy, ~3ULL));
    return IRB.CreateIntToPtr(SecondAnd, PointerType::get(IRB.getInt32Ty(), 0));
  }
  /// \brief Compute the shadow address for a given function argument.
  ///
  /// Shadow = ParamTLS+ArgOffset.
  Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msarg");
  }

  /// \brief Compute the origin address for a given function argument.
  Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
                                 int ArgOffset) {
    if (!MS.TrackOrigins) return 0;
    Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
                              "_msarg_o");
  }

  /// \brief Compute the shadow address for a retval.
  Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
    Value *Base = IRB.CreatePointerCast(MS.RetvalTLS, MS.IntptrTy);
    return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
                              "_msret");
  }

  /// \brief Compute the origin address for a retval.
  Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
    // We keep a single origin for the entire retval. Might be too optimistic.
    return MS.RetvalOriginTLS;
  }
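
  // For example, for a callee "void f(i32 %a, double %b)" the shadow of %a
  // lives at __msan_param_tls + 0 and the shadow of %b at
  // __msan_param_tls + 8: each argument's slot is rounded up to
  // kShadowTLSAlignment (see the ArgOffset computation in getShadow() below).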
  /// \brief Set SV to be the shadow value for V.
  void setShadow(Value *V, Value *SV) {
    assert(!ShadowMap.count(V) && "Values may only have one shadow");
    ShadowMap[V] = SV;
  }

  /// \brief Set Origin to be the origin value for V.
  void setOrigin(Value *V, Value *Origin) {
    if (!MS.TrackOrigins) return;
    assert(!OriginMap.count(V) && "Values may only have one origin");
    DEBUG(dbgs() << "ORIGIN: " << *V << "  ==> " << *Origin << "\n");
    OriginMap[V] = Origin;
  }

  /// \brief Create a clean shadow value for a given value.
  ///
  /// Clean shadow (all zeroes) means all bits of the value are defined
  /// (initialized).
  Constant *getCleanShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return 0;
    return Constant::getNullValue(ShadowTy);
  }
  /// \brief Create a dirty shadow of a given shadow type.
  Constant *getPoisonedShadow(Type *ShadowTy) {
    assert(ShadowTy);
    if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
      return Constant::getAllOnesValue(ShadowTy);
    StructType *ST = cast<StructType>(ShadowTy);
    SmallVector<Constant *, 4> Vals;
    for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
      Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
    return ConstantStruct::get(ST, Vals);
  }

  /// \brief Create a dirty shadow for a given value.
  Constant *getPoisonedShadow(Value *V) {
    Type *ShadowTy = getShadowTy(V);
    if (!ShadowTy)
      return 0;
    return getPoisonedShadow(ShadowTy);
  }

  /// \brief Create a clean (zero) origin.
  Value *getCleanOrigin() {
    return Constant::getNullValue(MS.OriginTy);
  }
  /// \brief Get the shadow value for a given Value.
  ///
  /// This function either returns the value set earlier with setShadow,
  /// or extracts it from ParamTLS (for function arguments).
  Value *getShadow(Value *V) {
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      // For instructions the shadow is already stored in the map.
      Value *Shadow = ShadowMap[V];
      if (!Shadow) {
        DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
        (void)I;
        assert(Shadow && "No shadow for a value");
      }
      return Shadow;
    }
    if (UndefValue *U = dyn_cast<UndefValue>(V)) {
      Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
      DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
      (void)U;
      return AllOnes;
    }
    if (Argument *A = dyn_cast<Argument>(V)) {
      // For arguments we compute the shadow on demand and store it in the map.
      Value **ShadowPtr = &ShadowMap[V];
      if (*ShadowPtr)
        return *ShadowPtr;
      Function *F = A->getParent();
      IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI());
      unsigned ArgOffset = 0;
      for (Function::arg_iterator AI = F->arg_begin(), AE = F->arg_end();
           AI != AE; ++AI) {
        if (!AI->getType()->isSized()) {
          DEBUG(dbgs() << "Arg is not sized\n");
          continue;
        }
        unsigned Size = AI->hasByValAttr()
            ? MS.TD->getTypeAllocSize(AI->getType()->getPointerElementType())
            : MS.TD->getTypeAllocSize(AI->getType());
        if (AI == A) {
          Value *Base = getShadowPtrForArgument(AI, EntryIRB, ArgOffset);
          if (AI->hasByValAttr()) {
            // ByVal pointer itself has clean shadow. We copy the actual
            // argument shadow to the underlying memory.
            // Figure out maximal valid memcpy alignment.
            unsigned ArgAlign = AI->getParamAlignment();
            if (ArgAlign == 0) {
              Type *EltType = A->getType()->getPointerElementType();
              ArgAlign = MS.TD->getABITypeAlignment(EltType);
            }
            unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
            Value *Cpy = EntryIRB.CreateMemCpy(
                getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size,
                CopyAlign);
            DEBUG(dbgs() << "  ByValCpy: " << *Cpy << "\n");
            (void)Cpy;
            *ShadowPtr = getCleanShadow(V);
          } else {
            *ShadowPtr = EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment);
          }
          DEBUG(dbgs() << "  ARG:    " << *AI << " ==> " <<
                **ShadowPtr << "\n");
          if (MS.TrackOrigins) {
            Value *OriginPtr = getOriginPtrForArgument(AI, EntryIRB, ArgOffset);
            setOrigin(A, EntryIRB.CreateLoad(OriginPtr));
          }
        }
        ArgOffset += DataLayout::RoundUpAlignment(Size, kShadowTLSAlignment);
      }
      assert(*ShadowPtr && "Could not find shadow for an argument");
      return *ShadowPtr;
    }
    // For everything else the shadow is zero.
    return getCleanShadow(V);
  }
  /// \brief Get the shadow for i-th argument of the instruction I.
  Value *getShadow(Instruction *I, int i) {
    return getShadow(I->getOperand(i));
  }

  /// \brief Get the origin for a value.
  Value *getOrigin(Value *V) {
    if (!MS.TrackOrigins) return 0;
    if (isa<Instruction>(V) || isa<Argument>(V)) {
      Value *Origin = OriginMap[V];
      if (!Origin) {
        DEBUG(dbgs() << "NO ORIGIN: " << *V << "\n");
        Origin = getCleanOrigin();
      }
      return Origin;
    }
    return getCleanOrigin();
  }

  /// \brief Get the origin for i-th argument of the instruction I.
  Value *getOrigin(Instruction *I, int i) {
    return getOrigin(I->getOperand(i));
  }
  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning in runtime if the shadow value is not 0.
  void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
    if (!InsertChecks) return;
#ifndef NDEBUG
    Type *ShadowTy = Shadow->getType();
    assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
           "Can only insert checks for integer and vector shadow types");
#endif
    InstrumentationList.push_back(
        ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
  }

  /// \brief Remember the place where a shadow check should be inserted.
  ///
  /// This location will be later instrumented with a check that will print a
  /// UMR warning in runtime if the value is not fully defined.
  void insertShadowCheck(Value *Val, Instruction *OrigIns) {
    assert(Val);
    Instruction *Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
    if (!Shadow) return;
    Instruction *Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
    insertShadowCheck(Shadow, Origin, OrigIns);
  }
  AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
    switch (a) {
      case NotAtomic:
        return NotAtomic;
      case Unordered:
      case Monotonic:
      case Release:
        return Release;
      case Acquire:
      case AcquireRelease:
        return AcquireRelease;
      case SequentiallyConsistent:
        return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }

  AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
    switch (a) {
      case NotAtomic:
        return NotAtomic;
      case Unordered:
      case Monotonic:
      case Acquire:
        return Acquire;
      case Release:
      case AcquireRelease:
        return AcquireRelease;
      case SequentiallyConsistent:
        return SequentiallyConsistent;
    }
    llvm_unreachable("Unknown ordering");
  }
  // ------------------- Visitors.

  /// \brief Instrument LoadInst
  ///
  /// Loads the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the load address is fully defined.
  void visitLoadInst(LoadInst &I) {
    assert(I.getType()->isSized() && "Load type must have size");
    IRBuilder<> IRB(I.getNextNode());
    Type *ShadowTy = getShadowTy(&I);
    Value *Addr = I.getPointerOperand();
    if (LoadShadow) {
      Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
      setShadow(&I,
                IRB.CreateAlignedLoad(ShadowPtr, I.getAlignment(), "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertShadowCheck(I.getPointerOperand(), &I);

    if (I.isAtomic())
      I.setOrdering(addAcquireOrdering(I.getOrdering()));

    if (MS.TrackOrigins) {
      if (LoadShadow) {
        unsigned Alignment = std::max(kMinOriginAlignment, I.getAlignment());
        setOrigin(&I,
                  IRB.CreateAlignedLoad(getOriginPtr(Addr, IRB), Alignment));
      } else {
        setOrigin(&I, getCleanOrigin());
      }
    }
  }
  /// \brief Instrument StoreInst
  ///
  /// Stores the corresponding shadow and (optionally) origin.
  /// Optionally, checks that the store address is fully defined.
  void visitStoreInst(StoreInst &I) {
    StoreList.push_back(&I);
  }

  void handleCASOrRMW(Instruction &I) {
    assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));

    IRBuilder<> IRB(&I);
    Value *Addr = I.getOperand(0);
    Value *ShadowPtr = getShadowPtr(Addr, I.getType(), IRB);

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    // Only test the conditional argument of cmpxchg instruction.
    // The other argument can potentially be uninitialized, but we can not
    // detect this situation reliably without possible false positives.
    if (isa<AtomicCmpXchgInst>(I))
      insertShadowCheck(I.getOperand(1), &I);

    IRB.CreateStore(getCleanShadow(&I), ShadowPtr);

    setShadow(&I, getCleanShadow(&I));
  }
  void visitAtomicRMWInst(AtomicRMWInst &I) {
    handleCASOrRMW(I);
    I.setOrdering(addReleaseOrdering(I.getOrdering()));
  }

  void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
    handleCASOrRMW(I);
    I.setOrdering(addReleaseOrdering(I.getOrdering()));
  }
  // Vector manipulation.
  void visitExtractElementInst(ExtractElementInst &I) {
    insertShadowCheck(I.getOperand(1), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
              "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitInsertElementInst(InsertElementInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
              I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }

  void visitShuffleVectorInst(ShuffleVectorInst &I) {
    insertShadowCheck(I.getOperand(2), &I);
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
              I.getOperand(2), "_msprop"));
    setOriginForNaryOp(I);
  }
  // Casts.
  void visitSExtInst(SExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitZExtInst(ZExtInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitTruncInst(TruncInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitBitCastInst(BitCastInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
              "_msprop_ptrtoint"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitIntToPtrInst(IntToPtrInst &I) {
    IRBuilder<> IRB(&I);
    setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
              "_msprop_inttoptr"));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
  void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
  void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
  void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
  void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }
  /// \brief Propagate shadow for bitwise AND.
  ///
  /// This code is exact, i.e. if, for example, a bit in the left argument
  /// is defined and 0, then neither the value nor the definedness of the
  /// corresponding bit in B affects the resulting shadow.
  void visitAnd(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    //  "And" of 0 and a poisoned value results in unpoisoned value.
    //  1&1 => 1;  0&1 => 0;  p&1 => p;
    //  1&0 => 0;  0&0 => 0;  p&0 => 0;
    //  1&p => p;  0&p => 0;  p&p => p;
    //  S = (S1 & S2) | (V1 & S2) | (S1 & V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = I.getOperand(0);
    Value *V2 = I.getOperand(1);
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }
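
  // A one-bit illustration of the formula above: if the left operand is a
  // fully defined 0 (V1 = 0, S1 = 0) and the right operand is unknown
  // (S2 = 1), then S = (0&1) | (0&1) | (0&V2) = 0, i.e. the result is a
  // well-defined 0 no matter what the unknown bit is. If instead V1 is a
  // fully defined 1 (S1 = 0), then S = 0 | (1&1) | 0 = 1 and the result is
  // correctly poisoned.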
  void visitOr(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    //  "Or" of 1 and a poisoned value results in unpoisoned value.
    //  1|1 => 1;  0|1 => 1;  p|1 => 1;
    //  1|0 => 1;  0|0 => 0;  p|0 => p;
    //  1|p => 1;  0|p => p;  p|p => p;
    //  S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *V1 = IRB.CreateNot(I.getOperand(0));
    Value *V2 = IRB.CreateNot(I.getOperand(1));
    if (V1->getType() != S1->getType()) {
      V1 = IRB.CreateIntCast(V1, S1->getType(), false);
      V2 = IRB.CreateIntCast(V2, S2->getType(), false);
    }
    Value *S1S2 = IRB.CreateAnd(S1, S2);
    Value *V1S2 = IRB.CreateAnd(V1, S2);
    Value *S1V2 = IRB.CreateAnd(S1, V2);
    setShadow(&I, IRB.CreateOr(S1S2, IRB.CreateOr(V1S2, S1V2)));
    setOriginForNaryOp(I);
  }
  /// \brief Default propagation of shadow and/or origin.
  ///
  /// This class implements the general case of shadow propagation, used in all
  /// cases where we don't know and/or don't care about what the operation
  /// actually does. It converts all input shadow values to a common type
  /// (extending or truncating as necessary), and bitwise OR's them.
  ///
  /// This is much cheaper than inserting checks (i.e. requiring inputs to be
  /// fully initialized), and less prone to false positives.
  ///
  /// This class also implements the general case of origin propagation. For a
  /// Nary operation, result origin is set to the origin of an argument that is
  /// not entirely initialized. If there is more than one such argument, the
  /// rightmost of them is picked. It does not matter which one is picked if
  /// all arguments are initialized.
  template <bool CombineShadow>
  class Combiner {
    Value *Shadow;
    Value *Origin;
    IRBuilder<> &IRB;
    MemorySanitizerVisitor *MSV;

  public:
    Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
      Shadow(0), Origin(0), IRB(IRB), MSV(MSV) {}

    /// \brief Add a pair of shadow and origin values to the mix.
    Combiner &Add(Value *OpShadow, Value *OpOrigin) {
      if (CombineShadow) {
        assert(OpShadow);
        if (!Shadow)
          Shadow = OpShadow;
        else {
          OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
          Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
        }
      }

      if (MSV->MS.TrackOrigins) {
        assert(OpOrigin);
        if (!Origin) {
          Origin = OpOrigin;
        } else {
          Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
          Value *Cond = IRB.CreateICmpNE(FlatShadow,
                                         MSV->getCleanShadow(FlatShadow));
          Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
        }
      }
      return *this;
    }

    /// \brief Add an application value to the mix.
    Combiner &Add(Value *V) {
      Value *OpShadow = MSV->getShadow(V);
      Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : 0;
      return Add(OpShadow, OpOrigin);
    }

    /// \brief Set the current combined values as the given instruction's
    /// shadow and origin.
    void Done(Instruction *I) {
      if (CombineShadow) {
        assert(Shadow);
        Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
        MSV->setShadow(I, Shadow);
      }
      if (MSV->MS.TrackOrigins) {
        assert(Origin);
        MSV->setOrigin(I, Origin);
      }
    }
  };

  typedef Combiner<true> ShadowAndOriginCombiner;
  typedef Combiner<false> OriginCombiner;
  /// \brief Propagate origin for arbitrary operation.
  void setOriginForNaryOp(Instruction &I) {
    if (!MS.TrackOrigins) return;
    IRBuilder<> IRB(&I);
    OriginCombiner OC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      OC.Add(OI->get());
    OC.Done(&I);
  }

  size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
    assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
           "Vector of pointers is not a valid shadow type");
    return Ty->isVectorTy() ?
      Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
      Ty->getPrimitiveSizeInBits();
  }
  /// \brief Cast between two shadow types, extending or truncating as
  /// necessary.
  Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy) {
    Type *srcTy = V->getType();
    if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
      return IRB.CreateIntCast(V, dstTy, true);
    if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
        dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
      return IRB.CreateIntCast(V, dstTy, true);
    size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
    size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
    Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
    Value *V2 =
      IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), true);
    return IRB.CreateBitCast(V2, dstTy);
    // TODO: handle struct types.
  }

  /// \brief Propagate shadow for arbitrary operation.
  void handleShadowOr(Instruction &I) {
    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
      SC.Add(OI->get());
    SC.Done(&I);
  }
  void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
  void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
  void visitSub(BinaryOperator &I) { handleShadowOr(I); }
  void visitXor(BinaryOperator &I) { handleShadowOr(I); }
  void visitMul(BinaryOperator &I) { handleShadowOr(I); }

  void handleDiv(Instruction &I) {
    IRBuilder<> IRB(&I);
    // Strict on the second argument.
    insertShadowCheck(I.getOperand(1), &I);
    setShadow(&I, getShadow(&I, 0));
    setOrigin(&I, getOrigin(&I, 0));
  }

  void visitUDiv(BinaryOperator &I) { handleDiv(I); }
  void visitSDiv(BinaryOperator &I) { handleDiv(I); }
  void visitFDiv(BinaryOperator &I) { handleDiv(I); }
  void visitURem(BinaryOperator &I) { handleDiv(I); }
  void visitSRem(BinaryOperator &I) { handleDiv(I); }
  void visitFRem(BinaryOperator &I) { handleDiv(I); }
  /// \brief Instrument == and != comparisons.
  ///
  /// Sometimes the comparison result is known even if some of the bits of the
  /// arguments are not.
  void handleEqualityComparison(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers.
    // For ints (and vectors of ints), types of A and Sa match,
    // and this is a no-op.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // A == B  <==>  (C = A^B) == 0
    // A != B  <==>  (C = A^B) != 0
    Value *C = IRB.CreateXor(A, B);
    Value *Sc = IRB.CreateOr(Sa, Sb);
    // Now dealing with i = (C == 0) comparison (or C != 0, does not matter
    // now).
    // Result is defined if one of the following is true
    // * there is a defined 1 bit in C
    // * C is fully defined
    // Si = !(C & ~Sc) && Sc
    Value *Zero = Constant::getNullValue(Sc->getType());
    Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
    Value *Si =
      IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
                    IRB.CreateICmpEQ(
                      IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
    Si->setName("_msprop_icmp");
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }
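
  // A worked example of the formula above: let A = 0b1100 with its two low
  // bits undefined (Sa = 0b0011), and let B = 0b0100 be fully defined
  // (Sb = 0). Then Sc = 0b0011 and C = A^B has a *defined* one in bit 3,
  // so C & ~Sc = 0b1000 != 0 and Si evaluates to false: A != B holds no
  // matter how the undefined bits of A are resolved.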
  /// \brief Build the lowest possible value of V, taking into account V's
  ///        uninitialized bits.
  Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
                                bool isSigned) {
    if (isSigned) {
      // Split shadow into sign bit and other bits.
      Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
      Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Maximise the undefined shadow bit, minimize other undefined bits.
      return
        IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit);
    } else {
      // Minimize undefined bits.
      return IRB.CreateAnd(A, IRB.CreateNot(Sa));
    }
  }

  /// \brief Build the highest possible value of V, taking into account V's
  ///        uninitialized bits.
  Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
                                 bool isSigned) {
    if (isSigned) {
      // Split shadow into sign bit and other bits.
      Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
      Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
      // Minimise the undefined shadow bit, maximise other undefined bits.
      return
        IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits);
    } else {
      // Maximize undefined bits.
      return IRB.CreateOr(A, Sa);
    }
  }
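
  // For example, for an unsigned A = 0b110 whose middle bit is undefined
  // (Sa = 0b010), the helpers above give
  //   lowest  = A & ~Sa = 0b100 (4)
  //   highest = A | Sa  = 0b110 (6)
  // which bound the values A may take once its undefined bit is resolved.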
  /// \brief Instrument relational comparisons.
  ///
  /// This function does exact shadow propagation for all relational
  /// comparisons of integers, pointers and vectors of those.
  /// FIXME: output seems suboptimal when one of the operands is a constant
  void handleRelationalComparisonExact(ICmpInst &I) {
    IRBuilder<> IRB(&I);
    Value *A = I.getOperand(0);
    Value *B = I.getOperand(1);
    Value *Sa = getShadow(A);
    Value *Sb = getShadow(B);

    // Get rid of pointers and vectors of pointers.
    // For ints (and vectors of ints), types of A and Sa match,
    // and this is a no-op.
    A = IRB.CreatePointerCast(A, Sa->getType());
    B = IRB.CreatePointerCast(B, Sb->getType());

    // Let [a0, a1] be the interval of possible values of A, taking into
    // account its undefined bits. Let [b0, b1] be the interval of possible
    // values of B. Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
    bool IsSigned = I.isSigned();
    Value *S1 = IRB.CreateICmp(I.getPredicate(),
                               getLowestPossibleValue(IRB, A, Sa, IsSigned),
                               getHighestPossibleValue(IRB, B, Sb, IsSigned));
    Value *S2 = IRB.CreateICmp(I.getPredicate(),
                               getHighestPossibleValue(IRB, A, Sa, IsSigned),
                               getLowestPossibleValue(IRB, B, Sb, IsSigned));
    Value *Si = IRB.CreateXor(S1, S2);
    setShadow(&I, Si);
    setOriginForNaryOp(I);
  }
  /// \brief Instrument signed relational comparisons.
  ///
  /// Handle (x<0) and (x>=0) comparisons (essentially, sign bit tests) by
  /// propagating the highest bit of the shadow. Everything else is delegated
  /// to handleShadowOr().
  void handleSignedRelationalComparison(ICmpInst &I) {
    Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
    Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
    Value *op = NULL;
    CmpInst::Predicate pre = I.getPredicate();
    if (constOp0 && constOp0->isNullValue() &&
        (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE)) {
      op = I.getOperand(1);
    } else if (constOp1 && constOp1->isNullValue() &&
               (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) {
      op = I.getOperand(0);
    }
    if (op) {
      IRBuilder<> IRB(&I);
      Value *Shadow =
        IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op), "_msprop_icmpslt");
      setShadow(&I, Shadow);
      setOrigin(&I, getOrigin(op));
    } else {
      handleShadowOr(I);
    }
  }
  void visitICmpInst(ICmpInst &I) {
    if (!ClHandleICmp) {
      handleShadowOr(I);
      return;
    }
    if (I.isEquality()) {
      handleEqualityComparison(I);
      return;
    }

    assert(I.isRelational());
    if (ClHandleICmpExact) {
      handleRelationalComparisonExact(I);
      return;
    }
    if (I.isSigned()) {
      handleSignedRelationalComparison(I);
      return;
    }

    assert(I.isUnsigned());
    if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
      handleRelationalComparisonExact(I);
      return;
    }

    handleShadowOr(I);
  }

  void visitFCmpInst(FCmpInst &I) {
    handleShadowOr(I);
  }
  void handleShift(BinaryOperator &I) {
    IRBuilder<> IRB(&I);
    // If any of the S2 bits are poisoned, the whole thing is poisoned.
    // Otherwise perform the same shift on S1.
    Value *S1 = getShadow(&I, 0);
    Value *S2 = getShadow(&I, 1);
    Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
                                   S2->getType());
    Value *V2 = I.getOperand(1);
    Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
    setShadow(&I, IRB.CreateOr(Shift, S2Conv));
    setOriginForNaryOp(I);
  }

  void visitShl(BinaryOperator &I) { handleShift(I); }
  void visitAShr(BinaryOperator &I) { handleShift(I); }
  void visitLShr(BinaryOperator &I) { handleShift(I); }
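
  // E.g. for "%r = shl i32 %x, %n": if any bit of %n is poisoned, S2Conv is
  // all-ones and %r becomes fully poisoned; otherwise the shadow of %x is
  // simply shifted by the same (fully defined) amount %n.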
  /// \brief Instrument llvm.memmove
  ///
  /// At this point we don't know if llvm.memmove will be inlined or not.
  /// If we don't instrument it and it gets inlined,
  /// our interceptor will not kick in and we will lose the memmove.
  /// If we instrument the call here, but it does not get inlined,
  /// we will memmove the shadow twice: which is bad in case
  /// of overlapping regions. So, we simply lower the intrinsic to a call.
  ///
  /// Similar situation exists for memcpy and memset.
  void visitMemMoveInst(MemMoveInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemmoveFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  // Similar to memmove: avoid copying shadow twice.
  // This is somewhat unfortunate as it may slow down small constant memcpys.
  // FIXME: consider doing manual inline for small constant sizes and proper
  // alignment.
  void visitMemCpyInst(MemCpyInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemcpyFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }

  // Same as memcpy.
  void visitMemSetInst(MemSetInst &I) {
    IRBuilder<> IRB(&I);
    IRB.CreateCall3(
      MS.MemsetFn,
      IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
      IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
      IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false));
    I.eraseFromParent();
  }
  void visitVAStartInst(VAStartInst &I) {
    VAHelper->visitVAStartInst(I);
  }

  void visitVACopyInst(VACopyInst &I) {
    VAHelper->visitVACopyInst(I);
  }
  enum IntrinsicKind {
    IK_DoesNotAccessMemory,
    IK_OnlyReadsMemory,
    IK_WritesMemory
  };

  static IntrinsicKind getIntrinsicKind(Intrinsic::ID iid) {
    const int DoesNotAccessMemory = IK_DoesNotAccessMemory;
    const int OnlyReadsArgumentPointees = IK_OnlyReadsMemory;
    const int OnlyReadsMemory = IK_OnlyReadsMemory;
    const int OnlyAccessesArgumentPointees = IK_WritesMemory;
    const int UnknownModRefBehavior = IK_WritesMemory;
#define GET_INTRINSIC_MODREF_BEHAVIOR
#define ModRefBehavior IntrinsicKind
#include "llvm/IR/Intrinsics.gen"
#undef ModRefBehavior
#undef GET_INTRINSIC_MODREF_BEHAVIOR
  }
  /// \brief Handle vector store-like intrinsics.
  ///
  /// Instrument intrinsics that look like a simple SIMD store: writes memory,
  /// has 1 pointer argument and 1 vector argument, returns void.
  bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value* Addr = I.getArgOperand(0);
    Value *Shadow = getShadow(&I, 1);
    Value *ShadowPtr = getShadowPtr(Addr, Shadow->getType(), IRB);

    // We don't know the pointer alignment (could be unaligned SSE store!).
    // Have to assume the worst case.
    IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    // FIXME: use ClStoreCleanOrigin
    // FIXME: factor out common code from materializeStores
    if (MS.TrackOrigins)
      IRB.CreateStore(getOrigin(&I, 1), getOriginPtr(Addr, IRB));
    return true;
  }
  /// \brief Handle vector load-like intrinsics.
  ///
  /// Instrument intrinsics that look like a simple SIMD load: reads memory,
  /// has 1 pointer argument, returns a vector.
  bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Addr = I.getArgOperand(0);

    Type *ShadowTy = getShadowTy(&I);
    if (LoadShadow) {
      Value *ShadowPtr = getShadowPtr(Addr, ShadowTy, IRB);
      // We don't know the pointer alignment (could be unaligned SSE load!).
      // Have to assume the worst case.
      setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, 1, "_msld"));
    } else {
      setShadow(&I, getCleanShadow(&I));
    }

    if (ClCheckAccessAddress)
      insertShadowCheck(Addr, &I);

    if (MS.TrackOrigins) {
      if (LoadShadow)
        setOrigin(&I, IRB.CreateLoad(getOriginPtr(Addr, IRB)));
      else
        setOrigin(&I, getCleanOrigin());
    }
    return true;
  }
  /// \brief Handle (SIMD arithmetic)-like intrinsics.
  ///
  /// Instrument intrinsics with any number of arguments of the same type,
  /// equal to the return type. The type should be simple (no aggregates or
  /// pointers; vectors are fine).
  /// Caller guarantees that this intrinsic does not access memory.
  bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
    Type *RetTy = I.getType();
    if (!(RetTy->isIntOrIntVectorTy() ||
          RetTy->isFPOrFPVectorTy() ||
          RetTy->isX86_MMXTy()))
      return false;

    unsigned NumArgOperands = I.getNumArgOperands();

    for (unsigned i = 0; i < NumArgOperands; ++i) {
      Type *Ty = I.getArgOperand(i)->getType();
      if (Ty != RetTy)
        return false;
    }

    IRBuilder<> IRB(&I);
    ShadowAndOriginCombiner SC(this, IRB);
    for (unsigned i = 0; i < NumArgOperands; ++i)
      SC.Add(I.getArgOperand(i));
    SC.Done(&I);

    return true;
  }
  /// \brief Heuristically instrument unknown intrinsics.
  ///
  /// The main purpose of this code is to do something reasonable with all
  /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
  /// We recognize several classes of intrinsics by their argument types and
  /// ModRefBehavior and apply special instrumentation when we are reasonably
  /// sure that we know what the intrinsic does.
  ///
  /// We special-case intrinsics where this approach fails. See llvm.bswap
  /// handling as an example of that.
  bool handleUnknownIntrinsic(IntrinsicInst &I) {
    unsigned NumArgOperands = I.getNumArgOperands();
    if (NumArgOperands == 0)
      return false;

    Intrinsic::ID iid = I.getIntrinsicID();
    IntrinsicKind IK = getIntrinsicKind(iid);
    bool OnlyReadsMemory = IK == IK_OnlyReadsMemory;
    bool WritesMemory = IK == IK_WritesMemory;
    assert(!(OnlyReadsMemory && WritesMemory));

    if (NumArgOperands == 2 &&
        I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getArgOperand(1)->getType()->isVectorTy() &&
        I.getType()->isVoidTy() &&
        WritesMemory) {
      // This looks like a vector store.
      return handleVectorStoreIntrinsic(I);
    }

    if (NumArgOperands == 1 &&
        I.getArgOperand(0)->getType()->isPointerTy() &&
        I.getType()->isVectorTy() &&
        OnlyReadsMemory) {
      // This looks like a vector load.
      return handleVectorLoadIntrinsic(I);
    }

    if (!OnlyReadsMemory && !WritesMemory)
      if (maybeHandleSimpleNomemIntrinsic(I))
        return true;

    // FIXME: detect and handle SSE maskstore/maskload
    return false;
  }
  void handleBswap(IntrinsicInst &I) {
    IRBuilder<> IRB(&I);
    Value *Op = I.getArgOperand(0);
    Type *OpType = Op->getType();
    Function *BswapFunc = Intrinsic::getDeclaration(
        F.getParent(), Intrinsic::bswap, ArrayRef<Type*>(&OpType, 1));
    setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
    setOrigin(&I, getOrigin(Op));
  }
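  // Shadow bits travel with the data bits they describe: byte-swapping a
  // value byte-swaps its shadow, so the instrumentation is just a second
  // bswap, e.g.
  //   %r   = call i32 @llvm.bswap.i32(i32 %v)
  //   %r_s = call i32 @llvm.bswap.i32(i32 %v_s)  ; %v_s is the shadow of %v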
  // \brief Instrument vector convert intrinsic.
  //
  // This function instruments intrinsics like cvtsi2ss:
  // %Out = int_xxx_cvtyyy(%ConvertOp)
  // or
  // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
  // Intrinsic converts \p NumUsedElements elements of \p ConvertOp to the
  // same number of \p Out elements, and (if it has 2 arguments) copies the
  // rest of the elements from \p CopyOp.
  // In most cases conversion involves a floating-point value which may
  // trigger a hardware exception when not fully initialized. For this reason
  // we require \p ConvertOp[0:NumUsedElements] to be fully initialized and
  // trap otherwise.
  // We copy the shadow of \p CopyOp[NumUsedElements:] to \p
  // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp
  // always return a fully initialized value.
  void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements) {
    IRBuilder<> IRB(&I);
    Value *CopyOp, *ConvertOp;

    switch (I.getNumArgOperands()) {
    case 2:
      CopyOp = I.getArgOperand(0);
      ConvertOp = I.getArgOperand(1);
      break;
    case 1:
      ConvertOp = I.getArgOperand(0);
      CopyOp = 0;
      break;
    default:
      llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
    }

    // The first *NumUsedElements* elements of ConvertOp are converted to the
    // same number of output elements. The rest of the output is copied from
    // CopyOp, or (if not available) filled with zeroes.
    // Combine shadow for elements of ConvertOp that are used in this
    // operation, and insert a check.
    // FIXME: consider propagating shadow of ConvertOp, at least in the case
    // of int->any conversion.
    Value *ConvertShadow = getShadow(ConvertOp);
    Value *AggShadow = 0;
    if (ConvertOp->getType()->isVectorTy()) {
      AggShadow = IRB.CreateExtractElement(
          ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
      for (int i = 1; i < NumUsedElements; ++i) {
        Value *MoreShadow = IRB.CreateExtractElement(
            ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
        AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
      }
    } else {
      AggShadow = ConvertShadow;
    }
    assert(AggShadow->getType()->isIntegerTy());
    insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);

    // Build result shadow by zero-filling parts of CopyOp shadow that come
    // from ConvertOp.
    if (CopyOp) {
      assert(CopyOp->getType() == I.getType());
      assert(CopyOp->getType()->isVectorTy());
      Value *ResultShadow = getShadow(CopyOp);
      Type *EltTy = ResultShadow->getType()->getVectorElementType();
      for (int i = 0; i < NumUsedElements; ++i) {
        ResultShadow = IRB.CreateInsertElement(
            ResultShadow, ConstantInt::getNullValue(EltTy),
            ConstantInt::get(IRB.getInt32Ty(), i));
      }
      setShadow(&I, ResultShadow);
      setOrigin(&I, getOrigin(CopyOp));
    } else {
      setShadow(&I, getCleanShadow(&I));
      setOrigin(&I, getCleanOrigin());
    }
  }
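  // Worked example: for %Out = cvtsi2ss(%CopyOp, %ConvertOp) with
  // NumUsedElements == 1, ConvertOp is a scalar i32, so its whole shadow is
  // checked (trapping on poison); the shadow of Out[0] is then zeroed and the
  // shadow of the remaining elements is copied over from CopyOp.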
  void visitIntrinsicInst(IntrinsicInst &I) {
    switch (I.getIntrinsicID()) {
    case llvm::Intrinsic::bswap:
      handleBswap(I);
      break;
    case llvm::Intrinsic::x86_avx512_cvtsd2usi64:
    case llvm::Intrinsic::x86_avx512_cvtsd2usi:
    case llvm::Intrinsic::x86_avx512_cvtss2usi64:
    case llvm::Intrinsic::x86_avx512_cvtss2usi:
    case llvm::Intrinsic::x86_avx512_cvttss2usi64:
    case llvm::Intrinsic::x86_avx512_cvttss2usi:
    case llvm::Intrinsic::x86_avx512_cvttsd2usi64:
    case llvm::Intrinsic::x86_avx512_cvttsd2usi:
    case llvm::Intrinsic::x86_avx512_cvtusi2sd:
    case llvm::Intrinsic::x86_avx512_cvtusi2ss:
    case llvm::Intrinsic::x86_avx512_cvtusi642sd:
    case llvm::Intrinsic::x86_avx512_cvtusi642ss:
    case llvm::Intrinsic::x86_sse2_cvtsd2si64:
    case llvm::Intrinsic::x86_sse2_cvtsd2si:
    case llvm::Intrinsic::x86_sse2_cvtsd2ss:
    case llvm::Intrinsic::x86_sse2_cvtsi2sd:
    case llvm::Intrinsic::x86_sse2_cvtsi642sd:
    case llvm::Intrinsic::x86_sse2_cvtss2sd:
    case llvm::Intrinsic::x86_sse2_cvttsd2si64:
    case llvm::Intrinsic::x86_sse2_cvttsd2si:
    case llvm::Intrinsic::x86_sse_cvtsi2ss:
    case llvm::Intrinsic::x86_sse_cvtsi642ss:
    case llvm::Intrinsic::x86_sse_cvtss2si64:
    case llvm::Intrinsic::x86_sse_cvtss2si:
    case llvm::Intrinsic::x86_sse_cvttss2si64:
    case llvm::Intrinsic::x86_sse_cvttss2si:
      handleVectorConvertIntrinsic(I, 1);
      break;
    case llvm::Intrinsic::x86_sse2_cvtdq2pd:
    case llvm::Intrinsic::x86_sse2_cvtps2pd:
    case llvm::Intrinsic::x86_sse_cvtps2pi:
    case llvm::Intrinsic::x86_sse_cvttps2pi:
      handleVectorConvertIntrinsic(I, 2);
      break;
    default:
      if (!handleUnknownIntrinsic(I))
        visitInstruction(I);
      break;
    }
  }
  // Replace call to (*Fn) with a call to (*IndirectCallWrapperFn(Fn)).
  void wrapIndirectCall(IRBuilder<> &IRB, CallSite CS) {
    Value *Fn = CS.getCalledValue();
    Value *NewFn = IRB.CreateBitCast(
        IRB.CreateCall(MS.IndirectCallWrapperFn,
                       IRB.CreateBitCast(Fn, MS.AnyFunctionPtrTy)),
        Fn->getType());
    setShadow(NewFn, getShadow(Fn));
    CS.setCalledFunction(NewFn);
  }
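  // The net effect, roughly (wrapper name and casts are illustrative; the
  // actual wrapper is whatever IndirectCallWrapperFn was configured to be):
  //   %t  = bitcast void (i32)* %f to i8*
  //   %w  = call i8* @__msan_wrapper(i8* %t)
  //   %f1 = bitcast i8* %w to void (i32)*
  //   call void %f1(i32 %x)
  // with the shadow of %f1 copied from the shadow of %f.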
  void visitCallSite(CallSite CS) {
    Instruction &I = *CS.getInstruction();
    assert((CS.isCall() || CS.isInvoke()) && "Unknown type of CallSite");
    if (CS.isCall()) {
      CallInst *Call = cast<CallInst>(&I);

      // For inline asm, do the usual thing: check argument shadow and mark all
      // outputs as clean. Note that any side effects of the inline asm that
      // are not immediately visible in its constraints are not handled.
      if (Call->isInlineAsm()) {
        visitInstruction(I);
        return;
      }

      // Allow only tail calls with the same types, otherwise
      // we may have a false positive: shadow for a non-void RetVal
      // will get propagated to a void RetVal.
      if (Call->isTailCall() && Call->getType() != Call->getParent()->getType())
        Call->setTailCall(false);

      assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");

      // We are going to insert code that relies on the fact that the callee
      // will become a non-readonly function after it is instrumented by us. To
      // prevent this code from being optimized out, mark that function
      // non-readonly in advance.
      if (Function *Func = Call->getCalledFunction()) {
        // Clear out readonly/readnone attributes.
        AttrBuilder B;
        B.addAttribute(Attribute::ReadOnly)
          .addAttribute(Attribute::ReadNone);
        Func->removeAttributes(AttributeSet::FunctionIndex,
                               AttributeSet::get(Func->getContext(),
                                                 AttributeSet::FunctionIndex,
                                                 B));
      }
    }

    IRBuilder<> IRB(&I);

    if (MS.WrapIndirectCalls && !CS.getCalledFunction())
      wrapIndirectCall(IRB, CS);

    unsigned ArgOffset = 0;
    DEBUG(dbgs() << "  CallSite: " << I << "\n");
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      unsigned i = ArgIt - CS.arg_begin();
      if (!A->getType()->isSized()) {
        DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
        continue;
      }
      unsigned Size = 0;
      Value *Store = 0;
      // Compute the Shadow for arg even if it is ByVal, because
      // in that case getShadow() will copy the actual arg shadow to
      // __msan_param_tls.
      Value *ArgShadow = getShadow(A);
      Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
      DEBUG(dbgs() << "  Arg#" << i << ": " << *A <<
            " Shadow: " << *ArgShadow << "\n");
      if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
        assert(A->getType()->isPointerTy() &&
               "ByVal argument is not a pointer!");
        Size = MS.TD->getTypeAllocSize(A->getType()->getPointerElementType());
        unsigned Alignment = CS.getParamAlignment(i + 1);
        Store = IRB.CreateMemCpy(ArgShadowBase,
                                 getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
                                 Size, Alignment);
      } else {
        Size = MS.TD->getTypeAllocSize(A->getType());
        Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
                                       kShadowTLSAlignment);
      }
      if (MS.TrackOrigins)
        IRB.CreateStore(getOrigin(A),
                        getOriginPtrForArgument(A, IRB, ArgOffset));
      assert(Size != 0 && Store != 0);
      DEBUG(dbgs() << "  Param:" << *Store << "\n");
      ArgOffset += DataLayout::RoundUpAlignment(Size, 8);
    }
    DEBUG(dbgs() << "  done with call args\n");
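    // By this point the shadow of every sized argument has been written to
    // __msan_param_tls at an 8-byte-aligned offset; the instrumented callee
    // reads its parameter shadow back from the same offsets, so shadow flows
    // across the call without changing the application ABI.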
    FunctionType *FT =
      cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0));
    if (FT->isVarArg()) {
      VAHelper->visitCallSite(CS, IRB);
    }

    // Now, get the shadow for the RetVal.
    if (!I.getType()->isSized()) return;
    IRBuilder<> IRBBefore(&I);
    // Until we have full dynamic coverage, make sure the retval shadow is 0.
    Value *Base = getShadowPtrForRetval(&I, IRBBefore);
    IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
    Instruction *NextInsn = 0;
    if (CS.isCall()) {
      NextInsn = I.getNextNode();
    } else {
      BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
      if (!NormalDest->getSinglePredecessor()) {
        // FIXME: this case is tricky, so we are just conservative here.
        // Perhaps we need to split the edge between this BB and NormalDest,
        // but a naive attempt to use SplitEdge leads to a crash.
        setShadow(&I, getCleanShadow(&I));
        setOrigin(&I, getCleanOrigin());
        return;
      }
      NextInsn = NormalDest->getFirstInsertionPt();
      assert(NextInsn &&
             "Could not find insertion point for retval shadow load");
    }
    IRBuilder<> IRBAfter(NextInsn);
    Value *RetvalShadow =
      IRBAfter.CreateAlignedLoad(getShadowPtrForRetval(&I, IRBAfter),
                                 kShadowTLSAlignment, "_msret");
    setShadow(&I, RetvalShadow);
    if (MS.TrackOrigins)
      setOrigin(&I, IRBAfter.CreateLoad(getOriginPtrForRetval(IRBAfter)));
  }
  void visitReturnInst(ReturnInst &I) {
    IRBuilder<> IRB(&I);
    Value *RetVal = I.getReturnValue();
    if (!RetVal) return;
    Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
    if (CheckReturnValue) {
      insertShadowCheck(RetVal, &I);
      Value *Shadow = getCleanShadow(RetVal);
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
    } else {
      Value *Shadow = getShadow(RetVal);
      IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
      // FIXME: make it conditional if ClStoreCleanOrigin==0
      if (MS.TrackOrigins)
        IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
    }
  }
  void visitPHINode(PHINode &I) {
    IRBuilder<> IRB(&I);
    ShadowPHINodes.push_back(&I);
    setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
                                "_msphi_s"));
    if (MS.TrackOrigins)
      setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
                                  "_msphi_o"));
  }
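  // Note: the shadow/origin PHIs created above start with no incoming values;
  // they are completed after the whole function has been visited (which is
  // why the node is pushed onto ShadowPHINodes), since a forward edge may
  // bring in a shadow that has not been defined yet.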
  void visitAllocaInst(AllocaInst &I) {
    setShadow(&I, getCleanShadow(&I));
    IRBuilder<> IRB(I.getNextNode());
    uint64_t Size = MS.TD->getTypeAllocSize(I.getAllocatedType());
    if (PoisonStack && ClPoisonStackWithCall) {
      IRB.CreateCall2(MS.MsanPoisonStackFn,
                      IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
                      ConstantInt::get(MS.IntptrTy, Size));
    } else {
      Value *ShadowBase = getShadowPtr(&I, Type::getInt8PtrTy(*MS.C), IRB);
      Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
      IRB.CreateMemSet(ShadowBase, PoisonValue, Size, I.getAlignment());
    }

    if (PoisonStack && MS.TrackOrigins) {
      setOrigin(&I, getCleanOrigin());
      SmallString<2048> StackDescriptionStorage;
      raw_svector_ostream StackDescription(StackDescriptionStorage);
      // We create a string with a description of the stack allocation and
      // pass it into __msan_set_alloca_origin.
      // It will be printed by the run-time if stack-originated UMR is found.
      // The first 4 bytes of the string are set to '----' and are overwritten
      // by the runtime with the stack origin id at the first call.
      StackDescription << "----" << I.getName() << "@" << F.getName();
      Value *Descr =
        createPrivateNonConstGlobalForString(*F.getParent(),
                                             StackDescription.str());

      IRB.CreateCall4(MS.MsanSetAllocaOrigin4Fn,
                      IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
                      ConstantInt::get(MS.IntptrTy, Size),
                      IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
                      IRB.CreatePointerCast(&F, MS.IntptrTy));
    }
  }
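  // For reference, the descriptor built above for `int x;` in function main
  // would be the string "----x@main", with the 4 dashes serving as the
  // placeholder bytes mentioned in the comment.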
  void visitSelectInst(SelectInst &I) {
    IRBuilder<> IRB(&I);
    // a = select b, c, d
    Value *S = IRB.CreateSelect(I.getCondition(), getShadow(I.getTrueValue()),
                                getShadow(I.getFalseValue()));
    if (I.getType()->isAggregateType()) {
      // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do
      // an extra "select". This results in much more compact IR.
      // Sa = select Sb, poisoned, (select b, Sc, Sd)
      S = IRB.CreateSelect(getShadow(I.getCondition()),
                           getPoisonedShadow(getShadowTy(I.getType())), S,
                           "_msprop_select_agg");
    } else {
      // Sa = (sext Sb) | (select b, Sc, Sd)
      S = IRB.CreateOr(
          S, CreateShadowCast(IRB, getShadow(I.getCondition()), S->getType()),
          "_msprop_select");
    }
    setShadow(&I, S);
    if (MS.TrackOrigins) {
      // Origins are always i32, so any vector conditions must be flattened.
      // FIXME: consider tracking vector origins for app vectors?
      Value *Cond = I.getCondition();
      if (Cond->getType()->isVectorTy()) {
        Value *ConvertedShadow = convertToShadowTyNoVec(Cond, IRB);
        Cond = IRB.CreateICmpNE(ConvertedShadow,
                                getCleanShadow(ConvertedShadow), "_mso_select");
      }
      setOrigin(&I, IRB.CreateSelect(Cond,
                getOrigin(I.getTrueValue()), getOrigin(I.getFalseValue())));
    }
  }
  void visitLandingPadInst(LandingPadInst &I) {
    // Do nothing.
    // See http://code.google.com/p/memory-sanitizer/issues/detail?id=1
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
  void visitGetElementPtrInst(GetElementPtrInst &I) {
    handleShadowOr(I);
  }
  void visitExtractValueInst(ExtractValueInst &I) {
    IRBuilder<> IRB(&I);
    Value *Agg = I.getAggregateOperand();
    DEBUG(dbgs() << "ExtractValue: " << I << "\n");
    Value *AggShadow = getShadow(Agg);
    DEBUG(dbgs() << "   AggShadow: " << *AggShadow << "\n");
    Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
    DEBUG(dbgs() << "   ResShadow: " << *ResShadow << "\n");
    setShadow(&I, ResShadow);
    setOrigin(&I, getCleanOrigin());
  }
  void visitInsertValueInst(InsertValueInst &I) {
    IRBuilder<> IRB(&I);
    DEBUG(dbgs() << "InsertValue: " << I << "\n");
    Value *AggShadow = getShadow(I.getAggregateOperand());
    Value *InsShadow = getShadow(I.getInsertedValueOperand());
    DEBUG(dbgs() << "   AggShadow: " << *AggShadow << "\n");
    DEBUG(dbgs() << "   InsShadow: " << *InsShadow << "\n");
    Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
    DEBUG(dbgs() << "   Res:       " << *Res << "\n");
    setShadow(&I, Res);
    setOrigin(&I, getCleanOrigin());
  }
  void dumpInst(Instruction &I) {
    if (CallInst *CI = dyn_cast<CallInst>(&I)) {
      errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
    } else {
      errs() << "ZZZ " << I.getOpcodeName() << "\n";
    }
    errs() << "QQQ " << I << "\n";
  }
  void visitResumeInst(ResumeInst &I) {
    DEBUG(dbgs() << "Resume: " << I << "\n");
    // Nothing to do here.
  }
  void visitInstruction(Instruction &I) {
    // Everything else: stop propagating and check for poisoned shadow.
    if (ClDumpStrictInstructions)
      dumpInst(I);
    DEBUG(dbgs() << "DEFAULT: " << I << "\n");
    for (size_t i = 0, n = I.getNumOperands(); i < n; i++)
      insertShadowCheck(I.getOperand(i), &I);
    setShadow(&I, getCleanShadow(&I));
    setOrigin(&I, getCleanOrigin());
  }
};
/// \brief AMD64-specific implementation of VarArgHelper.
struct VarArgAMD64Helper : public VarArgHelper {
  // An unfortunate workaround for asymmetric lowering of va_arg stuff.
  // See a comment in visitCallSite for more details.
  static const unsigned AMD64GpEndOffset = 48;  // AMD64 ABI Draft 0.99.6 p3.5.7
  static const unsigned AMD64FpEndOffset = 176;

  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  Value *VAArgTLSCopy;
  Value *VAArgOverflowSize;

  SmallVector<CallInst*, 16> VAStartInstrumentationList;

  VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
                    MemorySanitizerVisitor &MSV)
    : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(0), VAArgOverflowSize(0) { }
  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  ArgKind classifyArgument(Value *arg) {
    // A very rough approximation of X86_64 argument classification rules.
    Type *T = arg->getType();
    if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
      return AK_FloatingPoint;
    if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
      return AK_GeneralPurpose;
    if (T->isPointerTy())
      return AK_GeneralPurpose;
    return AK_Memory;
  }
  // For VarArg functions, store the argument shadow in an ABI-specific format
  // that corresponds to va_list layout.
  // We do this because Clang lowers va_arg in the frontend, and this pass
  // only sees the low level code that deals with va_list internals.
  // A much easier alternative (provided that Clang emits va_arg instructions)
  // would have been to associate each live instance of va_list with a copy of
  // MSanParamTLS, and extract shadow on va_arg() call in the argument list
  // order.
  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) {
    unsigned GpOffset = 0;
    unsigned FpOffset = AMD64GpEndOffset;
    unsigned OverflowOffset = AMD64FpEndOffset;
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      ArgKind AK = classifyArgument(A);
      if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
        AK = AK_Memory;
      if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
        AK = AK_Memory;
      Value *Base;
      switch (AK) {
      case AK_GeneralPurpose:
        Base = getShadowPtrForVAArgument(A, IRB, GpOffset);
        GpOffset += 8;
        break;
      case AK_FloatingPoint:
        Base = getShadowPtrForVAArgument(A, IRB, FpOffset);
        FpOffset += 16;
        break;
      case AK_Memory:
        uint64_t ArgSize = MS.TD->getTypeAllocSize(A->getType());
        Base = getShadowPtrForVAArgument(A, IRB, OverflowOffset);
        OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8);
      }
      IRB.CreateAlignedStore(MSV.getShadow(A), Base, kShadowTLSAlignment);
    }
    Constant *OverflowSize =
      ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }
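  // Example: for printf("%d %f", i, d) the format pointer takes GP offset 0,
  // i takes GP offset 8, and d takes FP offset 48 (the first FP slot), so the
  // shadow area laid out above mirrors the register save area that the
  // callee's va_arg sequence will walk.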
  /// \brief Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(Value *A, IRBuilder<> &IRB,
                                   int ArgOffset) {
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(A), 0),
                              "_msarg");
  }
  void visitVAStartInst(VAStartInst &I) {
    IRBuilder<> IRB(&I);
    VAStartInstrumentationList.push_back(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);

    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */ 24, /* alignment */ 8, false);
  }
  void visitVACopyInst(VACopyInst &I) {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr = MSV.getShadowPtr(VAListTag, IRB.getInt8Ty(), IRB);

    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */ 24, /* alignment */ 8, false);
  }
  void finalizeInstrumentation() {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
      VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS);
      Value *CopySize =
        IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
                      VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
      CallInst *OrigInst = VAStartInstrumentationList[i];
      IRBuilder<> IRB(OrigInst->getNextNode());
      Value *VAListTag = OrigInst->getArgOperand(0);
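      // The constants 16 and 8 below are field offsets in the AMD64 ABI's
      // __va_list_tag (the "magic ABI constants" flagged in the FIXME above):
      //   struct __va_list_tag {
      //     unsigned gp_offset;       // +0
      //     unsigned fp_offset;       // +4
      //     void *overflow_arg_area;  // +8
      //     void *reg_save_area;      // +16
      //   };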
      Value *RegSaveAreaPtrPtr =
        IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 16)),
          Type::getInt64PtrTy(*MS.C));
      Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr =
        MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy,
                       AMD64FpEndOffset, 16);

      Value *OverflowArgAreaPtrPtr =
        IRB.CreateIntToPtr(
          IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                        ConstantInt::get(MS.IntptrTy, 8)),
          Type::getInt64PtrTy(*MS.C));
      Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr);
      Value *OverflowArgAreaShadowPtr =
        MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
      Value *SrcPtr = IRB.CreateConstGEP1_32(VAArgTLSCopy, AMD64FpEndOffset);
      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16);
    }
  }
};
/// \brief A no-op implementation of VarArgHelper.
struct VarArgNoOpHelper : public VarArgHelper {
  VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV) {}

  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) {}

  void visitVAStartInst(VAStartInst &I) {}

  void visitVACopyInst(VACopyInst &I) {}

  void finalizeInstrumentation() {}
};
VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                 MemorySanitizerVisitor &Visitor) {
  // VarArg handling is only implemented on AMD64. False positives are possible
  // on other platforms.
  llvm::Triple TargetTriple(Func.getParent()->getTargetTriple());
  if (TargetTriple.getArch() == llvm::Triple::x86_64)
    return new VarArgAMD64Helper(Func, Msan, Visitor);
  else
    return new VarArgNoOpHelper(Func, Msan, Visitor);
}
bool MemorySanitizer::runOnFunction(Function &F) {
  MemorySanitizerVisitor Visitor(F, *this);

  // Clear out readonly/readnone attributes.
  AttrBuilder B;
  B.addAttribute(Attribute::ReadOnly)
    .addAttribute(Attribute::ReadNone);
  F.removeAttributes(AttributeSet::FunctionIndex,
                     AttributeSet::get(F.getContext(),
                                       AttributeSet::FunctionIndex, B));

  return Visitor.runOnFunction();
}