//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet,    "Number of memcpys converted to memset");
static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
                                  bool &VariableIdxFound,
                                  const DataLayout &DL) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!OpC)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}
/// Return true if Ptr1 is provably equal to Ptr2 plus a constant offset, and
/// return that constant offset.  For example, Ptr1 might be &A[42], and Ptr2
/// might be &A[40].  In this case offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            const DataLayout &DL) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();

  // Handle the trivial case first.
  if (Ptr1 == Ptr2) {
    Offset = 0;
    return true;
  }

  GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  bool VariableIdxFound = false;

  // If one pointer is a GEP and the other isn't, then see if the GEP is a
  // constant offset from the base, as in "P" and "gep P, 1".
  if (GEP1 && !GEP2 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
    Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  if (GEP2 && !GEP1 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
    Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
  // base.  After that base, they may have some number of common (and
  // potentially variable) indices.  After that they may have some constant
  // offset, which determines their offset from each other.  At this point, we
  // handle no other case.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, DL);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, DL);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}
/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+2
///   store 0 -> P+1
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the range
/// to [0, 2).  The third makes a new range [2, 3).  The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A semi range that describes the span that this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};
} // end anon namespace
bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found at least 4 stores to merge, or 16 or more bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (unsigned i = 0, e = TheStores.size(); i != e; ++i)
    if (!isa<StoreInst>(TheStores[i]))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size.  If so, check to see whether we will end up actually reducing the
  // number of stores used.
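  //
  // For example (illustrative): with MaxIntSize == 8, three stores covering
  // 8 bytes give NumPointerStores == 1 and NumByteStores == 0, so 3 > 1 and
  // we form the memset; three stores covering 12 bytes give 1 + 4 == 5, so
  // 3 > 5 fails and the stores are left alone.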
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSize();
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes if any are done a byte at a time.
  unsigned NumByteStores = Bytes - NumPointerStores * MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}


namespace {
class MemsetRanges {
  /// A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;
  typedef SmallVectorImpl<MemsetRange>::iterator range_iterator;
  const DataLayout &DL;
public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  typedef SmallVectorImpl<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize,
             SI->getPointerOperand(), SI->getAlignment(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);
};
} // end anon namespace
/// Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;

  range_iterator I = std::lower_bound(Ranges.begin(), Ranges.end(), Start,
    [](const MemsetRange &LHS, int64_t RHS) { return LHS.End < RHS; });

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
  // to insert a new range.  Handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start        = Start;
    R.End          = End;
    R.StartPtr     = Ptr;
    R.Alignment    = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range.  In this case, it couldn't
  // possibly cause it to join the prior range, because otherwise we would have
  // stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}
//===----------------------------------------------------------------------===//
//                         MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
  class MemCpyOpt : public FunctionPass {
    MemoryDependenceAnalysis *MD;
    TargetLibraryInfo *TLI;
  public:
    static char ID; // Pass identification, replacement for typeid
    MemCpyOpt() : FunctionPass(ID) {
      initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
      MD = nullptr;
      TLI = nullptr;
    }

    bool runOnFunction(Function &F) override;

  private:
    // This transformation requires dominator, alias, and memory dependence
    // info.
    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      AU.addRequired<AssumptionCacheTracker>();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AAResultsWrapperPass>();
      AU.addRequired<TargetLibraryInfoWrapperPass>();
      AU.addPreserved<GlobalsAAWrapperPass>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }

    // Helper functions
    bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
    bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
    bool processMemCpy(MemCpyInst *M);
    bool processMemMove(MemMoveInst *M);
    bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
                              uint64_t cpyLen, unsigned cpyAlign, CallInst *C);
    bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep);
    bool processMemSetMemCpyDependence(MemCpyInst *M, MemSetInst *MDep);
    bool performMemCpyToMemSetOptzn(MemCpyInst *M, MemSetInst *MDep);
    bool processByValArgument(CallSite CS, unsigned ArgNo);
    Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
                                      Value *ByteVal);

    bool iterateOnFunction(Function &F);
  };

  char MemCpyOpt::ID = 0;
}

/// The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                    false, false)
/// When scanning forward over instructions, we look for some other patterns to
/// fold away.  In particular, this looks for stores to neighboring locations of
/// memory.  If it sees enough consecutive ones, it attempts to merge them
/// together into a memcpy/memset.
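///
/// For example (illustrative IR, not from the original source): four
/// adjacent byte stores of the same splat value,
///   store i8 0, i8* %p
///   store i8 0, i8* %p1   ; %p + 1
///   store i8 0, i8* %p2   ; %p + 2
///   store i8 0, i8* %p3   ; %p + 3
/// can be collapsed into a single @llvm.memset of length 4 starting at %p.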
Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
                                             Value *StartPtr, Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store that can be splatable.  Scan to find
  // all subsequent stores of the same value to offset from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI = StartInst;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this stored value is of the same byte-splattable value.
      if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset,
                           DL))
        break;

      Ranges.addStore(Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, DL))
        break;

      Ranges.addMemSet(Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block.  This ensures that the memset is
  // dominated by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      Type *EltType =
        cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = DL.getABITypeAlignment(EltType);
    }

    AMemSet =
      Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);

    DEBUG(dbgs() << "Replace stores:\n";
          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
            dbgs() << *Range.TheStores[i] << '\n';
          dbgs() << "With: " << *AMemSet << '\n');

    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    // Zap all the stores.
    for (SmallVectorImpl<Instruction *>::const_iterator
             SI = Range.TheStores.begin(),
             SE = Range.TheStores.end(); SI != SE; ++SI) {
      MD->removeInstruction(*SI);
      (*SI)->eraseFromParent();
    }
    ++NumMemSetInfer;
  }

  return AMemSet;
}
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;
  const DataLayout &DL = SI->getModule()->getDataLayout();

  // Detect cases where we're performing call slot forwarding, but
  // happen to be using a load-store pair to implement it, rather than
  // a memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = nullptr;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        for (BasicBlock::iterator I = --BasicBlock::iterator(SI),
             E = C; I != E; --I) {
          if (AA.getModRefInfo(&*I, StoreLoc) != MRI_NoModRef) {
            C = nullptr;
            break;
          }
        }
      }

      if (C) {
        unsigned storeAlign = SI->getAlignment();
        if (!storeAlign)
          storeAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
        unsigned loadAlign = LI->getAlignment();
        if (!loadAlign)
          loadAlign = DL.getABITypeAlignment(LI->getType());

        bool changed = performCallSlotOptzn(
            LI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            std::min(storeAlign, loadAlign), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is splatable as a byte: a value like
  // "0" or "-1" of any width, as well as things like 0xA0A0A0A0 and 0.0.
  if (Value *ByteVal = isBytewiseValue(SI->getOperand(0)))
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I;  // Don't invalidate iterator.
      return true;
    }

  return false;
}
bool MemCpyOpt::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I;  // Don't invalidate iterator.
      return true;
    }
  return false;
}
/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
                                     Value *cpyDest, Value *cpySrc,
                                     uint64_t cpyLen, unsigned cpyAlign,
                                     CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.
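  //
  // A typical case (illustrative, not from the original comments) is an
  // sret call followed by a copy out of the temporary:
  //
  //   %tmp = alloca %T
  //   call void @func(%T* sret %tmp)
  //   memcpy(dest <- %tmp)
  //
  // which becomes a direct call @func(%T* sret dest) with no copy at all.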

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpy->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) *
                        destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    if (A->getDereferenceableBytes() < srcSize) {
      // If the destination is an sret parameter then only accesses that are
      // outside of the returned struct type can trap.
      if (!A->hasStructRetAttr())
        return false;

      Type *StructTy = cast<PointerType>(A->getType())->getElementType();
      if (!StructTy->isSized()) {
        // The call may never return and hence the copy-instruction may never
        // be executed, and therefore it's not safe to say "the destination
        // has at least <cpyLen> bytes, as implied by the copy-instruction".
        return false;
      }

      uint64_t destSize = DL.getTypeAllocSize(StructTy);
      if (destSize < srcSize)
        return false;
    }
  } else {
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  unsigned srcAlign = srcAlloca->getAlignment();
  if (!srcAlign)
    srcAlign = DL.getABITypeAlignment(srcAlloca->getAllocatedType());
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
                                   srcAlloca->user_end());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->getIntrinsicID() == Intrinsic::lifetime_start ||
          IT->getIntrinsicID() == Intrinsic::lifetime_end)
        continue;

    if (U != C && U != cpy)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
    if (CS.getArgument(i) == cpySrc && !CS.doesNotCapture(i))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  ModRefInfo MR = AA.getModRefInfo(C, cpyDest, srcSize);
  // If necessary, perform additional analysis.
  if (MR != MRI_NoModRef)
    MR = AA.callCapturesBefore(C, cpyDest, srcSize, &DT);
  if (MR != MRI_NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest
        : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                      cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == Dest->getType())
        CS.setArgument(i, Dest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(Dest,
                           CS.getArgument(i)->getType(), Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Update AA metadata
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet
  unsigned KnownIDs[] = {
    LLVMContext::MD_tbaa,
    LLVMContext::MD_alias_scope,
    LLVMContext::MD_noalias,
  };
  combineMetadata(C, cpy, KnownIDs);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}
/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
/// the memcpy 'MDep'.  Try to simplify M to copy from MDep's input if we can.
bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep) {
  // We can only transform memcpy's where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If dep instruction is reading from our current input, then it is a noop
  // transfer and substituting the input won't change this instruction.  Just
  // ignore the input and let someone else zap MDep.  This handles cases like:
  //    memcpy(a <- a)
  //    memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the length of the memcpy's must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers.  For example, in:
  //    memcpy(a <- b)
  //    *b = 42;
  //    memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep = MD->getPointerDependencyFrom(
      MemoryLocation::getForSource(MDep), false, M, M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap.  We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.
  //
  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy?  For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());

  IRBuilder<> Builder(M);
  if (UseMemMove)
    Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                          Align, M->isVolatile());
  else
    Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                         Align, M->isVolatile());

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}
/// We've found that the (upward scanning) memory dependence of \p MemCpy is
/// \p MemSet.  Try to simplify \p MemSet to only set the trailing bytes that
/// weren't copied over by \p MemCpy.
///
/// In other words, transform:
/// \code
///   memset(dst, c, dst_size);
///   memcpy(dst, src, src_size);
/// \endcode
/// into:
/// \code
///   memcpy(dst, src, src_size);
///   memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
/// \endcode
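///
/// For example (illustrative): memset(dst, c, 100) followed by
/// memcpy(dst, src, 60) becomes memcpy(dst, src, 60) followed by
/// memset(dst + 60, c, 40); when src_size >= dst_size the trailing
/// memset has length 0.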
bool MemCpyOpt::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
                                              MemSetInst *MemSet) {
  // We can only transform memset/memcpy with the same destination.
  if (MemSet->getDest() != MemCpy->getDest())
    return false;

  // Check that there are no other dependencies on the memset destination.
  MemDepResult DstDepInfo = MD->getPointerDependencyFrom(
      MemoryLocation::getForDest(MemSet), false, MemCpy, MemCpy->getParent());
  if (DstDepInfo.getInst() != MemSet)
    return false;

  // Use the same i8* dest as the memcpy, killing the memset dest if different.
  Value *Dest = MemCpy->getRawDest();
  Value *DestSize = MemSet->getLength();
  Value *SrcSize = MemCpy->getLength();

  // By default, create an unaligned memset.
  unsigned Align = 1;
  // If Dest is aligned, and SrcSize is constant, use the minimum alignment
  // of the sum.
  const unsigned DestAlign =
      std::max(MemSet->getAlignment(), MemCpy->getAlignment());
  if (DestAlign > 1)
    if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
      Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);

  IRBuilder<> Builder(MemCpy);

  // If the sizes have different types, zext the smaller one.
  if (DestSize->getType() != SrcSize->getType()) {
    if (DestSize->getType()->getIntegerBitWidth() >
        SrcSize->getType()->getIntegerBitWidth())
      SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
    else
      DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
  }

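  // Compute the number of bytes the trailing memset must cover: zero when
  // the memcpy already covers the whole memset'd region, otherwise
  // dst_size - src_size.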
  Value *MemsetLen =
      Builder.CreateSelect(Builder.CreateICmpULE(DestSize, SrcSize),
                           ConstantInt::getNullValue(DestSize->getType()),
                           Builder.CreateSub(DestSize, SrcSize));
  Builder.CreateMemSet(Builder.CreateGEP(Dest, SrcSize), MemSet->getOperand(1),
                       MemsetLen, Align);

  MD->removeInstruction(MemSet);
  MemSet->eraseFromParent();
  return true;
}
/// Transform memcpy to memset when its source was just memset.
/// In other words, turn:
/// \code
///   memset(dst1, c, dst1_size);
///   memcpy(dst2, dst1, dst2_size);
/// \endcode
/// into:
/// \code
///   memset(dst1, c, dst1_size);
///   memset(dst2, c, dst2_size);
/// \endcode
/// when dst2_size <= dst1_size.
///
/// The \p MemCpy must have a Constant length.
bool MemCpyOpt::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
                                           MemSetInst *MemSet) {
  // This only makes sense on memcpy(..., memset(...), ...).
  if (MemSet->getRawDest() != MemCpy->getRawSource())
    return false;

  ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
  ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
  // Make sure the memcpy doesn't read any more than what the memset wrote.
  // Don't worry about sizes larger than i64.
  if (!MemSetSize || CopySize->getZExtValue() > MemSetSize->getZExtValue())
    return false;

  IRBuilder<> Builder(MemCpy);
  Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
                       CopySize, MemCpy->getAlignment());
  return true;
}
/// Perform simplification of memcpy's.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances).  This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  // We can only optimize non-volatile memcpy's.
  if (M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    MD->removeInstruction(M);
    M->eraseFromParent();
    return false;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
        IRBuilder<> Builder(M);
        Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
                             M->getAlignment(), false);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  MemDepResult DepInfo = MD->getDependency(M);

  // Try to turn a partially redundant memset + memcpy into
  // memcpy + smaller memset.  We don't need the memcpy size for this.
  if (DepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
      if (processMemSetMemCpyDependence(M, MDep))
        return true;

  // The optimizations after this point require the memcpy size.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (!CopySize) return false;

  // There are four possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  //   c) memcpy from freshly alloca'd space or space that has just started its
  //      lifetime copies undefined data, and we can therefore eliminate the
  //      memcpy in favor of the data that was already at the destination.
  //   d) memcpy from a just-memset'd source can be turned into memset.
  if (DepInfo.isClobber()) {
    if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
      if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                               CopySize->getZExtValue(), M->getAlignment(),
                               C)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        return true;
      }
    }
  }

  MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(SrcLoc, true,
                                                         M, M->getParent());

  if (SrcDepInfo.isClobber()) {
    if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
      return processMemCpyMemCpyDependence(M, MDep);
  } else if (SrcDepInfo.isDef()) {
    Instruction *I = SrcDepInfo.getInst();
    bool hasUndefContents = false;

    if (isa<AllocaInst>(I)) {
      hasUndefContents = true;
    } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start)
        if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
          if (LTSize->getZExtValue() >= CopySize->getZExtValue())
            hasUndefContents = true;
    }

    if (hasUndefContents) {
      MD->removeInstruction(M);
      M->eraseFromParent();
      ++NumMemCpyInstr;
      return true;
    }
  }

  if (SrcDepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
      if (performMemCpyToMemSetOptzn(M, MDep)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  return false;
}
/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
/// to not alias.
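///
/// For example (illustrative): a memmove between two distinct allocas can
/// never overlap, so it is safe to rewrite it as the equivalent memcpy
/// intrinsic.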
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();

  if (!TLI->has(LibFunc::memmove))
    return false;

  // See if the pointers alias.
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(M)))
    return false;

  DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  // If not, then we know we can transform this.
  Module *Mod = M->getParent()->getParent()->getParent();
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(Mod, Intrinsic::memcpy,
                                                 ArgTys));

  // MemDep may have overly conservative information about this instruction, just
  // conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}
/// This is called on every byval argument in call sites.
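///
/// For example (illustrative): given
///   memcpy(%tmp <- %src)
///   call void @foo(%struct.T* byval %tmp)
/// the call can be rewritten to pass %src directly, because byval already
/// gives the callee its own copy of the argument:
///   call void @foo(%struct.T* byval %src)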
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
  const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout();
  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
  MemDepResult DepInfo = MD->getPointerDependencyFrom(
      MemoryLocation(ByValArg, ByValSize), true, CS.getInstruction(),
      CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of the
  // result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (!MDep || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger or equal to the size of the byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval.  If the call doesn't specify the alignment,
  // then it is some target specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
  if (ByValAlign == 0) return false;

  // If it is greater than the memcpy, then we check to see if we can force the
  // source of the memcpy to the alignment we need.  If we fail, we bail out.
  AssumptionCache &AC =
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
          *CS->getParent()->getParent());
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  if (MDep->getAlignment() < ByValAlign &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL,
                                 CS.getInstruction(), &AC, &DT) < ByValAlign)
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy and
  // the byval call.
  //    memcpy(b <- a)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to transform the second memcpy into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
      MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
                                   CS.getInstruction(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  DEBUG(dbgs() << "MemCpyOpt: Forwarding memcpy to byval:\n"
               << "  " << *MDep << "\n"
               << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good!  Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}
/// Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (auto CS = CallSite(I)) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.isByValArgument(i))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB->begin()) --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}
/// This is the main transformation entry point for a function.
bool MemCpyOpt::runOnFunction(Function &F) {
  if (skipOptnoneFunction(F))
    return false;

  bool MadeChange = false;
  MD = &getAnalysis<MemoryDependenceAnalysis>();
  TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

  // If we don't have at least memset and memcpy, there is little point in doing
  // anything here.  These are required by a freestanding implementation, so if
  // even they are disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc::memset) || !TLI->has(LibFunc::memcpy))
    return false;

  while (1) {
    if (!iterateOnFunction(F))