//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include <list>
using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");
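
/// GetOffsetFromIndex - Compute the constant byte offset implied by the GEP
/// indices starting at operand Idx. If a non-constant index is encountered,
/// VariableIdxFound is set and the returned offset is not meaningful.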
static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
                                  bool &VariableIdxFound,
                                  const DataLayout &DL) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!OpC)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// IsPointerOffset - Return true if Ptr1 is provably equal to Ptr2 plus a
/// constant offset, and return that constant offset.  For example, Ptr1 might
/// be &A[42], and Ptr2 might be &A[40].  In this case offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            const DataLayout &DL) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();

  // Handle the trivial case first.
  if (Ptr1 == Ptr2) {
    Offset = 0;
    return true;
  }

  GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  bool VariableIdxFound = false;

  // If one pointer is a GEP and the other isn't, then see if the GEP is a
  // constant offset from the base, as in "P" and "gep P, 1".
  if (GEP1 && !GEP2 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
    Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  if (GEP2 && !GEP1 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
    Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an
  // identical base.  After that base, they may have some number of common (and
  // potentially variable) indices.  After that they handle some constant
  // offset, which determines their offset from each other.  At this point, we
  // handle no other case.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, DL);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, DL);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}

/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+2
///   store 0 -> P+3
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the range
/// to [0, 2).  The third makes a new range [2, 3).  The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A semi range that describes the span that this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found more than 4 stores to merge or 16 bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (unsigned i = 0, e = TheStores.size(); i != e; ++i)
    if (!isa<StoreInst>(TheStores[i]))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size. If so, check to see whether we will end up actually reducing the
  // number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSize();
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes if any are done a byte at a time.
  unsigned NumByteStores = Bytes - NumPointerStores * MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}
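
/// MemsetRanges - A collection of MemsetRange objects, kept sorted by start
/// offset, into which new stores and memsets are merged as they are found.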
namespace {
class MemsetRanges {
  /// Ranges - A sorted list of the memset ranges.  We use std::list here
  /// because each element is relatively large and expensive to copy.
  std::list<MemsetRange> Ranges;
  typedef std::list<MemsetRange>::iterator range_iterator;
  const DataLayout &DL;
public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  typedef std::list<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize,
             SI->getPointerOperand(), SI->getAlignment(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);
};
} // end anon namespace

/// addRange - Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
///
/// Do a linear search of the ranges to see if this can be joined and/or to
/// find the insertion point in the list.  We keep the ranges sorted for
/// simplicity here.  This is a linear search of a linked list, which is ugly,
/// however the number of ranges is limited, so this won't get crazy slow.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;
  range_iterator I = Ranges.begin(), E = Ranges.end();
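
  // Scan forward to the first existing range whose end is at or past Start;
  // any earlier range cannot overlap or merge with the new one.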
  while (I != E && Start > I->End)
    ++I;

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
  // to insert a new range.  Handle this now.
  if (I == E || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start        = Start;
    R.End          = End;
    R.StartPtr     = Ptr;
    R.Alignment    = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if the range extends the start of the range.  In this case, it couldn't
  // possibly cause it to join the prior range, because otherwise we would have
  // stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != E && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
  class MemCpyOpt : public FunctionPass {
    MemoryDependenceAnalysis *MD;
    TargetLibraryInfo *TLI;
  public:
    static char ID; // Pass identification, replacement for typeid
    MemCpyOpt() : FunctionPass(ID) {
      initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
      MD = nullptr;
      TLI = nullptr;
    }

    bool runOnFunction(Function &F) override;

  private:
    // This transformation requires dominator postdominator info
    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      AU.addRequired<AssumptionCacheTracker>();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetLibraryInfoWrapperPass>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }

    // Helper functions
    bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
    bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
    bool processMemCpy(MemCpyInst *M);
    bool processMemMove(MemMoveInst *M);
    bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
                              uint64_t cpyLen, unsigned cpyAlign, CallInst *C);
    bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                       uint64_t MSize);
    bool processByValArgument(CallSite CS, unsigned ArgNo);
    Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
                                      Value *ByteVal);

    bool iterateOnFunction(Function &F);
  };

  char MemCpyOpt::ID = 0;
}

// createMemCpyOptPass - The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// tryMergingIntoMemset - When scanning forward over instructions, we look for
/// some other patterns to fold away.  In particular, this looks for stores to
/// neighboring locations of memory.  If it sees enough consecutive ones, it
/// attempts to merge them together into a memcpy/memset.
Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
                                             Value *StartPtr, Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store that can be splatable.  Scan to find
  // all subsequent stores of the same value to offset from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored or not.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI = StartInst;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out.  We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this stored value is of the same byte-splattable value.
      if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset,
                           DL))
        break;

      Ranges.addStore(Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, DL))
        break;

      Ranges.addMemSet(Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put it right before the first instruction that
  // isn't part of the memset block.  This ensures that the memset is dominated
  // by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      Type *EltType =
        cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = DL.getABITypeAlignment(EltType);
    }

    AMemSet =
      Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);

    DEBUG(dbgs() << "Replace stores:\n";
          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
            dbgs() << *Range.TheStores[i] << '\n';
          dbgs() << "With: " << *AMemSet << '\n');

    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    // Zap all the stores.
    for (SmallVectorImpl<Instruction *>::const_iterator
           SI = Range.TheStores.begin(),
           SE = Range.TheStores.end(); SI != SE; ++SI) {
      MD->removeInstruction(*SI);
      (*SI)->eraseFromParent();
    }
    ++NumMemSetInfer;
  }

  return AMemSet;
}
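
/// processStore - Try two transformations on a store: first, the call-slot
/// forwarding pattern implemented with a load/store pair; second, merging the
/// store with neighboring stores and memsets into a larger memset.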
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;
  const DataLayout &DL = SI->getModule()->getDataLayout();

  // Detect cases where we're performing call slot forwarding, but
  // happen to be using a load-store pair to implement it, rather than
  // a memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = nullptr;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
        AliasAnalysis::Location StoreLoc = AA.getLocation(SI);
        for (BasicBlock::iterator I = --BasicBlock::iterator(SI),
                                  E = C; I != E; --I) {
          if (AA.getModRefInfo(&*I, StoreLoc) != AliasAnalysis::NoModRef) {
            C = nullptr;
            break;
          }
        }
      }
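
      // If we found a candidate call, try the call-slot transformation with
      // the load/store pair standing in for the memcpy.  Unspecified (zero)
      // alignments fall back to the ABI alignment of the value's type.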
      if (C) {
        unsigned storeAlign = SI->getAlignment();
        if (!storeAlign)
          storeAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
        unsigned loadAlign = LI->getAlignment();
        if (!loadAlign)
          loadAlign = DL.getABITypeAlignment(LI->getType());

        bool changed = performCallSlotOptzn(
            LI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            std::min(storeAlign, loadAlign), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset'able a
  // byte at a time like "0" or "-1" or any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  if (Value *ByteVal = isBytewiseValue(SI->getOperand(0)))
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I;      // Don't invalidate iterator.
      return true;
    }

  return false;
}
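
/// processMemSet - Try to widen this memset by merging it with neighboring
/// stores and memsets of the same byte value.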
bool MemCpyOpt::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I;      // Don't invalidate iterator.
      return true;
    }
  return false;
}

/// performCallSlotOptzn - takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
                                     Value *cpyDest, Value *cpySrc,
                                     uint64_t cpyLen, unsigned cpyAlign,
                                     CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpy->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) *
                        destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    if (A->getDereferenceableBytes() < srcSize) {
      // If the destination is an sret parameter then only accesses that are
      // outside of the returned struct type can trap.
      if (!A->hasStructRetAttr())
        return false;

      Type *StructTy = cast<PointerType>(A->getType())->getElementType();
      if (!StructTy->isSized()) {
        // The call may never return and hence the copy-instruction may never
        // be executed, and therefore it's not safe to say "the destination
        // has at least <cpyLen> bytes, as implied by the copy-instruction",
        return false;
      }

      uint64_t destSize = DL.getTypeAllocSize(StructTy);
      if (destSize < srcSize)
        return false;
    }
  } else {
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  unsigned srcAlign = srcAlloca->getAlignment();
  if (!srcAlign)
    srcAlign = DL.getABITypeAlignment(srcAlloca->getAllocatedType());
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
                                   srcAlloca->user_end());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->getIntrinsicID() == Intrinsic::lifetime_start ||
          IT->getIntrinsicID() == Intrinsic::lifetime_end)
        continue;

    if (U != C && U != cpy)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
    if (CS.getArgument(i) == cpySrc && !CS.doesNotCapture(i))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  AliasAnalysis::ModRefResult MR = AA.getModRefInfo(C, cpyDest, srcSize);
  // If necessary, perform additional analysis.
  if (MR != AliasAnalysis::NoModRef)
    MR = AA.callCapturesBefore(C, cpyDest, srcSize, &DT);
  if (MR != AliasAnalysis::NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
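  // Rewrite every call argument that refers to cpySrc so that it refers to
  // cpyDest instead, inserting a pointer cast when the pointer types differ.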
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest
        : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                      cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == Dest->getType())
        CS.setArgument(i, Dest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(Dest,
                          CS.getArgument(i)->getType(), Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Update AA metadata
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet
  unsigned KnownIDs[] = {
    LLVMContext::MD_tbaa,
    LLVMContext::MD_alias_scope,
    LLVMContext::MD_noalias,
  };
  combineMetadata(C, cpy, KnownIDs);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

/// processMemCpyMemCpyDependence - We've found that the (upward scanning)
/// memory dependence of memcpy 'M' is the memcpy 'MDep'.  Try to simplify M to
/// copy from MDep's input if we can.  MSize is the size of M's copy.
///
bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                              uint64_t MSize) {
  // We can only transform memcpy's where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If dep instruction is reading from our current input, then it is a noop
  // transfer and substituting the input won't change this instruction.  Just
  // ignore the input and let someone else zap MDep.  This handles cases like:
  //    memcpy(a <- a)
  //    memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the length of the memcpy's must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers.  For example, in:
  //    memcpy(a <- b)
  //    *b = 42;
  //    memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AA.getLocationForSource(MDep),
                                 false, M, M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap.  We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.

  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy? For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());
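
  // Build the replacement transfer, reading directly from MDep's source
  // rather than from the intermediate buffer.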
  IRBuilder<> Builder(M);
  if (UseMemMove)
    Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                          Align, M->isVolatile());
  else
    Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                         Align, M->isVolatile());

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}

/// processMemCpy - perform simplification of memcpy's.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  // We can only optimize non-volatile memcpy's.
  if (M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    MD->removeInstruction(M);
    M->eraseFromParent();
    return false;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
        IRBuilder<> Builder(M);
        Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
                             M->getAlignment(), false);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  // The optimizations after this point require the memcpy size.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (!CopySize) return false;

  // There are three possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundance for DSE.
  //   b) call-memcpy xform for return slot optimization.
  //   c) memcpy from freshly alloca'd space or space that has just started its
  //      lifetime copies undefined data, and we can therefore eliminate the
  //      memcpy in favor of the data that was already at the destination.
  MemDepResult DepInfo = MD->getDependency(M);
  if (DepInfo.isClobber()) {
    if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
      if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                               CopySize->getZExtValue(), M->getAlignment(),
                               C)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        return true;
      }
    }
  }

  AliasAnalysis::Location SrcLoc = AliasAnalysis::getLocationForSource(M);
  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(SrcLoc, true,
                                                         M, M->getParent());
  if (SrcDepInfo.isClobber()) {
    if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
      return processMemCpyMemCpyDependence(M, MDep, CopySize->getZExtValue());
  } else if (SrcDepInfo.isDef()) {
    Instruction *I = SrcDepInfo.getInst();
    bool hasUndefContents = false;
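
    // A copy whose source is a freshly created alloca, or memory whose
    // lifetime_start covers at least the copied size, reads undefined
    // contents, so the memcpy can simply be removed.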
    if (isa<AllocaInst>(I)) {
      hasUndefContents = true;
    } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start)
        if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
          if (LTSize->getZExtValue() >= CopySize->getZExtValue())
            hasUndefContents = true;
    }

    if (hasUndefContents) {
      MD->removeInstruction(M);
      M->eraseFromParent();
      ++NumMemCpyInstr;
      return true;
    }
  }

  return false;
}

/// processMemMove - Transforms memmove calls to memcpy calls when the src/dst
/// are guaranteed not to alias.
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  if (!TLI->has(LibFunc::memmove))
    return false;

  // See if the pointers alias.
  if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(M)))
    return false;

  DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  // If not, then we know we can transform this.
  Module *Mod = M->getParent()->getParent()->getParent();
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(Mod, Intrinsic::memcpy,
                                                 ArgTys));

  // MemDep may have over conservative information about this instruction, just
  // conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// processByValArgument - This is called on every byval argument in call sites.
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
  const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout();
  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
  MemDepResult DepInfo =
    MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
                                 true, CS.getInstruction(),
                                 CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of the
  // result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (!MDep || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger or equal to the size of the byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval.  If the call doesn't specify the alignment,
  // then it is some target specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
  if (ByValAlign == 0) return false;

  // If it is greater than the memcpy, then we check to see if we can force the
  // source of the memcpy to the alignment we need.  If we fail, we bail out.
  AssumptionCache &AC =
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
          *CS->getParent()->getParent());
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  if (MDep->getAlignment() < ByValAlign &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL,
                                 CS.getInstruction(), &AC, &DT) < ByValAlign)
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy and
  // the byval call.
  //    memcpy(a <- b)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to transform the second memcpy into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AliasAnalysis::getLocationForSource(MDep),
                                 false, CS.getInstruction(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  DEBUG(dbgs() << "MemCpyOpt: Forwarding memcpy to byval:\n"
               << "  " << *MDep << "\n"
               << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good!  Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// iterateOnFunction - Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = BI++;
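
      // Set when a transformation may expose further opportunities at this
      // position; the iterator is then backed up so the just-transformed
      // point is scanned again.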
      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (CallSite CS = (Value*)I) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.isByValArgument(i))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB->begin()) --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

// MemCpyOpt::runOnFunction - This is the main transformation entry point for a
// function.
//
bool MemCpyOpt::runOnFunction(Function &F) {
  if (skipOptnoneFunction(F))
    return false;

  bool MadeChange = false;
  MD = &getAnalysis<MemoryDependenceAnalysis>();
  TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

  // If we don't have at least memset and memcpy, there is little point of doing
  // anything here.  These are required by a freestanding implementation, so if
  // even they are disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc::memset) || !TLI->has(LibFunc::memcpy))
    return false;

  while (1) {
    if (!iterateOnFunction(F))