//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "codegenprepare"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumRetsDup, "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
static cl::opt<bool> DisableBranchOpts(
    "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
    cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableSelectToBranch(
    "disable-cgp-select2branch", cl::Hidden, cl::init(false),
    cl::desc("Disable select to branch conversion."));
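
// Illustrative use only: these flags are normally exercised through 'opt',
// e.g. 'opt -codegenprepare -disable-cgp-branch-opts -S input.ll' (the file
// name is a placeholder; the flag names are the ones registered above).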
namespace {
typedef SmallPtrSet<Instruction *, 16> SetOfInstrs;
typedef DenseMap<Instruction *, Type *> InstrToOrigTy;

  class CodeGenPrepare : public FunctionPass {
    /// TLI - Keep a pointer of a TargetLowering to consult for determining
    /// transformation profitability.
    const TargetMachine *TM;
    const TargetLowering *TLI;
    const TargetLibraryInfo *TLInfo;
    DominatorTree *DT;

    /// CurInstIterator - As we scan instructions optimizing them, this is the
    /// next instruction to optimize. Xforms that can invalidate this should
    /// update it.
    BasicBlock::iterator CurInstIterator;

    /// Keeps track of non-local addresses that have been sunk into a block.
    /// This allows us to avoid inserting duplicate code for blocks with
    /// multiple load/stores of the same address.
    ValueMap<Value*, Value*> SunkAddrs;

    /// Keeps track of all truncates inserted for the current function.
    SetOfInstrs InsertedTruncsSet;
    /// Keeps track of the types of the related instructions before their
    /// promotion for the current function.
    InstrToOrigTy PromotedInsts;

    /// ModifiedDT - If CFG is modified in any way, the dominator tree may
    /// need to be updated.
    bool ModifiedDT;

    /// OptSize - True if optimizing for size.
    bool OptSize;

  public:
    static char ID; // Pass identification, replacement for typeid
    explicit CodeGenPrepare(const TargetMachine *TM = 0)
      : FunctionPass(ID), TM(TM), TLI(0) {
        initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
      }
    bool runOnFunction(Function &F) override;

    const char *getPassName() const override { return "CodeGen Prepare"; }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addPreserved<DominatorTreeWrapperPass>();
      AU.addRequired<TargetLibraryInfo>();
    }

  private:
    bool EliminateFallThrough(Function &F);
    bool EliminateMostlyEmptyBlocks(Function &F);
    bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
    void EliminateMostlyEmptyBlock(BasicBlock *BB);
    bool OptimizeBlock(BasicBlock &BB);
    bool OptimizeInst(Instruction *I);
    bool OptimizeMemoryInst(Instruction *I, Value *Addr, Type *AccessTy);
    bool OptimizeInlineAsmInst(CallInst *CS);
    bool OptimizeCallInst(CallInst *CI);
    bool SinkExtExpand(CastInst *I);
    bool MoveExtToFormExtLoad(Instruction *I);
    bool OptimizeExtUses(Instruction *I);
    bool OptimizeSelectInst(SelectInst *SI);
    bool OptimizeShuffleVectorInst(ShuffleVectorInst *SI);
    bool DupRetToEnableTailCallOpts(BasicBlock *BB);
    bool PlaceDbgValues(Function &F);
  };
}
char CodeGenPrepare::ID = 0;
static void *initializeCodeGenPreparePassOnce(PassRegistry &Registry) {
  initializeTargetLibraryInfoPass(Registry);
  PassInfo *PI = new PassInfo(
      "Optimize for code generation", "codegenprepare", &CodeGenPrepare::ID,
      PassInfo::NormalCtor_t(callDefaultCtor<CodeGenPrepare>), false, false,
      PassInfo::TargetMachineCtor_t(callTargetMachineCtor<CodeGenPrepare>));
  Registry.registerPass(*PI, true);
  return PI;
}

void llvm::initializeCodeGenPreparePass(PassRegistry &Registry) {
  CALL_ONCE_INITIALIZATION(initializeCodeGenPreparePassOnce)
}

FunctionPass *llvm::createCodeGenPreparePass(const TargetMachine *TM) {
  return new CodeGenPrepare(TM);
}
bool CodeGenPrepare::runOnFunction(Function &F) {
  bool EverMadeChange = false;
  // Clear per function information.
  InsertedTruncsSet.clear();
  PromotedInsts.clear();

  ModifiedDT = false;
  if (TM) TLI = TM->getTargetLowering();
  TLInfo = &getAnalysis<TargetLibraryInfo>();
  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DT = DTWP ? &DTWP->getDomTree() : 0;
  OptSize = F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                           Attribute::OptimizeForSize);

  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
  if (!OptSize && TLI && TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
        TLI->getBypassSlowDivWidths();
    for (Function::iterator I = F.begin(); I != F.end(); I++)
      EverMadeChange |= bypassSlowDivision(F, I, BypassWidths);
  }

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= EliminateMostlyEmptyBlocks(F);

  // If llvm.dbg.value is far away from the value, then ISel may not be able
  // to handle it properly. ISel will drop llvm.dbg.value if it cannot
  // find a node corresponding to the value.
  EverMadeChange |= PlaceDbgValues(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    for (Function::iterator I = F.begin(); I != F.end(); ) {
      BasicBlock *BB = I++;
      MadeChange |= OptimizeBlock(*BB);
    }
    EverMadeChange |= MadeChange;
  }

  SunkAddrs.clear();
  if (!DisableBranchOpts) {
    MadeChange = false;
    SmallPtrSet<BasicBlock*, 8> WorkList;
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
      SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));
      MadeChange |= ConstantFoldTerminator(BB, true);
      if (!MadeChange) continue;

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = *WorkList.begin();
      WorkList.erase(BB);
      SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));

      DeleteDeadBlock(BB);

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= EliminateFallThrough(F);

    if (MadeChange)
      ModifiedDT = true;
    EverMadeChange |= MadeChange;
  }

  if (ModifiedDT && DT)
    DT->recalculate(F);

  return EverMadeChange;
}
/// EliminateFallThrough - Merge basic blocks which are connected
/// by a single edge, where one of the basic blocks has a single successor
/// pointing to the other basic block, which has a single predecessor.
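/// For example (illustrative CFG), a pair such as:
///   bb1: ...
///        br label %bb2
///   bb2:                                 ; preds = %bb1
///        ...
/// collapses into a single block when bb2 has no other predecessors.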
bool CodeGenPrepare::EliminateFallThrough(Function &F) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = I++;
    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      DEBUG(dbgs() << "To merge:\n" << *SinglePred << "\n\n\n");
      // Remember if SinglePred was the entry block of the function.
      // If so, we will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(BB, this);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      // We have erased a block. Update the iterator.
      I = BB;
    }
  }
  return Changed;
}
/// EliminateMostlyEmptyBlocks - eliminate blocks that contain only PHI nodes,
/// debug info directives, and an unconditional branch. Passes before isel
/// (e.g. LSR/loopsimplify) often split edges in ways that are non-optimal for
/// isel. Start by eliminating these blocks so we can split them the way we
/// want them.
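/// A sketch of the kind of block this removes (illustrative IR):
///   bb:                                  ; preds = %entry
///     %p = phi i32 [ %x, %entry ]
///     br label %dest
/// The block is folded into %dest by updating %dest's PHI nodes.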
bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
  bool MadeChange = false;
  // Note that this intentionally skips the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = I++;

    // If this block doesn't end with an uncond branch, ignore it.
    BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || !BI->isUnconditional())
      continue;

    // If the instruction before the branch (skipping debug info) isn't a phi
    // node, then other stuff is happening here.
    BasicBlock::iterator BBI = BI;
    if (BBI != BB->begin()) {
      --BBI;
      while (isa<DbgInfoIntrinsic>(BBI)) {
        if (BBI == BB->begin())
          break;
        --BBI;
      }
      if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
        continue;
    }

    // Do not break infinite loops.
    BasicBlock *DestBB = BI->getSuccessor(0);
    if (DestBB == BB)
      continue;

    if (!CanMergeBlocks(BB, DestBB))
      continue;

    EliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}
/// CanMergeBlocks - Return true if we can merge BB into DestBB if there is a
/// single uncond branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  BasicBlock::const_iterator BBI = BB->begin();
  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
    for (Value::const_use_iterator UI = PN->use_begin(), E = PN->use_end();
         UI != E; ++UI) {
      const Instruction *User = cast<Instruction>(*UI);
      if (User->getParent() != DestBB || !isa<PHINode>(User))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (User->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(User))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block. If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true; // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) { // Common predecessor?
      BBI = DestBB->begin();
      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
        const Value *V1 = PN->getIncomingValueForBlock(Pred);
        const Value *V2 = PN->getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}
/// EliminateMostlyEmptyBlock - Eliminate a basic block that has only phi nodes
/// and an unconditional branch in it.
void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      // Remember if SinglePred was the entry block of the function. If so, we
      // will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(DestBB, this);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  PHINode *PN;
  for (BasicBlock::iterator BBI = DestBB->begin();
       (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN->removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN->addIncoming(InValPhi->getIncomingValue(i),
                        InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN->addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  if (DT && !ModifiedDT) {
    BasicBlock *BBIDom = DT->getNode(BB)->getIDom()->getBlock();
    BasicBlock *DestBBIDom = DT->getNode(DestBB)->getIDom()->getBlock();
    BasicBlock *NewIDom = DT->findNearestCommonDominator(BBIDom, DestBBIDom);
    DT->changeImmediateDominator(DestBB, NewIDom);
    DT->eraseNode(BB);
  }
  BB->eraseFromParent();
  ++NumBlocksElim;

  DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}
/// SinkCast - Sink the specified cast instruction into its user blocks.
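/// For example (illustrative IR), a cast defined in one block and used in
/// another:
///   bb0:
///     %c = bitcast i8* %p to i32*
///     br label %bb1
///   bb1:
///     %v = load i32* %c
/// gets a local copy of the cast in bb1, so fewer values are live across
/// the block boundary.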
static bool SinkCast(CastInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in. For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      UserBB = PN->getIncomingBlock(UI);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedCast =
        CastInst::Create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
                         InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
    ++NumCastUses;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}
/// OptimizeNoopCopyExpression - If the specified cast instruction is a noop
/// copy (e.g. it's casting from one pointer type to another, i32->i8 on PPC),
/// sink it into user blocks to reduce the number of virtual
/// registers that must be created and coalesced.
///
/// Return true if any changes are made.
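///
/// For example (illustrative, target-dependent): on PPC, where i8 is promoted
/// to i32, a 'trunc i32 %x to i8' becomes a noop copy after type legalization
/// and can therefore be sunk into its user blocks like any other free cast.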
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI) {
  // If this is a noop copy,
  EVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(CI->getType());

  // This is an fp<->int conversion?
  if (SrcVT.isInteger() != DstVT.isInteger())
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT.bitsLT(DstVT)) return false;

  // If these values will be promoted, find out what they will be promoted
  // to. This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
      TargetLowering::TypePromoteInteger)
    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
      TargetLowering::TypePromoteInteger)
    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  return SinkCast(CI);
}
/// OptimizeCmpExpression - sink the given CmpInst into user blocks to reduce
/// the number of virtual registers that must be created and coalesced. This is
/// a clear win except on targets with multiple condition code registers
/// (PowerPC), where it might lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
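///
/// For example (illustrative IR), a compare defined in one block but
/// branched on in another:
///   bb0:
///     %cmp = icmp ult i32 %a, %b
///     br label %bb1
///   bb1:
///     br i1 %cmp, label %t, label %f
/// gets a local copy of the icmp in bb1, next to the branch that consumes it.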
static bool OptimizeCmpExpression(CmpInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCmp - Only insert a cmp in each block once.
  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
      InsertedCmp =
        CmpInst::Create(CI->getOpcode(),
                        CI->getPredicate(), CI->getOperand(0),
                        CI->getOperand(1), "", InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
    ++NumCmpUses;
  }

  // If we removed all uses, nuke the cmp.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}
namespace {
class CodeGenPrepareFortifiedLibCalls : public SimplifyFortifiedLibCalls {
protected:
  void replaceCall(Value *With) override {
    CI->replaceAllUsesWith(With);
    CI->eraseFromParent();
  }
  bool isFoldable(unsigned SizeCIOp, unsigned, bool) const override {
    if (ConstantInt *SizeCI =
            dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp)))
      return SizeCI->isAllOnesValue();
    return false;
  }
};
} // end anonymous namespace
bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
  BasicBlock *BB = CI->getParent();

  // Lower inline assembly if we can.
  // If we found an inline asm expression, and if the target knows how to
  // lower it to normal LLVM code, do so now.
  if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
    if (TLI->ExpandInlineAsm(CI)) {
      // Avoid invalidating the iterator.
      CurInstIterator = BB->begin();
      // Avoid processing instructions out of order, which could cause
      // reuse before a value is defined.
      SunkAddrs.clear();
      return true;
    }
    // Sink address computing for memory operands into the block.
    if (OptimizeInlineAsmInst(CI))
      return true;
  }

  // Lower all uses of llvm.objectsize.*
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (II && II->getIntrinsicID() == Intrinsic::objectsize) {
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);
    Type *ReturnTy = CI->getType();
    Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
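    // For example (illustrative IR): '@llvm.objectsize.i64(i8* %p, i1 false)'
    // folds to -1 here (unknown maximum size), and the min variant (i1 true)
    // folds to 0.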
    // Substituting this can cause recursive simplifications, which can
    // invalidate our iterator. Use a WeakVH to hold onto it in case this
    // happens.
    WeakVH IterHandle(CurInstIterator);

    replaceAndRecursivelySimplify(CI, RetVal, TLI ? TLI->getDataLayout() : 0,
                                  TLInfo, ModifiedDT ? 0 : DT);

    // If the iterator instruction was recursively deleted, start over at the
    // start of the block.
    if (IterHandle != CurInstIterator) {
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
    return true;
  }

  if (II && TLI) {
    SmallVector<Value*, 2> PtrOps;
    Type *AccessTy;
    if (TLI->GetAddrModeArguments(II, PtrOps, AccessTy))
      while (!PtrOps.empty())
        if (OptimizeMemoryInst(II, PtrOps.pop_back_val(), AccessTy))
          return true;
  }

  // From here on out we're working with named functions.
  if (CI->getCalledFunction() == 0) return false;

  // We'll need DataLayout from here on out.
  const DataLayout *TD = TLI ? TLI->getDataLayout() : 0;
  if (!TD) return false;

  // Lower all default uses of _chk calls. This is very similar
  // to what InstCombineCalls does, but here we are only lowering calls
  // that have the default "don't know" as the objectsize. Anything else
  // should be left alone.
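  // For example (illustrative): '@__memcpy_chk(i8* %d, i8* %s, i64 %n, i64 -1)'
  // carries the "don't know" object size of -1 and is simplified to a plain
  // '@memcpy(%d, %s, %n)'.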
  CodeGenPrepareFortifiedLibCalls Simplifier;
  return Simplifier.fold(CI, TD, TLInfo);
}
/// DupRetToEnableTailCallOpts - Look for opportunities to duplicate return
/// instructions to the predecessor to enable tail call optimizations. The
/// case it is currently looking for is:
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   br label %return
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   br label %return
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   br label %return
/// return:
///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
///   ret i32 %retval
/// @endcode
///
/// =>
///
/// @code
/// bb0:
///   %tmp0 = tail call i32 @f0()
///   ret i32 %tmp0
/// bb1:
///   %tmp1 = tail call i32 @f1()
///   ret i32 %tmp1
/// bb2:
///   %tmp2 = tail call i32 @f2()
///   ret i32 %tmp2
/// @endcode
bool CodeGenPrepare::DupRetToEnableTailCallOpts(BasicBlock *BB) {
  if (!TLI)
    return false;

  ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator());
  if (!RI)
    return false;

  PHINode *PN = 0;
  BitCastInst *BCI = 0;
  Value *V = RI->getReturnValue();
  if (V) {
    BCI = dyn_cast<BitCastInst>(V);
    if (BCI)
      V = BCI->getOperand(0);

    PN = dyn_cast<PHINode>(V);
    if (!PN)
      return false;
  }

  if (PN && PN->getParent() != BB)
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  // See llvm::isInTailCallPosition().
  const Function *F = BB->getParent();
  AttributeSet CallerAttrs = F->getAttributes();
  if (CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt) ||
      CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
    return false;

  // Make sure there are no instructions between the PHI and return, or that
  // the return is the first instruction in the block.
  if (PN) {
    BasicBlock::iterator BI = BB->begin();
    do { ++BI; } while (isa<DbgInfoIntrinsic>(BI));
    if (&*BI == BCI)
      // Also skip over the bitcast.
      ++BI;
    if (&*BI != RI)
      return false;
  } else {
    BasicBlock::iterator BI = BB->begin();
    while (isa<DbgInfoIntrinsic>(BI)) ++BI;
    if (&*BI != RI)
      return false;
  }

  /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
  /// call.
  SmallVector<CallInst*, 4> TailCalls;
  if (PN) {
    for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
      CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I));
      // Make sure the phi value is indeed produced by the tail call.
      if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) &&
          TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  } else {
    SmallPtrSet<BasicBlock*, 4> VisitedBBs;
    for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
      if (!VisitedBBs.insert(*PI))
        continue;

      BasicBlock::InstListType &InstList = (*PI)->getInstList();
      BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin();
      BasicBlock::InstListType::reverse_iterator RE = InstList.rend();
      do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI));
      if (RI == RE)
        continue;

      CallInst *CI = dyn_cast<CallInst>(&*RI);
      if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI))
        TailCalls.push_back(CI);
    }
  }

  bool Changed = false;
  for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) {
    CallInst *CI = TailCalls[i];
    CallSite CS(CI);

    // Conservatively require the attributes of the call to match those of the
    // return. Ignore noalias because it doesn't affect the call sequence.
    AttributeSet CalleeAttrs = CS.getAttributes();
    if (AttrBuilder(CalleeAttrs, AttributeSet::ReturnIndex).
          removeAttribute(Attribute::NoAlias) !=
        AttrBuilder(CallerAttrs, AttributeSet::ReturnIndex).
          removeAttribute(Attribute::NoAlias))
      continue;

    // Make sure the call instruction is followed by an unconditional branch to
    // the return block.
    BasicBlock *CallBB = CI->getParent();
    BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator());
    if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
      continue;

    // Duplicate the return into CallBB.
    (void)FoldReturnIntoUncondBranch(RI, BB, CallBB);
    ModifiedDT = Changed = true;
    ++NumRetsDup;
  }

  // If we eliminated all predecessors of the block, delete the block now.
  if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
    BB->eraseFromParent();

  return Changed;
}
//===----------------------------------------------------------------------===//
// Memory Optimization
//===----------------------------------------------------------------------===//

namespace {
/// ExtAddrMode - This is an extended version of TargetLowering::AddrMode
/// which holds actual Value*'s for register values.
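/// For example (illustrative), an x86 address such as [%base + 4*%idx + 16]
/// would be described by BaseReg = %base, Scale = 4, ScaledReg = %idx, and
/// BaseOffs = 16.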
struct ExtAddrMode : public TargetLowering::AddrMode {
  Value *BaseReg;
  Value *ScaledReg;
  ExtAddrMode() : BaseReg(0), ScaledReg(0) {}
  void print(raw_ostream &OS) const;
  void dump() const;

  bool operator==(const ExtAddrMode& O) const {
    return (BaseReg == O.BaseReg) && (ScaledReg == O.ScaledReg) &&
           (BaseGV == O.BaseGV) && (BaseOffs == O.BaseOffs) &&
           (HasBaseReg == O.HasBaseReg) && (Scale == O.Scale);
  }
};

#ifndef NDEBUG
static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
  AM.print(OS);
  return OS;
}
#endif

void ExtAddrMode::print(raw_ostream &OS) const {
  bool NeedPlus = false;
  OS << "[";
  if (BaseGV) {
    OS << (NeedPlus ? " + " : "")
       << "GV:";
    BaseGV->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }

  if (BaseOffs)
    OS << (NeedPlus ? " + " : "") << BaseOffs, NeedPlus = true;

  if (BaseReg) {
    OS << (NeedPlus ? " + " : "")
       << "Base:";
    BaseReg->printAsOperand(OS, /*PrintType=*/false);
    NeedPlus = true;
  }
  if (Scale) {
    OS << (NeedPlus ? " + " : "")
       << Scale << "*";
    ScaledReg->printAsOperand(OS, /*PrintType=*/false);
  }

  OS << ']';
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ExtAddrMode::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif
/// \brief This class provides transaction based operation on the IR.
/// Every change made through this class is recorded in the internal state and
/// can be undone (rollback) until commit is called.
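///
/// A minimal usage sketch (illustrative only; Inst, NewTy, and Profitable are
/// placeholders, not names from this file):
/// \code
///   TypePromotionTransaction TPT;
///   TypePromotionTransaction::ConstRestorationPt Pt =
///       TPT.getRestorationPoint();
///   TPT.mutateType(Inst, NewTy);   // speculative change
///   if (Profitable)
///     TPT.commit();                // keep the changes
///   else
///     TPT.rollback(Pt);            // undo everything made after Pt
/// \endcode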
class TypePromotionTransaction {

  /// \brief This represents the common interface of the individual transaction.
  /// Each class implements the logic for doing one specific modification on
  /// the IR via the TypePromotionTransaction.
  class TypePromotionAction {
  protected:
    /// The Instruction modified.
    Instruction *Inst;

  public:
    /// \brief Constructor of the action.
    /// The constructor performs the related action on the IR.
    TypePromotionAction(Instruction *Inst) : Inst(Inst) {}

    virtual ~TypePromotionAction() {}

    /// \brief Undo the modification done by this action.
    /// When this method is called, the IR must be in the same state as it was
    /// before this action was applied.
    /// \pre Undoing the action works if and only if the IR is in the exact same
    /// state as it was directly after this action was applied.
    virtual void undo() = 0;

    /// \brief Advocate every change made by this action.
    /// When the results on the IR of the action are to be kept, it is important
    /// to call this function, otherwise hidden information may be kept forever.
    virtual void commit() {
      // Nothing to be done, this action is not doing anything.
    }
  };
  /// \brief Utility to remember the position of an instruction.
  class InsertionHandler {
    /// Position of an instruction.
    /// Either an instruction:
    /// - Is the first in a basic block: BB is used.
    /// - Has a previous instruction: PrevInst is used.
    union {
      Instruction *PrevInst;
      BasicBlock *BB;
    } Point;

    /// Remember whether or not the instruction had a previous instruction.
    bool HasPrevInstruction;

  public:
    /// \brief Record the position of \p Inst.
    InsertionHandler(Instruction *Inst) {
      BasicBlock::iterator It = Inst;
      HasPrevInstruction = (It != (Inst->getParent()->begin()));
      if (HasPrevInstruction)
        Point.PrevInst = --It;
      else
        Point.BB = Inst->getParent();
    }

    /// \brief Insert \p Inst at the recorded position.
    void insert(Instruction *Inst) {
      if (HasPrevInstruction) {
        if (Inst->getParent())
          Inst->removeFromParent();
        Inst->insertAfter(Point.PrevInst);
      } else {
        Instruction *Position = Point.BB->getFirstInsertionPt();
        if (Inst->getParent())
          Inst->moveBefore(Position);
        else
          Inst->insertBefore(Position);
      }
    }
  };
  /// \brief Move an instruction before another.
  class InstructionMoveBefore : public TypePromotionAction {
    /// Original position of the instruction.
    InsertionHandler Position;

  public:
    /// \brief Move \p Inst before \p Before.
    InstructionMoveBefore(Instruction *Inst, Instruction *Before)
        : TypePromotionAction(Inst), Position(Inst) {
      DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n");
      Inst->moveBefore(Before);
    }

    /// \brief Move the instruction back to its original position.
    void undo() override {
      DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
      Position.insert(Inst);
    }
  };
  /// \brief Set the operand of an instruction with a new value.
  class OperandSetter : public TypePromotionAction {
    /// Original operand of the instruction.
    Value *Origin;
    /// Index of the modified operand.
    unsigned Idx;

  public:
    /// \brief Set \p Idx operand of \p Inst with \p NewVal.
    OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
        : TypePromotionAction(Inst), Idx(Idx) {
      DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
                   << "for:" << *Inst << "\n"
                   << "with:" << *NewVal << "\n");
      Origin = Inst->getOperand(Idx);
      Inst->setOperand(Idx, NewVal);
    }

    /// \brief Restore the original value of the instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
                   << "for: " << *Inst << "\n"
                   << "with: " << *Origin << "\n");
      Inst->setOperand(Idx, Origin);
    }
  };
  /// \brief Hide the operands of an instruction.
  /// Do as if this instruction was not using any of its operands.
  class OperandsHider : public TypePromotionAction {
    /// The list of original operands.
    SmallVector<Value *, 4> OriginalValues;

  public:
    /// \brief Remove \p Inst from the uses of the operands of \p Inst.
    OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
      DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
      unsigned NumOpnds = Inst->getNumOperands();
      OriginalValues.reserve(NumOpnds);
      for (unsigned It = 0; It < NumOpnds; ++It) {
        // Save the current operand.
        Value *Val = Inst->getOperand(It);
        OriginalValues.push_back(Val);
        // Set a dummy one.
        // We could use OperandSetter here, but that would imply an overhead
        // that we are not willing to pay.
        Inst->setOperand(It, UndefValue::get(Val->getType()));
      }
    }

    /// \brief Restore the original list of uses.
    void undo() override {
      DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
      for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
        Inst->setOperand(It, OriginalValues[It]);
    }
  };
  /// \brief Build a truncate instruction.
  class TruncBuilder : public TypePromotionAction {
  public:
    /// \brief Build a truncate instruction of \p Opnd producing a \p Ty
    /// result.
    /// trunc Opnd to Ty.
    TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
      IRBuilder<> Builder(Opnd);
      Inst = cast<Instruction>(Builder.CreateTrunc(Opnd, Ty, "promoted"));
      DEBUG(dbgs() << "Do: TruncBuilder: " << *Inst << "\n");
    }

    /// \brief Get the built instruction.
    Instruction *getBuiltInstruction() { return Inst; }

    /// \brief Remove the built instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: TruncBuilder: " << *Inst << "\n");
      Inst->eraseFromParent();
    }
  };
  /// \brief Build a sign extension instruction.
  class SExtBuilder : public TypePromotionAction {
  public:
    /// \brief Build a sign extension instruction of \p Opnd producing a \p Ty
    /// result.
    /// sext Opnd to Ty.
    SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
        : TypePromotionAction(InsertPt) {
      IRBuilder<> Builder(InsertPt);
      Inst = cast<Instruction>(Builder.CreateSExt(Opnd, Ty, "promoted"));
      DEBUG(dbgs() << "Do: SExtBuilder: " << *Inst << "\n");
    }

    /// \brief Get the built instruction.
    Instruction *getBuiltInstruction() { return Inst; }

    /// \brief Remove the built instruction.
    void undo() override {
      DEBUG(dbgs() << "Undo: SExtBuilder: " << *Inst << "\n");
      Inst->eraseFromParent();
    }
  };
  /// \brief Mutate an instruction to another type.
  class TypeMutator : public TypePromotionAction {
    /// Record the original type.
    Type *OrigTy;

  public:
    /// \brief Mutate the type of \p Inst into \p NewTy.
    TypeMutator(Instruction *Inst, Type *NewTy)
        : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
      DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
                   << "\n");
      Inst->mutateType(NewTy);
    }

    /// \brief Mutate the instruction back to its original type.
    void undo() override {
      DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
                   << "\n");
      Inst->mutateType(OrigTy);
    }
  };
  /// \brief Replace the uses of an instruction by another instruction.
  class UsesReplacer : public TypePromotionAction {
    /// Helper structure to keep track of the replaced uses.
    struct InstructionAndIdx {
      /// The instruction using the instruction.
      Instruction *Inst;
      /// The index where this instruction is used for Inst.
      unsigned Idx;
      InstructionAndIdx(Instruction *Inst, unsigned Idx)
          : Inst(Inst), Idx(Idx) {}
    };

    /// Keep track of the original uses (pair Instruction, Index).
    SmallVector<InstructionAndIdx, 4> OriginalUses;
    typedef SmallVectorImpl<InstructionAndIdx>::iterator use_iterator;

  public:
    /// \brief Replace all the uses of \p Inst by \p New.
    UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) {
      DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
                   << "\n");
      // Record the original uses.
      for (Value::use_iterator UseIt = Inst->use_begin(),
                               EndIt = Inst->use_end();
           UseIt != EndIt; ++UseIt) {
        Instruction *Use = cast<Instruction>(*UseIt);
        OriginalUses.push_back(InstructionAndIdx(Use, UseIt.getOperandNo()));
      }
      // Now, we can replace the uses.
      Inst->replaceAllUsesWith(New);
    }

    /// \brief Reassign the original uses of Inst to Inst.
    void undo() override {
      DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
      for (use_iterator UseIt = OriginalUses.begin(),
                        EndIt = OriginalUses.end();
           UseIt != EndIt; ++UseIt) {
        UseIt->Inst->setOperand(UseIt->Idx, Inst);
      }
    }
  };
  /// \brief Remove an instruction from the IR.
  class InstructionRemover : public TypePromotionAction {
    /// Original position of the instruction.
    InsertionHandler Inserter;
    /// Helper structure to hide all the links to the instruction. In other
    /// words, this helps to do as if the instruction was removed.
    OperandsHider Hider;
    /// Keep track of the uses replaced, if any.
    UsesReplacer *Replacer;

  public:
    /// \brief Remove all references to \p Inst and optionally replace all its
    /// uses with \p New.
    /// \pre If !Inst->use_empty(), then New != NULL
    InstructionRemover(Instruction *Inst, Value *New = NULL)
        : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
          Replacer(NULL) {
      if (New)
        Replacer = new UsesReplacer(Inst, New);
      DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
      Inst->removeFromParent();
    }

    ~InstructionRemover() { delete Replacer; }

    /// \brief Really remove the instruction.
    void commit() override { delete Inst; }

    /// \brief Resurrect the instruction and reassign it to the proper uses if
    /// a new value was provided when building this action.
    void undo() override {
      DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
      Inserter.insert(Inst);
      if (Replacer)
        Replacer->undo();
      Hider.undo();
    }
  };

public:
  /// Restoration point.
  /// The restoration point is a pointer to an action instead of an iterator
  /// because the iterator may be invalidated but not the pointer.
  typedef const TypePromotionAction *ConstRestorationPt;
  /// Advocate every change made in that transaction.
  void commit();
  /// Undo all the changes made after the given point.
  void rollback(ConstRestorationPt Point);
  /// Get the current restoration point.
  ConstRestorationPt getRestorationPoint() const;

  /// \name API for IR modification with state keeping to support rollback.
  /// @{
  /// Same as Instruction::setOperand.
  void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
  /// Same as Instruction::eraseFromParent.
  void eraseInstruction(Instruction *Inst, Value *NewVal = NULL);
  /// Same as Value::replaceAllUsesWith.
  void replaceAllUsesWith(Instruction *Inst, Value *New);
  /// Same as Value::mutateType.
  void mutateType(Instruction *Inst, Type *NewTy);
  /// Same as IRBuilder::createTrunc.
  Instruction *createTrunc(Instruction *Opnd, Type *Ty);
  /// Same as IRBuilder::createSExt.
  Instruction *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
  /// Same as Instruction::moveBefore.
  void moveBefore(Instruction *Inst, Instruction *Before);
  /// @}

  ~TypePromotionTransaction();

private:
  /// The ordered list of actions made so far.
  SmallVector<TypePromotionAction *, 16> Actions;
  typedef SmallVectorImpl<TypePromotionAction *>::iterator CommitPt;
};

void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
                                          Value *NewVal) {
  Actions.push_back(
      new TypePromotionTransaction::OperandSetter(Inst, Idx, NewVal));
}

void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
                                                Value *NewVal) {
  Actions.push_back(
      new TypePromotionTransaction::InstructionRemover(Inst, NewVal));
}

void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
                                                  Value *New) {
  Actions.push_back(new TypePromotionTransaction::UsesReplacer(Inst, New));
}

void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
  Actions.push_back(new TypePromotionTransaction::TypeMutator(Inst, NewTy));
}

Instruction *TypePromotionTransaction::createTrunc(Instruction *Opnd,
                                                   Type *Ty) {
  TruncBuilder *TB = new TruncBuilder(Opnd, Ty);
  Actions.push_back(TB);
  return TB->getBuiltInstruction();
}

Instruction *TypePromotionTransaction::createSExt(Instruction *Inst,
                                                  Value *Opnd, Type *Ty) {
  SExtBuilder *SB = new SExtBuilder(Inst, Opnd, Ty);
  Actions.push_back(SB);
  return SB->getBuiltInstruction();
}

void TypePromotionTransaction::moveBefore(Instruction *Inst,
                                          Instruction *Before) {
  Actions.push_back(
      new TypePromotionTransaction::InstructionMoveBefore(Inst, Before));
}

TypePromotionTransaction::ConstRestorationPt
TypePromotionTransaction::getRestorationPoint() const {
  return Actions.rbegin() != Actions.rend() ? *Actions.rbegin() : NULL;
}

void TypePromotionTransaction::commit() {
  for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt;
       ++It)
    (*It)->commit();
  Actions.clear();
}

void TypePromotionTransaction::rollback(
    TypePromotionTransaction::ConstRestorationPt Point) {
  while (!Actions.empty() && Point != (*Actions.rbegin())) {
    TypePromotionAction *Curr = Actions.pop_back_val();
    Curr->undo();
    delete Curr;
  }
}

TypePromotionTransaction::~TypePromotionTransaction() {
  for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt; ++It)
    delete *It;
}
/// \brief A helper class for matching addressing modes.
///
/// This encapsulates the logic for matching the target-legal addressing modes.
class AddressingModeMatcher {
  SmallVectorImpl<Instruction*> &AddrModeInsts;
  const TargetLowering &TLI;

  /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
  /// the memory instruction that we're computing this address for.
  Type *AccessTy;
  Instruction *MemoryInst;

  /// AddrMode - This is the addressing mode that we're building up. This is
  /// part of the return value of this addressing mode matching stuff.
  ExtAddrMode &AddrMode;

  /// The truncate instructions inserted by other CodeGenPrepare optimizations.
  const SetOfInstrs &InsertedTruncs;
  /// A map from the instructions to their type before promotion.
  InstrToOrigTy &PromotedInsts;
  /// The ongoing transaction where every action should be registered.
  TypePromotionTransaction &TPT;

  /// IgnoreProfitability - This is set to true when we should not do
  /// profitability checks. When true, IsProfitableToFoldIntoAddressingMode
  /// always returns true.
  bool IgnoreProfitability;

  AddressingModeMatcher(SmallVectorImpl<Instruction*> &AMI,
                        const TargetLowering &T, Type *AT,
                        Instruction *MI, ExtAddrMode &AM,
                        const SetOfInstrs &InsertedTruncs,
                        InstrToOrigTy &PromotedInsts,
                        TypePromotionTransaction &TPT)
      : AddrModeInsts(AMI), TLI(T), AccessTy(AT), MemoryInst(MI), AddrMode(AM),
        InsertedTruncs(InsertedTruncs), PromotedInsts(PromotedInsts), TPT(TPT) {
    IgnoreProfitability = false;
  }

public:
  /// Match - Find the maximal addressing mode that a load/store of V can fold,
  /// given an access type of AccessTy. This returns a list of involved
  /// instructions in AddrModeInsts.
  /// \p InsertedTruncs The truncate instructions inserted by other
  /// CodeGenPrepare optimizations.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p TPT The ongoing transaction where every action should be registered.
  static ExtAddrMode Match(Value *V, Type *AccessTy,
                           Instruction *MemoryInst,
                           SmallVectorImpl<Instruction*> &AddrModeInsts,
                           const TargetLowering &TLI,
                           const SetOfInstrs &InsertedTruncs,
                           InstrToOrigTy &PromotedInsts,
                           TypePromotionTransaction &TPT) {
    ExtAddrMode Result;

    bool Success = AddressingModeMatcher(AddrModeInsts, TLI, AccessTy,
                                         MemoryInst, Result, InsertedTruncs,
                                         PromotedInsts, TPT).MatchAddr(V, 0);
    (void)Success; assert(Success && "Couldn't select *anything*?");
    return Result;
  }

private:
  bool MatchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
  bool MatchAddr(Value *V, unsigned Depth);
  bool MatchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth,
                          bool *MovedAway = NULL);
  bool IsProfitableToFoldIntoAddressingMode(Instruction *I,
                                            ExtAddrMode &AMBefore,
                                            ExtAddrMode &AMAfter);
  bool ValueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
  bool IsPromotionProfitable(unsigned MatchedSize, unsigned SizeWithPromotion,
                             Value *PromotedOperand) const;
};
/// MatchScaledValue - Try adding ScaleReg*Scale to the current addressing mode.
/// Return true and update AddrMode if this addr mode is legal for the target,
/// false if not.
bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale,
                                             unsigned Depth) {
  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
  // mode. Just process that directly.
  if (Scale == 1)
    return MatchAddr(ScaleReg, Depth);

  // If the scale is 0, it takes nothing to add this.
  if (Scale == 0)
    return true;

  // If we already have a scale of this value, we can add to it, otherwise, we
  // need an available scale field.
  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
    return false;

  ExtAddrMode TestAddrMode = AddrMode;

  // Add scale to turn X*4+X*3 -> X*7. This could also do things like
  // [A+B + A*7] -> [B+A*8].
  TestAddrMode.Scale += Scale;
  TestAddrMode.ScaledReg = ScaleReg;

  // If the new address isn't legal, bail out.
  if (!TLI.isLegalAddressingMode(TestAddrMode, AccessTy))
    return false;

  // It was legal, so commit it.
  AddrMode = TestAddrMode;

  // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now
  // to see if ScaleReg is actually X+C. If so, we can turn this into adding
  // X*Scale + C*Scale to addr mode.
  ConstantInt *CI = 0; Value *AddLHS = 0;
  if (isa<Instruction>(ScaleReg) && // not a constant expr.
      match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
    TestAddrMode.ScaledReg = AddLHS;
    TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;

    // If this addressing mode is legal, commit it and remember that we folded
    // this instruction.
    if (TLI.isLegalAddressingMode(TestAddrMode, AccessTy)) {
      AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
      AddrMode = TestAddrMode;
      return true;
    }
  }

  // Otherwise, not (x+c)*scale, just return what we have.
  return true;
}
/// MightBeFoldableInst - This is a little filter, which returns true if an
/// addressing computation involving I might be folded into a load/store
/// accessing it. This doesn't need to be perfect, but needs to accept at least
/// the set of instructions that MatchOperationAddr can.
static bool MightBeFoldableInst(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::BitCast:
    // Don't touch identity bitcasts.
    if (I->getType() == I->getOperand(0)->getType())
      return false;
    return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return true;
  case Instruction::IntToPtr:
    // We know the input is intptr_t, so this is foldable.
    return true;
  case Instruction::Add:
    return true;
  case Instruction::Mul:
  case Instruction::Shl:
    // Can only handle X*C and X << C.
    return isa<ConstantInt>(I->getOperand(1));
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}
/// \brief Helper class to perform type promotion.
class TypePromotionHelper {
  /// \brief Utility function to check whether or not a sign extension of
  /// \p Inst with \p ConsideredSExtType can be moved through \p Inst by either
  /// using the operands of \p Inst or promoting \p Inst.
  /// In other words, check if:
  /// sext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredSExtType.
  /// #1 Promotion applies:
  /// ConsideredSExtType Inst (sext opnd1 to ConsideredSExtType, ...).
  /// #2 Operand reuses:
  /// sext opnd1 to ConsideredSExtType.
  /// \p PromotedInsts maps the instructions to their type before promotion.
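  /// For example (illustrative IR), with ConsideredSExtType = i64:
  ///   %add = add nsw i32 %a, %b
  ///   %s   = sext i32 %add to i64
  /// promotion (#1) rewrites this as:
  ///   %pa = sext i32 %a to i64
  ///   %pb = sext i32 %b to i64
  ///   %s  = add nsw i64 %pa, %pb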
  static bool canGetThrough(const Instruction *Inst, Type *ConsideredSExtType,
                            const InstrToOrigTy &PromotedInsts);

  /// \brief Utility function to determine if \p OpIdx should be promoted when
  /// promoting \p Inst.
  static bool shouldSExtOperand(const Instruction *Inst, int OpIdx) {
    if (isa<SelectInst>(Inst) && OpIdx == 0)
      return false;
    return true;
  }

  /// \brief Utility function to promote the operand of \p SExt when this
  /// operand is a promotable trunc or sext.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p CreatedInsts[out] contains how many non-free instructions have been
  /// created to promote the operand of SExt.
  /// Should never be called directly.
  /// \return The promoted value which is used instead of SExt.
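  /// For example (illustrative IR):
  ///   %x = sext i16 %v to i64
  ///   %t = trunc i64 %x to i32
  ///   %s = sext i32 %t to i64
  /// here %s is rewritten to use %x directly, because the truncate only
  /// drops bits that are known to be sign-extension bits.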
  static Value *promoteOperandForTruncAndSExt(Instruction *SExt,
                                              TypePromotionTransaction &TPT,
                                              InstrToOrigTy &PromotedInsts,
                                              unsigned &CreatedInsts);

  /// \brief Utility function to promote the operand of \p SExt when this
  /// operand is promotable and is not a supported trunc or sext.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  /// \p CreatedInsts[out] contains how many non-free instructions have been
  /// created to promote the operand of SExt.
  /// Should never be called directly.
  /// \return The promoted value which is used instead of SExt.
  static Value *promoteOperandForOther(Instruction *SExt,
                                       TypePromotionTransaction &TPT,
                                       InstrToOrigTy &PromotedInsts,
                                       unsigned &CreatedInsts);

public:
  /// Type for the utility function that promotes the operand of SExt.
  typedef Value *(*Action)(Instruction *SExt, TypePromotionTransaction &TPT,
                           InstrToOrigTy &PromotedInsts,
                           unsigned &CreatedInsts);
  /// \brief Given a sign extend instruction \p SExt, return the appropriate
  /// action to promote the operand of \p SExt instead of using SExt.
  /// \return NULL if no promotable action is possible with the current
  /// sign extension.
  /// \p InsertedTruncs keeps track of all the truncate instructions inserted by
  /// the other CodeGenPrepare optimizations. This information is important
  /// because we do not want to promote these instructions as CodeGenPrepare
  /// will reinsert them later. Thus creating an infinite loop: create/remove.
  /// \p PromotedInsts maps the instructions to their type before promotion.
  static Action getAction(Instruction *SExt, const SetOfInstrs &InsertedTruncs,
                          const TargetLowering &TLI,
                          const InstrToOrigTy &PromotedInsts);
};
bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
                                        Type *ConsideredSExtType,
                                        const InstrToOrigTy &PromotedInsts) {
  // We can always get through sext.
  if (isa<SExtInst>(Inst))
    return true;

  // We can get through a binary operator, if it is legal. In other words, the
  // binary operator must have a nuw or nsw flag.
  const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
  if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
      (BinOp->hasNoUnsignedWrap() || BinOp->hasNoSignedWrap()))
    return true;

  // Check if we can do the following simplification.
  // sext(trunc(sext)) --> sext
  if (!isa<TruncInst>(Inst))
    return false;

  Value *OpndVal = Inst->getOperand(0);
  // Check if we can use this operand in the sext.
  // If the type is larger than the result type of the sign extension,
  // we cannot.
  if (OpndVal->getType()->getIntegerBitWidth() >
      ConsideredSExtType->getIntegerBitWidth())
    return false;

  // If the operand of the truncate is not an instruction, we will not have
  // any information on the dropped bits.
  // (Actually we could for constants but it is not worth the extra logic).
  Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
  if (!Opnd)
    return false;

  // Check if the source of the type is narrow enough.
  // I.e., check that trunc just drops sign extended bits.
  // #1 get the type of the operand.
  const Type *OpndType;
  InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
  if (It != PromotedInsts.end())
    OpndType = It->second;
  else if (isa<SExtInst>(Opnd))
    OpndType = cast<Instruction>(Opnd)->getOperand(0)->getType();
  else
    return false;

  // #2 check that the truncate just drops sign extended bits.
  if (Inst->getType()->getIntegerBitWidth() >= OpndType->getIntegerBitWidth())
    return true;

  return false;
}
TypePromotionHelper::Action TypePromotionHelper::getAction(
    Instruction *SExt, const SetOfInstrs &InsertedTruncs,
    const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
  Instruction *SExtOpnd = dyn_cast<Instruction>(SExt->getOperand(0));
  Type *SExtTy = SExt->getType();
  // If the operand of the sign extension is not an instruction, we cannot
  // get through.
  // If it is, check that we can get through.
  if (!SExtOpnd || !canGetThrough(SExtOpnd, SExtTy, PromotedInsts))
    return NULL;

  // Do not promote if the operand has been added by codegenprepare.
  // Otherwise, it means we are undoing an optimization that is likely to be
  // redone, thus causing a potential infinite loop.
  if (isa<TruncInst>(SExtOpnd) && InsertedTruncs.count(SExtOpnd))
    return NULL;

  // SExt or Trunc instructions.
  // Return the related handler.
  if (isa<SExtInst>(SExtOpnd) || isa<TruncInst>(SExtOpnd))
    return promoteOperandForTruncAndSExt;

  // Regular instruction.
  // Abort early if we will have to insert non-free instructions.
  if (!SExtOpnd->hasOneUse() &&
      !TLI.isTruncateFree(SExtTy, SExtOpnd->getType()))
    return NULL;
  return promoteOperandForOther;
}
Value *TypePromotionHelper::promoteOperandForTruncAndSExt(
    llvm::Instruction *SExt, TypePromotionTransaction &TPT,
    InstrToOrigTy &PromotedInsts, unsigned &CreatedInsts) {
  // By construction, the operand of SExt is an instruction. Otherwise we cannot
  // get through it and this method should not be called.
  Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
  // Replace sext(trunc(opnd)) or sext(sext(opnd))
  // => sext(opnd).
  TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
  CreatedInsts = 0;

  // Remove dead code.
  if (SExtOpnd->use_empty())
    TPT.eraseInstruction(SExtOpnd);

  // Check if the sext is still needed.
  if (SExt->getType() != SExt->getOperand(0)->getType())
    return SExt;

  // At this point we have: sext ty opnd to ty.
  // Reassign the uses of SExt to the opnd and remove SExt.
  Value *NextVal = SExt->getOperand(0);
  TPT.eraseInstruction(SExt, NextVal);
  return NextVal;
}

Value *
1651 TypePromotionHelper::promoteOperandForOther(Instruction *SExt,
1652 TypePromotionTransaction &TPT,
1653 InstrToOrigTy &PromotedInsts,
1654 unsigned &CreatedInsts) {
1655 // By construction, the operand of SExt is an instruction. Otherwise we cannot
1656 // get through it and this method should not be called.
1657 Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
1659 if (!SExtOpnd->hasOneUse()) {
1660 // SExtOpnd will be promoted.
1661 // All its uses, but SExt, will need to use a truncated value of the
1662 // promoted version.
1663 // Create the truncate now.
1664 Instruction *Trunc = TPT.createTrunc(SExt, SExtOpnd->getType());
1665 Trunc->removeFromParent();
1666 // Insert it just after the definition.
1667 Trunc->insertAfter(SExtOpnd);
1669 TPT.replaceAllUsesWith(SExtOpnd, Trunc);
1670 // Restore the operand of SExt (which has been replace by the previous call
1671 // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
1672 TPT.setOperand(SExt, 0, SExtOpnd);
1675 // Get through the Instruction:
1676 // 1. Update its type.
1677 // 2. Replace the uses of SExt by Inst.
1678 // 3. Sign extend each operand that needs to be sign extended.
1680 // Remember the original type of the instruction before promotion.
1681 // This is useful to know that the high bits are sign extended bits.
1682 PromotedInsts.insert(
1683 std::pair<Instruction *, Type *>(SExtOpnd, SExtOpnd->getType()));
1685 TPT.mutateType(SExtOpnd, SExt->getType());
1687 TPT.replaceAllUsesWith(SExt, SExtOpnd);
1689 Instruction *SExtForOpnd = SExt;
1691 DEBUG(dbgs() << "Propagate SExt to operands\n");
1692 for (int OpIdx = 0, EndOpIdx = SExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
1694 DEBUG(dbgs() << "Operand:\n" << *(SExtOpnd->getOperand(OpIdx)) << '\n');
1695 if (SExtOpnd->getOperand(OpIdx)->getType() == SExt->getType() ||
1696 !shouldSExtOperand(SExtOpnd, OpIdx)) {
1697 DEBUG(dbgs() << "No need to propagate\n");
1700 // Check if we can statically sign extend the operand.
1701 Value *Opnd = SExtOpnd->getOperand(OpIdx);
1702 if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
1703 DEBUG(dbgs() << "Statically sign extend\n");
1704 TPT.setOperand(
1705 SExtOpnd, OpIdx,
1706 ConstantInt::getSigned(SExt->getType(), Cst->getSExtValue()));
1707 continue;
1708 }
1709 // UndefValues are typed, so we have to statically sign extend them.
1710 if (isa<UndefValue>(Opnd)) {
1711 DEBUG(dbgs() << "Statically sign extend\n");
1712 TPT.setOperand(SExtOpnd, OpIdx, UndefValue::get(SExt->getType()));
1713 continue;
1714 }
1716 // Otherwise we have to explicitly sign extend the operand.
1717 // Check if SExt was reused to sign extend an operand.
1718 if (!SExtForOpnd) {
1719 // If yes, create a new one.
1720 DEBUG(dbgs() << "More operands to sext\n");
1721 SExtForOpnd = TPT.createSExt(SExt, Opnd, SExt->getType());
1722 CreatedInsts++;
1723 }
1725 TPT.setOperand(SExtForOpnd, 0, Opnd);
1727 // Move the sign extension before the insertion point.
1728 TPT.moveBefore(SExtForOpnd, SExtOpnd);
1729 TPT.setOperand(SExtOpnd, OpIdx, SExtForOpnd);
1730 // If more sexts are required, new instructions will have to be created.
1731 SExtForOpnd = NULL;
1732 }
1733 if (SExtForOpnd == SExt) {
1734 DEBUG(dbgs() << "Sign extension is useless now\n");
1735 TPT.eraseInstruction(SExt);
1736 }
1737 return SExtOpnd;
1738 }
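// Editor's sketch (illustrative IR, assumed i8/i32 types): promoting the
// operand of a sext over a one-use add rewrites
//   %add = add nsw i8 %a, 1
//   %res = sext i8 %add to i32
// into roughly
//   %pa  = sext i8 %a to i32
//   %add = add nsw i32 %pa, 1   ; former uses of %res now use %add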
1740 /// IsPromotionProfitable - Check whether or not promoting an instruction
1741 /// to a wider type was profitable.
1742 /// \p MatchedSize gives the number of instructions that have been matched
1743 /// in the addressing mode after the promotion was applied.
1744 /// \p SizeWithPromotion gives the number of created instructions for
1745 /// the promotion plus the number of instructions that have been
1746 /// matched in the addressing mode before the promotion.
1747 /// \p PromotedOperand is the value that has been promoted.
1748 /// \return True if the promotion is profitable, false otherwise.
1749 bool
1750 AddressingModeMatcher::IsPromotionProfitable(unsigned MatchedSize,
1751 unsigned SizeWithPromotion,
1752 Value *PromotedOperand) const {
1753 // We folded fewer instructions than we created to promote the operand.
1754 // This is not profitable.
1755 if (MatchedSize < SizeWithPromotion)
1756 return false;
1757 if (MatchedSize > SizeWithPromotion)
1758 return true;
1759 // The promotion is neutral but it may help to fold the sign extension into
1760 // loads, for instance.
1761 // Check that we did not create an illegal instruction.
1762 Instruction *PromotedInst = dyn_cast<Instruction>(PromotedOperand);
1763 if (!PromotedInst)
1764 return false;
1765 int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
1766 // If the ISDOpcode is undefined, it was undefined before the promotion.
1767 if (!ISDOpcode)
1768 return true;
1769 // Otherwise, check if the promoted instruction is legal or not.
1770 return TLI.isOperationLegalOrCustom(ISDOpcode,
1771 EVT::getEVT(PromotedInst->getType()));
1772 }
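// Editor's note (hedged example): if the promotion created one sext but let
// the matcher fold two more instructions into the addressing mode, then
// MatchedSize exceeds SizeWithPromotion and the promotion pays off; at
// equality, it is kept only if the promoted instruction is legal or custom.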
1774 /// MatchOperationAddr - Given an instruction or constant expr, see if we can
1775 /// fold the operation into the addressing mode. If so, update the addressing
1776 /// mode and return true, otherwise return false without modifying AddrMode.
1777 /// If \p MovedAway is not NULL, it is set on success to indicate whether
1778 /// AddrInst was moved away instead of being folded into the addressing mode.
1779 /// If \p MovedAway == true, \p AddrInst will not be part of the addressing mode
1780 /// because it has been moved away.
1781 /// Thus AddrInst must not be added in the matched instructions.
1782 /// This state can happen when AddrInst is a sext, since it may be moved away.
1783 /// Therefore, AddrInst may not be valid when MovedAway is true and it must
1784 /// not be referenced anymore.
1785 bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
1786 unsigned Depth,
1787 bool *MovedAway) {
1788 // Avoid exponential behavior on extremely deep expression trees.
1789 if (Depth >= 5) return false;
1791 // By default, all matched instructions stay in place.
1792 if (MovedAway)
1793 *MovedAway = false;
1795 switch (Opcode) {
1796 case Instruction::PtrToInt:
1797 // PtrToInt is always a noop, as we know that the int type is pointer sized.
1798 return MatchAddr(AddrInst->getOperand(0), Depth);
1799 case Instruction::IntToPtr:
1800 // This inttoptr is a no-op if the integer type is pointer sized.
1801 if (TLI.getValueType(AddrInst->getOperand(0)->getType()) ==
1802 TLI.getPointerTy(AddrInst->getType()->getPointerAddressSpace()))
1803 return MatchAddr(AddrInst->getOperand(0), Depth);
1804 return false;
1805 case Instruction::BitCast:
1806 // BitCast is always a noop, and we can handle it as long as it is
1807 // int->int or pointer->pointer (we don't want int<->fp or something).
1808 if ((AddrInst->getOperand(0)->getType()->isPointerTy() ||
1809 AddrInst->getOperand(0)->getType()->isIntegerTy()) &&
1810 // Don't touch identity bitcasts. These were probably put here by LSR,
1811 // and we don't want to mess around with them. Assume it knows what it
1812 // is doing.
1813 AddrInst->getOperand(0)->getType() != AddrInst->getType())
1814 return MatchAddr(AddrInst->getOperand(0), Depth);
1815 return false;
1816 case Instruction::Add: {
1817 // Check to see if we can merge in the RHS then the LHS. If so, we win.
1818 ExtAddrMode BackupAddrMode = AddrMode;
1819 unsigned OldSize = AddrModeInsts.size();
1820 // Start a transaction at this point.
1821 // The LHS may match but not the RHS.
1822 // Therefore, we need a higher level restoration point to undo the partially
1823 // matched operation.
1824 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
1825 TPT.getRestorationPoint();
1827 if (MatchAddr(AddrInst->getOperand(1), Depth+1) &&
1828 MatchAddr(AddrInst->getOperand(0), Depth+1))
1829 return true;
1831 // Restore the old addr mode info.
1832 AddrMode = BackupAddrMode;
1833 AddrModeInsts.resize(OldSize);
1834 TPT.rollback(LastKnownGood);
1836 // Otherwise this was over-aggressive. Try merging in the LHS then the RHS.
1837 if (MatchAddr(AddrInst->getOperand(0), Depth+1) &&
1838 MatchAddr(AddrInst->getOperand(1), Depth+1))
1839 return true;
1841 // Otherwise we definitely can't merge the ADD in.
1842 AddrMode = BackupAddrMode;
1843 AddrModeInsts.resize(OldSize);
1844 TPT.rollback(LastKnownGood);
1845 break;
1846 }
1847 //case Instruction::Or:
1848 // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
1850 case Instruction::Mul:
1851 case Instruction::Shl: {
1852 // Can only handle X*C and X << C.
1853 ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
1854 if (!RHS) return false;
1855 int64_t Scale = RHS->getSExtValue();
1856 if (Opcode == Instruction::Shl)
1857 Scale = 1LL << Scale;
1859 return MatchScaledValue(AddrInst->getOperand(0), Scale, Depth);
1860 }
1861 case Instruction::GetElementPtr: {
1862 // Scan the GEP. We check that it contains constant offsets and at most
1863 // one variable offset.
1864 int VariableOperand = -1;
1865 unsigned VariableScale = 0;
1867 int64_t ConstantOffset = 0;
1868 const DataLayout *TD = TLI.getDataLayout();
1869 gep_type_iterator GTI = gep_type_begin(AddrInst);
1870 for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
1871 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
1872 const StructLayout *SL = TD->getStructLayout(STy);
1873 unsigned Idx =
1874 cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
1875 ConstantOffset += SL->getElementOffset(Idx);
1876 } else {
1877 uint64_t TypeSize = TD->getTypeAllocSize(GTI.getIndexedType());
1878 if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
1879 ConstantOffset += CI->getSExtValue()*TypeSize;
1880 } else if (TypeSize) { // Scales of zero don't do anything.
1881 // We only allow one variable index at the moment.
1882 if (VariableOperand != -1)
1883 return false;
1885 // Remember the variable index.
1886 VariableOperand = i;
1887 VariableScale = TypeSize;
1888 }
1889 }
1890 }
1892 // A common case is for the GEP to only do a constant offset. In this case,
1893 // just add it to the disp field and check validity.
1894 if (VariableOperand == -1) {
1895 AddrMode.BaseOffs += ConstantOffset;
1896 if (ConstantOffset == 0 || TLI.isLegalAddressingMode(AddrMode, AccessTy)) {
1897 // Check to see if we can fold the base pointer in too.
1898 if (MatchAddr(AddrInst->getOperand(0), Depth+1))
1899 return true;
1900 }
1901 AddrMode.BaseOffs -= ConstantOffset;
1902 return false;
1903 }
1905 // Save the valid addressing mode in case we can't match.
1906 ExtAddrMode BackupAddrMode = AddrMode;
1907 unsigned OldSize = AddrModeInsts.size();
1909 // See if the scale and offset amounts are valid for this target.
1910 AddrMode.BaseOffs += ConstantOffset;
1912 // Match the base operand of the GEP.
1913 if (!MatchAddr(AddrInst->getOperand(0), Depth+1)) {
1914 // If it couldn't be matched, just stuff the value in a register.
1915 if (AddrMode.HasBaseReg) {
1916 AddrMode = BackupAddrMode;
1917 AddrModeInsts.resize(OldSize);
1918 return false;
1919 }
1920 AddrMode.HasBaseReg = true;
1921 AddrMode.BaseReg = AddrInst->getOperand(0);
1922 }
1924 // Match the remaining variable portion of the GEP.
1925 if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
1926 Depth)) {
1927 // If it couldn't be matched, try stuffing the base into a register
1928 // instead of matching it, and retrying the match of the scale.
1929 AddrMode = BackupAddrMode;
1930 AddrModeInsts.resize(OldSize);
1931 if (AddrMode.HasBaseReg)
1932 return false;
1933 AddrMode.HasBaseReg = true;
1934 AddrMode.BaseReg = AddrInst->getOperand(0);
1935 AddrMode.BaseOffs += ConstantOffset;
1936 if (!MatchScaledValue(AddrInst->getOperand(VariableOperand),
1937 VariableScale, Depth)) {
1938 // If even that didn't work, bail.
1939 AddrMode = BackupAddrMode;
1940 AddrModeInsts.resize(OldSize);
1941 return false;
1942 }
1943 }
1944 return true;
1945 }
1947 case Instruction::SExt: {
1948 // Try to move this sext out of the way of the addressing mode.
1949 Instruction *SExt = cast<Instruction>(AddrInst);
1950 // Ask for a method for doing so.
1951 TypePromotionHelper::Action TPH = TypePromotionHelper::getAction(
1952 SExt, InsertedTruncs, TLI, PromotedInsts);
1953 if (!TPH)
1954 return false;
1956 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
1957 TPT.getRestorationPoint();
1958 unsigned CreatedInsts = 0;
1959 Value *PromotedOperand = TPH(SExt, TPT, PromotedInsts, CreatedInsts);
1960 // SExt has been moved away.
1961 // Thus either it will be rematched later in the recursive calls or it is
1962 // gone. Anyway, we must not fold it into the addressing mode at this point.
1963 // E.g.,
1964 // op = add opnd, 1
1965 // idx = sext op
1966 // addr = gep base, idx
1967 // is now:
1968 // promotedOpnd = sext opnd <- no match here
1969 // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls)
1970 // addr = gep base, op <- match
1971 if (MovedAway)
1972 *MovedAway = true;
1974 assert(PromotedOperand &&
1975 "TypePromotionHelper should have filtered out those cases");
1977 ExtAddrMode BackupAddrMode = AddrMode;
1978 unsigned OldSize = AddrModeInsts.size();
1980 if (!MatchAddr(PromotedOperand, Depth) ||
1981 !IsPromotionProfitable(AddrModeInsts.size(), OldSize + CreatedInsts,
1982 PromotedOperand)) {
1983 AddrMode = BackupAddrMode;
1984 AddrModeInsts.resize(OldSize);
1985 DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
1986 TPT.rollback(LastKnownGood);
1987 return false;
1988 }
1989 return true;
1990 }
1991 }
1992 return false;
1993 }
1995 /// MatchAddr - If we can, try to add the value of 'Addr' into the current
1996 /// addressing mode. If Addr can't be added to AddrMode this returns false and
1997 /// leaves AddrMode unmodified. This assumes that Addr is either a pointer type
1998 /// or intptr_t for the target.
2000 bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
2001 // Start a transaction at this point that we will rollback if the matching
2002 // fails.
2003 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
2004 TPT.getRestorationPoint();
2005 if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
2006 // Fold in immediates if legal for the target.
2007 AddrMode.BaseOffs += CI->getSExtValue();
2008 if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
2009 return true;
2010 AddrMode.BaseOffs -= CI->getSExtValue();
2011 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
2012 // If this is a global variable, try to fold it into the addressing mode.
2013 if (AddrMode.BaseGV == 0) {
2014 AddrMode.BaseGV = GV;
2015 if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
2016 return true;
2017 AddrMode.BaseGV = 0;
2018 }
2019 } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
2020 ExtAddrMode BackupAddrMode = AddrMode;
2021 unsigned OldSize = AddrModeInsts.size();
2023 // Check to see if it is possible to fold this operation.
2024 bool MovedAway = false;
2025 if (MatchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
2026 // This instruction may have been moved away. If so, there is nothing
2027 // to check.
2028 if (MovedAway)
2029 return true;
2030 // Okay, it's possible to fold this. Check to see if it is actually
2031 // *profitable* to do so. We use a simple cost model to avoid increasing
2032 // register pressure too much.
2033 if (I->hasOneUse() ||
2034 IsProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
2035 AddrModeInsts.push_back(I);
2036 return true;
2037 }
2039 // It isn't profitable to do this, roll back.
2040 //cerr << "NOT FOLDING: " << *I;
2041 AddrMode = BackupAddrMode;
2042 AddrModeInsts.resize(OldSize);
2043 TPT.rollback(LastKnownGood);
2044 }
2045 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
2046 if (MatchOperationAddr(CE, CE->getOpcode(), Depth))
2047 return true;
2048 TPT.rollback(LastKnownGood);
2049 } else if (isa<ConstantPointerNull>(Addr)) {
2050 // Null pointer gets folded without affecting the addressing mode.
2051 return true;
2052 }
2054 // Worst case, the target should support [reg] addressing modes. :)
2055 if (!AddrMode.HasBaseReg) {
2056 AddrMode.HasBaseReg = true;
2057 AddrMode.BaseReg = Addr;
2058 // Still check for legality in case the target supports [imm] but not [i+r].
2059 if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
2060 return true;
2061 AddrMode.HasBaseReg = false;
2062 AddrMode.BaseReg = 0;
2063 }
2065 // If the base register is already taken, see if we can do [r+r].
2066 if (AddrMode.Scale == 0) {
2067 AddrMode.Scale = 1;
2068 AddrMode.ScaledReg = Addr;
2069 if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
2070 return true;
2071 AddrMode.Scale = 0;
2072 AddrMode.ScaledReg = 0;
2073 }
2074 // Couldn't match.
2075 TPT.rollback(LastKnownGood);
2076 return false;
2077 }
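// Editor's sketch (illustrative IR and target, not from the original source):
// on a target supporting [reg + imm] addressing,
//   %p = getelementptr i32* %base, i64 3
//   %v = load i32* %p
// matches as BaseReg = %base, BaseOffs = 12, consuming no extra registers.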
2079 /// IsOperandAMemoryOperand - Check to see if all uses of OpVal by the specified
2080 /// inline asm call are due to memory operands. If so, return true, otherwise
2081 /// return false.
2082 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
2083 const TargetLowering &TLI) {
2084 TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(ImmutableCallSite(CI));
2085 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
2086 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
2088 // Compute the constraint code and ConstraintType to use.
2089 TLI.ComputeConstraintToUse(OpInfo, SDValue());
2091 // If this asm operand is our Value*, and if it isn't an indirect memory
2092 // operand, we can't fold it!
2093 if (OpInfo.CallOperandVal == OpVal &&
2094 (OpInfo.ConstraintType != TargetLowering::C_Memory ||
2095 !OpInfo.isIndirect))
2096 return false;
2097 }
2099 return true;
2100 }
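// Editor's note (illustrative, hedged): for inline asm such as
//   call void asm "movl $1, $0", "=*m,r"(i32* %addr, i32 %v)
// %addr appears only as an indirect memory operand ("*m"), so its address
// computation may be folded; a use in a register operand ("r") would not.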
2102 /// FindAllMemoryUses - Recursively walk all the uses of I until we find a
2103 /// memory use. If we find an obviously non-foldable instruction, return true.
2104 /// Add the ultimately found memory instructions to MemoryUses.
2105 static bool FindAllMemoryUses(Instruction *I,
2106 SmallVectorImpl<std::pair<Instruction*,unsigned> > &MemoryUses,
2107 SmallPtrSet<Instruction*, 16> &ConsideredInsts,
2108 const TargetLowering &TLI) {
2109 // If we already considered this instruction, we're done.
2110 if (!ConsideredInsts.insert(I))
2111 return false;
2113 // If this is an obviously unfoldable instruction, bail out.
2114 if (!MightBeFoldableInst(I))
2115 return true;
2117 // Loop over all the uses, recursively processing them.
2118 for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
2119 UI != E; ++UI) {
2120 User *U = *UI;
2122 if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
2123 MemoryUses.push_back(std::make_pair(LI, UI.getOperandNo()));
2124 continue;
2125 }
2127 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
2128 unsigned opNo = UI.getOperandNo();
2129 if (opNo == 0) return true; // Storing addr, not into addr.
2130 MemoryUses.push_back(std::make_pair(SI, opNo));
2131 continue;
2132 }
2134 if (CallInst *CI = dyn_cast<CallInst>(U)) {
2135 InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
2136 if (!IA) return true;
2138 // If this is a memory operand, we're cool, otherwise bail out.
2139 if (!IsOperandAMemoryOperand(CI, IA, I, TLI))
2140 return true;
2141 continue;
2142 }
2144 if (FindAllMemoryUses(cast<Instruction>(U), MemoryUses, ConsideredInsts,
2145 TLI))
2146 return true;
2147 }
2149 return false;
2150 }
2152 /// ValueAlreadyLiveAtInst - Return true if Val is already known to be live at
2153 /// the use site that we're folding it into. If so, there is no cost to
2154 /// include it in the addressing mode. KnownLive1 and KnownLive2 are two values
2155 /// that we know are live at the instruction already.
2156 bool AddressingModeMatcher::ValueAlreadyLiveAtInst(Value *Val, Value *KnownLive1,
2157 Value *KnownLive2) {
2158 // If Val is either of the known-live values, we know it is live!
2159 if (Val == 0 || Val == KnownLive1 || Val == KnownLive2)
2160 return true;
2162 // All values other than instructions and arguments (e.g. constants) are live.
2163 if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;
2165 // If Val is a constant sized alloca in the entry block, it is live. This is
2166 // true because it is just a reference to the stack/frame pointer, which is
2167 // live for the whole function.
2168 if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
2169 if (AI->isStaticAlloca())
2170 return true;
2172 // Check to see if this value is already used in the memory instruction's
2173 // block. If so, it's already live into the block at the very least, so we
2174 // can reasonably fold it.
2175 return Val->isUsedInBasicBlock(MemoryInst->getParent());
2176 }
2178 /// IsProfitableToFoldIntoAddressingMode - It is possible for the addressing
2179 /// mode of the machine to fold the specified instruction into a load or store
2180 /// that ultimately uses it. However, the specified instruction has multiple
2181 /// uses. Given this, it may actually increase register pressure to fold it
2182 /// into the load. For example, consider this code:
2184 /// X = ...
2185 /// Y = X+1
2186 /// use(Y) -> nonload/store
2187 /// Z = Y+1
2188 /// load Z
2190 /// In this case, Y has multiple uses, and can be folded into the load of Z
2191 /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to
2192 /// be live at the use(Y) line. If we don't fold Y into load Z, we use one
2193 /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the
2194 /// number of computations either.
2196 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If
2197 /// X was live across 'load Z' for other reasons, we actually *would* want to
2198 /// fold the addressing mode in the Z case. This would make Y die earlier.
2199 bool AddressingModeMatcher::
2200 IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
2201 ExtAddrMode &AMAfter) {
2202 if (IgnoreProfitability) return true;
2204 // AMBefore is the addressing mode before this instruction was folded in,
2205 // and AMAfter is the addressing mode after the instruction was folded. Get
2206 // the set of registers referenced by AMAfter and subtract out those
2207 // referenced by AMBefore: this is the set of values which folding in this
2208 // address extends the lifetime of.
2210 // Note that there are only two potential values being referenced here,
2211 // BaseReg and ScaleReg (global addresses are always available, as are any
2212 // folded immediates).
2213 Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
2215 // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
2216 // lifetime wasn't extended by adding this instruction.
2217 if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
2218 BaseReg = 0;
2219 if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
2220 ScaledReg = 0;
2222 // If folding this instruction (and its subexprs) didn't extend any live
2223 // ranges, we're ok with it.
2224 if (BaseReg == 0 && ScaledReg == 0)
2225 return true;
2227 // If all uses of this instruction are ultimately load/store/inlineasm's,
2228 // check to see if their addressing modes will include this instruction. If
2229 // so, we can fold it into all uses, so it doesn't matter if it has multiple
2230 // uses.
2231 SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
2232 SmallPtrSet<Instruction*, 16> ConsideredInsts;
2233 if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI))
2234 return false; // Has a non-memory, non-foldable use!
2236 // Now that we know that all uses of this instruction are part of a chain of
2237 // computation involving only operations that could theoretically be folded
2238 // into a memory use, loop over each of these uses and see if they could
2239 // *actually* fold the instruction.
2240 SmallVector<Instruction*, 32> MatchedAddrModeInsts;
2241 for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
2242 Instruction *User = MemoryUses[i].first;
2243 unsigned OpNo = MemoryUses[i].second;
2245 // Get the access type of this use. If the use isn't a pointer, we don't
2246 // know what it accesses.
2247 Value *Address = User->getOperand(OpNo);
2248 if (!Address->getType()->isPointerTy())
2249 return false;
2250 Type *AddressAccessTy = Address->getType()->getPointerElementType();
2252 // Do a match against the root of this address, ignoring profitability. This
2253 // will tell us if the addressing mode for the memory operation will
2254 // *actually* cover the shared instruction.
2255 ExtAddrMode Result;
2256 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
2257 TPT.getRestorationPoint();
2258 AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, AddressAccessTy,
2259 MemoryInst, Result, InsertedTruncs,
2260 PromotedInsts, TPT);
2261 Matcher.IgnoreProfitability = true;
2262 bool Success = Matcher.MatchAddr(Address, 0);
2263 (void)Success; assert(Success && "Couldn't select *anything*?");
2265 // The match was to check the profitability, the changes made are not
2266 // part of the original matcher. Therefore, they should be dropped
2267 // otherwise the original matcher will not present the right state.
2268 TPT.rollback(LastKnownGood);
2270 // If the match didn't cover I, then it won't be shared by it.
2271 if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(),
2272 I) == MatchedAddrModeInsts.end())
2273 return false;
2275 MatchedAddrModeInsts.clear();
2276 }
2278 return true;
2279 }
2281 } // end anonymous namespace
2283 /// IsNonLocalValue - Return true if the specified value is defined in a
2284 /// different basic block than BB.
2285 static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
2286 if (Instruction *I = dyn_cast<Instruction>(V))
2287 return I->getParent() != BB;
2288 return false;
2289 }
2291 /// OptimizeMemoryInst - Load and Store Instructions often have
2292 /// addressing modes that can do significant amounts of computation. As such,
2293 /// instruction selection will try to get the load or store to do as much
2294 /// computation as possible for the program. The problem is that isel can only
2295 /// see within a single block. As such, we sink as much legal addressing mode
2296 /// stuff into the block as possible.
2298 /// This method is used to optimize both load/store and inline asms with memory
2299 /// operands.
2300 bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
2301 Type *AccessTy) {
2302 Value *Repl = Addr;
2304 // Try to collapse single-value PHI nodes. This is necessary to undo
2305 // unprofitable PRE transformations.
2306 SmallVector<Value*, 8> worklist;
2307 SmallPtrSet<Value*, 16> Visited;
2308 worklist.push_back(Addr);
2310 // Use a worklist to iteratively look through PHI nodes, and ensure that
2311 // the addressing mode obtained from the non-PHI roots of the graph
2312 // is compatible.
2313 Value *Consensus = 0;
2314 unsigned NumUsesConsensus = 0;
2315 bool IsNumUsesConsensusValid = false;
2316 SmallVector<Instruction*, 16> AddrModeInsts;
2317 ExtAddrMode AddrMode;
2318 TypePromotionTransaction TPT;
2319 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
2320 TPT.getRestorationPoint();
2321 while (!worklist.empty()) {
2322 Value *V = worklist.back();
2323 worklist.pop_back();
2325 // Break use-def graph loops.
2326 if (!Visited.insert(V)) {
2327 Consensus = 0;
2328 break;
2329 }
2331 // For a PHI node, push all of its incoming values.
2332 if (PHINode *P = dyn_cast<PHINode>(V)) {
2333 for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i)
2334 worklist.push_back(P->getIncomingValue(i));
2335 continue;
2336 }
2338 // For non-PHIs, determine the addressing mode being computed.
2339 SmallVector<Instruction*, 16> NewAddrModeInsts;
2340 ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
2341 V, AccessTy, MemoryInst, NewAddrModeInsts, *TLI, InsertedTruncsSet,
2342 PromotedInsts, TPT);
2344 // This check is broken into two cases with very similar code to avoid using
2345 // getNumUses() as much as possible. Some values have a lot of uses, so
2346 // calling getNumUses() unconditionally caused a significant compile-time
2347 // regression.
2348 if (!Consensus) {
2349 Consensus = V;
2350 AddrMode = NewAddrMode;
2351 AddrModeInsts = NewAddrModeInsts;
2352 continue;
2353 } else if (NewAddrMode == AddrMode) {
2354 if (!IsNumUsesConsensusValid) {
2355 NumUsesConsensus = Consensus->getNumUses();
2356 IsNumUsesConsensusValid = true;
2357 }
2359 // Ensure that the obtained addressing mode is equivalent to that obtained
2360 // for all other roots of the PHI traversal. Also, when choosing one
2361 // such root as representative, select the one with the most uses in order
2362 // to keep the cost modeling heuristics in AddressingModeMatcher
2363 // applicable.
2364 unsigned NumUses = V->getNumUses();
2365 if (NumUses > NumUsesConsensus) {
2366 Consensus = V;
2367 NumUsesConsensus = NumUses;
2368 AddrModeInsts = NewAddrModeInsts;
2369 }
2370 continue;
2371 }
2373 Consensus = 0;
2374 break;
2375 }
2377 // If the addressing mode couldn't be determined, or if multiple different
2378 // ones were determined, bail out now.
2379 if (!Consensus) {
2380 TPT.rollback(LastKnownGood);
2381 return false;
2382 }
2385 // Check to see if any of the instructions subsumed by this addr mode are
2386 // non-local to I's BB.
2387 bool AnyNonLocal = false;
2388 for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) {
2389 if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) {
2390 AnyNonLocal = true;
2391 break;
2392 }
2393 }
2395 // If all the instructions matched are already in this BB, don't do anything.
2396 if (!AnyNonLocal) {
2397 DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n");
2398 return false;
2399 }
2401 // Insert this computation right after this user. Since our caller is
2402 // scanning from the top of the BB to the bottom, reuse of the expr is
2403 // guaranteed to happen later.
2404 IRBuilder<> Builder(MemoryInst);
2406 // Now that we have determined the addressing expression we want to use and
2407 // know that we have to sink it into this block, check to see if we have
2408 // already done this for some other load/store instr in this block. If so,
2409 // reuse the computation.
2410 Value *&SunkAddr = SunkAddrs[Addr];
2411 if (SunkAddr) {
2412 DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
2413 << *MemoryInst);
2414 if (SunkAddr->getType() != Addr->getType())
2415 SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType());
2416 } else {
2417 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
2418 << *MemoryInst);
2419 Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(Addr->getType());
2420 Value *Result = 0;
2422 // Start with the base register. Do this first so that subsequent address
2423 // matching finds it last, which will prevent it from trying to match it
2424 // as the scaled value in case it happens to be a mul. That would be
2425 // problematic if we've sunk a different mul for the scale, because then
2426 // we'd end up sinking both muls.
2427 if (AddrMode.BaseReg) {
2428 Value *V = AddrMode.BaseReg;
2429 if (V->getType()->isPointerTy())
2430 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
2431 if (V->getType() != IntPtrTy)
2432 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
2433 Result = V;
2434 }
2436 // Add the scale value.
2437 if (AddrMode.Scale) {
2438 Value *V = AddrMode.ScaledReg;
2439 if (V->getType() == IntPtrTy) {
2440 // V is already the right width; no cast needed.
2441 } else if (V->getType()->isPointerTy()) {
2442 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
2443 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
2444 cast<IntegerType>(V->getType())->getBitWidth()) {
2445 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
2446 } else {
2447 V = Builder.CreateSExt(V, IntPtrTy, "sunkaddr");
2448 }
2449 if (AddrMode.Scale != 1)
2450 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
2451 "sunkaddr");
2452 if (Result)
2453 Result = Builder.CreateAdd(Result, V, "sunkaddr");
2454 else
2455 Result = V;
2456 }
2458 // Add in the BaseGV if present.
2459 if (AddrMode.BaseGV) {
2460 Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr");
2461 if (Result)
2462 Result = Builder.CreateAdd(Result, V, "sunkaddr");
2463 else
2464 Result = V;
2465 }
2467 // Add in the Base Offset if present.
2468 if (AddrMode.BaseOffs) {
2469 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
2470 if (Result)
2471 Result = Builder.CreateAdd(Result, V, "sunkaddr");
2472 else
2473 Result = V;
2474 }
2476 if (Result == 0)
2477 SunkAddr = Constant::getNullValue(Addr->getType());
2478 else
2479 SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
2480 }
2482 MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
2484 // If we have no uses, recursively delete the value and all dead instructions
2485 // using it.
2486 if (Repl->use_empty()) {
2487 // This can cause recursive deletion, which can invalidate our iterator.
2488 // Use a WeakVH to hold onto it in case this happens.
2489 WeakVH IterHandle(CurInstIterator);
2490 BasicBlock *BB = CurInstIterator->getParent();
2492 RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo);
2494 if (IterHandle != CurInstIterator) {
2495 // If the iterator instruction was recursively deleted, start over at the
2496 // start of the block.
2497 CurInstIterator = BB->begin();
2498 SunkAddrs.clear();
2499 }
2500 }
2502 ++NumMemoryInsts;
2503 return true;
2504 }
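// Editor's sketch (illustrative IR, assumed 64-bit target): for
// AddrMode = [%base + 4*%idx + 8], the code rebuilt next to the load is
// roughly
//   %sunkaddr  = ptrtoint i32* %base to i64
//   %sunkaddr1 = mul i64 %idx, 4
//   %sunkaddr2 = add i64 %sunkaddr, %sunkaddr1
//   %sunkaddr3 = add i64 %sunkaddr2, 8
//   %sunkaddr4 = inttoptr i64 %sunkaddr3 to i32*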
2505 /// OptimizeInlineAsmInst - If there are any memory operands, use
2506 /// OptimizeMemoryInst to sink their address computing into the block when
2507 /// possible / profitable.
2508 bool CodeGenPrepare::OptimizeInlineAsmInst(CallInst *CS) {
2509 bool MadeChange = false;
2511 TargetLowering::AsmOperandInfoVector
2512 TargetConstraints = TLI->ParseConstraints(CS);
2513 unsigned ArgNo = 0;
2514 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
2515 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
2517 // Compute the constraint code and ConstraintType to use.
2518 TLI->ComputeConstraintToUse(OpInfo, SDValue());
2520 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
2521 OpInfo.isIndirect) {
2522 Value *OpVal = CS->getArgOperand(ArgNo++);
2523 MadeChange |= OptimizeMemoryInst(CS, OpVal, OpVal->getType());
2524 } else if (OpInfo.Type == InlineAsm::isInput)
2525 ArgNo++;
2526 }
2528 return MadeChange;
2529 }
2531 /// SinkExtExpand - Sink a zext or sext into its user blocks if the target type
2532 /// doesn't fit in one register.
2533 bool CodeGenPrepare::SinkExtExpand(CastInst *CI) {
2534 if (TLI &&
2535 TLI->getTypeAction(CI->getContext(), TLI->getValueType(CI->getType())) ==
2536 TargetLowering::TypeExpandInteger)
2537 return SinkCast(CI);
2538 return false;
2539 }
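// Editor's note (illustrative assumption): on a 32-bit target, an i64 result
// is expanded into two registers (TypeExpandInteger), so an extension such as
//   %e = zext i32 %x to i64
// is sunk into each block that uses %e rather than kept in its defining block.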
2541 /// MoveExtToFormExtLoad - Move a zext or sext fed by a load into the same
2542 /// basic block as the load, unless conditions are unfavorable. This allows
2543 /// SelectionDAG to fold the extend into the load.
2545 bool CodeGenPrepare::MoveExtToFormExtLoad(Instruction *I) {
2546 // Look for a load being extended.
2547 LoadInst *LI = dyn_cast<LoadInst>(I->getOperand(0));
2548 if (!LI) return false;
2550 // If they're already in the same block, there's nothing to do.
2551 if (LI->getParent() == I->getParent())
2554 // Do not undo the optimization in SinkExtExpand.
2555 if (TLI &&
2556 TLI->getTypeAction(I->getContext(), TLI->getValueType(I->getType())) ==
2557 TargetLowering::TypeExpandInteger)
2558 return false;
2560 // If the load has other users and the truncate is not free, this probably
2561 // isn't worthwhile.
2562 if (!LI->hasOneUse() &&
2563 TLI && (TLI->isTypeLegal(TLI->getValueType(LI->getType())) ||
2564 !TLI->isTypeLegal(TLI->getValueType(I->getType()))) &&
2565 !TLI->isTruncateFree(I->getType(), LI->getType()))
2566 return false;
2568 // Check whether the target supports casts folded into loads.
2569 unsigned LType;
2570 if (isa<ZExtInst>(I))
2571 LType = ISD::ZEXTLOAD;
2572 else {
2573 assert(isa<SExtInst>(I) && "Unexpected ext type!");
2574 LType = ISD::SEXTLOAD;
2575 }
2576 if (TLI && !TLI->isLoadExtLegal(LType, TLI->getValueType(LI->getType())))
2577 return false;
2579 // Move the extend into the same block as the load, so that SelectionDAG
2580 // can fold it.
2581 I->removeFromParent();
2582 I->insertAfter(LI);
2583 ++NumExtsMoved;
2584 return true;
2585 }
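// Editor's sketch (illustrative IR): the extension is moved from a later
// block to sit right after the load,
//   %x = load i8* %p
//   %y = sext i8 %x to i32     ; moved here from the user block
// so SelectionDAG can select a single extending load (ISD::SEXTLOAD).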
2587 bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
2588 BasicBlock *DefBB = I->getParent();
2590 // If the result of a {s|z}ext and its source are both live out, rewrite all
2591 // other uses of the source with result of extension.
2592 Value *Src = I->getOperand(0);
2593 if (Src->hasOneUse())
2594 return false;
2596 // Only do this xform if truncating is free.
2597 if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
2598 return false;
2600 // Only safe to perform the optimization if the source is also defined in
2601 // this block.
2602 if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
2603 return false;
2605 bool DefIsLiveOut = false;
2606 for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
2607 UI != E; ++UI) {
2608 Instruction *User = cast<Instruction>(*UI);
2610 // Figure out which BB this ext is used in.
2611 BasicBlock *UserBB = User->getParent();
2612 if (UserBB == DefBB) continue;
2613 DefIsLiveOut = true;
2614 break;
2615 }
2616 if (!DefIsLiveOut)
2617 return false;
2619 // Make sure none of the uses are PHI nodes.
2620 for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
2621 UI != E; ++UI) {
2622 Instruction *User = cast<Instruction>(*UI);
2623 BasicBlock *UserBB = User->getParent();
2624 if (UserBB == DefBB) continue;
2625 // Be conservative. We don't want this xform to end up introducing
2626 // reloads just before load / store instructions.
2627 if (isa<PHINode>(User) || isa<LoadInst>(User) || isa<StoreInst>(User))
2628 return false;
2629 }
2631 // InsertedTruncs - Only insert one trunc in each block once.
2632 DenseMap<BasicBlock*, Instruction*> InsertedTruncs;
2634 bool MadeChange = false;
2635 for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
2636 UI != E; ++UI) {
2637 Use &TheUse = UI.getUse();
2638 Instruction *User = cast<Instruction>(*UI);
2640 // Figure out which BB this ext is used in.
2641 BasicBlock *UserBB = User->getParent();
2642 if (UserBB == DefBB) continue;
2644 // Both src and def are live in this block. Rewrite the use.
2645 Instruction *&InsertedTrunc = InsertedTruncs[UserBB];
2647 if (!InsertedTrunc) {
2648 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
2649 InsertedTrunc = new TruncInst(I, Src->getType(), "", InsertPt);
2650 InsertedTruncsSet.insert(InsertedTrunc);
2651 }
2653 // Replace a use of the {s|z}ext source with a use of the result.
2654 TheUse = InsertedTrunc;
2655 ++NumExtUses;
2656 MadeChange = true;
2657 }
2659 return MadeChange;
2660 }
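// Editor's sketch (illustrative IR): with %e = sext i16 %x to i32 and both %x
// and %e live out of the defining block, a cross-block use of %x is rewritten
// to use
//   %t = trunc i32 %e to i16   ; inserted in the user block
// so only %e has to be live across the block boundary.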
2662 /// isFormingBranchFromSelectProfitable - Returns true if a SelectInst should be
2663 /// turned into an explicit branch.
2664 static bool isFormingBranchFromSelectProfitable(SelectInst *SI) {
2665 // FIXME: This should use the same heuristics as IfConversion to determine
2666 // whether a select is better represented as a branch. This requires that
2667 // branch probability metadata is preserved for the select, which is not the
2668 // case currently.
2670 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2672 // If the branch is predicted right, an out of order CPU can avoid blocking on
2673 // the compare. Emit cmovs on compares with a memory operand as branches to
2674 // avoid stalls on the load from memory. If the compare has more than one use
2675 // there's probably another cmov or setcc around so it's not worth emitting a
2676 // branch.
2677 if (!Cmp)
2678 return false;
2680 Value *CmpOp0 = Cmp->getOperand(0);
2681 Value *CmpOp1 = Cmp->getOperand(1);
2683 // We check that the memory operand has one use to avoid uses of the loaded
2684 // value directly after the compare, making branches unprofitable.
2685 return Cmp->hasOneUse() &&
2686 ((isa<LoadInst>(CmpOp0) && CmpOp0->hasOneUse()) ||
2687 (isa<LoadInst>(CmpOp1) && CmpOp1->hasOneUse()));
2688 }
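// Editor's note (illustrative IR): the profitable pattern is a single-use
// compare of a single-use load,
//   %v = load i32* %p
//   %c = icmp eq i32 %v, 0
//   %r = select i1 %c, i32 %a, i32 %b
// where a branch avoids stalling a cmov on the memory operand.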
2691 /// If we have a SelectInst that will likely profit from branch prediction,
2692 /// turn it into a branch.
2693 bool CodeGenPrepare::OptimizeSelectInst(SelectInst *SI) {
2694 bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);
2696 // Can we convert the 'select' to CF?
2697 if (DisableSelectToBranch || OptSize || !TLI || VectorCond)
2698 return false;
2700 TargetLowering::SelectSupportKind SelectKind;
2701 if (VectorCond)
2702 SelectKind = TargetLowering::VectorMaskSelect;
2703 else if (SI->getType()->isVectorTy())
2704 SelectKind = TargetLowering::ScalarCondVectorVal;
2705 else
2706 SelectKind = TargetLowering::ScalarValSelect;
2708 // Do we have efficient codegen support for this kind of 'select'?
2709 if (TLI->isSelectSupported(SelectKind)) {
2710 // We have efficient codegen support for the select instruction.
2711 // Check if it is profitable to keep this 'select'.
2712 if (!TLI->isPredictableSelectExpensive() ||
2713 !isFormingBranchFromSelectProfitable(SI))
2714 return false;
2715 }
2719 // First, we split the block containing the select into 2 blocks.
2720 BasicBlock *StartBlock = SI->getParent();
2721 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(SI));
2722 BasicBlock *NextBlock = StartBlock->splitBasicBlock(SplitPt, "select.end");
2724 // Create a new block serving as the landing pad for the branch.
2725 BasicBlock *SmallBlock = BasicBlock::Create(SI->getContext(), "select.mid",
2726 NextBlock->getParent(), NextBlock);
2728 // Move the unconditional branch from the block with the select in it into our
2729 // landing pad block.
2730 StartBlock->getTerminator()->eraseFromParent();
2731 BranchInst::Create(NextBlock, SmallBlock);
2733 // Insert the real conditional branch based on the original condition.
2734 BranchInst::Create(NextBlock, SmallBlock, SI->getCondition(), SI);
2736 // The select itself is replaced with a PHI Node.
2737 PHINode *PN = PHINode::Create(SI->getType(), 2, "", NextBlock->begin());
2738 PN->takeName(SI);
2739 PN->addIncoming(SI->getTrueValue(), StartBlock);
2740 PN->addIncoming(SI->getFalseValue(), SmallBlock);
2741 SI->replaceAllUsesWith(PN);
2742 SI->eraseFromParent();
2744 // Instruct OptimizeBlock to skip to the next block.
2745 CurInstIterator = StartBlock->end();
2746 ++NumSelectsExpanded;
2747 return true;
2748 }
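// Editor's sketch of the resulting CFG (illustrative labels and types):
//   start:      br i1 %cond, label %select.end, label %select.mid
//   select.mid: br label %select.end
//   select.end: %r = phi i32 [ %tval, %start ], [ %fval, %select.mid ]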
2750 static bool isBroadcastShuffle(ShuffleVectorInst *SVI) {
2751 SmallVector<int, 16> Mask(SVI->getShuffleMask());
2752 int SplatElem = -1;
2753 for (unsigned i = 0; i < Mask.size(); ++i) {
2754 if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem)
2755 return false;
2756 SplatElem = Mask[i];
2757 }
2759 return true;
2760 }
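// Editor's note (illustrative): a mask of <0, 0, 0, 0> (or <2, -1, 2, 2>,
// where -1 means "don't care") is a broadcast; <0, 1, 0, 1> is not.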
2762 /// Some targets have expensive vector shifts if the lanes aren't all the same
2763 /// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases
2764 /// it's often worth sinking a shufflevector splat down to its use so that
2765 /// codegen can spot all lanes are identical.
2766 bool CodeGenPrepare::OptimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
2767 BasicBlock *DefBB = SVI->getParent();
2769 // Only do this xform if variable vector shifts are particularly expensive.
2770 if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType()))
2771 return false;
2773 // We only expect better codegen by sinking a shuffle if we can recognise a
2774 // constant splat.
2775 if (!isBroadcastShuffle(SVI))
2776 return false;
2778 // InsertedShuffles - Only insert a shuffle in each block once.
2779 DenseMap<BasicBlock*, Instruction*> InsertedShuffles;
2781 bool MadeChange = false;
2782 for (Value::use_iterator UI = SVI->use_begin(), E = SVI->use_end();
2783 UI != E; ++UI) {
2784 Instruction *User = cast<Instruction>(*UI);
2786 // Figure out which BB this shufflevector is used in.
2787 BasicBlock *UserBB = User->getParent();
2788 if (UserBB == DefBB) continue;
2790 // For now only apply this when the splat is used by a shift instruction.
2791 if (!User->isShift()) continue;
2793 // Everything checks out, sink the shuffle if the user's block doesn't
2794 // already have a copy.
2795 Instruction *&InsertedShuffle = InsertedShuffles[UserBB];
2797 if (!InsertedShuffle) {
2798 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
2799 InsertedShuffle = new ShuffleVectorInst(SVI->getOperand(0),
2800 SVI->getOperand(1),
2801 SVI->getOperand(2), "", InsertPt);
2802 }
2804 User->replaceUsesOfWith(SVI, InsertedShuffle);
2805 MadeChange = true;
2806 }
2808 // If we removed all uses, nuke the shuffle.
2809 if (SVI->use_empty()) {
2810 SVI->eraseFromParent();
2811 MadeChange = true;
2812 }
2814 return MadeChange;
2815 }
2817 bool CodeGenPrepare::OptimizeInst(Instruction *I) {
2818 if (PHINode *P = dyn_cast<PHINode>(I)) {
2819 // It is possible for very late stage optimizations (such as SimplifyCFG)
2820 // to introduce PHI nodes too late to be cleaned up. If we detect such a
2821 // trivial PHI, go ahead and zap it here.
2822 if (Value *V = SimplifyInstruction(P, TLI ? TLI->getDataLayout() : 0,
2823 TLInfo, DT)) {
2824 P->replaceAllUsesWith(V);
2825 P->eraseFromParent();
2826 ++NumPHIsElim;
2827 return true;
2828 }
2829 return false;
2830 }
2832 if (CastInst *CI = dyn_cast<CastInst>(I)) {
2833 // If the source of the cast is a constant, then this should have
2834 // already been constant folded. The only reason NOT to constant fold
2835 // it is if something (e.g. LSR) was careful to place the constant
2836 // evaluation in a block other than the one that uses it (e.g. to hoist
2837 // the address of globals out of a loop). If this is the case, we don't
2838 // want to forward-subst the cast.
2839 if (isa<Constant>(CI->getOperand(0)))
2840 return false;
2842 if (TLI && OptimizeNoopCopyExpression(CI, *TLI))
2843 return true;
2845 if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
2846 if (SinkExtExpand(CI))
2847 return true;
2848 bool MadeChange = MoveExtToFormExtLoad(I);
2849 return MadeChange | OptimizeExtUses(I);
2850 }
2851 return false;
2852 }
2854 if (CmpInst *CI = dyn_cast<CmpInst>(I))
2855 if (!TLI || !TLI->hasMultipleConditionRegisters())
2856 return OptimizeCmpExpression(CI);
2858 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
2859 if (TLI)
2860 return OptimizeMemoryInst(I, I->getOperand(0), LI->getType());
2861 return false;
2862 }
2864 if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
2865 if (TLI)
2866 return OptimizeMemoryInst(I, SI->getOperand(1),
2867 SI->getOperand(0)->getType());
2868 return false;
2869 }
2871 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
2872 if (GEPI->hasAllZeroIndices()) {
2873 // The GEP operand must be a pointer, so must its result -> BitCast.
2874 Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
2875 GEPI->getName(), GEPI);
2876 GEPI->replaceAllUsesWith(NC);
2877 GEPI->eraseFromParent();
2878 ++NumGEPsElim;
2879 OptimizeInst(NC);
2880 return true;
2881 }
2882 return false;
2883 }
2885 if (CallInst *CI = dyn_cast<CallInst>(I))
2886 return OptimizeCallInst(CI);
2888 if (SelectInst *SI = dyn_cast<SelectInst>(I))
2889 return OptimizeSelectInst(SI);
2891 if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I))
2892 return OptimizeShuffleVectorInst(SVI);
2897 // In this pass we look for GEP and cast instructions that are used
2898 // across basic blocks and rewrite them to improve basic-block-at-a-time
2899 // selection.
2900 bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
2902 bool MadeChange = false;
2904 CurInstIterator = BB.begin();
2905 while (CurInstIterator != BB.end())
2906 MadeChange |= OptimizeInst(CurInstIterator++);
2908 MadeChange |= DupRetToEnableTailCallOpts(&BB);
2910 return MadeChange;
2911 }
2913 // If an llvm.dbg.value is far away from the value it describes, iSel may not
2914 // be able to handle it properly. iSel will drop the llvm.dbg.value if it can
2915 // not find a node corresponding to the value.
2916 bool CodeGenPrepare::PlaceDbgValues(Function &F) {
2917 bool MadeChange = false;
2918 for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
2919 Instruction *PrevNonDbgInst = NULL;
2920 for (BasicBlock::iterator BI = I->begin(), BE = I->end(); BI != BE;) {
2921 Instruction *Insn = BI; ++BI;
2922 DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
2923 if (!DVI) {
2924 PrevNonDbgInst = Insn;
2925 continue;
2926 }
2928 Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
2929 if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
2930 DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI);
2931 DVI->removeFromParent();
2932 if (isa<PHINode>(VI))
2933 DVI->insertBefore(VI->getParent()->getFirstInsertionPt());
2935 DVI->insertAfter(VI);