//===- CodeGenPrepare.cpp - Prepare a function for code generation -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "codegenprepare"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
using namespace llvm;
namespace {
  class VISIBILITY_HIDDEN CodeGenPrepare : public FunctionPass {
    /// TLI - Keep a pointer to a TargetLowering to consult for determining
    /// transformation profitability.
    const TargetLowering *TLI;
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit CodeGenPrepare(const TargetLowering *tli = 0)
      : FunctionPass((intptr_t)&ID), TLI(tli) {}
    bool runOnFunction(Function &F);

  private:
    bool EliminateMostlyEmptyBlocks(Function &F);
    bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
    void EliminateMostlyEmptyBlock(BasicBlock *BB);
    bool OptimizeBlock(BasicBlock &BB);
    bool OptimizeLoadStoreInst(Instruction *I, Value *Addr,
                               const Type *AccessTy,
                               DenseMap<Value*,Value*> &SunkAddrs);
  };
}

char CodeGenPrepare::ID = 0;
static RegisterPass<CodeGenPrepare> X("codegenprepare",
                                      "Optimize for code generation");

FunctionPass *llvm::createCodeGenPreparePass(const TargetLowering *TLI) {
  return new CodeGenPrepare(TLI);
}
bool CodeGenPrepare::runOnFunction(Function &F) {
  bool EverMadeChange = false;

  // First pass, eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= EliminateMostlyEmptyBlocks(F);

  // Iterate until no further changes are made; optimizing one block can
  // expose opportunities in blocks we have already visited.
  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
      MadeChange |= OptimizeBlock(*BB);
    EverMadeChange |= MadeChange;
  }
  return EverMadeChange;
}
/// EliminateMostlyEmptyBlocks - eliminate blocks that contain only PHI nodes
/// and an unconditional branch. Passes before isel (e.g. LSR/loopsimplify)
/// often split edges in ways that are non-optimal for isel. Start by
/// eliminating these blocks so we can split them the way we want them.
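///
/// For example (illustrative IR only):
///   somebb:                                ; preds = %entry
///     %val = phi i32 [ %x, %entry ]
///     br label %destbb
/// can be removed by rewriting %destbb's PHI nodes to take %x directly from
/// %entry.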
bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) {
  bool MadeChange = false;
  // Note that this intentionally skips the entry block.
  for (Function::iterator I = ++F.begin(), E = F.end(); I != E; ) {
    BasicBlock *BB = I++;

    // If this block doesn't end with an uncond branch, ignore it.
    BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI || !BI->isUnconditional())
      continue;

    // If the instruction before the branch isn't a phi node, then other stuff
    // is happening here.
    BasicBlock::iterator BBI = BI;
    if (BBI != BB->begin()) {
      --BBI;
      if (!isa<PHINode>(BBI)) continue;
    }

    // Do not break infinite loops.
    BasicBlock *DestBB = BI->getSuccessor(0);
    if (DestBB == BB)
      continue;

    if (!CanMergeBlocks(BB, DestBB))
      continue;

    EliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}
/// CanMergeBlocks - Return true if we can merge BB into DestBB if there is a
/// single uncond branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  BasicBlock::const_iterator BBI = BB->begin();
  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
    for (Value::use_const_iterator UI = PN->use_begin(), E = PN->use_end();
         UI != E; ++UI) {
      const Instruction *User = cast<Instruction>(*UI);
      if (User->getParent() != DestBB || !isa<PHINode>(User))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (User->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(User))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block. If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      BBI = DestBB->begin();
      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
        const Value *V1 = PN->getIncomingValueForBlock(Pred);
        const Value *V2 = PN->getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}
/// EliminateMostlyEmptyBlock - Eliminate a basic block that has only phi nodes
/// and an unconditional branch in it.
void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  DOUT << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB;

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (DestBB->getSinglePredecessor()) {
    // If DestBB has single-entry PHI nodes, fold them.
    while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
      PN->replaceAllUsesWith(PN->getIncomingValue(0));
      PN->eraseFromParent();
    }

    // Splice all the PHI nodes from BB over to DestBB.
    DestBB->getInstList().splice(DestBB->begin(), BB->getInstList(),
                                 BB->begin(), BI);

    // Anything that branched to BB now branches to DestBB.
    BB->replaceAllUsesWith(DestBB);

    // Nuke BB.
    BB->eraseFromParent();

    DOUT << "AFTER:\n" << *DestBB << "\n\n\n";
    return;
  }

  // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  PHINode *PN;
  for (BasicBlock::iterator BBI = DestBB->begin();
       (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN->removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN->addIncoming(InValPhi->getIncomingValue(i),
                        InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN->addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  BB->eraseFromParent();

  DOUT << "AFTER:\n" << *DestBB << "\n\n\n";
}
/// SplitEdgeNicely - Split the critical edge from TI to its specified
/// successor if it will improve codegen. We only do this if the successor has
/// phi nodes (otherwise critical edges are ok). If there is already another
/// predecessor of the succ that is empty (and thus has no phi nodes), use it
/// instead of introducing a new block.
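///
/// For example (illustrative): if TIBB conditionally branches to %dest, and
/// %dest already has an empty predecessor that ends in "br label %dest" and
/// supplies the same PHI values that TIBB would, we simply retarget TI at
/// that predecessor rather than creating another empty block.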
static void SplitEdgeNicely(TerminatorInst *TI, unsigned SuccNum, Pass *P) {
  BasicBlock *TIBB = TI->getParent();
  BasicBlock *Dest = TI->getSuccessor(SuccNum);
  assert(isa<PHINode>(Dest->begin()) &&
         "This should only be called if Dest has a PHI!");

  /// TIPHIValues - This array is lazily computed to determine the values of
  /// PHIs in Dest that TI would provide.
  std::vector<Value*> TIPHIValues;

  // Check to see if Dest has any blocks that can be used as a split edge for
  // this TI.
  for (pred_iterator PI = pred_begin(Dest), E = pred_end(Dest); PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    // To be usable, the pred has to end with an uncond branch to the dest.
    BranchInst *PredBr = dyn_cast<BranchInst>(Pred->getTerminator());
    if (!PredBr || !PredBr->isUnconditional() ||
        // Must be empty other than the branch.
        &Pred->front() != PredBr ||
        // Cannot be the entry block; its label does not get emitted.
        Pred == &(Dest->getParent()->getEntryBlock()))
      continue;

    // Finally, since we know that Dest has phi nodes in it, we have to make
    // sure that jumping to Pred will have the same effect as going to Dest in
    // terms of PHI values.
    PHINode *PN;
    unsigned PHINo = 0;
    bool FoundMatch = true;
    for (BasicBlock::iterator I = Dest->begin();
         (PN = dyn_cast<PHINode>(I)); ++I, ++PHINo) {
      if (PHINo == TIPHIValues.size())
        TIPHIValues.push_back(PN->getIncomingValueForBlock(TIBB));

      // If the PHI entry doesn't work, we can't use this pred.
      if (TIPHIValues[PHINo] != PN->getIncomingValueForBlock(Pred)) {
        FoundMatch = false;
        break;
      }
    }

    // If we found a workable predecessor, change TI to branch to Succ.
    if (FoundMatch) {
      Dest->removePredecessor(TIBB);
      TI->setSuccessor(SuccNum, Pred);
      return;
    }
  }

  SplitCriticalEdge(TI, SuccNum, P, true);
}
/// OptimizeNoopCopyExpression - If the specified cast instruction is a noop
/// copy (e.g. it's casting from one pointer type to another, int->uint, or
/// int->sbyte on PPC), sink it into user blocks to reduce the number of virtual
/// registers that must be created and coalesced.
///
/// Return true if any changes are made.
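///
/// For example (illustrative): a bitcast from one pointer type to another
/// whose only use is a load in a different block would otherwise occupy a
/// cross-block virtual register; re-creating the cast next to the load lets
/// isel see that the copy is a noop and eliminate it.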
static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){
  // If this is a noop copy,
  MVT::ValueType SrcVT = TLI.getValueType(CI->getOperand(0)->getType());
  MVT::ValueType DstVT = TLI.getValueType(CI->getType());

  // This is an fp<->int conversion?
  if (MVT::isInteger(SrcVT) != MVT::isInteger(DstVT))
    return false;

  // If this is an extension, it will be a zero or sign extension, which
  // isn't a noop.
  if (SrcVT < DstVT) return false;

  // If these values will be promoted, find out what they will be promoted
  // to. This helps us consider truncates on PPC as noop copies when they
  // are.
  if (TLI.getTypeAction(SrcVT) == TargetLowering::Promote)
    SrcVT = TLI.getTypeToTransformTo(SrcVT);
  if (TLI.getTypeAction(DstVT) == TargetLowering::Promote)
    DstVT = TLI.getTypeToTransformTo(DstVT);

  // If, after promotion, these are the same types, this is a noop copy.
  if (SrcVT != DstVT)
    return false;

  BasicBlock *DefBB = CI->getParent();

  /// InsertedCasts - Only insert a cast in each block once.
  DenseMap<BasicBlock*, CastInst*> InsertedCasts;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Figure out which BB this cast is used in. For PHI's this is the
    // appropriate predecessor block.
    BasicBlock *UserBB = User->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      unsigned OpVal = UI.getOperandNo()/2;
      UserBB = PN->getIncomingBlock(OpVal);
    }

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // If this user is in the same block as the cast, don't change the cast.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cast into this block, use it.
    CastInst *&InsertedCast = InsertedCasts[UserBB];

    if (!InsertedCast) {
      BasicBlock::iterator InsertPt = UserBB->begin();
      while (isa<PHINode>(InsertPt)) ++InsertPt;

      InsertedCast =
        CastInst::create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "",
                         InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cast with a use of the new cast.
    TheUse = InsertedCast;
  }

  // If we removed all uses, nuke the cast.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }

  return MadeChange;
}
/// OptimizeCmpExpression - sink the given CmpInst into user blocks to reduce
/// the number of virtual registers that must be created and coalesced. This is
/// a clear win except on targets with multiple condition code registers
/// (PowerPC), where it might lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
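///
/// For example (illustrative): a cmp computed once but branched on in several
/// other blocks would be materialized into a register and copied across
/// blocks; re-creating the cmp in each user block lets isel fold it directly
/// into the branch.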
static bool OptimizeCmpExpression(CmpInst *CI) {
  BasicBlock *DefBB = CI->getParent();

  /// InsertedCmps - Only insert a cmp in each block once.
  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

  bool MadeChange = false;
  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
       UI != E; ) {
    Use &TheUse = UI.getUse();
    Instruction *User = cast<Instruction>(*UI);

    // Preincrement use iterator so we don't invalidate it.
    ++UI;

    // Don't bother for PHI nodes.
    if (isa<PHINode>(User))
      continue;

    // Figure out which BB this cmp is used in.
    BasicBlock *UserBB = User->getParent();

    // If this user is in the same block as the cmp, don't change the cmp.
    if (UserBB == DefBB) continue;

    // If we have already inserted a cmp into this block, use it.
    CmpInst *&InsertedCmp = InsertedCmps[UserBB];

    if (!InsertedCmp) {
      BasicBlock::iterator InsertPt = UserBB->begin();
      while (isa<PHINode>(InsertPt)) ++InsertPt;

      InsertedCmp =
        CmpInst::create(CI->getOpcode(), CI->getPredicate(), CI->getOperand(0),
                        CI->getOperand(1), "", InsertPt);
      MadeChange = true;
    }

    // Replace a use of the cmp with a use of the new cmp.
    TheUse = InsertedCmp;
  }

  // If we removed all uses, nuke the cmp.
  if (CI->use_empty()) {
    CI->eraseFromParent();
    MadeChange = true;
  }
  return MadeChange;
}
/// EraseDeadInstructions - Erase any dead instructions, recursively erasing
/// operands that become dead as a result.
static void EraseDeadInstructions(Value *V) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !I->use_empty()) return;

  SmallPtrSet<Instruction*, 16> Insts;
  Insts.insert(I);
  while (!Insts.empty()) {
    // Pop an instruction off the worklist.
    I = *Insts.begin();
    Insts.erase(I);
    if (isInstructionTriviallyDead(I)) {
      for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
        if (Instruction *U = dyn_cast<Instruction>(I->getOperand(i)))
          Insts.insert(U);
      I->eraseFromParent();
    }
  }
}
/// ExtAddrMode - This is an extended version of TargetLowering::AddrMode which
/// holds actual Value*'s for register values.
struct ExtAddrMode : public TargetLowering::AddrMode {
  Value *BaseReg;
  Value *ScaledReg;
  ExtAddrMode() : BaseReg(0), ScaledReg(0) {}
  void dump() const;
};

static std::ostream &operator<<(std::ostream &OS, const ExtAddrMode &AM) {
  bool NeedPlus = false;
  OS << "[";
  if (AM.BaseGV)
    OS << (NeedPlus ? " + " : "")
       << "GV:%" << AM.BaseGV->getName(), NeedPlus = true;
  if (AM.BaseOffs)
    OS << (NeedPlus ? " + " : "") << AM.BaseOffs, NeedPlus = true;
  if (AM.BaseReg)
    OS << (NeedPlus ? " + " : "")
       << "Base:%" << AM.BaseReg->getName(), NeedPlus = true;
  if (AM.Scale)
    OS << (NeedPlus ? " + " : "")
       << AM.Scale << "*%" << AM.ScaledReg->getName(), NeedPlus = true;
  return OS << "]";
}

void ExtAddrMode::dump() const {
  cerr << *this << "\n";
}
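// An ExtAddrMode can describe address forms such as [GV + 12 + %base +
// 4*%idx]. On X86, for example (illustrative), that corresponds to a single
// memory operand like "gv+12(%base,%idx,4)".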
static bool TryMatchingScaledValue(Value *ScaleReg, int64_t Scale,
                                   const Type *AccessTy, ExtAddrMode &AddrMode,
                                   SmallVector<Instruction*, 16> &AddrModeInsts,
                                   const TargetLowering &TLI, unsigned Depth);
/// FindMaximalLegalAddressingMode - If we can, try to merge the computation of
/// Addr into the specified addressing mode. If Addr can't be added to AddrMode
/// this returns false. This assumes that Addr is either a pointer type or
/// intptr_t for the target.
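///
/// For example (illustrative): for a load of i32 through
///   %a = getelementptr i32* %p, i32 %i
/// a target with scaled-index addressing can match the whole computation as
/// [%p + 4*%i], so the multiply and add never become separate instructions.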
static bool FindMaximalLegalAddressingMode(Value *Addr, const Type *AccessTy,
                                           ExtAddrMode &AddrMode,
                                           SmallVector<Instruction*, 16> &AddrModeInsts,
                                           const TargetLowering &TLI,
                                           unsigned Depth) {
  // If this is a global variable, fold it into the addressing mode if possible.
  if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
    if (AddrMode.BaseGV == 0) {
      AddrMode.BaseGV = GV;
      if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
        return true;
      AddrMode.BaseGV = 0;
    }
  } else if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
    AddrMode.BaseOffs += CI->getSExtValue();
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.BaseOffs -= CI->getSExtValue();
  } else if (isa<ConstantPointerNull>(Addr)) {
    // A null pointer adds nothing to the addressing mode.
    return true;
  }

  // Look through constant exprs and instructions.
  unsigned Opcode = ~0U;
  User *AddrInst = 0;
  if (Instruction *I = dyn_cast<Instruction>(Addr)) {
    Opcode = I->getOpcode();
    AddrInst = I;
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    Opcode = CE->getOpcode();
    AddrInst = CE;
  }

  // Limit recursion to avoid exponential behavior.
  if (Depth == 5) { AddrInst = 0; Opcode = ~0U; }

  // If this is really an instruction, add it to our list of related
  // instructions.
  if (Instruction *I = dyn_cast_or_null<Instruction>(AddrInst))
    AddrModeInsts.push_back(I);

  switch (Opcode) {
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    if (FindMaximalLegalAddressingMode(AddrInst->getOperand(0), AccessTy,
                                       AddrMode, AddrModeInsts, TLI, Depth))
      return true;
    break;
  case Instruction::IntToPtr:
    // This inttoptr is a no-op if the integer type is pointer sized.
    if (TLI.getValueType(AddrInst->getOperand(0)->getType()) ==
        TLI.getPointerTy()) {
      if (FindMaximalLegalAddressingMode(AddrInst->getOperand(0), AccessTy,
                                         AddrMode, AddrModeInsts, TLI, Depth))
        return true;
    }
    break;
  case Instruction::Add: {
    // Check to see if we can merge in the RHS then the LHS. If so, we win.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();
    if (FindMaximalLegalAddressingMode(AddrInst->getOperand(1), AccessTy,
                                       AddrMode, AddrModeInsts, TLI, Depth+1) &&
        FindMaximalLegalAddressingMode(AddrInst->getOperand(0), AccessTy,
                                       AddrMode, AddrModeInsts, TLI, Depth+1))
      return true;

    // Restore the old addr mode info.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);

    // Otherwise this was over-aggressive. Try merging in the LHS then the RHS.
    if (FindMaximalLegalAddressingMode(AddrInst->getOperand(0), AccessTy,
                                       AddrMode, AddrModeInsts, TLI, Depth+1) &&
        FindMaximalLegalAddressingMode(AddrInst->getOperand(1), AccessTy,
                                       AddrMode, AddrModeInsts, TLI, Depth+1))
      return true;

    // Otherwise we definitely can't merge the ADD in.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    break;
  }
  case Instruction::Or: {
    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
    if (!RHS) break;
    // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
    break;
  }
  case Instruction::Mul:
  case Instruction::Shl: {
    // Can only handle X*C and X << C, and can only handle this when the scale
    // field is available.
    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
    if (!RHS) break;
    int64_t Scale = RHS->getSExtValue();
    if (Opcode == Instruction::Shl)
      Scale = 1LL << Scale;

    if (TryMatchingScaledValue(AddrInst->getOperand(0), Scale, AccessTy,
                               AddrMode, AddrModeInsts, TLI, Depth))
      return true;
    break;
  }
  case Instruction::GetElementPtr: {
    // Scan the GEP. We check it if it contains constant offsets and at most
    // one variable offset.
    int VariableOperand = -1;
    unsigned VariableScale = 0;

    int64_t ConstantOffset = 0;
    const TargetData *TD = TLI.getTargetData();
    gep_type_iterator GTI = gep_type_begin(AddrInst);
    for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD->getStructLayout(STy);
        unsigned Idx =
          cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
        ConstantOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t TypeSize = TD->getTypeSize(GTI.getIndexedType());
        if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
          ConstantOffset += CI->getSExtValue()*TypeSize;
        } else if (TypeSize) {  // Scales of zero don't do anything.
          // We only allow one variable index at the moment.
          if (VariableOperand != -1) {
            VariableOperand = -2;
            break;
          }

          // Remember the variable index.
          VariableOperand = i;
          VariableScale = TypeSize;
        }
      }
    }

    // If the GEP had multiple variable indices, punt.
    if (VariableOperand == -2)
      break;

    // A common case is for the GEP to only do a constant offset. In this case,
    // just add it to the disp field and check validity.
    if (VariableOperand == -1) {
      AddrMode.BaseOffs += ConstantOffset;
      if (ConstantOffset == 0 || TLI.isLegalAddressingMode(AddrMode, AccessTy)) {
        // Check to see if we can fold the base pointer in too.
        if (FindMaximalLegalAddressingMode(AddrInst->getOperand(0), AccessTy,
                                           AddrMode, AddrModeInsts, TLI,
                                           Depth+1))
          return true;
      }
      AddrMode.BaseOffs -= ConstantOffset;
    } else {
      // Check that this has no base reg yet. If so, we won't have a place to
      // put the base of the GEP (assuming it is not a null ptr).
      bool SetBaseReg = false;
      if (AddrMode.HasBaseReg) {
        if (!isa<ConstantPointerNull>(AddrInst->getOperand(0)))
          break;
      } else {
        AddrMode.HasBaseReg = true;
        AddrMode.BaseReg = AddrInst->getOperand(0);
        SetBaseReg = true;
      }

      // See if the scale amount is valid for this target.
      AddrMode.BaseOffs += ConstantOffset;
      if (TryMatchingScaledValue(AddrInst->getOperand(VariableOperand),
                                 VariableScale, AccessTy, AddrMode,
                                 AddrModeInsts, TLI, Depth)) {
        if (!SetBaseReg) return true;

        // If this match succeeded, we know that we can form an address with the
        // GepBase as the basereg. See if we can match *more*.
        AddrMode.HasBaseReg = false;
        AddrMode.BaseReg = 0;
        if (FindMaximalLegalAddressingMode(AddrInst->getOperand(0), AccessTy,
                                           AddrMode, AddrModeInsts, TLI,
                                           Depth+1))
          return true;
        // Strange, shouldn't happen. Restore the base reg and succeed the easy
        // way.
        AddrMode.HasBaseReg = true;
        AddrMode.BaseReg = AddrInst->getOperand(0);
        return true;
      }

      // Matching failed; undo the changes we made to the addressing mode.
      AddrMode.BaseOffs -= ConstantOffset;
      if (SetBaseReg) {
        AddrMode.HasBaseReg = false;
        AddrMode.BaseReg = 0;
      }
    }
    break;
  }
  }

  // If we thought this instruction was foldable but it wasn't, remove it from
  // the matched list.
  if (Instruction *I = dyn_cast_or_null<Instruction>(AddrInst)) {
    assert(AddrModeInsts.back() == I && "Stack imbalance");
    AddrModeInsts.pop_back();
  }

  // Worst case, the target should support [reg] addressing modes. :)
  if (!AddrMode.HasBaseReg) {
    AddrMode.HasBaseReg = true;
    // Still check for legality in case the target supports [imm] but not [i+r].
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) {
      AddrMode.BaseReg = Addr;
      return true;
    }
    AddrMode.HasBaseReg = false;
  }

  // If the base register is already taken, see if we can do [r+r].
  if (AddrMode.Scale == 0) {
    AddrMode.Scale = 1;
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) {
      AddrMode.ScaledReg = Addr;
      return true;
    }
    AddrMode.Scale = 0;
  }

  // Couldn't match this value into the addressing mode.
  return false;
}
/// TryMatchingScaledValue - Try adding ScaleReg*Scale to the specified
/// addressing mode. Return true if this addr mode is legal for the target,
/// false if not.
static bool TryMatchingScaledValue(Value *ScaleReg, int64_t Scale,
                                   const Type *AccessTy, ExtAddrMode &AddrMode,
                                   SmallVector<Instruction*, 16> &AddrModeInsts,
                                   const TargetLowering &TLI, unsigned Depth) {
  // If we already have a scale of this value, we can add to it, otherwise, we
  // need an available scale field.
  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
    return false;

  ExtAddrMode InputAddrMode = AddrMode;

  // Add scale to turn X*4+X*3 -> X*7. This could also do things like
  // [A+B + A*7] -> [B+A*8].
  AddrMode.Scale += Scale;
  AddrMode.ScaledReg = ScaleReg;

  if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) {
    // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now
    // to see if ScaleReg is actually X+C. If so, we can turn this into adding
    // X*Scale + C*Scale to addr mode.
    BinaryOperator *BinOp = dyn_cast<BinaryOperator>(ScaleReg);
    if (BinOp && BinOp->getOpcode() == Instruction::Add &&
        isa<ConstantInt>(BinOp->getOperand(1)) &&
        InputAddrMode.ScaledReg == 0) {
      InputAddrMode.Scale = Scale;
      InputAddrMode.ScaledReg = BinOp->getOperand(0);
      InputAddrMode.BaseOffs +=
        cast<ConstantInt>(BinOp->getOperand(1))->getSExtValue()*Scale;
      if (TLI.isLegalAddressingMode(InputAddrMode, AccessTy)) {
        AddrModeInsts.push_back(BinOp);
        AddrMode = InputAddrMode;
      }
    }

    // Otherwise, not (x+c)*scale, just return what we have.
    return true;
  }

  // Otherwise, back this attempt out.
  AddrMode.Scale -= Scale;
  if (AddrMode.Scale == 0) AddrMode.ScaledReg = 0;
  return false;
}
/// IsNonLocalValue - Return true if the specified value is defined in a
/// different basic block than BB.
static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() != BB;
  return false;
}
/// OptimizeLoadStoreInst - Load and Store Instructions often have addressing
/// modes that can do significant amounts of computation. As such, instruction
/// selection will try to get the load or store to do as much computation as
/// possible for the program. The problem is that isel can only see within a
/// single block. As such, we sink as much legal addressing mode stuff into
/// the block as possible.
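///
/// For example (illustrative): if the GEP feeding this load lives in another
/// block, we rebuild its arithmetic (the ptrtoint/mul/add/inttoptr chain
/// named "sunkaddr" below) immediately before the load so that isel can fold
/// the address computation into the load's addressing mode.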
bool CodeGenPrepare::OptimizeLoadStoreInst(Instruction *LdStInst, Value *Addr,
                                           const Type *AccessTy,
                                           DenseMap<Value*,Value*> &SunkAddrs) {
  // Figure out what addressing mode will be built up for this operation.
  SmallVector<Instruction*, 16> AddrModeInsts;
  ExtAddrMode AddrMode;
  bool Success = FindMaximalLegalAddressingMode(Addr, AccessTy, AddrMode,
                                                AddrModeInsts, *TLI, 0);
  // Self-assignment silences the unused-variable warning when asserts are off.
  Success = Success; assert(Success && "Couldn't select *anything*?");

  // Check to see if any of the instructions subsumed by this addr mode are
  // non-local to I's BB.
  bool AnyNonLocal = false;
  for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) {
    if (IsNonLocalValue(AddrModeInsts[i], LdStInst->getParent())) {
      AnyNonLocal = true;
      break;
    }
  }

  // If all the instructions matched are already in this BB, don't do anything.
  if (!AnyNonLocal) {
    DEBUG(cerr << "CGP: Found local addrmode: " << AddrMode << "\n");
    return false;
  }

  // Insert this computation right after this user. Since our caller is
  // scanning from the top of the BB to the bottom, reuse of the expr is
  // guaranteed to happen later.
  BasicBlock::iterator InsertPt = LdStInst;

  // Now that we determined the addressing expression we want to use and know
  // that we have to sink it into this block. Check to see if we have already
  // done this for some other load/store instr in this block. If so, reuse the
  // computation.
  Value *&SunkAddr = SunkAddrs[Addr];
  if (SunkAddr) {
    DEBUG(cerr << "CGP: Reusing nonlocal addrmode: " << AddrMode << "\n");
    if (SunkAddr->getType() != Addr->getType())
      SunkAddr = new BitCastInst(SunkAddr, Addr->getType(), "tmp", InsertPt);
  } else {
    DEBUG(cerr << "CGP: SINKING nonlocal addrmode: " << AddrMode << "\n");
    const Type *IntPtrTy = TLI->getTargetData()->getIntPtrType();

    Value *Result = 0;
    // Start with the scale value.
    if (AddrMode.Scale) {
      Value *V = AddrMode.ScaledReg;
      if (V->getType() == IntPtrTy) {
        // done.
      } else if (isa<PointerType>(V->getType())) {
        V = new PtrToIntInst(V, IntPtrTy, "sunkaddr", InsertPt);
      } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
                 cast<IntegerType>(V->getType())->getBitWidth()) {
        V = new TruncInst(V, IntPtrTy, "sunkaddr", InsertPt);
      } else {
        V = new SExtInst(V, IntPtrTy, "sunkaddr", InsertPt);
      }
      if (AddrMode.Scale != 1)
        V = BinaryOperator::createMul(V, ConstantInt::get(IntPtrTy,
                                                          AddrMode.Scale),
                                      "sunkaddr", InsertPt);
      Result = V;
    }

    // Add in the base register.
    if (AddrMode.BaseReg) {
      Value *V = AddrMode.BaseReg;
      if (V->getType() != IntPtrTy)
        V = new PtrToIntInst(V, IntPtrTy, "sunkaddr", InsertPt);
      if (Result)
        Result = BinaryOperator::createAdd(Result, V, "sunkaddr", InsertPt);
      else
        Result = V;
    }

    // Add in the BaseGV if present.
    if (AddrMode.BaseGV) {
      Value *V = new PtrToIntInst(AddrMode.BaseGV, IntPtrTy, "sunkaddr",
                                  InsertPt);
      if (Result)
        Result = BinaryOperator::createAdd(Result, V, "sunkaddr", InsertPt);
      else
        Result = V;
    }

    // Add in the Base Offset if present.
    if (AddrMode.BaseOffs) {
      Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
      if (Result)
        Result = BinaryOperator::createAdd(Result, V, "sunkaddr", InsertPt);
      else
        Result = V;
    }

    if (Result == 0)
      SunkAddr = Constant::getNullValue(Addr->getType());
    else
      SunkAddr = new IntToPtrInst(Result, Addr->getType(), "sunkaddr", InsertPt);
  }

  LdStInst->replaceUsesOfWith(Addr, SunkAddr);

  if (Addr->use_empty())
    EraseDeadInstructions(Addr);
  return true;
}
// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Split all critical edges where the dest block has a PHI and where the phi
  // has shared immediate operands.
  TerminatorInst *BBTI = BB.getTerminator();
  if (BBTI->getNumSuccessors() > 1) {
    for (unsigned i = 0, e = BBTI->getNumSuccessors(); i != e; ++i)
      if (isa<PHINode>(BBTI->getSuccessor(i)->begin()) &&
          isCriticalEdge(BBTI, i, true))
        SplitEdgeNicely(BBTI, i, this);
  }

  // Keep track of non-local addresses that have been sunk into this block.
  // This allows us to avoid inserting duplicate code for blocks with multiple
  // load/stores of the same address.
  DenseMap<Value*, Value*> SunkAddrs;

  for (BasicBlock::iterator BBI = BB.begin(), E = BB.end(); BBI != E; ) {
    Instruction *I = BBI++;

    if (CastInst *CI = dyn_cast<CastInst>(I)) {
      // If the source of the cast is a constant, then this should have
      // already been constant folded. The only reason NOT to constant fold
      // it is if something (e.g. LSR) was careful to place the constant
      // evaluation in a block other than the one that uses it (e.g. to hoist
      // the address of globals out of a loop). If this is the case, we don't
      // want to forward-subst the cast.
      if (isa<Constant>(CI->getOperand(0)))
        continue;

      if (TLI)
        MadeChange |= OptimizeNoopCopyExpression(CI, *TLI);
    } else if (CmpInst *CI = dyn_cast<CmpInst>(I)) {
      MadeChange |= OptimizeCmpExpression(CI);
    } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      if (TLI)
        MadeChange |= OptimizeLoadStoreInst(I, I->getOperand(0), LI->getType(),
                                            SunkAddrs);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      if (TLI)
        MadeChange |= OptimizeLoadStoreInst(I, SI->getOperand(1),
                                            SI->getOperand(0)->getType(),
                                            SunkAddrs);
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      if (GEPI->hasAllZeroIndices()) {
        /// The GEP operand must be a pointer, so must its result -> BitCast
        Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                          GEPI->getName(), GEPI);
        GEPI->replaceAllUsesWith(NC);
        GEPI->eraseFromParent();
        MadeChange = true;
        BBI = NC;
      }
    } else if (CallInst *CI = dyn_cast<CallInst>(I)) {
      // If we found an inline asm expression, and if the target knows how to
      // lower it to normal LLVM code, do so now.
      if (TLI && isa<InlineAsm>(CI->getCalledValue()))
        if (const TargetAsmInfo *TAI =
              TLI->getTargetMachine().getTargetAsmInfo()) {
          if (TAI->ExpandInlineAsm(CI))
            BBI = BB.begin();  // Lowering changed the block; rescan it.
        }
    }
  }

  return MadeChange;
}