X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FCodeGen%2FPeepholeOptimizer.cpp;h=28f2d2f9e9641df7427daec2e84f7935cc673e2a;hb=8821f3c6b230b92f5be047b1d10905a0ac932658;hp=203f2bffb7bcbb7bcae15629d37c509741383b7d;hpb=3b26eb62946a64409f81c8089a362eb582114342;p=oota-llvm.git diff --git a/lib/CodeGen/PeepholeOptimizer.cpp b/lib/CodeGen/PeepholeOptimizer.cpp index 203f2bffb7b..28f2d2f9e96 100644 --- a/lib/CodeGen/PeepholeOptimizer.cpp +++ b/lib/CodeGen/PeepholeOptimizer.cpp @@ -31,29 +31,54 @@ // same flag that the "cmp" instruction sets and that "bz" uses, then we can // eliminate the "cmp" instruction. // -// - Optimize Bitcast pairs: +// Another instance, in this code: // -// v1 = bitcast v0 -// v2 = bitcast v1 -// = v2 +// sub r1, r3 | sub r1, imm +// cmp r3, r1 or cmp r1, r3 | cmp r1, imm +// bge L1 +// +// If the branch instruction can use flag from "sub", then we can replace +// "sub" with "subs" and eliminate the "cmp" instruction. +// +// - Optimize Loads: +// +// Loads that can be folded into a later instruction. A load is foldable +// if it loads to virtual registers and the virtual register defined has +// a single use. +// +// - Optimize Copies and Bitcast: +// +// Rewrite copies and bitcasts to avoid cross register bank copies +// when possible. +// E.g., Consider the following example, where capital and lower +// letters denote different register file: +// b = copy A <-- cross-bank copy +// C = copy b <-- cross-bank copy // => -// v1 = bitcast v0 -// = v0 +// b = copy A <-- cross-bank copy +// C = copy A <-- same-bank copy // +// E.g., for bitcast: +// b = bitcast A <-- cross-bank copy +// C = bitcast b <-- cross-bank copy +// => +// b = bitcast A <-- cross-bank copy +// C = copy A <-- same-bank copy //===----------------------------------------------------------------------===// #define DEBUG_TYPE "peephole-opt" #include "llvm/CodeGen/Passes.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/SmallSet.h" +#include "llvm/ADT/Statistic.h" #include "llvm/CodeGen/MachineDominators.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetRegisterInfo.h" -#include "llvm/Support/CommandLine.h" -#include "llvm/ADT/DenseMap.h" -#include "llvm/ADT/SmallPtrSet.h" -#include "llvm/ADT/SmallSet.h" -#include "llvm/ADT/Statistic.h" using namespace llvm; // Optimize Extensions @@ -66,9 +91,11 @@ DisablePeephole("disable-peephole", cl::Hidden, cl::init(false), cl::desc("Disable the peephole optimizer")); STATISTIC(NumReuse, "Number of extension results reused"); -STATISTIC(NumBitcasts, "Number of bitcasts eliminated"); STATISTIC(NumCmps, "Number of compares eliminated"); STATISTIC(NumImmFold, "Number of move immediate folded"); +STATISTIC(NumLoadFold, "Number of loads folded"); +STATISTIC(NumSelects, "Number of selects optimized"); +STATISTIC(NumCopiesBitcasts, "Number of copies/bitcasts optimized"); namespace { class PeepholeOptimizer : public MachineFunctionPass { @@ -95,16 +122,18 @@ namespace { } private: - bool OptimizeBitcastInstr(MachineInstr *MI, MachineBasicBlock *MBB); - bool OptimizeCmpInstr(MachineInstr *MI, MachineBasicBlock *MBB); - bool OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB, + bool optimizeCmpInstr(MachineInstr *MI, MachineBasicBlock *MBB); + bool optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB, SmallPtrSet &LocalMIs); + 
bool optimizeSelect(MachineInstr *MI); + bool optimizeCopyOrBitcast(MachineInstr *MI); bool isMoveImmediate(MachineInstr *MI, SmallSet &ImmDefRegs, DenseMap &ImmDefMIs); - bool FoldImmediate(MachineInstr *MI, MachineBasicBlock *MBB, + bool foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB, SmallSet &ImmDefRegs, DenseMap &ImmDefMIs); + bool isLoadFoldable(MachineInstr *MI, unsigned &FoldAsLoadDefReg); }; } @@ -116,7 +145,7 @@ INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree) INITIALIZE_PASS_END(PeepholeOptimizer, "peephole-opts", "Peephole Optimizations", false, false) -/// OptimizeExtInstr - If instruction is a copy-like instruction, i.e. it reads +/// optimizeExtInstr - If instruction is a copy-like instruction, i.e. it reads /// a single register and writes a single register and it does not modify the /// source, and if the source value is preserved as a sub-register of the /// result, then replace all reachable uses of the source with the subreg of the @@ -126,7 +155,7 @@ INITIALIZE_PASS_END(PeepholeOptimizer, "peephole-opts", /// the code. Since this code does not currently share EXTRACTs, just ignore all /// debug uses. bool PeepholeOptimizer:: -OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB, +optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB, SmallPtrSet &LocalMIs) { unsigned SrcReg, DstReg, SubIdx; if (!TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx)) @@ -136,16 +165,30 @@ OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB, TargetRegisterInfo::isPhysicalRegister(SrcReg)) return false; - MachineRegisterInfo::use_nodbg_iterator UI = MRI->use_nodbg_begin(SrcReg); - if (++UI == MRI->use_nodbg_end()) + if (MRI->hasOneNonDBGUse(SrcReg)) // No other uses. return false; + // Ensure DstReg can get a register class that actually supports + // sub-registers. Don't change the class until we commit. + const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg); + DstRC = TM->getRegisterInfo()->getSubClassWithSubReg(DstRC, SubIdx); + if (!DstRC) + return false; + + // The ext instr may be operating on a sub-register of SrcReg as well. + // PPC::EXTSW is a 32 -> 64-bit sign extension, but it reads a 64-bit + // register. + // If UseSrcSubIdx is Set, SubIdx also applies to SrcReg, and only uses of + // SrcReg:SubIdx should be replaced. + bool UseSrcSubIdx = TM->getRegisterInfo()-> + getSubClassWithSubReg(MRI->getRegClass(SrcReg), SubIdx) != 0; + // The source has other uses. See if we can replace the other uses with use of // the result of the extension. SmallPtrSet ReachedBBs; - UI = MRI->use_nodbg_begin(DstReg); - for (MachineRegisterInfo::use_nodbg_iterator UE = MRI->use_nodbg_end(); + for (MachineRegisterInfo::use_nodbg_iterator + UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end(); UI != UE; ++UI) ReachedBBs.insert(UI->getParent()); @@ -156,8 +199,8 @@ OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB, SmallVector ExtendedUses; bool ExtendLife = true; - UI = MRI->use_nodbg_begin(SrcReg); - for (MachineRegisterInfo::use_nodbg_iterator UE = MRI->use_nodbg_end(); + for (MachineRegisterInfo::use_nodbg_iterator + UI = MRI->use_nodbg_begin(SrcReg), UE = MRI->use_nodbg_end(); UI != UE; ++UI) { MachineOperand &UseMO = UI.getOperand(); MachineInstr *UseMI = &*UI; @@ -169,6 +212,10 @@ OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB, continue; } + // Only accept uses of SrcReg:SubIdx. 
+ if (UseSrcSubIdx && UseMO.getSubReg() != SubIdx) + continue; + // It's an error to translate this: // // %reg1025 = %reg1024 @@ -223,9 +270,9 @@ OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB, // Look for PHI uses of the extended result, we don't want to extend the // liveness of a PHI input. It breaks all kinds of assumptions down // stream. A PHI use is expected to be the kill of its source values. - UI = MRI->use_nodbg_begin(DstReg); for (MachineRegisterInfo::use_nodbg_iterator - UE = MRI->use_nodbg_end(); UI != UE; ++UI) + UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end(); + UI != UE; ++UI) if (UI->isPHI()) PHIBBs.insert(UI->getParent()); @@ -237,11 +284,21 @@ OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB, if (PHIBBs.count(UseMBB)) continue; + // About to add uses of DstReg, clear DstReg's kill flags. + if (!Changed) { + MRI->clearKillFlags(DstReg); + MRI->constrainRegClass(DstReg, DstRC); + } + unsigned NewVR = MRI->createVirtualRegister(RC); - BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(), - TII->get(TargetOpcode::COPY), NewVR) + MachineInstr *Copy = BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(), + TII->get(TargetOpcode::COPY), NewVR) .addReg(DstReg, 0, SubIdx); - + // SubIdx applies to both SrcReg and DstReg when UseSrcSubIdx is set. + if (UseSrcSubIdx) { + Copy->getOperand(0).setSubReg(SubIdx); + Copy->getOperand(0).setIsUndef(); + } UseMO->setReg(NewVR); ++NumReuse; Changed = true; @@ -251,98 +308,212 @@ OptimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB, return Changed; } -/// OptimizeBitcastInstr - If the instruction is a bitcast instruction A that -/// cannot be optimized away during isel (e.g. ARM::VMOVSR, which bitcast -/// a value cross register classes), and the source is defined by another -/// bitcast instruction B. And if the register class of source of B matches -/// the register class of instruction A, then it is legal to replace all uses -/// of the def of A with source of B. e.g. -/// %vreg0 = VMOVSR %vreg1 -/// %vreg3 = VMOVRS %vreg0 -/// Replace all uses of vreg3 with vreg1. - -bool PeepholeOptimizer::OptimizeBitcastInstr(MachineInstr *MI, - MachineBasicBlock *MBB) { - unsigned NumDefs = MI->getDesc().getNumDefs(); - unsigned NumSrcs = MI->getDesc().getNumOperands() - NumDefs; - if (NumDefs != 1) +/// optimizeCmpInstr - If the instruction is a compare and the previous +/// instruction it's comparing against all ready sets (or could be modified to +/// set) the same flag as the compare, then we can remove the comparison and use +/// the flag from the previous instruction. +bool PeepholeOptimizer::optimizeCmpInstr(MachineInstr *MI, + MachineBasicBlock *MBB) { + // If this instruction is a comparison against zero and isn't comparing a + // physical register, we can try to optimize it. + unsigned SrcReg, SrcReg2; + int CmpMask, CmpValue; + if (!TII->analyzeCompare(MI, SrcReg, SrcReg2, CmpMask, CmpValue) || + TargetRegisterInfo::isPhysicalRegister(SrcReg) || + (SrcReg2 != 0 && TargetRegisterInfo::isPhysicalRegister(SrcReg2))) + return false; + + // Attempt to optimize the comparison instruction. + if (TII->optimizeCompareInstr(MI, SrcReg, SrcReg2, CmpMask, CmpValue, MRI)) { + ++NumCmps; + return true; + } + + return false; +} + +/// Optimize a select instruction. 
+bool PeepholeOptimizer::optimizeSelect(MachineInstr *MI) { + unsigned TrueOp = 0; + unsigned FalseOp = 0; + bool Optimizable = false; + SmallVector Cond; + if (TII->analyzeSelect(MI, Cond, TrueOp, FalseOp, Optimizable)) return false; + if (!Optimizable) + return false; + if (!TII->optimizeSelect(MI)) + return false; + MI->eraseFromParent(); + ++NumSelects; + return true; +} - unsigned Def = 0; - unsigned Src = 0; - for (unsigned i = 0, e = NumDefs + NumSrcs; i != e; ++i) { - const MachineOperand &MO = MI->getOperand(i); - if (!MO.isReg()) - continue; - unsigned Reg = MO.getReg(); - if (!Reg) +/// \brief Check if the registers defined by the pair (RegisterClass, SubReg) +/// share the same register file. +static bool shareSameRegisterFile(const TargetRegisterInfo &TRI, + const TargetRegisterClass *DefRC, + unsigned DefSubReg, + const TargetRegisterClass *SrcRC, + unsigned SrcSubReg) { + // Same register class. + if (DefRC == SrcRC) + return true; + + // Both operands are sub registers. Check if they share a register class. + unsigned SrcIdx, DefIdx; + if (SrcSubReg && DefSubReg) + return TRI.getCommonSuperRegClass(SrcRC, SrcSubReg, DefRC, DefSubReg, + SrcIdx, DefIdx) != NULL; + // At most one of the register is a sub register, make it Src to avoid + // duplicating the test. + if (!SrcSubReg) { + std::swap(DefSubReg, SrcSubReg); + std::swap(DefRC, SrcRC); + } + + // One of the register is a sub register, check if we can get a superclass. + if (SrcSubReg) + return TRI.getMatchingSuperRegClass(SrcRC, DefRC, SrcSubReg) != NULL; + // Plain copy. + return TRI.getCommonSubClass(DefRC, SrcRC) != NULL; +} + +/// \brief Get the index of the definition and source for \p Copy +/// instruction. +/// \pre Copy.isCopy() or Copy.isBitcast(). +/// \return True if the Copy instruction has only one register source +/// and one register definition. Otherwise, \p DefIdx and \p SrcIdx +/// are invalid. +static bool getCopyOrBitcastDefUseIdx(const MachineInstr &Copy, + unsigned &DefIdx, unsigned &SrcIdx) { + assert((Copy.isCopy() || Copy.isBitcast()) && "Wrong operation type."); + if (Copy.isCopy()) { + // Copy instruction are supposed to be: Def = Src. + if (Copy.getDesc().getNumOperands() != 2) + return false; + DefIdx = 0; + SrcIdx = 1; + assert(Copy.getOperand(DefIdx).isDef() && "Use comes before def!"); + return true; + } + // Bitcast case. + // Bitcasts with more than one def are not supported. + if (Copy.getDesc().getNumDefs() != 1) + return false; + // Initialize SrcIdx to an undefined operand. + SrcIdx = Copy.getDesc().getNumOperands(); + for (unsigned OpIdx = 0, EndOpIdx = SrcIdx; OpIdx != EndOpIdx; ++OpIdx) { + const MachineOperand &MO = Copy.getOperand(OpIdx); + if (!MO.isReg() || !MO.getReg()) continue; if (MO.isDef()) - Def = Reg; - else if (Src) + DefIdx = OpIdx; + else if (SrcIdx != EndOpIdx) // Multiple sources? return false; - else - Src = Reg; + SrcIdx = OpIdx; } + return true; +} - assert(Def && Src && "Malformed bitcast instruction!"); - - MachineInstr *DefMI = MRI->getVRegDef(Src); - if (!DefMI || !DefMI->isBitcast()) +/// \brief Optimize a copy or bitcast instruction to avoid cross +/// register bank copy. The optimization looks through a chain of +/// copies and try to find a source that has a compatible register +/// class. +/// Two register classes are considered to be compatible if they share +/// the same register bank. +/// New copies issued by this optimization are register allocator +/// friendly. 
This optimization does not remove any copy as it may +/// overconstraint the register allocator, but replaces some when +/// possible. +/// \pre \p MI is a Copy (MI->isCopy() is true) +/// \return True, when \p MI has been optimized. In that case, \p MI has +/// been removed from its parent. +bool PeepholeOptimizer::optimizeCopyOrBitcast(MachineInstr *MI) { + unsigned DefIdx, SrcIdx; + if (!MI || !getCopyOrBitcastDefUseIdx(*MI, DefIdx, SrcIdx)) return false; - unsigned SrcSrc = 0; - NumDefs = DefMI->getDesc().getNumDefs(); - NumSrcs = DefMI->getDesc().getNumOperands() - NumDefs; - if (NumDefs != 1) + const MachineOperand &MODef = MI->getOperand(DefIdx); + assert(MODef.isReg() && "Copies must be between registers."); + unsigned Def = MODef.getReg(); + + if (TargetRegisterInfo::isPhysicalRegister(Def)) return false; - for (unsigned i = 0, e = NumDefs + NumSrcs; i != e; ++i) { - const MachineOperand &MO = DefMI->getOperand(i); - if (!MO.isReg() || MO.isDef()) - continue; - unsigned Reg = MO.getReg(); - if (!Reg) - continue; - if (!MO.isDef()) { - if (SrcSrc) - // Multiple sources? - return false; - else - SrcSrc = Reg; - } - } - if (MRI->getRegClass(SrcSrc) != MRI->getRegClass(Def)) + const TargetRegisterClass *DefRC = MRI->getRegClass(Def); + unsigned DefSubReg = MODef.getSubReg(); + + unsigned Src; + unsigned SrcSubReg; + bool ShouldRewrite = false; + MachineInstr *Copy = MI; + const TargetRegisterInfo &TRI = *TM->getRegisterInfo(); + + // Follow the chain of copies until we reach the top or find a + // more suitable source. + do { + unsigned CopyDefIdx, CopySrcIdx; + if (!getCopyOrBitcastDefUseIdx(*Copy, CopyDefIdx, CopySrcIdx)) + break; + const MachineOperand &MO = Copy->getOperand(CopySrcIdx); + assert(MO.isReg() && "Copies must be between registers."); + Src = MO.getReg(); + + if (TargetRegisterInfo::isPhysicalRegister(Src)) + break; + + const TargetRegisterClass *SrcRC = MRI->getRegClass(Src); + SrcSubReg = MO.getSubReg(); + + // If this source does not incur a cross register bank copy, use it. + ShouldRewrite = shareSameRegisterFile(TRI, DefRC, DefSubReg, SrcRC, + SrcSubReg); + // Follow the chain of copies: get the definition of Src. + Copy = MRI->getVRegDef(Src); + } while (!ShouldRewrite && Copy && (Copy->isCopy() || Copy->isBitcast())); + + // If we did not find a more suitable source, there is nothing to optimize. + if (!ShouldRewrite || Src == MI->getOperand(SrcIdx).getReg()) return false; - MRI->replaceRegWith(Def, SrcSrc); - MRI->clearKillFlags(SrcSrc); + // Rewrite the copy to avoid a cross register bank penalty. + unsigned NewVR = TargetRegisterInfo::isPhysicalRegister(Def) ? Def : + MRI->createVirtualRegister(DefRC); + MachineInstr *NewCopy = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), + TII->get(TargetOpcode::COPY), NewVR) + .addReg(Src, 0, SrcSubReg); + NewCopy->getOperand(0).setSubReg(DefSubReg); + + MRI->replaceRegWith(Def, NewVR); + MRI->clearKillFlags(NewVR); MI->eraseFromParent(); - ++NumBitcasts; + ++NumCopiesBitcasts; return true; } -/// OptimizeCmpInstr - If the instruction is a compare and the previous -/// instruction it's comparing against all ready sets (or could be modified to -/// set) the same flag as the compare, then we can remove the comparison and use -/// the flag from the previous instruction. -bool PeepholeOptimizer::OptimizeCmpInstr(MachineInstr *MI, - MachineBasicBlock *MBB) { - // If this instruction is a comparison against zero and isn't comparing a - // physical register, we can try to optimize it. 
- unsigned SrcReg; - int CmpMask, CmpValue; - if (!TII->AnalyzeCompare(MI, SrcReg, CmpMask, CmpValue) || - TargetRegisterInfo::isPhysicalRegister(SrcReg)) +/// isLoadFoldable - Check whether MI is a candidate for folding into a later +/// instruction. We only fold loads to virtual registers and the virtual +/// register defined has a single use. +bool PeepholeOptimizer::isLoadFoldable(MachineInstr *MI, + unsigned &FoldAsLoadDefReg) { + if (!MI->canFoldAsLoad() || !MI->mayLoad()) + return false; + const MCInstrDesc &MCID = MI->getDesc(); + if (MCID.getNumDefs() != 1) return false; - // Attempt to optimize the comparison instruction. - if (TII->OptimizeCompareInstr(MI, SrcReg, CmpMask, CmpValue, MRI)) { - ++NumCmps; + unsigned Reg = MI->getOperand(0).getReg(); + // To reduce compilation time, we check MRI->hasOneUse when inserting + // loads. It should be checked when processing uses of the load, since + // uses can be removed during peephole. + if (!MI->getOperand(0).getSubReg() && + TargetRegisterInfo::isVirtualRegister(Reg) && + MRI->hasOneUse(Reg)) { + FoldAsLoadDefReg = Reg; return true; } - return false; } @@ -364,10 +535,10 @@ bool PeepholeOptimizer::isMoveImmediate(MachineInstr *MI, return false; } -/// FoldImmediate - Try folding register operands that are defined by move +/// foldImmediate - Try folding register operands that are defined by move /// immediate instructions, i.e. a trivial constant folding optimization, if /// and only if the def and use are in the same BB. -bool PeepholeOptimizer::FoldImmediate(MachineInstr *MI, MachineBasicBlock *MBB, +bool PeepholeOptimizer::foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB, SmallSet &ImmDefRegs, DenseMap &ImmDefMIs) { for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) { @@ -390,6 +561,9 @@ bool PeepholeOptimizer::FoldImmediate(MachineInstr *MI, MachineBasicBlock *MBB, } bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) { + DEBUG(dbgs() << "********** PEEPHOLE OPTIMIZER **********\n"); + DEBUG(dbgs() << "********** Function: " << MF.getName() << '\n'); + if (DisablePeephole) return false; @@ -403,6 +577,7 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) { SmallPtrSet LocalMIs; SmallSet ImmDefRegs; DenseMap ImmDefMIs; + unsigned FoldAsLoadDefReg; for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) { MachineBasicBlock *MBB = &*I; @@ -410,50 +585,73 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) { LocalMIs.clear(); ImmDefRegs.clear(); ImmDefMIs.clear(); + FoldAsLoadDefReg = 0; - bool First = true; - MachineBasicBlock::iterator PMII; for (MachineBasicBlock::iterator MII = I->begin(), MIE = I->end(); MII != MIE; ) { MachineInstr *MI = &*MII; + // We may be erasing MI below, increment MII now. + ++MII; LocalMIs.insert(MI); + // If there exists an instruction which belongs to the following + // categories, we will discard the load candidate. if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() || MI->isKill() || MI->isInlineAsm() || MI->isDebugValue() || MI->hasUnmodeledSideEffects()) { - ++MII; + FoldAsLoadDefReg = 0; continue; } - - if (MI->isBitcast()) { - if (OptimizeBitcastInstr(MI, MBB)) { - // MI is deleted. - LocalMIs.erase(MI); - Changed = true; - MII = First ? I->begin() : llvm::next(PMII); - continue; - } - } else if (MI->isCompare()) { - if (OptimizeCmpInstr(MI, MBB)) { - // MI is deleted. - LocalMIs.erase(MI); - Changed = true; - MII = First ? 
I->begin() : llvm::next(PMII); - continue; - } + if (MI->mayStore() || MI->isCall()) + FoldAsLoadDefReg = 0; + + if (((MI->isBitcast() || MI->isCopy()) && optimizeCopyOrBitcast(MI)) || + (MI->isCompare() && optimizeCmpInstr(MI, MBB)) || + (MI->isSelect() && optimizeSelect(MI))) { + // MI is deleted. + LocalMIs.erase(MI); + Changed = true; + continue; } if (isMoveImmediate(MI, ImmDefRegs, ImmDefMIs)) { SeenMoveImm = true; } else { - Changed |= OptimizeExtInstr(MI, MBB, LocalMIs); + Changed |= optimizeExtInstr(MI, MBB, LocalMIs); + // optimizeExtInstr might have created new instructions after MI + // and before the already incremented MII. Adjust MII so that the + // next iteration sees the new instructions. + MII = MI; + ++MII; if (SeenMoveImm) - Changed |= FoldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs); + Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs); } - First = false; - PMII = MII; - ++MII; + // Check whether MI is a load candidate for folding into a later + // instruction. If MI is not a candidate, check whether we can fold an + // earlier load into MI. + if (!isLoadFoldable(MI, FoldAsLoadDefReg) && FoldAsLoadDefReg) { + // We need to fold load after optimizeCmpInstr, since optimizeCmpInstr + // can enable folding by converting SUB to CMP. + MachineInstr *DefMI = 0; + MachineInstr *FoldMI = TII->optimizeLoadInstr(MI, MRI, + FoldAsLoadDefReg, DefMI); + if (FoldMI) { + // Update LocalMIs since we replaced MI with FoldMI and deleted DefMI. + DEBUG(dbgs() << "Replacing: " << *MI); + DEBUG(dbgs() << " With: " << *FoldMI); + LocalMIs.erase(MI); + LocalMIs.erase(DefMI); + LocalMIs.insert(FoldMI); + MI->eraseFromParent(); + DefMI->eraseFromParent(); + ++NumLoadFold; + + // MI is replaced with FoldMI. + Changed = true; + continue; + } + } } }
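
As a rough standalone illustration of the redundant-compare elimination described in this file's header comment: the sketch below is plain C++, not LLVM code; the Inst struct, the opcode strings, and eliminateRedundantCmp are invented for illustration. It switches a "sub" to a flag-setting "subs" and erases the following compare against zero, but only looks at the immediately preceding instruction, whereas the pass itself delegates the general analysis to the target hooks analyzeCompare and optimizeCompareInstr.

#include <cstddef>
#include <string>
#include <vector>

struct Inst {
  std::string Op;  // "sub", "subs", "cmp", "bge", ...
  int Def;         // register written by the instruction, -1 if none
  int Src;         // register read by a compare, -1 otherwise
  int Imm;         // immediate operand of a compare
};

// Erase "cmp rX, 0" when the instruction just before it defines rX and has a
// flag-setting twin (sub -> subs); returns true if the block changed.
static bool eliminateRedundantCmp(std::vector<Inst> &Block) {
  bool Changed = false;
  for (std::size_t i = 1; i < Block.size(); ++i) {
    Inst &Cmp = Block[i];
    Inst &Prev = Block[i - 1];
    if (Cmp.Op == "cmp" && Cmp.Imm == 0 && Prev.Def == Cmp.Src &&
        Prev.Op == "sub") {
      Prev.Op = "subs";  // reuse the flags set by the sub
      Block.erase(Block.begin() + static_cast<std::ptrdiff_t>(i));
      Changed = true;
      --i;  // re-examine the instruction that now follows the subs
    }
  }
  return Changed;
}

int main() {
  // sub r1, r3 ; cmp r1, 0 ; bge L1   ==>   subs r1, r3 ; bge L1
  std::vector<Inst> Block = {{"sub", 1, -1, 0}, {"cmp", -1, 1, 0},
                             {"bge", -1, -1, 0}};
  return (eliminateRedundantCmp(Block) && Block.size() == 2) ? 0 : 1;
}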
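
Similarly, a minimal model of the cross-register-bank copy rewrite performed by optimizeCopyOrBitcast, with a made-up VirtReg/Bank representation standing in for TargetRegisterClass and shareSameRegisterFile: follow the chain of copies feeding a definition until a source in the same register bank is found, then copy from that source directly. The pass does the same walk over COPY/bitcast machine instructions and never removes the original copies, it only issues a cheaper one.

#include <cassert>

struct VirtReg {
  int Bank;                   // register file the value lives in
  const VirtReg *CopiedFrom;  // source of the defining copy, or nullptr
};

// Walk the chain of copies feeding Def and return the first source that
// already lives in Def's bank; the caller would then rewrite Def to copy
// from that source, turning a cross-bank copy into a same-bank one.
static const VirtReg *findSameBankSource(const VirtReg &Def) {
  for (const VirtReg *Src = Def.CopiedFrom; Src; Src = Src->CopiedFrom)
    if (Src->Bank == Def.Bank)
      return Src;
  return nullptr;  // no better source: keep the original copy
}

int main() {
  // b = copy A (cross-bank), C = copy b (cross-bank)  =>  C = copy A
  VirtReg A{0, nullptr};
  VirtReg b{1, &A};
  VirtReg C{0, &b};
  assert(findSameBankSource(C) == &A);
  return 0;
}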