//===-- PeepholeOptimizer.cpp - Peephole Optimizations -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Perform peephole optimizations on the machine code:
//
// - Optimize Extensions
//
//     Optimization of sign / zero extension instructions. It may be extended to
//     handle other instructions with similar properties.
//
//     On some targets, some instructions, e.g. X86 sign / zero extension, may
//     leave the source value in the lower part of the result. This optimization
//     will replace some uses of the pre-extension value with uses of the
//     sub-register of the result.
//
// - Optimize Comparisons
//
//     Optimization of comparison instructions. For instance, in this code:
//
//       sub r1, 1
//       cmp r1, 0
//       bz  L1
//
//     If the "sub" instruction already sets (or could be modified to set) the
//     same flag that the "cmp" instruction sets and that "bz" uses, then we can
//     eliminate the "cmp" instruction.
//
//     Another instance, in this code:
//
//       sub r1, r3 | sub r1, imm
//       cmp r3, r1 or cmp r1, r3 | cmp r1, imm
//       bge L1
//
//     If the branch instruction can use the flag from "sub", then we can replace
//     "sub" with "subs" and eliminate the "cmp" instruction.
//
// - Optimize Bitcast pairs:
//
//       v1 = bitcast v0
//       v2 = bitcast v1
//          = v2
//     =>
//       v1 = bitcast v0
//          = v0
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "peephole-opt"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

// Optimize Extensions
static cl::opt<bool>
Aggressive("aggressive-ext-opt", cl::Hidden,
           cl::desc("Aggressive extension optimization"));

static cl::opt<bool>
DisablePeephole("disable-peephole", cl::Hidden, cl::init(false),
                cl::desc("Disable the peephole optimizer"));

STATISTIC(NumReuse,    "Number of extension results reused");
STATISTIC(NumBitcasts, "Number of bitcasts eliminated");
STATISTIC(NumCmps,     "Number of compares eliminated");
STATISTIC(NumImmFold,  "Number of move immediates folded");

namespace {
  class PeepholeOptimizer : public MachineFunctionPass {
    const TargetMachine   *TM;
    const TargetInstrInfo *TII;
    MachineRegisterInfo   *MRI;
    MachineDominatorTree  *DT;  // Machine dominator tree

  public:
    static char ID; // Pass identification
    PeepholeOptimizer() : MachineFunctionPass(ID) {
      initializePeepholeOptimizerPass(*PassRegistry::getPassRegistry());
    }

    virtual bool runOnMachineFunction(MachineFunction &MF);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
      if (Aggressive) {
        AU.addRequired<MachineDominatorTree>();
        AU.addPreserved<MachineDominatorTree>();
      }
    }

  private:
    bool optimizeBitcastInstr(MachineInstr *MI, MachineBasicBlock *MBB);
    bool optimizeCmpInstr(MachineInstr *MI, MachineBasicBlock *MBB);
    bool optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                          SmallPtrSet<MachineInstr*, 8> &LocalMIs);
    bool isMoveImmediate(MachineInstr *MI,
                         SmallSet<unsigned, 4> &ImmDefRegs,
                         DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
    bool foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
                       SmallSet<unsigned, 4> &ImmDefRegs,
                       DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
  };
}

char PeepholeOptimizer::ID = 0;
char &llvm::PeepholeOptimizerID = PeepholeOptimizer::ID;
INITIALIZE_PASS_BEGIN(PeepholeOptimizer, "peephole-opts",
                      "Peephole Optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(PeepholeOptimizer, "peephole-opts",
                    "Peephole Optimizations", false, false)

/// optimizeExtInstr - If the instruction is a copy-like instruction, i.e. it
/// reads a single register and writes a single register and it does not modify
/// the source, and if the source value is preserved as a sub-register of the
/// result, then replace all reachable uses of the source with the subreg of the
/// result.
///
/// Do not generate an EXTRACT that is used only in a debug use, as this changes
/// the code. Since this code does not currently share EXTRACTs, just ignore all
/// debug uses.
bool PeepholeOptimizer::
optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                 SmallPtrSet<MachineInstr*, 8> &LocalMIs) {
  unsigned SrcReg, DstReg, SubIdx;
  if (!TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx))
    return false;

  if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
      TargetRegisterInfo::isPhysicalRegister(SrcReg))
    return false;

  if (MRI->hasOneNonDBGUse(SrcReg))
    // No other uses.
    return false;

  // Ensure DstReg can get a register class that actually supports
  // sub-registers. Don't change the class until we commit.
  const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
  DstRC = TM->getRegisterInfo()->getSubClassWithSubReg(DstRC, SubIdx);
  if (!DstRC)
    return false;

  // The source has other uses. See if we can replace the other uses with use of
  // the result of the extension.
  SmallPtrSet<MachineBasicBlock*, 4> ReachedBBs;
  for (MachineRegisterInfo::use_nodbg_iterator
       UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end();
       UI != UE; ++UI)
    ReachedBBs.insert(UI->getParent());

  // Uses that are in the same BB as the extension or in a block already reached
  // by the result of the extension.
  SmallVector<MachineOperand*, 8> Uses;

  // Uses that the result of the instruction can reach.
  SmallVector<MachineOperand*, 8> ExtendedUses;

  bool ExtendLife = true;
  for (MachineRegisterInfo::use_nodbg_iterator
       UI = MRI->use_nodbg_begin(SrcReg), UE = MRI->use_nodbg_end();
       UI != UE; ++UI) {
    MachineOperand &UseMO = UI.getOperand();
    MachineInstr *UseMI = &*UI;
    if (UseMI == MI)
      continue;

    if (UseMI->isPHI()) {
      ExtendLife = false;
      continue;
    }

    // It's an error to translate this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1026 = SUBREG_TO_REG 0, %reg1024, 4
    //
    // into this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1027 = COPY %reg1025:4
    //    %reg1026 = SUBREG_TO_REG 0, %reg1027, 4
    //
    // The problem here is that SUBREG_TO_REG is there to assert that an
    // implicit zext occurs. It doesn't insert a zext instruction. If we allow
    // the COPY here, it will give us the value after the <sext>, not the
    // original value of %reg1024 before <sext>.
    if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
      continue;

    MachineBasicBlock *UseMBB = UseMI->getParent();
    if (UseMBB == MBB) {
      // Local uses that come after the extension.
      if (!LocalMIs.count(UseMI))
        Uses.push_back(&UseMO);
    } else if (ReachedBBs.count(UseMBB)) {
      // Non-local uses where the result of the extension is used. Always
      // replace these unless it's a PHI.
      Uses.push_back(&UseMO);
    } else if (Aggressive && DT->dominates(MBB, UseMBB)) {
      // We may want to extend the live range of the extension result in order
      // to replace these uses.
      ExtendedUses.push_back(&UseMO);
    } else {
      // Both will be live out of the def MBB anyway. Don't extend the live
      // range of the extension result.
      ExtendLife = false;
      break;
    }
  }

  if (ExtendLife && !ExtendedUses.empty())
    // Extend the liveness of the extension result.
    std::copy(ExtendedUses.begin(), ExtendedUses.end(),
              std::back_inserter(Uses));

  // Now replace all uses.
  bool Changed = false;

  SmallPtrSet<MachineBasicBlock*, 4> PHIBBs;

  // Look for PHI uses of the extended result; we don't want to extend the
  // liveness of a PHI input. It breaks all kinds of assumptions downstream.
  // A PHI use is expected to be the kill of its source values.
  for (MachineRegisterInfo::use_nodbg_iterator
       UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end();
       UI != UE; ++UI)
    if (UI->isPHI())
      PHIBBs.insert(UI->getParent());

  const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
  for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
    MachineOperand *UseMO = Uses[i];
    MachineInstr *UseMI = UseMO->getParent();
    MachineBasicBlock *UseMBB = UseMI->getParent();
    if (PHIBBs.count(UseMBB))
      continue;

    // About to add uses of DstReg, clear DstReg's kill flags.
    if (!Changed) {
      MRI->clearKillFlags(DstReg);
      MRI->constrainRegClass(DstReg, DstRC);
    }

    unsigned NewVR = MRI->createVirtualRegister(RC);
    BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
            TII->get(TargetOpcode::COPY), NewVR)
      .addReg(DstReg, 0, SubIdx);

    UseMO->setReg(NewVR);
    ++NumReuse;
    Changed = true;
  }

  return Changed;
}

/// optimizeBitcastInstr - If the instruction is a bitcast instruction A that
/// cannot be optimized away during isel (e.g. ARM::VMOVSR, which bitcasts
/// a value across register classes), and the source is defined by another
/// bitcast instruction B, and if the register class of the source of B matches
/// the register class of the def of A, then it is legal to replace all uses
/// of the def of A with the source of B. e.g.
///
///   %vreg0<def> = VMOVSR %vreg1
///   %vreg3<def> = VMOVRS %vreg0
///
/// Replace all uses of vreg3 with vreg1.
bool PeepholeOptimizer::optimizeBitcastInstr(MachineInstr *MI,
                                             MachineBasicBlock *MBB) {
  unsigned NumDefs = MI->getDesc().getNumDefs();
  unsigned NumSrcs = MI->getDesc().getNumOperands() - NumDefs;
  if (NumDefs != 1)
    return false;

  // Find the single register def and the single register source of A.
  unsigned Def = 0;
  unsigned Src = 0;
  for (unsigned i = 0, e = NumDefs + NumSrcs; i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;
    if (MO.isDef())
      Def = Reg;
    else if (Src)
      // Multiple sources?
      return false;
    else
      Src = Reg;
  }

  assert(Def && Src && "Malformed bitcast instruction!");

  // The source must itself be defined by another bitcast, B.
  MachineInstr *DefMI = MRI->getVRegDef(Src);
  if (!DefMI || !DefMI->isBitcast())
    return false;

  // Find the single register source of B.
  unsigned SrcSrc = 0;
  NumDefs = DefMI->getDesc().getNumDefs();
  NumSrcs = DefMI->getDesc().getNumOperands() - NumDefs;
  if (NumDefs != 1)
    return false;
  for (unsigned i = 0, e = NumDefs + NumSrcs; i != e; ++i) {
    const MachineOperand &MO = DefMI->getOperand(i);
    if (!MO.isReg() || MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;
    if (SrcSrc)
      // Multiple sources?
      return false;
    SrcSrc = Reg;
  }

  if (MRI->getRegClass(SrcSrc) != MRI->getRegClass(Def))
    return false;

  MRI->replaceRegWith(Def, SrcSrc);
  MRI->clearKillFlags(SrcSrc);
  MI->eraseFromParent();
  ++NumBitcasts;
  return true;
}

/// optimizeCmpInstr - If the instruction is a compare and the previous
/// instruction that defines the value being compared already sets (or could be
/// modified to set) the same flag as the compare, then we can remove the
/// comparison and use the flag from the previous instruction.
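///
/// For example (an illustrative, ARM-flavored sketch; whether the compare can
/// actually be removed is decided by the target's OptimizeCompareInstr hook):
///
///   subs r0, r1, #1   @ already sets the condition flags
///   cmp  r0, #0       @ redundant; can be eliminated
///   beq  .LBB0_1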
bool PeepholeOptimizer::optimizeCmpInstr(MachineInstr *MI,
                                         MachineBasicBlock *MBB) {
  // If this instruction is a comparison against zero and isn't comparing a
  // physical register, we can try to optimize it.
  unsigned SrcReg;
  int CmpMask, CmpValue;
  if (!TII->AnalyzeCompare(MI, SrcReg, CmpMask, CmpValue) ||
      TargetRegisterInfo::isPhysicalRegister(SrcReg))
    return false;

  // Attempt to optimize the comparison instruction.
  if (TII->OptimizeCompareInstr(MI, SrcReg, CmpMask, CmpValue, MRI)) {
    ++NumCmps;
    return true;
  }

  return false;
}

bool PeepholeOptimizer::isMoveImmediate(MachineInstr *MI,
                                        SmallSet<unsigned, 4> &ImmDefRegs,
                                 DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isMoveImmediate())
    return false;
  if (MCID.getNumDefs() != 1)
    return false;
  unsigned Reg = MI->getOperand(0).getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg)) {
    ImmDefMIs.insert(std::make_pair(Reg, MI));
    ImmDefRegs.insert(Reg);
    return true;
  }

  return false;
}

/// foldImmediate - Try folding register operands that are defined by move
/// immediate instructions, i.e. a trivial constant folding optimization, if
/// and only if the def and use are in the same BB.
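///
/// For example (an illustrative sketch; the opcodes below are made-up
/// placeholders, and the actual rewrite is delegated to the target's
/// FoldImmediate hook):
///
///   %vreg1 = MOVimm 42
///   %vreg2 = ADDrr %vreg0, %vreg1
/// =>
///   %vreg2 = ADDri %vreg0, 42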
bool PeepholeOptimizer::foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
                                      SmallSet<unsigned, 4> &ImmDefRegs,
                                 DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
  for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (ImmDefRegs.count(Reg) == 0)
      continue;
    DenseMap<unsigned, MachineInstr*>::iterator II = ImmDefMIs.find(Reg);
    assert(II != ImmDefMIs.end());
    if (TII->FoldImmediate(MI, II->second, Reg, MRI)) {
      ++NumImmFold;
      return true;
    }
  }
  return false;
}

bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (DisablePeephole)
    return false;

  TM  = &MF.getTarget();
  TII = TM->getInstrInfo();
  MRI = &MF.getRegInfo();
  DT  = Aggressive ? &getAnalysis<MachineDominatorTree>() : 0;

  bool Changed = false;

  SmallPtrSet<MachineInstr*, 8> LocalMIs;
  SmallSet<unsigned, 4> ImmDefRegs;
  DenseMap<unsigned, MachineInstr*> ImmDefMIs;
  for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
    MachineBasicBlock *MBB = &*I;

    bool SeenMoveImm = false;
    LocalMIs.clear();
    ImmDefRegs.clear();
    ImmDefMIs.clear();

    bool First = true;
    MachineBasicBlock::iterator PMII;
    for (MachineBasicBlock::iterator
           MII = I->begin(), MIE = I->end(); MII != MIE; ) {
      MachineInstr *MI = &*MII;
      LocalMIs.insert(MI);

      if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
          MI->isKill() || MI->isInlineAsm() || MI->isDebugValue() ||
          MI->hasUnmodeledSideEffects()) {
        ++MII;
        continue;
      }

      if (MI->isBitcast()) {
        if (optimizeBitcastInstr(MI, MBB)) {
          // MI is deleted.
          LocalMIs.erase(MI);
          Changed = true;
          MII = First ? I->begin() : llvm::next(PMII);
          continue;
        }
      } else if (MI->isCompare()) {
        if (optimizeCmpInstr(MI, MBB)) {
          // MI is deleted.
          LocalMIs.erase(MI);
          Changed = true;
          MII = First ? I->begin() : llvm::next(PMII);
          continue;
        }
      }

      if (isMoveImmediate(MI, ImmDefRegs, ImmDefMIs)) {
        SeenMoveImm = true;
      } else {
        Changed |= optimizeExtInstr(MI, MBB, LocalMIs);
        if (SeenMoveImm)
          Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
      }

      First = false;
      PMII = MII;
      ++MII;
    }
  }

  return Changed;
}