//===-- PeepholeOptimizer.cpp - X86 Peephole Optimizer --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a peephole optimizer for the X86.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Target/MRegisterInfo.h"
#include "Support/Statistic.h"
#include "Support/STLExtras.h"
using namespace llvm;

Statistic<> NumPHOpts("x86-peephole",
                      "Number of peephole optimizations performed");

struct PH : public MachineFunctionPass {
  virtual bool runOnMachineFunction(MachineFunction &MF);

  bool PeepholeOptimize(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &I);

  virtual const char *getPassName() const { return "X86 Peephole Optimizer"; }
};

FunctionPass *llvm::createX86PeepholeOptimizerPass() { return new PH(); }

bool PH::runOnMachineFunction(MachineFunction &MF) {
  bool Changed = false;
  for (MachineFunction::iterator BI = MF.begin(), E = MF.end(); BI != E; ++BI)
    for (MachineBasicBlock::iterator I = BI->begin(); I != BI->end(); )
      if (PeepholeOptimize(*BI, I)) {
        Changed = true;
        ++NumPHOpts;
      } else
        ++I;
  return Changed;
}

bool PH::PeepholeOptimize(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &I) {
  assert(I != MBB.end());
  MachineBasicBlock::iterator NextI = next(I);

  MachineInstr *MI = I;
  MachineInstr *Next = (NextI != MBB.end()) ? &*NextI : (MachineInstr*)0;
  unsigned Size = 0;
  switch (MI->getOpcode()) {
  case X86::MOVrr32:                // Destroy X = X copies...
    if (MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
      I = MBB.erase(I);
      return true;
    }
    return false;

    // A large number of X86 instructions have forms which take an 8-bit
    // immediate despite the fact that the operands are 16 or 32 bits.  Because
    // this can save three bytes of code size (and icache space), we want to
    // shrink them if possible.
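    //
    // For example, 'imul EAX, EAX, 12' with an imm8 (opcode 0x6B) encodes in
    // 3 bytes, while the imm32 form (opcode 0x69) takes 6 bytes.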
  case X86::IMULrri16: case X86::IMULrri32:
    assert(MI->getNumOperands() == 3 && "These should all have 3 operands!");
    if (MI->getOperand(2).isImmediate()) {
      int Val = MI->getOperand(2).getImmedValue();
      // If the value is the same when sign extended from 8 bits...
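      // (casting to signed char keeps only the low 8 bits; casting back to
      // int sign-extends, so equality means Val fits in a signed imm8).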
      if (Val == (signed int)(signed char)Val) {
        unsigned Opcode;
        switch (MI->getOpcode()) {
        default: assert(0 && "Unknown opcode value!");
        case X86::IMULrri16: Opcode = X86::IMULrri16b; break;
        case X86::IMULrri32: Opcode = X86::IMULrri32b; break;
        }
        unsigned R0 = MI->getOperand(0).getReg();
        unsigned R1 = MI->getOperand(1).getReg();
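        // Replace the instruction in place: erase() returns an iterator to
        // the following instruction and insert() returns an iterator to the
        // new one, so I ends up pointing at the replacement for rescanning.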
        I = MBB.insert(MBB.erase(I),
                       BuildMI(Opcode, 2, R0).addReg(R1).addZImm((char)Val));
        return true;
      }
    }
    return false;

  case X86::IMULrmi16: case X86::IMULrmi32:
    assert(MI->getNumOperands() == 6 && "These should all have 6 operands!");
    if (MI->getOperand(5).isImmediate()) {
      int Val = MI->getOperand(5).getImmedValue();
      // If the value is the same when sign extended from 8 bits...
      if (Val == (signed int)(signed char)Val) {
        unsigned Opcode;
        switch (MI->getOpcode()) {
        default: assert(0 && "Unknown opcode value!");
        case X86::IMULrmi16: Opcode = X86::IMULrmi16b; break;
        case X86::IMULrmi32: Opcode = X86::IMULrmi32b; break;
        }
        unsigned R0 = MI->getOperand(0).getReg();
        unsigned R1 = MI->getOperand(1).getReg();
        unsigned Scale = MI->getOperand(2).getImmedValue();
        unsigned R2 = MI->getOperand(3).getReg();
        unsigned Offset = MI->getOperand(4).getImmedValue();
        I = MBB.insert(MBB.erase(I),
                       BuildMI(Opcode, 5, R0).addReg(R1).addZImm(Scale).
                           addReg(R2).addSImm(Offset).addZImm((char)Val));
        return true;
      }
    }
    return false;

  case X86::ADDri16:  case X86::ADDri32:
  case X86::ADDmi16:  case X86::ADDmi32:
  case X86::SUBri16:  case X86::SUBri32:
  case X86::ANDri16:  case X86::ANDri32:
  case X86::ORri16:   case X86::ORri32:
  case X86::XORri16:  case X86::XORri32:
    assert(MI->getNumOperands() == 2 && "These should all have 2 operands!");
    if (MI->getOperand(1).isImmediate()) {
      int Val = MI->getOperand(1).getImmedValue();
      // If the value is the same when sign extended from 8 bits...
      if (Val == (signed int)(signed char)Val) {
        unsigned Opcode;
        switch (MI->getOpcode()) {
        default: assert(0 && "Unknown opcode value!");
        case X86::ADDri16:  Opcode = X86::ADDri16b; break;
        case X86::ADDri32:  Opcode = X86::ADDri32b; break;
        case X86::ADDmi16:  Opcode = X86::ADDmi16b; break;
        case X86::ADDmi32:  Opcode = X86::ADDmi32b; break;
        case X86::SUBri16:  Opcode = X86::SUBri16b; break;
        case X86::SUBri32:  Opcode = X86::SUBri32b; break;
        case X86::ANDri16:  Opcode = X86::ANDri16b; break;
        case X86::ANDri32:  Opcode = X86::ANDri32b; break;
        case X86::ORri16:   Opcode = X86::ORri16b;  break;
        case X86::ORri32:   Opcode = X86::ORri32b;  break;
        case X86::XORri16:  Opcode = X86::XORri16b; break;
        case X86::XORri32:  Opcode = X86::XORri32b; break;
        }
        unsigned R0 = MI->getOperand(0).getReg();
        I = MBB.insert(MBB.erase(I),
                       BuildMI(Opcode, 1, R0, MOTy::UseAndDef)
                           .addZImm((char)Val));
        return true;
      }
    }
    return false;

  case X86::ANDmi16:  case X86::ANDmi32:
    assert(MI->getNumOperands() == 5 && "These should all have 5 operands!");
    if (MI->getOperand(4).isImmediate()) {
      int Val = MI->getOperand(4).getImmedValue();
      // If the value is the same when sign extended from 8 bits...
      if (Val == (signed int)(signed char)Val) {
        unsigned Opcode;
        switch (MI->getOpcode()) {
        default: assert(0 && "Unknown opcode value!");
        case X86::ANDmi16:  Opcode = X86::ANDmi16b; break;
        case X86::ANDmi32:  Opcode = X86::ANDmi32b; break;
        }
        unsigned R0 = MI->getOperand(0).getReg();
        unsigned Scale = MI->getOperand(1).getImmedValue();
        unsigned R1 = MI->getOperand(2).getReg();
        unsigned Offset = MI->getOperand(3).getImmedValue();
        I = MBB.insert(MBB.erase(I),
                       BuildMI(Opcode, 5).addReg(R0).addZImm(Scale).
                           addReg(R1).addSImm(Offset).addZImm((char)Val));
        return true;
      }
    }
    return false;

  case X86::MOVri32: Size++;
  case X86::MOVri16: Size++;
  case X86::MOVri8:
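    // Note these cases deliberately fall through: Size ends up 2 for
    // MOVri32, 1 for MOVri16, and 0 for MOVri8, indexing the Opcode table
    // below.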
    // FIXME: We can only do this transformation if we know that flags are not
    // used here, because XOR clobbers the flags!
    if (MI->getOperand(1).isImmediate()) {       // avoid mov EAX, <value>
      int Val = MI->getOperand(1).getImmedValue();
      if (Val == 0) {                            // mov EAX, 0 -> xor EAX, EAX
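        // e.g. 'xor EAX, EAX' encodes in 2 bytes, while 'mov EAX, 0' needs a
        // full imm32 and takes 5.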
        static const unsigned Opcode[] ={X86::XORrr8,X86::XORrr16,X86::XORrr32};
        unsigned Reg = MI->getOperand(0).getReg();
        I = MBB.insert(MBB.erase(I),
                       BuildMI(Opcode[Size], 2, Reg).addReg(Reg).addReg(Reg));
        return true;
      } else if (Val == -1) {                    // mov EAX, -1 -> or EAX, -1
        // TODO: 'or Reg, -1' has a smaller encoding than 'mov Reg, -1'
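        // e.g. 'or EAX, -1' (opcode 0x83 with an imm8) is 3 bytes, versus
        // 5 bytes for 'mov EAX, -1' with its imm32.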
      }
    }
    return false;

  case X86::BSWAPr32:               // Change bswap EAX, bswap EAX into nothing
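    // bswap is its own inverse: two back-to-back byte swaps of the same
    // register cancel out, so both instructions can be deleted.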
    if (Next && Next->getOpcode() == X86::BSWAPr32 &&
        MI->getOperand(0).getReg() == Next->getOperand(0).getReg()) {
      I = MBB.erase(MBB.erase(I));
      return true;
    }
    return false;
  }

  return false;
}

class UseDefChains : public MachineFunctionPass {
  std::vector<MachineInstr*> DefiningInst;
public:
  // getDefinition - Return the machine instruction that defines the specified
  // SSA virtual register.
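  // In SSA form each virtual register has exactly one definition, so a
  // single instruction pointer per register is all the chain we need.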
  MachineInstr *getDefinition(unsigned Reg) {
    assert(MRegisterInfo::isVirtualRegister(Reg) &&
           "use-def chains only exist for SSA registers!");
    assert(Reg - MRegisterInfo::FirstVirtualRegister < DefiningInst.size() &&
           "Unknown register number!");
    assert(DefiningInst[Reg-MRegisterInfo::FirstVirtualRegister] &&
           "Unknown register number!");
    return DefiningInst[Reg-MRegisterInfo::FirstVirtualRegister];
  }

  // setDefinition - Update the use-def chains to indicate that MI defines
  // the specified register.
  void setDefinition(unsigned Reg, MachineInstr *MI) {
    if (Reg-MRegisterInfo::FirstVirtualRegister >= DefiningInst.size())
      DefiningInst.resize(Reg-MRegisterInfo::FirstVirtualRegister+1);
    DefiningInst[Reg-MRegisterInfo::FirstVirtualRegister] = MI;
  }

  // removeDefinition - Update the use-def chains to forget about Reg
  // entirely.
  void removeDefinition(unsigned Reg) {
    assert(getDefinition(Reg));      // Check validity
    DefiningInst[Reg-MRegisterInfo::FirstVirtualRegister] = 0;
  }

  virtual bool runOnMachineFunction(MachineFunction &MF) {
    for (MachineFunction::iterator BI = MF.begin(), E = MF.end(); BI!=E; ++BI)
      for (MachineBasicBlock::iterator I = BI->begin(); I != BI->end(); ++I) {
        for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
          MachineOperand &MO = I->getOperand(i);
          if (MO.isRegister() && MO.isDef() && !MO.isUse() &&
              MRegisterInfo::isVirtualRegister(MO.getReg()))
            setDefinition(MO.getReg(), I);
        }
      }
    return false;
  }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  virtual void releaseMemory() {
    std::vector<MachineInstr*>().swap(DefiningInst);
  }
};

RegisterAnalysis<UseDefChains> X("use-def-chains",
                                 "use-def chain construction for machine code");

Statistic<> NumSSAPHOpts("x86-ssa-peephole",
                         "Number of SSA peephole optimizations performed");

/// SSAPH - This pass is an X86-specific, SSA-based, peephole optimizer.  This
/// pass is really a bad idea: a better instruction selector should completely
/// subsume it.  However, that will take some time to develop, and the simple
/// things this can do are important now.
class SSAPH : public MachineFunctionPass {
  UseDefChains *UDC;
public:
  virtual bool runOnMachineFunction(MachineFunction &MF);

  bool PeepholeOptimize(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &I);

  virtual const char *getPassName() const {
    return "X86 SSA-based Peephole Optimizer";
  }

  /// Propagate - Set MI[DestOpNo] = Src[SrcOpNo], optionally change the
  /// opcode of the instruction, then return true.
  bool Propagate(MachineInstr *MI, unsigned DestOpNo,
                 MachineInstr *Src, unsigned SrcOpNo, unsigned NewOpcode = 0) {
    MI->getOperand(DestOpNo) = Src->getOperand(SrcOpNo);
    if (NewOpcode) MI->setOpcode(NewOpcode);
    return true;
  }

  /// OptimizeAddress - If we can fold the addressing arithmetic for this
  /// memory instruction into the instruction itself, do so and return true.
  bool OptimizeAddress(MachineInstr *MI, unsigned OpNo);

  /// getDefiningInst - If the specified operand is a read of an SSA
  /// register, return the machine instruction defining it; otherwise return
  /// null.
  MachineInstr *getDefiningInst(MachineOperand &MO) {
    if (MO.isDef() || !MO.isRegister() ||
        !MRegisterInfo::isVirtualRegister(MO.getReg())) return 0;
    return UDC->getDefinition(MO.getReg());
  }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.addRequired<UseDefChains>();
    AU.addPreserved<UseDefChains>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

FunctionPass *llvm::createX86SSAPeepholeOptimizerPass() { return new SSAPH(); }

bool SSAPH::runOnMachineFunction(MachineFunction &MF) {
  bool Changed = false;
  bool LocalChanged;

  UDC = &getAnalysis<UseDefChains>();

  do {
    LocalChanged = false;

    for (MachineFunction::iterator BI = MF.begin(), E = MF.end(); BI != E; ++BI)
      for (MachineBasicBlock::iterator I = BI->begin(); I != BI->end(); )
        if (PeepholeOptimize(*BI, I)) {
          LocalChanged = true;
          ++NumSSAPHOpts;
        } else
          ++I;

    Changed |= LocalChanged;
  } while (LocalChanged);

  return Changed;
}
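
// Only scales of 1, 2, 4, and 8 are representable: the x86 SIB byte stores
// the scale as a two-bit log2 field.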
static bool isValidScaleAmount(unsigned Scale) {
  return Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8;
}

/// OptimizeAddress - If we can fold the addressing arithmetic for this
/// memory instruction into the instruction itself, do so and return true.
bool SSAPH::OptimizeAddress(MachineInstr *MI, unsigned OpNo) {
  MachineOperand &BaseRegOp      = MI->getOperand(OpNo+0);
  MachineOperand &ScaleOp        = MI->getOperand(OpNo+1);
  MachineOperand &IndexRegOp     = MI->getOperand(OpNo+2);
  MachineOperand &DisplacementOp = MI->getOperand(OpNo+3);

  unsigned BaseReg  = BaseRegOp.hasAllocatedReg() ? BaseRegOp.getReg() : 0;
  unsigned Scale    = ScaleOp.getImmedValue();
  unsigned IndexReg = IndexRegOp.hasAllocatedReg() ? IndexRegOp.getReg() : 0;
  bool Changed = false;

  // If the base register is unset, and the index register is set with a scale
  // of 1, move it to be the base register.
  if (BaseRegOp.hasAllocatedReg() && BaseReg == 0 &&
      Scale == 1 && IndexReg != 0) {
    BaseRegOp.setReg(IndexReg);
    IndexRegOp.setReg(0);
    BaseReg = IndexReg;
    IndexReg = 0;
    Changed = true;
  }

  // Attempt to fold instructions used by the base register into the
  // instruction.
  if (MachineInstr *DefInst = getDefiningInst(BaseRegOp)) {
    switch (DefInst->getOpcode()) {
    case X86::MOVri32:
      // If there is no displacement set for this instruction, set one now.
      // FIXME: If we can fold two immediates together, we should do so!
      if (DisplacementOp.isImmediate() && !DisplacementOp.getImmedValue()) {
        if (DefInst->getOperand(1).isImmediate()) {
          BaseRegOp.setReg(0);
          return Propagate(MI, OpNo+3, DefInst, 1);
        }
      }
      break;

    case X86::ADDrr32:
      // If the source is a register-register add, and we do not yet have an
      // index register, fold the add into the memory address.
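      // e.g. an access to [X] where X = A + B becomes an access to [A + 1*B].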
      if (IndexReg == 0) {
        BaseRegOp = DefInst->getOperand(1);
        IndexRegOp = DefInst->getOperand(2);
        ScaleOp.setImmedValue(1);
        return true;
      }
      break;

    case X86::SHLri32:
      // If this shift could be folded into the index portion of the address if
      // it were the index register, move it to the index register operand now,
      // so it will be folded in below.
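      // A shift amount below 4 keeps the eventual scale at 1<<amt <= 8, which
      // the SIB byte can still encode once this value is in the index slot.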
      if ((Scale == 1 || (IndexReg == 0 && IndexRegOp.hasAllocatedReg())) &&
          DefInst->getOperand(2).getImmedValue() < 4) {
        std::swap(BaseRegOp, IndexRegOp);
        ScaleOp.setImmedValue(1); Scale = 1;
        std::swap(IndexReg, BaseReg);
        Changed = true;
      }
      break;
    }
  }

  // Attempt to fold instructions used by the index into the instruction.
  if (MachineInstr *DefInst = getDefiningInst(IndexRegOp)) {
    switch (DefInst->getOpcode()) {
    case X86::SHLri32:
      // Figure out what the resulting scale would be if we folded this shift.
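      // e.g. an existing scale of 2 under a 'shl reg, 2' gives 2 * (1<<2) = 8,
      // which is still a legal SIB scale.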
      unsigned ResScale = Scale * (1 << DefInst->getOperand(2).getImmedValue());
      if (isValidScaleAmount(ResScale)) {
        IndexRegOp = DefInst->getOperand(1);
        ScaleOp.setImmedValue(ResScale);
        return true;
      }
      break;
    }
  }

  return Changed;
}

bool SSAPH::PeepholeOptimize(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator &I) {
  MachineBasicBlock::iterator NextI = next(I);

  MachineInstr *MI = I;
  MachineInstr *Next = (NextI != MBB.end()) ? &*NextI : (MachineInstr*)0;

  bool Changed = false;

  // Scan the operands of this instruction.  If any operands are
  // register-register copies, replace the operand with the source.
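  // e.g. if vB is defined by 'vB = MOVrr32 vA', a use of vB here is rewritten
  // to read vA directly, leaving the copy dead for later cleanup.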
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i)
    // Is this an SSA register use?
    if (MachineInstr *DefInst = getDefiningInst(MI->getOperand(i)))
      // If the operand is a vreg-vreg copy, it is always safe to replace the
      // source value with the input operand.
      if (DefInst->getOpcode() == X86::MOVrr8 ||
          DefInst->getOpcode() == X86::MOVrr16 ||
          DefInst->getOpcode() == X86::MOVrr32) {
        // Don't propagate physical registers into PHI nodes...
        if (MI->getOpcode() != X86::PHI ||
            (DefInst->getOperand(1).isRegister() &&
             MRegisterInfo::isVirtualRegister(DefInst->getOperand(1).getReg())))
          Changed = Propagate(MI, i, DefInst, 1);
      }

  // Perform instruction specific optimizations.
  switch (MI->getOpcode()) {

  // Register to memory stores.  Format: <base,scale,indexreg,immdisp>, srcreg
  case X86::MOVmr32: case X86::MOVmr16: case X86::MOVmr8:
  case X86::MOVmi32: case X86::MOVmi16: case X86::MOVmi8:
    // Check to see if we can fold the source instruction into this one...
    if (MachineInstr *SrcInst = getDefiningInst(MI->getOperand(4))) {
      switch (SrcInst->getOpcode()) {
      // Fold the immediate value into the store, if possible.
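      // e.g. 'vR = MOVri32 42' followed by 'MOVmr32 [addr], vR' becomes a
      // direct 'MOVmi32 [addr], 42'.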
      case X86::MOVri8:  return Propagate(MI, 4, SrcInst, 1, X86::MOVmi8);
      case X86::MOVri16: return Propagate(MI, 4, SrcInst, 1, X86::MOVmi16);
      case X86::MOVri32: return Propagate(MI, 4, SrcInst, 1, X86::MOVmi32);
      default: break;
      }
    }

    // If we can optimize the addressing expression, do so now.
    if (OptimizeAddress(MI, 0))
      return true;
    return Changed;

  // Memory loads.  Format: destreg, <base,scale,indexreg,immdisp>
  case X86::MOVrm32: case X86::MOVrm16: case X86::MOVrm8:
    // If we can optimize the addressing expression, do so now.
    if (OptimizeAddress(MI, 1))
      return true;
    return Changed;
  }

  return Changed;
}