//===-- Thumb2SizeReduction.cpp - Thumb2 code size reduction pass -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
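
// This pass rewrites 32-bit Thumb2 instructions as equivalent 16-bit
// encodings when registers, immediates, predication, and flag behavior
// permit. A sketch of the idea:
//
//   add.w  r0, r0, #1    @ 4 bytes, CPSR untouched
//
// can become the 16-bit
//
//   adds   r0, #1        @ 2 bytes, but sets CPSR
//
// which is only safe when the flags are known to be dead afterwards.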

#define DEBUG_TYPE "t2-reduce-size"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "Thumb2InstrInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/IR/Function.h" // To access Function attributes
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

STATISTIC(NumNarrows,  "Number of 32-bit instrs reduced to 16-bit ones");
STATISTIC(Num2Addrs,   "Number of 32-bit instrs reduced to 2addr 16-bit ones");
STATISTIC(NumLdSts,    "Number of 32-bit load / store reduced to 16-bit ones");
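
// Testing / debugging knobs: each limit caps how many reductions of the
// corresponding kind the pass will perform (-1 means no limit).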
static cl::opt<int> ReduceLimit("t2-reduce-limit",
                                cl::init(-1), cl::Hidden);
static cl::opt<int> ReduceLimit2Addr("t2-reduce-limit2",
                                     cl::init(-1), cl::Hidden);
static cl::opt<int> ReduceLimitLdSt("t2-reduce-limit3",
                                    cl::init(-1), cl::Hidden);

namespace {
/// ReduceTable - A static table with information on mapping from wide
/// opcodes to narrow ones.
struct ReduceEntry {
  uint16_t WideOpc;       // Wide opcode
  uint16_t NarrowOpc1;    // Narrow opcode to transform to
  uint16_t NarrowOpc2;    // Narrow opcode when it's two-address
  uint8_t  Imm1Limit;     // Limit of immediate field (bits)
  uint8_t  Imm2Limit;     // Limit of immediate field when it's two-address
  unsigned LowRegs1 : 1;  // Only possible if low-registers are used
  unsigned LowRegs2 : 1;  // Only possible if low-registers are used (2addr)
  unsigned PredCC1  : 2;  // 0 - If predicated, cc is on and vice versa.
                          // 1 - No cc field.
                          // 2 - Always set CPSR.
  unsigned PredCC2  : 2;
  unsigned PartFlag : 1;  // 16-bit instruction does partial flag update
  unsigned Special  : 1;  // Needs to be dealt with specially
  unsigned AvoidMovs: 1;  // Avoid movs with shifter operand (for Swift)
};
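
// Reading the table: e.g. the t2ADDri row says a 32-bit "add.w rD, rN, #imm"
// may narrow to tADDi3 (3-bit immediate, fresh destination) or, in
// two-address form, to tADDi8 (8-bit immediate), in both cases only with low
// registers; it is also marked Special to handle the SP-relative case.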
static const ReduceEntry ReduceTable[] = {
  // Wide,          Narrow1,      Narrow2,     imm1,imm2, lo1, lo2, P/C,PF,S,AM
  { ARM::t2ADCrr,  0,             ARM::tADC,     0,   0,   0,   1,  0,0, 0,0,0 },
  { ARM::t2ADDri,  ARM::tADDi3,   ARM::tADDi8,   3,   8,   1,   1,  0,0, 0,1,0 },
  { ARM::t2ADDrr,  ARM::tADDrr,   ARM::tADDhirr, 0,   0,   1,   0,  0,1, 0,0,0 },
  { ARM::t2ADDSri, ARM::tADDi3,   ARM::tADDi8,   3,   8,   1,   1,  2,2, 0,1,0 },
  { ARM::t2ADDSrr, ARM::tADDrr,   0,             0,   0,   1,   0,  2,0, 0,1,0 },
  { ARM::t2ANDrr,  0,             ARM::tAND,     0,   0,   0,   1,  0,0, 1,0,0 },
  { ARM::t2ASRri,  ARM::tASRri,   0,             5,   0,   1,   0,  0,0, 1,0,1 },
  { ARM::t2ASRrr,  0,             ARM::tASRrr,   0,   0,   0,   1,  0,0, 1,0,1 },
  { ARM::t2BICrr,  0,             ARM::tBIC,     0,   0,   0,   1,  0,0, 1,0,0 },
  //FIXME: Disable CMN, as CCodes are backwards from compare expectations
  //{ ARM::t2CMNrr, ARM::tCMN,    0,             0,   0,   1,   0,  2,0, 0,0,0 },
  { ARM::t2CMNzrr, ARM::tCMNz,    0,             0,   0,   1,   0,  2,0, 0,0,0 },
  { ARM::t2CMPri,  ARM::tCMPi8,   0,             8,   0,   1,   0,  2,0, 0,0,0 },
  { ARM::t2CMPrr,  ARM::tCMPhir,  0,             0,   0,   0,   0,  2,0, 0,1,0 },
  { ARM::t2EORrr,  0,             ARM::tEOR,     0,   0,   0,   1,  0,0, 1,0,0 },
  // FIXME: adr.n immediate offset must be multiple of 4.
  //{ ARM::t2LEApcrelJT, ARM::tLEApcrelJT, 0,    0,   0,   1,   0,  1,0, 0,0,0 },
  { ARM::t2LSLri,  ARM::tLSLri,   0,             5,   0,   1,   0,  0,0, 1,0,1 },
  { ARM::t2LSLrr,  0,             ARM::tLSLrr,   0,   0,   0,   1,  0,0, 1,0,1 },
  { ARM::t2LSRri,  ARM::tLSRri,   0,             5,   0,   1,   0,  0,0, 1,0,1 },
  { ARM::t2LSRrr,  0,             ARM::tLSRrr,   0,   0,   0,   1,  0,0, 1,0,1 },
  { ARM::t2MOVi,   ARM::tMOVi8,   0,             8,   0,   1,   0,  0,0, 1,0,0 },
  { ARM::t2MOVi16, ARM::tMOVi8,   0,             8,   0,   1,   0,  0,0, 1,1,0 },
  // FIXME: Do we need the 16-bit 'S' variant?
  { ARM::t2MOVr,   ARM::tMOVr,    0,             0,   0,   0,   0,  1,0, 0,0,0 },
  { ARM::t2MUL,    0,             ARM::tMUL,     0,   0,   0,   1,  0,0, 1,0,0 },
  { ARM::t2MVNr,   ARM::tMVN,     0,             0,   0,   1,   0,  0,0, 0,0,0 },
  { ARM::t2ORRrr,  0,             ARM::tORR,     0,   0,   0,   1,  0,0, 1,0,0 },
  { ARM::t2REV,    ARM::tREV,     0,             0,   0,   1,   0,  1,0, 0,0,0 },
  { ARM::t2REV16,  ARM::tREV16,   0,             0,   0,   1,   0,  1,0, 0,0,0 },
  { ARM::t2REVSH,  ARM::tREVSH,   0,             0,   0,   1,   0,  1,0, 0,0,0 },
  { ARM::t2RORrr,  0,             ARM::tROR,     0,   0,   0,   1,  0,0, 1,0,0 },
  { ARM::t2RSBri,  ARM::tRSB,     0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2RSBSri, ARM::tRSB,     0,             0,   0,   1,   0,  2,0, 0,1,0 },
  { ARM::t2SBCrr,  0,             ARM::tSBC,     0,   0,   0,   1,  0,0, 0,0,0 },
  { ARM::t2SUBri,  ARM::tSUBi3,   ARM::tSUBi8,   3,   8,   1,   1,  0,0, 0,0,0 },
  { ARM::t2SUBrr,  ARM::tSUBrr,   0,             0,   0,   1,   0,  0,0, 0,0,0 },
  { ARM::t2SUBSri, ARM::tSUBi3,   ARM::tSUBi8,   3,   8,   1,   1,  2,2, 0,0,0 },
  { ARM::t2SUBSrr, ARM::tSUBrr,   0,             0,   0,   1,   0,  2,0, 0,0,0 },
  { ARM::t2SXTB,   ARM::tSXTB,    0,             0,   0,   1,   0,  1,0, 0,1,0 },
  { ARM::t2SXTH,   ARM::tSXTH,    0,             0,   0,   1,   0,  1,0, 0,1,0 },
  { ARM::t2TSTrr,  ARM::tTST,     0,             0,   0,   1,   0,  2,0, 0,0,0 },
  { ARM::t2UXTB,   ARM::tUXTB,    0,             0,   0,   1,   0,  1,0, 0,1,0 },
  { ARM::t2UXTH,   ARM::tUXTH,    0,             0,   0,   1,   0,  1,0, 0,1,0 },

  // FIXME: Clean this up after splitting each Thumb load / store opcode
  // into multiple ones.
  { ARM::t2LDRi12,  ARM::tLDRi,   ARM::tLDRspi,  5,   8,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRs,    ARM::tLDRr,   0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRBi12, ARM::tLDRBi,  0,             5,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRBs,   ARM::tLDRBr,  0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRHi12, ARM::tLDRHi,  0,             5,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRHs,   ARM::tLDRHr,  0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRSBs,  ARM::tLDRSB,  0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRSHs,  ARM::tLDRSH,  0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRi12,  ARM::tSTRi,   ARM::tSTRspi,  5,   8,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRs,    ARM::tSTRr,   0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRBi12, ARM::tSTRBi,  0,             5,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRBs,   ARM::tSTRBr,  0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRHi12, ARM::tSTRHi,  0,             5,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRHs,   ARM::tSTRHr,  0,             0,   0,   1,   0,  0,0, 0,1,0 },

  { ARM::t2LDMIA,     ARM::tLDMIA,     0,            0, 0, 1, 1, 1,1, 0,1,0 },
  { ARM::t2LDMIA_RET, 0,               ARM::tPOP_RET,0, 0, 1, 1, 1,1, 0,1,0 },
  { ARM::t2LDMIA_UPD, ARM::tLDMIA_UPD, ARM::tPOP,    0, 0, 1, 1, 1,1, 0,1,0 },
  // ARM::t2STM (with no basereg writeback) has no Thumb1 equivalent
  { ARM::t2STMIA_UPD, ARM::tSTMIA_UPD, 0,            0, 0, 1, 1, 1,1, 0,1,0 },
  { ARM::t2STMDB_UPD, 0,               ARM::tPUSH,   0, 0, 1, 1, 1,1, 0,1,0 }
};

class Thumb2SizeReduce : public MachineFunctionPass {
public:
  static char ID;
  Thumb2SizeReduce();

  const Thumb2InstrInfo *TII;
  const ARMSubtarget *STI;

  virtual bool runOnMachineFunction(MachineFunction &MF);

  virtual const char *getPassName() const {
    return "Thumb2 instruction size reduction pass";
  }

private:
  /// ReduceOpcodeMap - Maps wide opcode to index of entry in ReduceTable.
  DenseMap<unsigned, unsigned> ReduceOpcodeMap;

  bool canAddPseudoFlagDep(MachineInstr *Use, bool IsSelfLoop);

  bool VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
                       bool is2Addr, ARMCC::CondCodes Pred,
                       bool LiveCPSR, bool &HasCC, bool &CCDead);

  bool ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
                       const ReduceEntry &Entry);

  bool ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
                     const ReduceEntry &Entry, bool LiveCPSR, bool IsSelfLoop);

  /// ReduceTo2Addr - Reduce a 32-bit instruction to a 16-bit two-address
  /// instruction.
  bool ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
                     const ReduceEntry &Entry, bool LiveCPSR,
                     bool IsSelfLoop);

  /// ReduceToNarrow - Reduce a 32-bit instruction to a 16-bit
  /// non-two-address instruction.
  bool ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
                      const ReduceEntry &Entry, bool LiveCPSR,
                      bool IsSelfLoop);

  /// ReduceMI - Attempt to reduce MI, return true on success.
  bool ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI,
                bool LiveCPSR, bool IsSelfLoop);

  /// ReduceMBB - Reduce width of instructions in the specified basic block.
  bool ReduceMBB(MachineBasicBlock &MBB);

  bool OptimizeSize;
  bool MinimizeSize;

  // Last instruction to define CPSR in the current block.
  MachineInstr *CPSRDef;
  // Was CPSR last defined by a high latency instruction?
  // When CPSRDef is null, this refers to CPSR defs in predecessors.
  bool HighLatencyCPSR;

  struct MBBInfo {
    // The flags leaving this block have high latency.
    bool HighLatencyCPSR;
    // Has this block been visited yet?
    bool Visited;

    MBBInfo() : HighLatencyCPSR(false), Visited(false) {}
  };

  SmallVector<MBBInfo, 8> BlockInfo;
};
char Thumb2SizeReduce::ID = 0;
}

Thumb2SizeReduce::Thumb2SizeReduce() : MachineFunctionPass(ID) {
  OptimizeSize = MinimizeSize = false;
  for (unsigned i = 0, e = array_lengthof(ReduceTable); i != e; ++i) {
    unsigned FromOpc = ReduceTable[i].WideOpc;
    if (!ReduceOpcodeMap.insert(std::make_pair(FromOpc, i)).second)
      assert(false && "Duplicated entries?");
  }
}

static bool HasImplicitCPSRDef(const MCInstrDesc &MCID) {
  for (const uint16_t *Regs = MCID.getImplicitDefs(); *Regs; ++Regs)
    if (*Regs == ARM::CPSR)
      return true;
  return false;
}

// Check for a likely high-latency flag def.
static bool isHighLatencyCPSR(MachineInstr *Def) {
  switch(Def->getOpcode()) {
  case ARM::FMSTAT:
  case ARM::tMUL:
    return true;
  }
  return false;
}
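
// (FMSTAT copies the floating-point status flags into APSR, and tMUL is
// multi-cycle on many cores, so flags produced by either tend to arrive
// late; treat them as high latency.)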

/// canAddPseudoFlagDep - For A9 (and other out-of-order) implementations,
/// the 's' forms of 16-bit instructions partially update CPSR. Abort the
/// transformation to avoid adding a false dependency on the last CPSR-setting
/// instruction, which hurts the out-of-order execution engine's ability to
/// do register renaming magic.
/// This function checks if there is a read-after-write dependency between the
/// last instruction that defines the CPSR and the current instruction. If there
/// is, then there is no harm done since the instruction cannot be retired
/// before the CPSR setting instruction anyway.
/// Note, we are not doing full dependency analysis here for the sake of compile
/// time. We're not looking for cases like:
/// r0 = muls ...
/// r1 = add.w r0, ...
/// ...
///    = mul.w r1
/// In this case it would have been ok to narrow the mul.w to muls since there
/// is an indirect RAW dependency between the muls and the mul.w.
bool
Thumb2SizeReduce::canAddPseudoFlagDep(MachineInstr *Use, bool FirstInSelfLoop) {
  // Disable the check for -Oz (aka OptimizeForSizeHarder).
  if (MinimizeSize || !STI->avoidCPSRPartialUpdate())
    return false;

  if (!CPSRDef)
    // If this BB loops back to itself, conservatively avoid narrowing the
    // first instruction that does partial flag update.
    return HighLatencyCPSR || FirstInSelfLoop;

  SmallSet<unsigned, 2> Defs;
  for (unsigned i = 0, e = CPSRDef->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = CPSRDef->getOperand(i);
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == ARM::CPSR)
      continue;
    Defs.insert(Reg);
  }

  for (unsigned i = 0, e = Use->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = Use->getOperand(i);
    if (!MO.isReg() || MO.isUndef() || MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (Defs.count(Reg))
      // Use reads a register the CPSR def wrote: a real RAW dependency
      // already orders the two, so narrowing adds nothing new.
      return false;
  }

  // If the current CPSR has high latency, try to avoid the false dependency.
  if (HighLatencyCPSR)
    return true;

  // tMOVi8 usually doesn't start long dependency chains, and there are a lot
  // of them, so always shrink them when CPSR doesn't have high latency.
  if (Use->getOpcode() == ARM::t2MOVi ||
      Use->getOpcode() == ARM::t2MOVi16)
    return false;

  // No read-after-write dependency. The narrowing will add false dependency.
  return true;
}
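
// Illustration of the hazard canAddPseudoFlagDep guards against (a sketch):
//   cmp   r1, r2      @ writes all of CPSR
//   ...
//   movs  r0, #4      @ 16-bit; updates only N and Z
// On an out-of-order core the movs must merge its partial flag result with
// the older CPSR value, a false dependency the wide "mov.w r0, #4" avoids.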

bool
Thumb2SizeReduce::VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
                                  bool is2Addr, ARMCC::CondCodes Pred,
                                  bool LiveCPSR, bool &HasCC, bool &CCDead) {
  if ((is2Addr  && Entry.PredCC2 == 0) ||
      (!is2Addr && Entry.PredCC1 == 0)) {
    if (Pred == ARMCC::AL) {
      // Not predicated, must set CPSR.
      if (!HasCC) {
        // Original instruction was not setting CPSR, but CPSR is not
        // currently live anyway. It's ok to set it. The CPSR def is
        // dead though.
        if (!LiveCPSR) {
          HasCC = true;
          CCDead = true;
          return true;
        }
        return false;
      }
    } else {
      // Predicated, must not set CPSR.
      if (HasCC)
        return false;
    }
  } else if ((is2Addr  && Entry.PredCC2 == 2) ||
             (!is2Addr && Entry.PredCC1 == 2)) {
    /// Old opcode has an optional def of CPSR.
    if (HasCC)
      return true;
    // If old opcode does not implicitly define CPSR, then it's not ok since
    // these new opcodes' CPSR def is not meant to be thrown away. e.g. CMP.
    if (!HasImplicitCPSRDef(MI->getDesc()))
      return false;
    HasCC = true;
    CCDead = true;
  } else {
    // 16-bit instruction does not set CPSR.
    if (HasCC)
      return false;
  }

  return true;
}
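
// Example of the PredCC rules: unpredicated 16-bit "adds" (tADDi3) always
// sets the flags outside an IT block, so an unpredicated t2ADDri can only
// narrow by gaining a CPSR def (marked dead if CPSR was not live); inside an
// IT block the predicated 16-bit form must not write CPSR at all.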

static bool VerifyLowRegs(MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();
  bool isPCOk = (Opc == ARM::t2LDMIA_RET || Opc == ARM::t2LDMIA ||
                 Opc == ARM::t2LDMDB || Opc == ARM::t2LDMIA_UPD ||
                 Opc == ARM::t2LDMDB_UPD);
  bool isLROk = (Opc == ARM::t2STMIA_UPD || Opc == ARM::t2STMDB_UPD);
  bool isSPOk = isPCOk || isLROk;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isImplicit())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == ARM::CPSR)
      continue;
    if (isPCOk && Reg == ARM::PC)
      continue;
    if (isLROk && Reg == ARM::LR)
      continue;
    if (Reg == ARM::SP) {
      if (isSPOk)
        continue;
      if (i == 1 && (Opc == ARM::t2LDRi12 || Opc == ARM::t2STRi12))
        // Special case for these ldr / str with sp as base register.
        continue;
    }
    if (!isARMLowRegister(Reg))
      return false;
  }
  return true;
}

bool
Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
                                  const ReduceEntry &Entry) {
  if (ReduceLimitLdSt != -1 && ((int)NumLdSts >= ReduceLimitLdSt))
    return false;

  unsigned Scale = 1;
  bool HasImmOffset = false;
  bool HasShift = false;
  bool HasOffReg = true;
  bool isLdStMul = false;
  unsigned Opc = Entry.NarrowOpc1;
  unsigned OpNum = 3; // First 'rest' of operands.
  uint8_t  ImmLimit = Entry.Imm1Limit;

  switch (Entry.WideOpc) {
  default:
    llvm_unreachable("Unexpected Thumb2 load / store opcode!");
  case ARM::t2LDRi12:
  case ARM::t2STRi12:
    if (MI->getOperand(1).getReg() == ARM::SP) {
      // SP-relative accesses use the tLDRspi / tSTRspi forms instead.
      Opc = Entry.NarrowOpc2;
      ImmLimit = Entry.Imm2Limit;
    }

    Scale = 4;
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRBi12:
  case ARM::t2STRBi12:
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRHi12:
  case ARM::t2STRHi12:
    Scale = 2;
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRs:
  case ARM::t2LDRBs:
  case ARM::t2LDRHs:
  case ARM::t2LDRSBs:
  case ARM::t2LDRSHs:
  case ARM::t2STRs:
  case ARM::t2STRBs:
  case ARM::t2STRHs:
    HasShift = true;
    OpNum = 4;
    break;
  case ARM::t2LDMIA: {
    unsigned BaseReg = MI->getOperand(0).getReg();
    if (!isARMLowRegister(BaseReg) || Entry.WideOpc != ARM::t2LDMIA)
      return false;

    // For the non-writeback version (this one), the base register must be
    // one of the registers being loaded.
    bool isOK = false;
    for (unsigned i = 4; i < MI->getNumOperands(); ++i) {
      if (MI->getOperand(i).getReg() == BaseReg) {
        isOK = true;
        break;
      }
    }

    if (!isOK)
      return false;

    OpNum = 0;
    isLdStMul = true;
    break;
  }
  case ARM::t2LDMIA_RET: {
    unsigned BaseReg = MI->getOperand(1).getReg();
    if (BaseReg != ARM::SP)
      return false;
    Opc = Entry.NarrowOpc2; // tPOP_RET
    OpNum = 2;
    isLdStMul = true;
    break;
  }
  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB_UPD:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD: {
    OpNum = 0;

    unsigned BaseReg = MI->getOperand(1).getReg();
    if (BaseReg == ARM::SP &&
        (Entry.WideOpc == ARM::t2LDMIA_UPD ||
         Entry.WideOpc == ARM::t2STMDB_UPD)) {
      Opc = Entry.NarrowOpc2; // tPOP or tPUSH
      OpNum = 2;
    } else if (!isARMLowRegister(BaseReg) ||
               (Entry.WideOpc != ARM::t2LDMIA_UPD &&
                Entry.WideOpc != ARM::t2STMIA_UPD)) {
      return false;
    }

    isLdStMul = true;
    break;
  }
  }

  unsigned OffsetReg = 0;
  bool OffsetKill = false;
  if (HasShift) {
    OffsetReg  = MI->getOperand(2).getReg();
    OffsetKill = MI->getOperand(2).isKill();

    if (MI->getOperand(3).getImm())
      // Thumb1 addressing mode doesn't support shift.
      return false;
  }

  unsigned OffsetImm = 0;
  if (HasImmOffset) {
    OffsetImm = MI->getOperand(2).getImm();
    unsigned MaxOffset = ((1 << ImmLimit) - 1) * Scale;

    if ((OffsetImm & (Scale - 1)) || OffsetImm > MaxOffset)
      // Make sure the immediate field fits.
      return false;
  }

  // Add the 16-bit load / store instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, TII->get(Opc));
  if (!isLdStMul) {
    MIB.addOperand(MI->getOperand(0));
    MIB.addOperand(MI->getOperand(1));

    if (HasImmOffset)
      MIB.addImm(OffsetImm / Scale);

    assert((!HasShift || OffsetReg) && "Invalid so_reg load / store address!");

    if (HasOffReg)
      MIB.addReg(OffsetReg, getKillRegState(OffsetKill));
  }

  // Transfer the rest of operands.
  for (unsigned e = MI->getNumOperands(); OpNum != e; ++OpNum)
    MIB.addOperand(MI->getOperand(OpNum));

  // Transfer memoperands.
  MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());

  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++NumLdSts;
  return true;
}
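
// Example of the immediate scaling above: "ldr.w r0, [r1, #28]" fits tLDRi,
// whose 5-bit offset field counts words, so #28 is encoded as 28/4 = 7. An
// offset of #30 (not word-aligned) or #128 (above the 124-byte maximum)
// keeps the 32-bit form.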

bool
Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
                                const ReduceEntry &Entry,
                                bool LiveCPSR, bool IsSelfLoop) {
  unsigned Opc = MI->getOpcode();
  if (Opc == ARM::t2ADDri) {
    // If the source register is SP, try to reduce to tADDrSPi, otherwise
    // it's a normal reduce.
    if (MI->getOperand(1).getReg() != ARM::SP) {
      if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
        return true;
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    }
    // Try to reduce to tADDrSPi.
    unsigned Imm = MI->getOperand(2).getImm();
    // The immediate must be in range, the destination register must be a low
    // reg, the predicate must be "always" and the condition flags must not
    // be being set.
    if (Imm & 3 || Imm > 1020)
      return false;
    if (!isARMLowRegister(MI->getOperand(0).getReg()))
      return false;
    if (MI->getOperand(3).getImm() != ARMCC::AL)
      return false;
    const MCInstrDesc &MCID = MI->getDesc();
    if (MCID.hasOptionalDef() &&
        MI->getOperand(MCID.getNumOperands()-1).getReg() == ARM::CPSR)
      return false;

    MachineInstrBuilder MIB = BuildMI(MBB, MI, MI->getDebugLoc(),
                                      TII->get(ARM::tADDrSPi))
      .addOperand(MI->getOperand(0))
      .addOperand(MI->getOperand(1))
      .addImm(Imm / 4); // The tADDrSPi has an implied scale by four.
    AddDefaultPred(MIB);

    // Transfer MI flags.
    MIB.setMIFlags(MI->getFlags());

    DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

    MBB.erase_instr(MI);
    ++NumNarrows;
    return true;
  }
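
  // Example of the SP case above: "add.w r0, sp, #40" becomes the 16-bit
  // "add r0, sp, #40" (tADDrSPi), storing the word-scaled immediate 40/4.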

  if (Entry.LowRegs1 && !VerifyLowRegs(MI))
    return false;

  if (MI->mayLoad() || MI->mayStore())
    return ReduceLoadStore(MBB, MI, Entry);

  switch (Opc) {
  default: break;
  case ARM::t2ADDSri:
  case ARM::t2ADDSrr: {
    unsigned PredReg = 0;
    if (getInstrPredicate(MI, PredReg) == ARMCC::AL) {
      switch (Opc) {
      default: break;
      case ARM::t2ADDSri: {
        if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
          return true;
        // fallthrough
      }
      case ARM::t2ADDSrr:
        return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
      }
    }
    break;
  }
  case ARM::t2RSBri:
  case ARM::t2RSBSri:
  case ARM::t2SXTB:
  case ARM::t2SXTH:
  case ARM::t2UXTB:
  case ARM::t2UXTH:
    if (MI->getOperand(2).getImm() == 0)
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    break;
  case ARM::t2MOVi16:
    // Can convert only 'pure' immediate operands, not immediates obtained as
    // globals' addresses.
    if (MI->getOperand(1).isImm())
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    break;
  case ARM::t2CMPrr: {
    // Try to reduce to the lo-reg only version first. Why there are two
    // versions of the instruction is a mystery.
    // It would be nice to just have two entries in the master table that
    // are prioritized, but the table assumes a unique entry for each
    // source insn opcode. So for now, we hack a local entry record to use.
    static const ReduceEntry NarrowEntry =
      { ARM::t2CMPrr, ARM::tCMPr, 0, 0, 0, 1, 1, 2,0, 0,1,0 };
    if (ReduceToNarrow(MBB, MI, NarrowEntry, LiveCPSR, IsSelfLoop))
      return true;
    return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
  }
  }

  return false;
}

bool
Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
                                const ReduceEntry &Entry,
                                bool LiveCPSR, bool IsSelfLoop) {
  if (ReduceLimit2Addr != -1 && ((int)Num2Addrs >= ReduceLimit2Addr))
    return false;

  if (!MinimizeSize && !OptimizeSize && Entry.AvoidMovs &&
      STI->avoidMOVsShifterOperand())
    // Don't issue movs with shifter operand for some CPUs unless we
    // are optimizing / minimizing for size.
    return false;

  unsigned Reg0 = MI->getOperand(0).getReg();
  unsigned Reg1 = MI->getOperand(1).getReg();
  // t2MUL is "special". The tied source operand is second, not first.
  if (MI->getOpcode() == ARM::t2MUL) {
    unsigned Reg2 = MI->getOperand(2).getReg();
    // Early exit if the regs aren't all low regs.
    if (!isARMLowRegister(Reg0) || !isARMLowRegister(Reg1)
        || !isARMLowRegister(Reg2))
      return false;
    if (Reg0 != Reg2) {
      // If the other operand also isn't the same as the destination, we
      // can't reduce.
      if (Reg1 != Reg0)
        return false;
      // Try to commute the operands to make it a 2-address instruction.
      MachineInstr *CommutedMI = TII->commuteInstruction(MI);
      if (!CommutedMI)
        return false;
    }
  } else if (Reg0 != Reg1) {
    // Try to commute the operands to make it a 2-address instruction.
    unsigned CommOpIdx1, CommOpIdx2;
    if (!TII->findCommutedOpIndices(MI, CommOpIdx1, CommOpIdx2) ||
        CommOpIdx1 != 1 || MI->getOperand(CommOpIdx2).getReg() != Reg0)
      return false;
    MachineInstr *CommutedMI = TII->commuteInstruction(MI);
    if (!CommutedMI)
      return false;
  }
  if (Entry.LowRegs2 && !isARMLowRegister(Reg0))
    return false;
  if (Entry.Imm2Limit) {
    unsigned Imm = MI->getOperand(2).getImm();
    unsigned Limit = (1 << Entry.Imm2Limit) - 1;
    if (Imm > Limit)
      return false;
  } else {
    unsigned Reg2 = MI->getOperand(2).getReg();
    if (Entry.LowRegs2 && !isARMLowRegister(Reg2))
      return false;
  }

  // Check if it's possible / necessary to transfer the predicate.
  const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc2);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
  bool SkipPred = false;
  if (Pred != ARMCC::AL) {
    if (!NewMCID.isPredicable())
      // Can't transfer predicate, fail.
      return false;
  } else {
    SkipPred = !NewMCID.isPredicable();
  }

  bool HasCC = false;
  bool CCDead = false;
  const MCInstrDesc &MCID = MI->getDesc();
  if (MCID.hasOptionalDef()) {
    unsigned NumOps = MCID.getNumOperands();
    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
      CCDead = true;
  }
  if (!VerifyPredAndCC(MI, Entry, true, Pred, LiveCPSR, HasCC, CCDead))
    return false;

  // Avoid adding a false dependency on partial flag update by some 16-bit
  // instructions which have the 's' bit set.
  if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
      canAddPseudoFlagDep(MI, IsSelfLoop))
    return false;

  // Add the 16-bit instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);
  MIB.addOperand(MI->getOperand(0));
  if (NewMCID.hasOptionalDef()) {
    if (HasCC)
      AddDefaultT1CC(MIB, CCDead);
    else
      AddNoT1CC(MIB);
  }

  // Transfer the rest of operands.
  unsigned NumOps = MCID.getNumOperands();
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
      continue;
    if (SkipPred && MCID.OpInfo[i].isPredicate())
      continue;
    MIB.addOperand(MI->getOperand(i));
  }

  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());

  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++Num2Addrs;
  return true;
}
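
// Example of the commuting above: in "add.w r2, r1, r2" the destination is
// not tied to the first source, but add is commutable, so it is rewritten
// as "add.w r2, r2, r1" and can then narrow to a two-address 16-bit add.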

bool
Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
                                 const ReduceEntry &Entry,
                                 bool LiveCPSR, bool IsSelfLoop) {
  if (ReduceLimit != -1 && ((int)NumNarrows >= ReduceLimit))
    return false;

  if (!MinimizeSize && !OptimizeSize && Entry.AvoidMovs &&
      STI->avoidMOVsShifterOperand())
    // Don't issue movs with shifter operand for some CPUs unless we
    // are optimizing / minimizing for size.
    return false;

  unsigned Limit = ~0U;
  if (Entry.Imm1Limit)
    Limit = (1 << Entry.Imm1Limit) - 1;

  const MCInstrDesc &MCID = MI->getDesc();
  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate())
      continue;
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg()) {
      unsigned Reg = MO.getReg();
      if (!Reg || Reg == ARM::CPSR)
        continue;
      if (Entry.LowRegs1 && !isARMLowRegister(Reg))
        return false;
    } else if (MO.isImm() &&
               !MCID.OpInfo[i].isPredicate()) {
      if (((unsigned)MO.getImm()) > Limit)
        return false;
    }
  }

  // Check if it's possible / necessary to transfer the predicate.
  const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc1);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
  bool SkipPred = false;
  if (Pred != ARMCC::AL) {
    if (!NewMCID.isPredicable())
      // Can't transfer predicate, fail.
      return false;
  } else {
    SkipPred = !NewMCID.isPredicable();
  }

  bool HasCC = false;
  bool CCDead = false;
  if (MCID.hasOptionalDef()) {
    unsigned NumOps = MCID.getNumOperands();
    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
      CCDead = true;
  }
  if (!VerifyPredAndCC(MI, Entry, false, Pred, LiveCPSR, HasCC, CCDead))
    return false;

  // Avoid adding a false dependency on partial flag update by some 16-bit
  // instructions which have the 's' bit set.
  if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
      canAddPseudoFlagDep(MI, IsSelfLoop))
    return false;

  // Add the 16-bit instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);
  MIB.addOperand(MI->getOperand(0));
  if (NewMCID.hasOptionalDef()) {
    if (HasCC)
      AddDefaultT1CC(MIB, CCDead);
    else
      AddNoT1CC(MIB);
  }

  // Transfer the rest of operands.
  unsigned NumOps = MCID.getNumOperands();
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
      continue;
    if ((MCID.getOpcode() == ARM::t2RSBSri ||
         MCID.getOpcode() == ARM::t2RSBri ||
         MCID.getOpcode() == ARM::t2SXTB ||
         MCID.getOpcode() == ARM::t2SXTH ||
         MCID.getOpcode() == ARM::t2UXTB ||
         MCID.getOpcode() == ARM::t2UXTH) && i == 2)
      // Skip the zero immediate operand, it's now implicit.
      continue;
    bool isPred = (i < NumOps && MCID.OpInfo[i].isPredicate());
    if (SkipPred && isPred)
      continue;
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isImplicit() && MO.getReg() == ARM::CPSR)
      // Skip implicit def of CPSR. Either it's modeled as an optional
      // def now or it's already an implicit def on the new instruction.
      continue;
    MIB.addOperand(MO);
  }
  if (!MCID.isPredicable() && NewMCID.isPredicable())
    AddDefaultPred(MIB);

  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());

  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++NumNarrows;
  return true;
}

static bool UpdateCPSRDef(MachineInstr &MI, bool LiveCPSR, bool &DefCPSR) {
  bool HasDef = false;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;

    DefCPSR = true;
    if (!MO.isDead())
      HasDef = true;
  }

  return HasDef || LiveCPSR;
}

static bool UpdateCPSRUse(MachineInstr &MI, bool LiveCPSR) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.isUndef() || MO.isDef())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;
    assert(LiveCPSR && "CPSR liveness tracking is wrong!");
    if (MO.isKill()) {
      LiveCPSR = false;
      break;
    }
  }

  return LiveCPSR;
}
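
// Note: UpdateCPSRDef / UpdateCPSRUse above maintain the running CPSR
// liveness flag while a block is scanned top-down: a non-dead def makes
// CPSR live, and a use marked as a kill ends its live range.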

bool Thumb2SizeReduce::ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI,
                                bool LiveCPSR, bool IsSelfLoop) {
  unsigned Opcode = MI->getOpcode();
  DenseMap<unsigned, unsigned>::iterator OPI = ReduceOpcodeMap.find(Opcode);
  if (OPI == ReduceOpcodeMap.end())
    return false;
  const ReduceEntry &Entry = ReduceTable[OPI->second];

  // Don't attempt normal reductions on "special" cases for now.
  if (Entry.Special)
    return ReduceSpecial(MBB, MI, Entry, LiveCPSR, IsSelfLoop);

  // Try to transform to a 16-bit two-address instruction.
  if (Entry.NarrowOpc2 &&
      ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
    return true;

  // Try to transform to a 16-bit non-two-address instruction.
  if (Entry.NarrowOpc1 &&
      ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
    return true;

  return false;
}

bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  // Yes, CPSR could be livein.
  bool LiveCPSR = MBB.isLiveIn(ARM::CPSR);
  MachineInstr *BundleMI = 0;

  CPSRDef = 0;
  HighLatencyCPSR = false;

  // Check predecessors for the latest CPSRDef.
  for (MachineBasicBlock::pred_iterator
       I = MBB.pred_begin(), E = MBB.pred_end(); I != E; ++I) {
    const MBBInfo &PInfo = BlockInfo[(*I)->getNumber()];
    if (!PInfo.Visited) {
      // Since blocks are visited in RPO, this must be a back-edge.
      continue;
    }
    if (PInfo.HighLatencyCPSR) {
      HighLatencyCPSR = true;
      break;
    }
  }

  // If this BB loops back to itself, conservatively avoid narrowing the
  // first instruction that does partial flag update.
  bool IsSelfLoop = MBB.isSuccessor(&MBB);
  MachineBasicBlock::instr_iterator MII = MBB.instr_begin(), E = MBB.instr_end();
  MachineBasicBlock::instr_iterator NextMII;
  for (; MII != E; MII = NextMII) {
    NextMII = llvm::next(MII);

    MachineInstr *MI = &*MII;
    if (MI->isBundle()) {
      BundleMI = MI;
      continue;
    }
    if (MI->isDebugValue())
      continue;

    LiveCPSR = UpdateCPSRUse(*MI, LiveCPSR);

    // Does NextMII belong to the same bundle as MI?
    bool NextInSameBundle = NextMII != E && NextMII->isBundledWithPred();

    if (ReduceMI(MBB, MI, LiveCPSR, IsSelfLoop)) {
      Modified = true;
      MachineBasicBlock::instr_iterator I = prior(NextMII);
      MI = &*I;
      // Removing and reinserting the first instruction in a bundle will break
      // up the bundle. Fix the bundling if it was broken.
      if (NextInSameBundle && !NextMII->isBundledWithPred())
        NextMII->bundleWithPred();
    }

    if (!NextInSameBundle && MI->isInsideBundle()) {
      // FIXME: Since post-ra scheduler operates on bundles, the CPSR kill
      // marker is only on the BUNDLE instruction. Process the BUNDLE
      // instruction as we finish with the bundled instruction to work around
      // the inconsistency.
      if (BundleMI->killsRegister(ARM::CPSR))
        LiveCPSR = false;
      MachineOperand *MO = BundleMI->findRegisterDefOperand(ARM::CPSR);
      if (MO && !MO->isDead())
        LiveCPSR = true;
    }

    bool DefCPSR = false;
    LiveCPSR = UpdateCPSRDef(*MI, LiveCPSR, DefCPSR);
    if (MI->isCall()) {
      // Calls don't really set CPSR.
      CPSRDef = 0;
      HighLatencyCPSR = false;
      IsSelfLoop = false;
    } else if (DefCPSR) {
      // This is the last CPSR defining instruction.
      CPSRDef = MI;
      HighLatencyCPSR = isHighLatencyCPSR(CPSRDef);
      IsSelfLoop = false;
    }
  }

  MBBInfo &Info = BlockInfo[MBB.getNumber()];
  Info.HighLatencyCPSR = HighLatencyCPSR;
  Info.Visited = true;
  return Modified;
}

bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
  const TargetMachine &TM = MF.getTarget();
  TII = static_cast<const Thumb2InstrInfo*>(TM.getInstrInfo());
  STI = &TM.getSubtarget<ARMSubtarget>();

  // Optimizing / minimizing size?
  AttributeSet FnAttrs = MF.getFunction()->getAttributes();
  OptimizeSize = FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
                                      Attribute::OptimizeForSize);
  MinimizeSize = FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
                                      Attribute::MinSize);

  BlockInfo.clear();
  BlockInfo.resize(MF.getNumBlockIDs());

  // Visit blocks in reverse post-order so the CPSR state of each block's
  // predecessors is known before the block itself is processed.
  ReversePostOrderTraversal<MachineFunction*> RPOT(&MF);
  bool Modified = false;
  for (ReversePostOrderTraversal<MachineFunction*>::rpo_iterator
       I = RPOT.begin(), E = RPOT.end(); I != E; ++I)
    Modified |= ReduceMBB(**I);
  return Modified;
}

/// createThumb2SizeReductionPass - Returns an instance of the Thumb2 size
/// reduction pass.
FunctionPass *llvm::createThumb2SizeReductionPass() {
  return new Thumb2SizeReduce();
}