//===-- SIShrinkInstructions.cpp - Shrink Instructions --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// The pass tries to use the 32-bit encoding for instructions when possible.
//===----------------------------------------------------------------------===//
13 #include "AMDGPUMCInstLower.h"
14 #include "AMDGPUSubtarget.h"
15 #include "SIInstrInfo.h"
16 #include "llvm/ADT/Statistic.h"
17 #include "llvm/CodeGen/MachineFunctionPass.h"
18 #include "llvm/CodeGen/MachineInstrBuilder.h"
19 #include "llvm/CodeGen/MachineRegisterInfo.h"
20 #include "llvm/IR/Constants.h"
21 #include "llvm/IR/Function.h"
22 #include "llvm/IR/LLVMContext.h"
23 #include "llvm/Support/Debug.h"
24 #include "llvm/Support/raw_ostream.h"
25 #include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-shrink-instructions"

STATISTIC(NumInstructionsShrunk,
          "Number of 64-bit instructions reduced to 32-bit.");
STATISTIC(NumLiteralConstantsFolded,
          "Number of literal constants folded into 32-bit instructions.");

namespace llvm {
  void initializeSIShrinkInstructionsPass(PassRegistry&);
}

using namespace llvm;

namespace {

class SIShrinkInstructions : public MachineFunctionPass {
public:
  static char ID;

  SIShrinkInstructions() : MachineFunctionPass(ID) {
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Shrink Instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SIShrinkInstructions, DEBUG_TYPE,
                      "SI Shrink Instructions", false, false)
INITIALIZE_PASS_END(SIShrinkInstructions, DEBUG_TYPE,
                    "SI Shrink Instructions", false, false)

char SIShrinkInstructions::ID = 0;

FunctionPass *llvm::createSIShrinkInstructionsPass() {
  return new SIShrinkInstructions();
}
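
/// \brief Return true if \p MO is a register operand whose register belongs
/// to a VGPR register class (virtual or physical).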
static bool isVGPR(const MachineOperand *MO, const SIRegisterInfo &TRI,
                   const MachineRegisterInfo &MRI) {
  if (!MO->isReg())
    return false;

  if (TargetRegisterInfo::isVirtualRegister(MO->getReg()))
    return TRI.hasVGPRs(MRI.getRegClass(MO->getReg()));

  return TRI.hasVGPRs(TRI.getPhysRegClass(MO->getReg()));
}
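
/// \brief Return true if \p MI can use a 32-bit VOP encoding: it has no src2
/// operand, src1 (if present) is a VGPR without source modifiers, and neither
/// src0 modifiers nor output modifiers (omod, clamp) are set.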
static bool canShrink(MachineInstr &MI, const SIInstrInfo *TII,
                      const SIRegisterInfo &TRI,
                      const MachineRegisterInfo &MRI) {
  const MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
  // Can't shrink instruction with three operands.
  // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add
  // a special case for it. It can only be shrunk if the third operand
  // is vcc. We should handle this the same way we handle vopc, by adding
  // a register allocation hint pre-regalloc and then do the shrinking
  // post-regalloc.
  if (Src2)
    return false;

  const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
  const MachineOperand *Src1Mod =
      TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);

  if (Src1 && (!isVGPR(Src1, TRI, MRI) || (Src1Mod && Src1Mod->getImm() != 0)))
    return false;

  // We don't need to check src0, all input types are legal, so just make sure
  // src0 isn't using any modifiers.
  if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
    return false;

  // Check output modifiers
  if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
    return false;

  if (TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
    return false;

  return true;
}

/// \brief This function checks \p MI for operands defined by a move immediate
/// instruction and then folds the literal constant into the instruction if it
/// can. This function assumes that \p MI is a VOP1, VOP2, or VOPC instruction
/// and will only fold literal constants if we are still in SSA.
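///
/// Illustrative sketch (not taken from the original comments): given
///   %a = V_MOV_B32_e32 0x11223344
///   %b = V_OR_B32_e32 %a, %c
/// the literal is folded to give
///   %b = V_OR_B32_e32 0x11223344, %c
/// and the V_MOV is erased once it has no remaining uses.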
static void foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
                           MachineRegisterInfo &MRI, bool TryToCommute = true) {
  if (!MRI.isSSA())
    return;

  assert(TII->isVOP1(MI.getOpcode()) || TII->isVOP2(MI.getOpcode()) ||
         TII->isVOPC(MI.getOpcode()));

  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
  MachineOperand &Src0 = MI.getOperand(Src0Idx);

  // Only one literal constant is allowed per instruction, so if src0 is a
  // literal constant then we can't do any folding.
  if (Src0.isImm() &&
      TII->isLiteralConstant(Src0, TII->getOpSize(MI, Src0Idx)))
    return;

  // Literal constants and SGPRs can only be used in Src0, so if Src0 is an
  // SGPR, we cannot commute the instruction, so we can't fold any literal
  // constants.
  if (Src0.isReg() && !isVGPR(&Src0, TRI, MRI))
    return;

  // Try to fold Src0
  if (Src0.isReg()) {
    unsigned Reg = Src0.getReg();
    MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &MovSrc = Def->getOperand(1);
      bool ConstantFolded = false;

      if (MovSrc.isImm() && isUInt<32>(MovSrc.getImm())) {
        Src0.ChangeToImmediate(MovSrc.getImm());
        ConstantFolded = true;
      }
      if (ConstantFolded) {
        if (MRI.use_empty(Reg))
          Def->eraseFromParent();
        ++NumLiteralConstantsFolded;
        return;
      }
    }
  }

  // We have failed to fold src0, so commute the instruction and try again.
  if (TryToCommute && MI.isCommutable() && TII->commuteInstruction(&MI))
    foldImmediates(MI, TII, MRI, false);
}

bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  std::vector<unsigned> I1Defs;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      // Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
      if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
        const MachineOperand &Src = MI.getOperand(1);

        if (Src.isImm()) {
          if (isInt<16>(Src.getImm()) && !TII->isInlineConstant(Src, 4))
            MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
        }

        continue;
      }
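
      // Skip anything that does not have a 32-bit (e32) VALU encoding.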
      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      if (!canShrink(MI, TII, TRI, MRI)) {
        // Try commuting the instruction and see if that enables us to shrink
        // it.
        if (!MI.isCommutable() || !TII->commuteInstruction(&MI) ||
            !canShrink(MI, TII, TRI, MRI))
          continue;
      }

      // getVOPe32 could be -1 here if we started with an instruction that had
      // a 32-bit encoding and then commuted it to an instruction that did not.
      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;
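
      // Look up the 32-bit (e32) opcode that corresponds to this instruction.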
      int Op32 = AMDGPU::getVOPe32(MI.getOpcode());

      if (TII->isVOPC(Op32)) {
        unsigned DstReg = MI.getOperand(0).getReg();
        if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
          // VOPC instructions can only write to the VCC register. We can't
          // force them to use VCC here, because the register allocator has
          // trouble with sequences like this, which cause the allocator to run
          // out of registers if vreg0 and vreg1 belong to the VCCReg register
          // class:
          //   vreg0 = VOPC;
          //   vreg1 = VOPC;
          //   S_AND_B64 vreg0, vreg1
          //
          // So, instead of forcing the instruction to write to VCC, we provide
          // a hint to the register allocator to use VCC and then we will run
          // this pass again after RA and shrink it if it outputs to VCC.
          MRI.setRegAllocationHint(MI.getOperand(0).getReg(), 0, AMDGPU::VCC);
          continue;
        }

        if (DstReg != AMDGPU::VCC)
          continue;
      }

      // We can shrink this instruction
      DEBUG(dbgs() << "Shrinking "; MI.dump(); dbgs() << '\n';);

      MachineInstrBuilder Inst32 =
          BuildMI(MBB, I, MI.getDebugLoc(), TII->get(Op32));
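
      // Copy the destination operand (dst).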
      Inst32.addOperand(MI.getOperand(0));

      Inst32.addOperand(*TII->getNamedOperand(MI, AMDGPU::OpName::src0));

      const MachineOperand *Src1 =
          TII->getNamedOperand(MI, AMDGPU::OpName::src1);
      if (Src1)
        Inst32.addOperand(*Src1);

      ++NumInstructionsShrunk;
      MI.eraseFromParent();
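
      // Now that the instruction uses the 32-bit encoding, try to fold a
      // literal constant from a defining move-immediate into src0.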
      foldImmediates(*Inst32, TII, MRI);
      DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
    }
  }
  return false;
}