//===-- SIShrinkInstructions.cpp - Shrink Instructions --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// The pass tries to use the 32-bit encoding for instructions when possible.
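///
/// Most VALU instructions on SI have both a 64-bit (VOP3) encoding and a
/// 32-bit (VOP1/VOP2/VOPC) encoding. The 32-bit forms cannot encode a third
/// source operand or source/output modifiers, which is what canShrink()
/// below checks for.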
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUMCInstLower.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-shrink-instructions"

STATISTIC(NumInstructionsShrunk,
          "Number of 64-bit instructions reduced to 32-bit.");
STATISTIC(NumLiteralConstantsFolded,
          "Number of literal constants folded into 32-bit instructions.");

namespace llvm {
  void initializeSIShrinkInstructionsPass(PassRegistry&);
}

using namespace llvm;

namespace {

class SIShrinkInstructions : public MachineFunctionPass {
public:
  static char ID;

  SIShrinkInstructions() : MachineFunctionPass(ID) {
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Shrink Instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SIShrinkInstructions, DEBUG_TYPE,
                      "SI Shrink Instructions", false, false)
INITIALIZE_PASS_END(SIShrinkInstructions, DEBUG_TYPE,
                    "SI Shrink Instructions", false, false)

char SIShrinkInstructions::ID = 0;

FunctionPass *llvm::createSIShrinkInstructionsPass() {
  return new SIShrinkInstructions();
}

static bool isVGPR(const MachineOperand *MO, const SIRegisterInfo &TRI,
                   const MachineRegisterInfo &MRI) {
  if (!MO->isReg())
    return false;

  if (TargetRegisterInfo::isVirtualRegister(MO->getReg()))
    return TRI.hasVGPRs(MRI.getRegClass(MO->getReg()));

  return TRI.hasVGPRs(TRI.getPhysRegClass(MO->getReg()));
}

static bool canShrink(MachineInstr &MI, const SIInstrInfo *TII,
                      const SIRegisterInfo &TRI,
                      const MachineRegisterInfo &MRI) {
  // Can't shrink instructions with three operands.
  const MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
  if (Src2)
    return false;

  const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
  const MachineOperand *Src1Mod =
      TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);

  if (Src1 && (!isVGPR(Src1, TRI, MRI) || (Src1Mod && Src1Mod->getImm() != 0)))
    return false;

  // We don't need to check src0 because all input types are legal there, so
  // just make sure src0 isn't using any modifiers.
  if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
    return false;

  // Check output modifiers.
  if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
    return false;

  if (TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
    return false;

  return true;
}

/// \brief This function checks \p MI for operands defined by a move immediate
/// instruction and then folds the literal constant into the instruction if it
/// can. This function assumes that \p MI is a VOP1, VOP2, or VOPC instruction
/// and will only fold literal constants if we are still in SSA.
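///
/// For example (the registers and the immediate are illustrative):
///   %vreg0 = V_MOV_B32_e32 0x12345678
///   %vreg2 = V_ADD_F32_e32 %vreg0, %vreg1
/// becomes
///   %vreg2 = V_ADD_F32_e32 0x12345678, %vreg1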
static void foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
                           MachineRegisterInfo &MRI, bool TryToCommute = true) {
  if (!MRI.isSSA())
    return;

  assert(TII->isVOP1(MI.getOpcode()) || TII->isVOP2(MI.getOpcode()) ||
         TII->isVOPC(MI.getOpcode()));

  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
  MachineOperand &Src0 = MI.getOperand(Src0Idx);

  // Only one literal constant is allowed per instruction, so if src0 is a
  // literal constant then we can't do any folding.
  if (Src0.isImm() &&
      TII->isLiteralConstant(Src0, TII->getOpSize(MI, Src0Idx)))
    return;

  // Literal constants and SGPRs can only be used in Src0. If Src0 is an
  // SGPR, we cannot commute the instruction, so we can't fold any literal
  // constants.
  if (Src0.isReg() && !isVGPR(&Src0, TRI, MRI))
    return;

  // Try to fold Src0.
  if (Src0.isReg()) {
    unsigned Reg = Src0.getReg();
    MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &MovSrc = Def->getOperand(1);
      bool ConstantFolded = false;

      if (MovSrc.isImm() && isUInt<32>(MovSrc.getImm())) {
        Src0.ChangeToImmediate(MovSrc.getImm());
        ConstantFolded = true;
      }
      if (ConstantFolded) {
        // The defining move is dead once its value has been folded.
        if (MRI.use_empty(Reg))
          Def->eraseFromParent();
        ++NumLiteralConstantsFolded;
        return;
      }
    }
  }

  // We have failed to fold src0, so commute the instruction and try again.
  if (TryToCommute && MI.isCommutable() && TII->commuteInstruction(&MI))
    foldImmediates(MI, TII, MRI, false);
}

bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      // Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
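      // For example (the immediate is illustrative):
      //   S_MOV_B32  %sgpr0, 0x1234   ; 4-byte opcode + 4-byte literal
      //   S_MOVK_I32 %sgpr0, 0x1234   ; 4 bytes, 16-bit immediate encoded inline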
      if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
        const MachineOperand &Src = MI.getOperand(1);

        if (Src.isImm()) {
          if (isInt<16>(Src.getImm()) && !TII->isInlineConstant(Src, 4))
            MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
        }

        continue;
      }

      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      if (!canShrink(MI, TII, TRI, MRI)) {
        // Try commuting the instruction and see if that enables us to shrink
        // it.
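        // For example (the registers are illustrative), an SGPR in src1
        // blocks the 32-bit encoding, but commuting
        //   V_ADD_F32 %vgpr1, %vgpr0, %sgpr0
        // into
        //   V_ADD_F32 %vgpr1, %sgpr0, %vgpr0
        // moves the SGPR into src0, where it is allowed.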
        if (!MI.isCommutable() || !TII->commuteInstruction(&MI) ||
            !canShrink(MI, TII, TRI, MRI))
          continue;
      }

      // getVOPe32 could be -1 here if we started with an instruction that had
      // a 32-bit encoding and then commuted it to an instruction that did not.
      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

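      // Map the 64-bit opcode to its 32-bit equivalent, for example
      // V_ADD_F32_e64 -> V_ADD_F32_e32.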
      int Op32 = AMDGPU::getVOPe32(MI.getOpcode());

      if (TII->isVOPC(Op32)) {
        unsigned DstReg = MI.getOperand(0).getReg();
        if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
          // VOPC instructions can only write to the VCC register. We can't
          // force them to use VCC here, because the register allocator has
          // trouble with sequences like this, which cause it to run out of
          // registers if vreg0 and vreg1 belong to the VCCReg register class:
          //
          //   vreg0 = VOPC;
          //   vreg1 = VOPC;
          //   S_AND_B64 vreg0, vreg1
          //
          // So, instead of forcing the instruction to write to VCC, we
          // provide a hint to the register allocator to use VCC, and then we
          // will run this pass again after RA and shrink it if it outputs to
          // VCC.
          MRI.setRegAllocationHint(MI.getOperand(0).getReg(), 0, AMDGPU::VCC);
          continue;
        }
        if (DstReg != AMDGPU::VCC)
          continue;
      }

      // We can shrink this instruction.
      DEBUG(dbgs() << "Shrinking "; MI.dump(); dbgs() << '\n';);

      MachineInstrBuilder Inst32 =
          BuildMI(MBB, I, MI.getDebugLoc(), TII->get(Op32));

      // dst
      Inst32.addOperand(MI.getOperand(0));

      Inst32.addOperand(*TII->getNamedOperand(MI, AMDGPU::OpName::src0));

      const MachineOperand *Src1 =
          TII->getNamedOperand(MI, AMDGPU::OpName::src1);
      if (Src1)
        Inst32.addOperand(*Src1);

      ++NumInstructionsShrunk;
      MI.eraseFromParent();

      foldImmediates(*Inst32, TII, MRI);
      DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
    }
  }
  return false;
}