//===-- SIShrinkInstructions.cpp - Shrink Instructions -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// The pass tries to use the 32-bit encoding for instructions when possible.
//===----------------------------------------------------------------------===//
//

#include "AMDGPU.h"
#include "AMDGPUMCInstLower.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-shrink-instructions"

STATISTIC(NumInstructionsShrunk,
          "Number of 64-bit instructions reduced to 32-bit.");
STATISTIC(NumLiteralConstantsFolded,
          "Number of literal constants folded into 32-bit instructions.");

namespace llvm {
  void initializeSIShrinkInstructionsPass(PassRegistry&);
}

using namespace llvm;

namespace {

class SIShrinkInstructions : public MachineFunctionPass {
public:
  static char ID;

  SIShrinkInstructions() : MachineFunctionPass(ID) {
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Shrink Instructions";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SIShrinkInstructions, DEBUG_TYPE,
                      "SI Shrink Instructions", false, false)
INITIALIZE_PASS_END(SIShrinkInstructions, DEBUG_TYPE,
                    "SI Shrink Instructions", false, false)

char SIShrinkInstructions::ID = 0;

FunctionPass *llvm::createSIShrinkInstructionsPass() {
  return new SIShrinkInstructions();
}
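
/// \returns true if \p MO is a register operand whose register class contains
/// VGPRs. Handles both virtual and physical registers.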
static bool isVGPR(const MachineOperand *MO, const SIRegisterInfo &TRI,
                   const MachineRegisterInfo &MRI) {
  if (!MO->isReg())
    return false;

  if (TargetRegisterInfo::isVirtualRegister(MO->getReg()))
    return TRI.hasVGPRs(MRI.getRegClass(MO->getReg()));

  return TRI.hasVGPRs(TRI.getPhysRegClass(MO->getReg()));
}
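
/// \returns true if \p MI can use the smaller 32-bit (VOP1/VOP2/VOPC)
/// encoding. Only the 64-bit VOP3 encoding has fields for a third source
/// operand, source modifiers (abs/neg), output modifiers (omod) and clamp,
/// so an instruction that uses any of them must keep its 64-bit form.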
static bool canShrink(MachineInstr &MI, const SIInstrInfo *TII,
                      const SIRegisterInfo &TRI,
                      const MachineRegisterInfo &MRI) {
  const MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2);
  // Can't shrink instructions with three operands.
  if (Src2)
    return false;

  const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
  const MachineOperand *Src1Mod =
      TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);

  // In the 32-bit encoding, src1 must be a VGPR and may not use modifiers.
  if (Src1 && (!isVGPR(Src1, TRI, MRI) || (Src1Mod && Src1Mod->getImm() != 0)))
    return false;

  // We don't need to check src0, since all input types are legal there, so
  // just make sure src0 isn't using any modifiers.
  if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
    return false;

  // Check output modifiers.
  if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
    return false;

  if (TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
    return false;

  return true;
}

/// \brief This function checks \p MI for operands defined by a move immediate
/// instruction and then folds the literal constant into the instruction if it
/// can. This function assumes that \p MI is a VOP1, VOP2, or VOPC instruction
/// and will only fold literal constants if we are still in SSA.
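///
/// Illustrative example (register names and values are made up):
///   %vgpr1 = V_MOV_B32_e32 0x12345678
///   %vgpr2 = V_OR_B32_e32 %vgpr1, %vgpr0
/// becomes
///   %vgpr2 = V_OR_B32_e32 0x12345678, %vgpr0
/// after which the now-dead V_MOV_B32_e32 is erased.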
static void foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
                           MachineRegisterInfo &MRI, bool TryToCommute = true) {
  // Folding relies on getUniqueVRegDef, so it is only done while the function
  // is still in SSA form.
  if (!MRI.isSSA())
    return;

  assert(TII->isVOP1(MI.getOpcode()) || TII->isVOP2(MI.getOpcode()) ||
         TII->isVOPC(MI.getOpcode()));

  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);

  // Only one literal constant is allowed per instruction, so if src0 is a
  // literal constant then we can't do any folding.
  if (Src0->isImm() && TII->isLiteralConstant(*Src0))
    return;

  // Literal constants and SGPRs can only be used in src0, so if src0 is an
  // SGPR, we cannot commute the instruction, so we can't fold any literal
  // constants.
  if (Src0->isReg() && !isVGPR(Src0, TRI, MRI))
    return;

  // Try to fold src0.
  if (Src0->isReg()) {
    unsigned Reg = Src0->getReg();
    MachineInstr *Def = MRI.getUniqueVRegDef(Reg);
    if (Def && Def->isMoveImmediate()) {
      MachineOperand &MovSrc = Def->getOperand(1);
      bool ConstantFolded = false;

      if (MovSrc.isImm() && isUInt<32>(MovSrc.getImm())) {
        Src0->ChangeToImmediate(MovSrc.getImm());
        ConstantFolded = true;
      }
      if (ConstantFolded) {
        // The defining move is dead once its value has been folded away.
        if (MRI.use_empty(Reg))
          Def->eraseFromParent();
        ++NumLiteralConstantsFolded;
        return;
      }
    }
  }

  // We have failed to fold src0, so commute the instruction and try again.
  if (TryToCommute && MI.isCommutable() && TII->commuteInstruction(&MI))
    foldImmediates(MI, TII, MRI, false);
}

bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  std::vector<unsigned> I1Defs;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      // Compute the next iterator up front, since MI may be erased or
      // replaced below, which would invalidate I.
      Next = std::next(I);
      MachineInstr &MI = *I;

      // Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
      if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
        const MachineOperand &Src = MI.getOperand(1);

        // S_MOVK_I32 sign-extends a 16-bit immediate, so the value must fit
        // in 16 bits; inline constants cost nothing in S_MOV_B32 already.
        if (Src.isImm()) {
          if (isInt<16>(Src.getImm()) && !TII->isInlineConstant(Src))
            MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
        }

        continue;
      }
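
      // Only VALU instructions with an equivalent 32-bit (e32) encoding are
      // candidates for shrinking.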
      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;

      if (!canShrink(MI, TII, TRI, MRI)) {
        // Try commuting the instruction and see if that enables us to shrink
        // it.
        if (!MI.isCommutable() || !TII->commuteInstruction(&MI) ||
            !canShrink(MI, TII, TRI, MRI))
          continue;
      }

      // getVOPe32 could be -1 here if we started with an instruction that had
      // a 32-bit encoding and then commuted it to an instruction that did not.
      if (!TII->hasVALU32BitEncoding(MI.getOpcode()))
        continue;
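
      // Map the 64-bit opcode to its 32-bit (e32) equivalent.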
      int Op32 = AMDGPU::getVOPe32(MI.getOpcode());

      if (TII->isVOPC(Op32)) {
        unsigned DstReg = MI.getOperand(0).getReg();
        if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
          // VOPC instructions can only write to the VCC register. We can't
          // force them to use VCC here, because the register allocator has
          // trouble with sequences like this, which cause the allocator to run
          // out of registers if vreg0 and vreg1 belong to the VCCReg register
          // class:
          //
          // vreg0 = VOPC;
          // vreg1 = VOPC;
          // S_AND_B64 vreg0, vreg1
          //
          // So, instead of forcing the instruction to write to VCC, we provide
          // a hint to the register allocator to use VCC, and then we will run
          // this pass again after RA and shrink it if it outputs to VCC.
          MRI.setRegAllocationHint(MI.getOperand(0).getReg(), 0, AMDGPU::VCC);
          continue;
        }
        if (DstReg != AMDGPU::VCC)
          continue;
      }

      // We can shrink this instruction.
      DEBUG(dbgs() << "Shrinking "; MI.dump(); dbgs() << '\n';);

      MachineInstrBuilder Inst32 =
          BuildMI(MBB, I, MI.getDebugLoc(), TII->get(Op32));

      // Copy dst, then src0; src1 is only present on VOP2/VOPC instructions.
      Inst32.addOperand(MI.getOperand(0));

      Inst32.addOperand(*TII->getNamedOperand(MI, AMDGPU::OpName::src0));

      const MachineOperand *Src1 =
          TII->getNamedOperand(MI, AMDGPU::OpName::src1);
      if (Src1)
        Inst32.addOperand(*Src1);

      ++NumInstructionsShrunk;
      MI.eraseFromParent();

      foldImmediates(*Inst32, TII, MRI);
      DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
    }
  }
  return false;
}