//===-- SIFoldOperands.cpp - Fold operands ---------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"

using namespace llvm;

namespace {

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;

public:
  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Fold Operands";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTree>();
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

struct FoldCandidate {
  MachineInstr *UseMI;
  unsigned UseOpNo;
  MachineOperand *OpToFold;
  uint64_t ImmToFold;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp) :
                UseMI(MI), UseOpNo(OpNo) {
    if (FoldOp->isImm()) {
      OpToFold = nullptr;
      ImmToFold = FoldOp->getImm();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isImm() const {
    return !OpToFold;
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SIFoldOperands, DEBUG_TYPE,
                      "SI Fold Operands", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(SIFoldOperands, DEBUG_TYPE,
                    "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}
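
// Opcodes whose single source operand can be safely propagated into uses of
// the destination register: plain immediate or register moves.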
static bool isSafeToFold(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::COPY:
    return true;
  default:
    return false;
  }
}
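
// Rewrite the use operand recorded in Fold, either to the folded immediate
// or to the source virtual register (with its subregister index).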
static bool updateOperand(FoldCandidate &Fold,
                          const TargetRegisterInfo &TRI) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
    return true;
  }

  // FIXME: Handle physical registers.

  return false;
}
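
// Record (MI, OpNo) as a fold candidate if OpToFold is legal in that operand
// position, commuting MI first if that is what makes the fold legal.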
static bool tryAddToFoldList(std::vector<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(MI, OpNo, OpToFold)) {
    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0;
    unsigned CommuteIdx1;
    bool CanCommute = TII->findCommutedOpIndices(MI, CommuteIdx0, CommuteIdx1);
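
    // If the operand being folded is one of the commutable pair, follow it
    // to its post-commute index.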
    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        OpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        OpNo = CommuteIdx0;
    }

    if (!CanCommute || !TII->commuteInstruction(MI))
      return false;

    if (!TII->isOperandLegal(MI, OpNo, OpToFold))
      return false;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}
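
// Scan every block for mov-like defs and try to fold each def's source into
// all uses of its destination register.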
bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (!isSafeToFold(MI.getOpcode()))
        continue;
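
      // For the opcodes accepted above, operand 0 is the def and operand 1
      // is the single source.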
      unsigned OpSize = TII->getOpSize(MI, 1);
      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm();

      // FIXME: We could also be folding things like FrameIndexes and
      // TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
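      // Inline constants (e.g. small integers and 0.5/1.0/2.0/4.0 on SI) are
      // encoded directly in the instruction for free, so folding them never
      // grows the program.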
      if (FoldingImm && !TII->isInlineConstant(OpToFold, OpSize) &&
          !MRI.hasOneUse(MI.getOperand(0).getReg()))
        continue;

      // FIXME: Fold operands with subregs.
      if (OpToFold.isReg() &&
          (!TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()) ||
           OpToFold.getSubReg()))
        continue;

      std::vector<FoldCandidate> FoldList;
      for (MachineRegisterInfo::use_iterator
           Use = MRI.use_begin(MI.getOperand(0).getReg()), E = MRI.use_end();
           Use != E; ++Use) {

        MachineInstr *UseMI = Use->getParent();
        const MachineOperand &UseOp = UseMI->getOperand(Use.getOperandNo());

        // FIXME: Fold operands with subregs.
        if (UseOp.isReg() && ((UseOp.getSubReg() && OpToFold.isReg()) ||
            UseOp.isImplicit())) {
          continue;
        }

        APInt Imm;

        if (FoldingImm) {
          unsigned UseReg = UseOp.getReg();
          const TargetRegisterClass *UseRC
            = TargetRegisterInfo::isVirtualRegister(UseReg) ?
            MRI.getRegClass(UseReg) :
            TRI.getRegClass(UseReg);

          Imm = APInt(64, OpToFold.getImm());

          // Split 64-bit constants into 32-bits for folding.
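          // A use through sub0 reads the low 32 bits of the constant; a use
          // through sub1 reads the high 32 bits.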
          if (UseOp.getSubReg()) {
            if (UseRC->getSize() != 8)
              continue;

            if (UseOp.getSubReg() == AMDGPU::sub0) {
              Imm = Imm.getLoBits(32);
            } else {
              assert(UseOp.getSubReg() == AMDGPU::sub1);
              Imm = Imm.getHiBits(32);
            }
          }

          // In order to fold immediates into copies, we need to change the
          // copy to a MOV.
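          // A target-independent COPY cannot encode an immediate source,
          // but the equivalent S_MOV/V_MOV can.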
          if (UseMI->getOpcode() == AMDGPU::COPY) {
            unsigned DestReg = UseMI->getOperand(0).getReg();
            const TargetRegisterClass *DestRC
              = TargetRegisterInfo::isVirtualRegister(DestReg) ?
              MRI.getRegClass(DestReg) :
              TRI.getRegClass(DestReg);

            unsigned MovOp = TII->getMovOpcode(DestRC);
            if (MovOp == AMDGPU::COPY)
              continue;

            UseMI->setDesc(TII->get(MovOp));
          }
        }

        const MCInstrDesc &UseDesc = UseMI->getDesc();

        // Don't fold into target independent nodes. Target independent
        // opcodes don't have defined register classes.
        if (UseDesc.isVariadic() ||
            UseDesc.OpInfo[Use.getOperandNo()].RegClass == -1)
          continue;

        if (FoldingImm) {
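          // ImmOp is a local temporary, but FoldCandidate copies the
          // immediate value out of it, so no pointer to it is retained.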
          MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
          tryAddToFoldList(FoldList, UseMI, Use.getOperandNo(), &ImmOp, TII);
          continue;
        }

        tryAddToFoldList(FoldList, UseMI, Use.getOperandNo(), &OpToFold, TII);

        // FIXME: We could try to change the instruction from 64-bit to 32-bit
        // to enable more folding opportunities. The shrink operands pass
        // already does this.
      }

      for (FoldCandidate &Fold : FoldList) {
        if (updateOperand(Fold, TRI)) {
          // Clear kill flags.
          if (!Fold.isImm()) {
            assert(Fold.OpToFold && Fold.OpToFold->isReg());
            Fold.OpToFold->setIsKill(false);
          }
          DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
                Fold.UseOpNo << " of " << *Fold.UseMI << '\n');
        }
      }
    }
  }
  return false;
}