//===-- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Copies from VGPR to SGPR registers are illegal and the register coalescer
/// will sometimes generate these illegal copies in situations like this:
///
///  Register Class <vsrc> is the union of <vgpr> and <sgpr>
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///   %vreg1 <vsrc> = COPY %vreg0 <sgpr>
///    ...
///    BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <vsrc> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <vsrc> = PHI %vreg1 <vsrc>, <BB#0>, %vreg3 <vsrc>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <vsrc>
///
/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
/// code will look like this:
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///    ...
///    BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <vsrc> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <vsrc>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
///
/// Now that the result of the PHI instruction is an SGPR, the register
/// allocator is forced to constrain the register class of %vreg3 to
/// <sgpr>, so we end up with final code like this:
///
/// BB0:
///   %vreg0 <sgpr> = SCALAR_INST
///    ...
///    BRANCH %cond BB1, BB2
/// BB1:
///   %vreg2 <vgpr> = VECTOR_INST
///   %vreg3 <sgpr> = COPY %vreg2 <vgpr>
/// BB2:
///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <sgpr>, <BB#1>
///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
///
/// Now this code contains an illegal copy from a VGPR to an SGPR.
///
/// In order to avoid this problem, this pass searches for PHI instructions
/// which define a <vsrc> register and constrains their definition class to
/// <vgpr> if a user of the PHI's definition register is a vector instruction.
/// If the PHI's definition class is constrained to <vgpr>, then the coalescer
/// will be unable to perform the COPY removal from the above example, which
/// ultimately led to the creation of an illegal COPY.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "sgpr-copies"

namespace {

class SIFixSGPRCopies : public MachineFunctionPass {
private:
  static char ID;

  const TargetRegisterClass *inferRegClassFromUses(const SIRegisterInfo *TRI,
                                                   const MachineRegisterInfo &MRI,
                                                   unsigned Reg,
                                                   unsigned SubReg) const;
  const TargetRegisterClass *inferRegClassFromDef(const SIRegisterInfo *TRI,
                                                  const MachineRegisterInfo &MRI,
                                                  unsigned Reg,
                                                  unsigned SubReg) const;
  bool isVGPRToSGPRCopy(const MachineInstr &Copy, const SIRegisterInfo *TRI,
                        const MachineRegisterInfo &MRI) const;

public:
  SIFixSGPRCopies(TargetMachine &tm) : MachineFunctionPass(ID) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Fix SGPR copies";
  }
};

} // End anonymous namespace

char SIFixSGPRCopies::ID = 0;

FunctionPass *llvm::createSIFixSGPRCopiesPass(TargetMachine &tm) {
  return new SIFixSGPRCopies(tm);
}
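
/// Return true if \p MI has at least one virtual register operand whose
/// register class contains VGPRs.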
static bool hasVGPROperands(const MachineInstr &MI, const SIRegisterInfo *TRI) {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (!MI.getOperand(i).isReg() ||
        !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
      continue;

    if (TRI->hasVGPRs(MRI.getRegClass(MI.getOperand(i).getReg())))
      return true;
  }
  return false;
}

/// This function walks the use list of \p Reg, looking through chains of
/// COPYs, until it finds an instruction that isn't a COPY.
/// \return The register class common to \p Reg and all of its COPY-derived
/// users.
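///
/// For example (illustrative only, using the notation from the file header):
///   %vreg1 <vsrc> = PHI ...
///   %vreg2 <sgpr> = COPY %vreg1
/// Looking through the COPY narrows %vreg1's inferred class from <vsrc> to
/// <sgpr>, the common subclass of the two.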
const TargetRegisterClass *SIFixSGPRCopies::inferRegClassFromUses(
                                                 const SIRegisterInfo *TRI,
                                                 const MachineRegisterInfo &MRI,
                                                 unsigned Reg,
                                                 unsigned SubReg) const {
  // The Reg parameter to the function must always be defined by either a PHI
  // or a COPY, therefore it cannot be a physical register.
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Reg cannot be a physical register");

  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  RC = TRI->getSubRegClass(RC, SubReg);
  for (MachineRegisterInfo::use_instr_iterator
       I = MRI.use_instr_begin(Reg), E = MRI.use_instr_end(); I != E; ++I) {
    switch (I->getOpcode()) {
    case AMDGPU::COPY:
      RC = TRI->getCommonSubClass(RC, inferRegClassFromUses(TRI, MRI,
                                  I->getOperand(0).getReg(),
                                  I->getOperand(0).getSubReg()));
      break;
    }
  }

  return RC;
}
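
/// Walk the chain of COPYs defining \p Reg and return the register class of
/// the first def that is not itself a COPY. A physical register is mapped
/// directly to its register class.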
const TargetRegisterClass *SIFixSGPRCopies::inferRegClassFromDef(
                                                 const SIRegisterInfo *TRI,
                                                 const MachineRegisterInfo &MRI,
                                                 unsigned Reg,
                                                 unsigned SubReg) const {
  if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
    const TargetRegisterClass *RC = TRI->getPhysRegClass(Reg);
    return TRI->getSubRegClass(RC, SubReg);
  }

  MachineInstr *Def = MRI.getVRegDef(Reg);
  if (Def->getOpcode() != AMDGPU::COPY)
    return TRI->getSubRegClass(MRI.getRegClass(Reg), SubReg);

  return inferRegClassFromDef(TRI, MRI, Def->getOperand(1).getReg(),
                              Def->getOperand(1).getSubReg());
}
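
/// Return true if \p Copy copies a register in a VGPR-containing class into
/// a register in an SGPR class, i.e. one of the illegal copies this pass
/// exists to repair.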
bool SIFixSGPRCopies::isVGPRToSGPRCopy(const MachineInstr &Copy,
                                       const SIRegisterInfo *TRI,
                                       const MachineRegisterInfo &MRI) const {

  unsigned DstReg = Copy.getOperand(0).getReg();
  unsigned SrcReg = Copy.getOperand(1).getReg();
  unsigned SrcSubReg = Copy.getOperand(1).getSubReg();
  const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg);
  const TargetRegisterClass *SrcRC;
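
  // Copies from physical registers are not rewritten here, M0 can only be
  // written by SALU instructions, and i1 (VReg_1) copies are lowered
  // separately, so none of these count as VGPR to SGPR copies.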
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
      DstRC == &AMDGPU::M0RegRegClass ||
      MRI.getRegClass(SrcReg) == &AMDGPU::VReg_1RegClass)
    return false;

  SrcRC = TRI->getSubRegClass(MRI.getRegClass(SrcReg), SrcSubReg);
  return TRI->isSGPRClass(DstRC) && TRI->hasVGPRs(SrcRC);
}

bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIRegisterInfo *TRI =
      static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {
    MachineBasicBlock &MBB = *BI;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I) {
      MachineInstr &MI = *I;

      if (MI.getOpcode() == AMDGPU::COPY && isVGPRToSGPRCopy(MI, TRI, MRI)) {
        DEBUG(dbgs() << "Fixing VGPR -> SGPR copy:\n");
        DEBUG(MI.print(dbgs()));
        TII->moveToVALU(MI);
      }

      switch (MI.getOpcode()) {
      default:
        continue;
      case AMDGPU::PHI: {
        DEBUG(dbgs() << " Fixing PHI:\n");
        DEBUG(MI.print(dbgs()));

        for (unsigned i = 1; i < MI.getNumOperands(); i += 2) {
          unsigned Reg = MI.getOperand(i).getReg();
          const TargetRegisterClass *RC = inferRegClassFromDef(TRI, MRI, Reg,
                                                  MI.getOperand(0).getSubReg());
          MRI.constrainRegClass(Reg, RC);
        }

        unsigned Reg = MI.getOperand(0).getReg();
        const TargetRegisterClass *RC = inferRegClassFromUses(TRI, MRI, Reg,
                                                  MI.getOperand(0).getSubReg());
        if (TRI->getCommonSubClass(RC, &AMDGPU::VReg_32RegClass))
          MRI.constrainRegClass(Reg, &AMDGPU::VReg_32RegClass);

        if (!TRI->isSGPRClass(MRI.getRegClass(Reg)))
          break;

        // If a PHI node defines an SGPR and any of its operands are VGPRs,
        // then we need to move it to the VALU.
        //
        // Also, if a PHI node defines an SGPR and has all SGPR operands
        // we must move it to the VALU, because the SGPR operands will
        // all end up being assigned the same register, which means
        // there is a potential for a conflict if different threads take
        // different control flow paths.
        //
        // For example:
        //
        //   sgpr0 = def;
        //   ...
        //   sgpr1 = def;
        //   ...
        //   sgpr2 = PHI sgpr0, sgpr1
        //
        // If sgpr0 and sgpr1 are assigned the same physical register, the
        // PHI's two inputs become indistinguishable even though different
        // threads may have reached it along different paths.
        //
        // FIXME: This is OK if the branching decision is made based on an
        // SGPR value.
        bool SGPRBranch = false;

        // The one exception to this rule is when one of the operands
        // is defined by a SI_BREAK, SI_IF_BREAK, or SI_ELSE_BREAK
        // instruction. In this case, we know the program will
        // never enter the second block (the loop) without entering
        // the first block (where the condition is computed), so there
        // is no chance for values to be overwritten.
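        //
        // A minimal sketch of that shape (illustrative only, not taken from
        // a real compile):
        //
        //   BB0:                              ; condition computed here
        //     %sgpr0 = SI_IF_BREAK %cond, ...
        //     BRANCH BB1
        //   BB1:                              ; the loop
        //     %sgpr1 = PHI %sgpr0, <BB#0>, ...
        //
        // BB1 cannot be entered without first executing BB0, so the PHI's
        // incoming value cannot be overwritten by a divergent path.
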
        bool HasBreakDef = false;
        for (unsigned i = 1; i < MI.getNumOperands(); i += 2) {
          unsigned Reg = MI.getOperand(i).getReg();
          if (TRI->hasVGPRs(MRI.getRegClass(Reg))) {
            TII->moveToVALU(MI);
            break;
          }
          MachineInstr *DefInstr = MRI.getUniqueVRegDef(Reg);
          assert(DefInstr);
          switch (DefInstr->getOpcode()) {
          case AMDGPU::SI_BREAK:
          case AMDGPU::SI_IF_BREAK:
          case AMDGPU::SI_ELSE_BREAK:
          // If we see a PHI instruction that defines an SGPR, then that PHI
          // instruction has already been considered and should have
          // a *_BREAK as an operand.
          case AMDGPU::PHI:
            HasBreakDef = true;
            break;
          }
        }

        if (!SGPRBranch && !HasBreakDef)
          TII->moveToVALU(MI);
        break;
      }
      case AMDGPU::REG_SEQUENCE: {
        if (TRI->hasVGPRs(TII->getOpRegClass(MI, 0)) ||
            !hasVGPROperands(MI, TRI))
          continue;

        DEBUG(dbgs() << "Fixing REG_SEQUENCE:\n");
        DEBUG(MI.print(dbgs()));

        TII->legalizeOperands(&MI);
        break;
      }
      case AMDGPU::INSERT_SUBREG: {
        const TargetRegisterClass *DstRC, *Src0RC, *Src1RC;
        DstRC = MRI.getRegClass(MI.getOperand(0).getReg());
        Src0RC = MRI.getRegClass(MI.getOperand(1).getReg());
        Src1RC = MRI.getRegClass(MI.getOperand(2).getReg());
        if (TRI->isSGPRClass(DstRC) &&
            (TRI->hasVGPRs(Src0RC) || TRI->hasVGPRs(Src1RC))) {
          DEBUG(dbgs() << " Fixing INSERT_SUBREG:\n");
          DEBUG(MI.print(dbgs()));
          TII->moveToVALU(MI);
        }
        break;
      }
      }
    }
  }
  return true;
}