//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//
#include "SIRegisterInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

SIRegisterInfo::SIRegisterInfo() : AMDGPURegisterInfo() {}
void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved, unsigned Reg) const {
  MCRegAliasIterator R(Reg, this, true);

  // Reserve the register itself and everything aliasing it (its subregisters
  // and every tuple containing it).
  for (; R.isValid(); ++R)
    Reserved.set(*R);
}
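
// Note: reserving a tuple like AMDGPU::SGPR102_SGPR103 via the helper above
// also reserves SGPR102, SGPR103, and every wider tuple overlapping them,
// because the alias iterator visits all overlapping registers.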
BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);

  // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but
  // this seems likely to result in bugs, so I'm marking them as reserved.
  reserveRegisterTuples(Reserved, AMDGPU::EXEC);
  reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);

  // Reserve the last 2 registers so we will always have at least 2 more that
  // will physically contain VCC.
  reserveRegisterTuples(Reserved, AMDGPU::SGPR102_SGPR103);

  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();

  if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    // SI/CI have 104 SGPRs. VI has 102. We need to shift down the reservation
    // for VCC, which is the last 2 SGPRs.
    reserveRegisterTuples(Reserved, AMDGPU::SGPR98_SGPR99);
    reserveRegisterTuples(Reserved, AMDGPU::SGPR100_SGPR101);
  }

  // Tonga and Iceland can only allocate a fixed number of SGPRs due
  // to a hw bug.
  if (ST.hasSGPRInitBug()) {
    unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
    // Reserve some SGPRs for FLAT_SCRATCH and VCC (4 SGPRs).
    // Assume XNACK_MASK is unused.
    unsigned Limit = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG - 4;

    for (unsigned i = Limit; i < NumSGPRs; ++i) {
      unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
      reserveRegisterTuples(Reserved, Reg);
    }
  }

  return Reserved;
}
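
// Illustration: assuming FIXED_SGPR_COUNT_FOR_INIT_BUG is 80 (Tonga/Iceland),
// Limit is 80 - 4 = 76, so SGPR76..SGPR103 and the tuples overlapping them
// all end up reserved.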
unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
                                                unsigned Idx) const {
  const AMDGPUSubtarget &STI = MF.getSubtarget<AMDGPUSubtarget>();
  // FIXME: We should adjust the max number of waves based on LDS size.
  unsigned SGPRLimit = getNumSGPRsAllowed(STI.getGeneration(),
                                          STI.getMaxWavesPerCU());
  unsigned VGPRLimit = getNumVGPRsAllowed(STI.getMaxWavesPerCU());

  for (regclass_iterator I = regclass_begin(), E = regclass_end();
       I != E; ++I) {
    // Every 32-bit subregister of a wide class counts against the per-wave
    // budget, so scale the limit down by the number of subregisters.
    unsigned NumSubRegs = std::max((int)(*I)->getSize() / 4, 1);
    unsigned Limit;

    if (isSGPRClass(*I)) {
      Limit = SGPRLimit / NumSubRegs;
    } else {
      Limit = VGPRLimit / NumSubRegs;
    }

    const int *Sets = getRegClassPressureSets(*I);
    assert(Sets);
    for (unsigned i = 0; Sets[i] != -1; ++i) {
      if (Sets[i] == (int)Idx)
        return Limit;
    }
  }
  return 256;
}
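
// Worked example for the limit above: at 10 waves per CU on VI,
// getNumSGPRsAllowed returns 80, so a 64-bit SGPR class (NumSubRegs == 2)
// gets a pressure limit of 80 / 2 = 40.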
bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
  return Fn.getFrameInfo()->hasStackObjects();
}
static unsigned getNumSubRegsForSpillOp(unsigned Op) {

  switch (Op) {
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V512_RESTORE:
    return 16;
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
    return 8;
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
    return 4;
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
    return 3;
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
    return 2;
  case AMDGPU::SI_SPILL_S32_SAVE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
  case AMDGPU::SI_SPILL_V32_SAVE:
  case AMDGPU::SI_SPILL_V32_RESTORE:
    return 1;
  default: llvm_unreachable("Invalid spill opcode");
  }
}
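
// The table above is just bit-width / 32: e.g. SI_SPILL_V128_SAVE covers a
// 128-bit VGPR tuple, so it spills 128 / 32 = 4 subregisters.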
void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
                                           unsigned LoadStoreOp,
                                           unsigned Value,
                                           unsigned ScratchRsrcReg,
                                           unsigned ScratchOffset,
                                           int64_t Offset,
                                           RegScavenger *RS) const {

  MachineBasicBlock *MBB = MI->getParent();
  const MachineFunction *MF = MI->getParent()->getParent();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
  LLVMContext &Ctx = MF->getFunction()->getContext();
  DebugLoc DL = MI->getDebugLoc();
  bool IsLoad = TII->get(LoadStoreOp).mayLoad();

  bool RanOutOfSGPRs = false;
  unsigned SOffset = ScratchOffset;

  unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
  unsigned Size = NumSubRegs * 4;

  // If the final offset does not fit in the 12-bit immediate field, fold it
  // into the scratch wave offset in a scavenged SGPR instead.
  if (!isUInt<12>(Offset + Size)) {
    SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);
    if (SOffset == AMDGPU::NoRegister) {
      RanOutOfSGPRs = true;
      SOffset = AMDGPU::SGPR0;
    }
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
            .addReg(ScratchOffset)
            .addImm(Offset);
    Offset = 0;
  }

  if (RanOutOfSGPRs)
    Ctx.emitError("Ran out of SGPRs for spilling VGPRs");

  // Emit one dword load/store per 32-bit subregister.
  for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += 4) {
    unsigned SubReg = NumSubRegs > 1 ?
        getPhysRegSubReg(Value, &AMDGPU::VGPR_32RegClass, i) :
        Value;
    bool IsKill = (i == e - 1);

    BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
      .addReg(SubReg, getDefRegState(IsLoad))
      .addReg(ScratchRsrcReg, getKillRegState(IsKill))
      .addReg(SOffset)
      .addImm(Offset)
      .addImm(0) // glc
      .addImm(0) // slc
      .addImm(0) // tfe
      .addReg(Value, RegState::Implicit | getDefRegState(IsLoad))
      .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
  }
}
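
// Illustrative expansion (not literal MIR syntax): a SI_SPILL_V128_SAVE of
// VGPR0_VGPR1_VGPR2_VGPR3 at scratch offset 16 becomes four dword stores:
//   BUFFER_STORE_DWORD_OFFSET VGPR0, <rsrc>, <soffset>, 16
//   BUFFER_STORE_DWORD_OFFSET VGPR1, <rsrc>, <soffset>, 20
//   BUFFER_STORE_DWORD_OFFSET VGPR2, <rsrc>, <soffset>, 24
//   BUFFER_STORE_DWORD_OFFSET VGPR3, <rsrc>, <soffset>, 28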
void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                        int SPAdj, unsigned FIOperandNum,
                                        RegScavenger *RS) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand &FIOp = MI->getOperand(FIOperandNum);
  int Index = MI->getOperand(FIOperandNum).getIndex();
  switch (MI->getOpcode()) {
    // SGPR register spill
    case AMDGPU::SI_SPILL_S512_SAVE:
    case AMDGPU::SI_SPILL_S256_SAVE:
    case AMDGPU::SI_SPILL_S128_SAVE:
    case AMDGPU::SI_SPILL_S64_SAVE:
    case AMDGPU::SI_SPILL_S32_SAVE: {
      unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());

      // Write each 32-bit subregister into its assigned lane of a spill VGPR.
      for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
        unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                           &AMDGPU::SGPR_32RegClass, i);
        struct SIMachineFunctionInfo::SpilledReg Spill =
            MFI->getSpilledReg(MF, Index, i);

        if (Spill.VGPR == AMDGPU::NoRegister) {
           LLVMContext &Ctx = MF->getFunction()->getContext();
           Ctx.emitError("Ran out of VGPRs for spilling SGPR");
        }

        BuildMI(*MBB, MI, DL,
                TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
                Spill.VGPR)
                .addReg(SubReg)
                .addImm(Spill.Lane);
      }
      MI->eraseFromParent();
      break;
    }
    // SGPR register restore
    case AMDGPU::SI_SPILL_S512_RESTORE:
    case AMDGPU::SI_SPILL_S256_RESTORE:
    case AMDGPU::SI_SPILL_S128_RESTORE:
    case AMDGPU::SI_SPILL_S64_RESTORE:
    case AMDGPU::SI_SPILL_S32_RESTORE: {
      unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());

      // Read each 32-bit subregister back out of its lane of the spill VGPR.
      for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
        unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                           &AMDGPU::SGPR_32RegClass, i);
        struct SIMachineFunctionInfo::SpilledReg Spill =
            MFI->getSpilledReg(MF, Index, i);

        if (Spill.VGPR == AMDGPU::NoRegister) {
           LLVMContext &Ctx = MF->getFunction()->getContext();
           Ctx.emitError("Ran out of VGPRs for spilling SGPR");
        }

        BuildMI(*MBB, MI, DL,
                TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
                SubReg)
                .addReg(Spill.VGPR)
                .addImm(Spill.Lane)
                .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
      }
      // TODO: only do this when it is needed
      switch (MF->getSubtarget<AMDGPUSubtarget>().getGeneration()) {
      case AMDGPUSubtarget::SOUTHERN_ISLANDS:
        // "VALU writes SGPR" -> "SMRD reads that SGPR" needs "S_NOP 3" on SI
        TII->insertNOPs(MI, 3);
        break;
      case AMDGPUSubtarget::SEA_ISLANDS:
        break;
      default: // VOLCANIC_ISLANDS and later
        // "VALU writes SGPR -> VMEM reads that SGPR" needs "S_NOP 4" on VI
        // and later. This also applies to VALUs which write VCC, but we're
        // unlikely to see VMEM use VCC.
        TII->insertNOPs(MI, 4);
      }

      MI->eraseFromParent();
      break;
    }
    // VGPR register spill
    case AMDGPU::SI_SPILL_V512_SAVE:
    case AMDGPU::SI_SPILL_V256_SAVE:
    case AMDGPU::SI_SPILL_V128_SAVE:
    case AMDGPU::SI_SPILL_V96_SAVE:
    case AMDGPU::SI_SPILL_V64_SAVE:
    case AMDGPU::SI_SPILL_V32_SAVE:
      buildScratchLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
            TII->getNamedOperand(*MI, AMDGPU::OpName::src)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
            FrameInfo->getObjectOffset(Index), RS);
      MI->eraseFromParent();
      break;
    // VGPR register restore
    case AMDGPU::SI_SPILL_V32_RESTORE:
    case AMDGPU::SI_SPILL_V64_RESTORE:
    case AMDGPU::SI_SPILL_V96_RESTORE:
    case AMDGPU::SI_SPILL_V128_RESTORE:
    case AMDGPU::SI_SPILL_V256_RESTORE:
    case AMDGPU::SI_SPILL_V512_RESTORE: {
      buildScratchLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
            TII->getNamedOperand(*MI, AMDGPU::OpName::dst)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
            FrameInfo->getObjectOffset(Index), RS);
      MI->eraseFromParent();
      break;
    }
    default: {
      // Fold the frame offset into the instruction as an immediate if it is
      // legal there; otherwise materialize it in a scavenged VGPR.
      int64_t Offset = FrameInfo->getObjectOffset(Index);
      FIOp.ChangeToImmediate(Offset);
      if (!TII->isImmOperandLegal(MI, FIOperandNum, FIOp)) {
        unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, SPAdj);
        BuildMI(*MBB, MI, MI->getDebugLoc(),
                TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
                .addImm(Offset);
        FIOp.ChangeToRegister(TmpReg, false, false, true);
      }
    }
  }
}
unsigned SIRegisterInfo::getHWRegIndex(unsigned Reg) const {
  return getEncodingValue(Reg) & 0xff;
}
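
// Note that the HW index is relative to a register file: SGPR3 and VGPR3 both
// have index 3, and callers rely on the register class to disambiguate.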
// FIXME: This is very slow. It might be worth creating a map from physreg to
// register class.
const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
  assert(!TargetRegisterInfo::isVirtualRegister(Reg));

  static const TargetRegisterClass *const BaseClasses[] = {
    &AMDGPU::VGPR_32RegClass,
    &AMDGPU::SReg_32RegClass,
    &AMDGPU::VReg_64RegClass,
    &AMDGPU::SReg_64RegClass,
    &AMDGPU::VReg_96RegClass,
    &AMDGPU::VReg_128RegClass,
    &AMDGPU::SReg_128RegClass,
    &AMDGPU::VReg_256RegClass,
    &AMDGPU::SReg_256RegClass,
    &AMDGPU::VReg_512RegClass,
    &AMDGPU::SReg_512RegClass
  };

  for (const TargetRegisterClass *BaseClass : BaseClasses) {
    if (BaseClass->contains(Reg)) {
      return BaseClass;
    }
  }
  return nullptr;
}
// TODO: It might be helpful to have some target specific flags in
// TargetRegisterClass to mark which classes are VGPRs to make this trivial.
bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
  // A class holds VGPRs if it shares a subclass with the VGPR class of the
  // same byte size.
  switch (RC->getSize()) {
  case 4:
    return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) != nullptr;
  case 8:
    return getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) != nullptr;
  case 12:
    return getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) != nullptr;
  case 16:
    return getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) != nullptr;
  case 32:
    return getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) != nullptr;
  case 64:
    return getCommonSubClass(&AMDGPU::VReg_512RegClass, RC) != nullptr;
  default:
    llvm_unreachable("Invalid register class size");
  }
}
const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
                                         const TargetRegisterClass *SRC) const {
  switch (SRC->getSize()) {
  case 4:
    return &AMDGPU::VGPR_32RegClass;
  case 8:
    return &AMDGPU::VReg_64RegClass;
  case 12:
    return &AMDGPU::VReg_96RegClass;
  case 16:
    return &AMDGPU::VReg_128RegClass;
  case 32:
    return &AMDGPU::VReg_256RegClass;
  case 64:
    return &AMDGPU::VReg_512RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}
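
// e.g. this maps SReg_64 (8 bytes) to VReg_64, and any 32-bit scalar class to
// VGPR_32.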
const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
                         const TargetRegisterClass *RC, unsigned SubIdx) const {
  if (SubIdx == AMDGPU::NoSubRegister)
    return RC;

  // If this register has a sub-register, we can safely assume it is a 32-bit
  // register, because all of SI's sub-registers are 32-bit.
  if (isSGPRClass(RC)) {
    return &AMDGPU::SGPR_32RegClass;
  } else {
    return &AMDGPU::VGPR_32RegClass;
  }
}
bool SIRegisterInfo::shouldRewriteCopySrc(
  const TargetRegisterClass *DefRC,
  unsigned DefSubReg,
  const TargetRegisterClass *SrcRC,
  unsigned SrcSubReg) const {
  // We want to prefer the smallest register class possible, so we don't want to
  // stop and rewrite on anything that looks like a subregister
  // extract. Operations mostly don't care about the super register class, so we
  // only want to stop on the most basic of copies between the same register
  // class.
  //
  // e.g. if we have something like
  // vreg0 = ...
  // vreg1 = ...
  // vreg2 = REG_SEQUENCE vreg0, sub0, vreg1, sub1, vreg2, sub2
  // vreg3 = COPY vreg2, sub0
  //
  // We want to look through the COPY to find:
  //  => vreg3 = COPY vreg0

  // Plain copy.
  return getCommonSubClass(DefRC, SrcRC) != nullptr;
}
unsigned SIRegisterInfo::getPhysRegSubReg(unsigned Reg,
                                          const TargetRegisterClass *SubRC,
                                          unsigned Channel) const {

  // The special 64-bit registers are split into explicit lo/hi halves rather
  // than numbered subregisters.
  switch (Reg) {
    case AMDGPU::VCC:
      switch(Channel) {
        case 0: return AMDGPU::VCC_LO;
        case 1: return AMDGPU::VCC_HI;
        default: llvm_unreachable("Invalid SubIdx for VCC");
      }

    case AMDGPU::FLAT_SCR:
      switch(Channel) {
        case 0: return AMDGPU::FLAT_SCR_LO;
        case 1: return AMDGPU::FLAT_SCR_HI;
        default: llvm_unreachable("Invalid SubIdx for FLAT_SCR");
      }

    case AMDGPU::EXEC:
      switch(Channel) {
        case 0: return AMDGPU::EXEC_LO;
        case 1: return AMDGPU::EXEC_HI;
        default: llvm_unreachable("Invalid SubIdx for EXEC");
      }
  }

  const TargetRegisterClass *RC = getPhysRegClass(Reg);
  // 32-bit registers don't have sub-registers, so we can just return the
  // Reg. We need to have this check here, because the calculation below
  // using getHWRegIndex() will fail with special 32-bit registers like
  // VCC_LO, VCC_HI, EXEC_LO, EXEC_HI and M0.
  if (RC->getSize() == 4) {
    assert(Channel == 0);
    return Reg;
  }

  unsigned Index = getHWRegIndex(Reg);
  return SubRC->getRegister(Index + Channel);
}
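
// Example: getPhysRegSubReg(AMDGPU::SGPR4_SGPR5, &AMDGPU::SGPR_32RegClass, 1)
// returns AMDGPU::SGPR5, since SGPR4_SGPR5 has HW index 4 and channel 1
// selects SGPR_32 register 4 + 1 = 5.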
bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const {
  return OpType == AMDGPU::OPERAND_REG_IMM32;
}
bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
  if (opCanUseLiteralConstant(OpType))
    return true;

  return OpType == AMDGPU::OPERAND_REG_INLINE_C;
}
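
// Inline constants (small integers and a few floats like 0.5 and 1.0) are
// encoded directly in the source operand and are free; a literal constant
// costs an extra dword of encoding after the instruction.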
unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
                                           enum PreloadedValue Value) const {

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  switch (Value) {
  case SIRegisterInfo::TGID_X:
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 0);
  case SIRegisterInfo::TGID_Y:
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 1);
  case SIRegisterInfo::TGID_Z:
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 2);
  case SIRegisterInfo::SCRATCH_WAVE_OFFSET:
    if (MFI->getShaderType() != ShaderType::COMPUTE)
      return MFI->ScratchOffsetReg;
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 4);
  case SIRegisterInfo::SCRATCH_PTR:
    return AMDGPU::SGPR2_SGPR3;
  case SIRegisterInfo::INPUT_PTR:
    return AMDGPU::SGPR0_SGPR1;
  case SIRegisterInfo::TIDIG_X:
    return AMDGPU::VGPR0;
  case SIRegisterInfo::TIDIG_Y:
    return AMDGPU::VGPR1;
  case SIRegisterInfo::TIDIG_Z:
    return AMDGPU::VGPR2;
  }
  llvm_unreachable("unexpected preloaded value type");
}
/// \brief Returns a register that is not used at any point in the function.
///        If all registers are used, then this function will return
///        AMDGPU::NoRegister.
unsigned SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
                                           const TargetRegisterClass *RC) const {
  for (unsigned Reg : *RC)
    if (!MRI.isPhysRegUsed(Reg))
      return Reg;
  return AMDGPU::NoRegister;
}
unsigned SIRegisterInfo::getNumVGPRsAllowed(unsigned WaveCount) const {
  // Per-wave VGPR allotment, sized so that WaveCount waves fit in the
  // 256-register VGPR file.
  switch(WaveCount) {
    case 10: return 24;
    case 9:  return 28;
    case 8:  return 32;
    case 7:  return 36;
    case 6:  return 40;
    case 5:  return 48;
    case 4:  return 64;
    case 3:  return 84;
    case 2:  return 128;
    default: return 256;
  }
}
unsigned SIRegisterInfo::getNumSGPRsAllowed(AMDGPUSubtarget::Generation gen,
                                            unsigned WaveCount) const {
  if (gen >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    switch (WaveCount) {
      case 10: return 80;
      case 9:  return 80;
      case 8:  return 96;
      default: return 102;
    }
  } else {
    switch(WaveCount) {
      case 10: return 48;
      case 9:  return 56;
      case 8:  return 64;
      case 7:  return 72;
      case 6:  return 80;
      case 5:  return 96;
      default: return 103;
    }
  }
}
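
// Worked example, assuming the tables above: at 8 waves per CU, VI allows 96
// SGPRs per wave, while SI/CI allow 64.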