//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "SIRegisterInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

SIRegisterInfo::SIRegisterInfo() : AMDGPURegisterInfo() {}
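
// Mark Reg and every register aliasing it, including the wider tuple
// registers that contain it, as reserved, so that no part of the register can
// be handed out by the allocator.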
void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved, unsigned Reg) const {
  MCRegAliasIterator R(Reg, this, true);

  for (; R.isValid(); ++R)
    Reserved.set(*R);
}

BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);

  // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but
  // this seems likely to result in bugs, so I'm marking them as reserved.
  reserveRegisterTuples(Reserved, AMDGPU::EXEC);
  reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);

  // Reserve the last 2 registers so we will always have at least 2 more that
  // will physically contain VCC.
  reserveRegisterTuples(Reserved, AMDGPU::SGPR102_SGPR103);

  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();

  if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    // SI/CI have 104 SGPRs. VI has 102. We need to shift down the reservation.
    reserveRegisterTuples(Reserved, AMDGPU::SGPR98_SGPR99);
    reserveRegisterTuples(Reserved, AMDGPU::SGPR100_SGPR101);
  }

  // Tonga and Iceland can only allocate a fixed number of SGPRs due
  // to a hw bug.
  if (ST.hasSGPRInitBug()) {
    unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
    // Reserve some SGPRs for FLAT_SCRATCH and VCC (4 SGPRs).
    // Assume XNACK_MASK is unused.
    unsigned Limit = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG - 4;

    for (unsigned i = Limit; i < NumSGPRs; ++i) {
      unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
      reserveRegisterTuples(Reserved, Reg);
    }
  }

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  unsigned ScratchRSrcReg = MFI->getScratchRSrcReg();
  if (ScratchRSrcReg != AMDGPU::NoRegister) {
    unsigned ScratchOffsetPreloadReg
      = getPreloadedValue(MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
    // We will need to use this user SGPR argument for spilling, and thus never
    // want it to be spilled.
    reserveRegisterTuples(Reserved, ScratchOffsetPreloadReg);

    // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we
    // need to spill.
    // TODO: May need to reserve a VGPR if doing LDS spilling.
    reserveRegisterTuples(Reserved, ScratchRSrcReg);
    assert(!isSubRegister(ScratchRSrcReg, ScratchOffsetPreloadReg));
  }

  return Reserved;
}
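
// Compute the register pressure limit for pressure set Idx: the per-wave
// SGPR/VGPR budget at maximum occupancy, scaled by how many 32-bit registers
// each member of the matching class occupies.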
unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
                                                unsigned Idx) const {
  const AMDGPUSubtarget &STI = MF.getSubtarget<AMDGPUSubtarget>();
  // FIXME: We should adjust the max number of waves based on LDS size.
  unsigned SGPRLimit = getNumSGPRsAllowed(STI.getGeneration(),
                                          STI.getMaxWavesPerCU());
  unsigned VGPRLimit = getNumVGPRsAllowed(STI.getMaxWavesPerCU());
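
  // Pseudo register classes can contain both SGPRs and VGPRs, so their
  // pressure is bounded by the combined budget.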
  unsigned VSLimit = SGPRLimit + VGPRLimit;

  for (regclass_iterator I = regclass_begin(), E = regclass_end();
       I != E; ++I) {
    const TargetRegisterClass *RC = *I;

    unsigned NumSubRegs = std::max((int)RC->getSize() / 4, 1);
    unsigned Limit;

    if (isPseudoRegClass(RC)) {
      // FIXME: This is a hack. We should never be considering the pressure of
      // these since no virtual register should ever have this class.
      Limit = VSLimit;
    } else if (isSGPRClass(RC)) {
      Limit = SGPRLimit / NumSubRegs;
    } else {
      Limit = VGPRLimit / NumSubRegs;
    }

    const int *Sets = getRegClassPressureSets(RC);
    assert(Sets);
    for (unsigned i = 0; Sets[i] != -1; ++i) {
      if (Sets[i] == (int)Idx)
        return Limit;
    }
  }
  return 256;
}
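
// Spilling and frame-index elimination below may need extra registers at
// arbitrary points, so request the register scavenger whenever the function
// has stack objects.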
bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
  return Fn.getFrameInfo()->hasStackObjects();
}
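
// Return how many 32-bit sub-registers a spill pseudo covers, i.e. the
// spilled value's size in dwords (S512/V512 -> 16 ... S32/V32 -> 1).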
static unsigned getNumSubRegsForSpillOp(unsigned Op) {

  switch (Op) {
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V512_RESTORE:
    return 16;
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
    return 8;
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
    return 4;
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
    return 3;
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
    return 2;
  case AMDGPU::SI_SPILL_S32_SAVE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
  case AMDGPU::SI_SPILL_V32_SAVE:
  case AMDGPU::SI_SPILL_V32_RESTORE:
    return 1;
  default: llvm_unreachable("Invalid spill opcode");
  }
}
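
// Expand a VGPR spill pseudo into a sequence of single-dword scratch buffer
// loads or stores, one per 32-bit sub-register of Value, addressed through
// the scratch resource descriptor plus a per-wave byte offset.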
void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
                                           unsigned LoadStoreOp,
                                           unsigned Value,
                                           unsigned ScratchRsrcReg,
                                           unsigned ScratchOffset,
                                           int64_t Offset,
                                           RegScavenger *RS) const {

  MachineBasicBlock *MBB = MI->getParent();
  const MachineFunction *MF = MI->getParent()->getParent();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
  LLVMContext &Ctx = MF->getFunction()->getContext();
  DebugLoc DL = MI->getDebugLoc();
  bool IsLoad = TII->get(LoadStoreOp).mayLoad();

  bool RanOutOfSGPRs = false;
  unsigned SOffset = ScratchOffset;

  unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
  unsigned Size = NumSubRegs * 4;
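
  // MUBUF instructions only encode a 12-bit unsigned immediate offset. If the
  // last dword of this spill would not fit, fold the whole offset into a
  // scavenged SGPR and use a zero immediate instead.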
  if (!isUInt<12>(Offset + Size)) {
    SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);
    if (SOffset == AMDGPU::NoRegister) {
      RanOutOfSGPRs = true;
      SOffset = AMDGPU::SGPR0;
    }
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
            .addReg(ScratchOffset)
            .addImm(Offset);
    Offset = 0;
  }

  if (RanOutOfSGPRs)
    Ctx.emitError("Ran out of SGPRs for spilling VGPRs");

  for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += 4) {
    unsigned SubReg = NumSubRegs > 1 ?
        getPhysRegSubReg(Value, &AMDGPU::VGPR_32RegClass, i) :
        Value;
    bool IsKill = (i == e - 1);

    BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
      .addReg(SubReg, getDefRegState(IsLoad))
      .addReg(ScratchRsrcReg, getKillRegState(IsKill))
      .addReg(SOffset)
      .addImm(Offset)
      .addImm(0) // glc
      .addImm(0) // slc
      .addImm(0) // tfe
      .addReg(Value, RegState::Implicit | getDefRegState(IsLoad))
      .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
  }
}
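
// Rewrite a frame-index operand: SGPR spill pseudos become V_WRITELANE /
// V_READLANE sequences into a reserved VGPR lane, VGPR spill pseudos become
// scratch buffer accesses, and any other frame index is folded to an
// immediate (or materialized in a scavenged VGPR when that is illegal).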
void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                        int SPAdj, unsigned FIOperandNum,
                                        RegScavenger *RS) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand &FIOp = MI->getOperand(FIOperandNum);
  int Index = MI->getOperand(FIOperandNum).getIndex();

  switch (MI->getOpcode()) {
    // SGPR register spill
    case AMDGPU::SI_SPILL_S512_SAVE:
    case AMDGPU::SI_SPILL_S256_SAVE:
    case AMDGPU::SI_SPILL_S128_SAVE:
    case AMDGPU::SI_SPILL_S64_SAVE:
    case AMDGPU::SI_SPILL_S32_SAVE: {
      unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());

      for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
        unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                           &AMDGPU::SGPR_32RegClass, i);
        struct SIMachineFunctionInfo::SpilledReg Spill =
            MFI->getSpilledReg(MF, Index, i);

        if (Spill.VGPR == AMDGPU::NoRegister) {
          LLVMContext &Ctx = MF->getFunction()->getContext();
          Ctx.emitError("Ran out of VGPRs for spilling SGPR");
        }

        BuildMI(*MBB, MI, DL,
                TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
                Spill.VGPR)
                .addReg(SubReg)
                .addImm(Spill.Lane);

        // FIXME: Since this spills to another register instead of an actual
        // frame index, we should delete the frame index when all references to
        // it are fixed.
      }
      MI->eraseFromParent();
      break;
    }

    // SGPR register restore
    case AMDGPU::SI_SPILL_S512_RESTORE:
    case AMDGPU::SI_SPILL_S256_RESTORE:
    case AMDGPU::SI_SPILL_S128_RESTORE:
    case AMDGPU::SI_SPILL_S64_RESTORE:
    case AMDGPU::SI_SPILL_S32_RESTORE: {
      unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());

      for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
        unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                           &AMDGPU::SGPR_32RegClass, i);
        struct SIMachineFunctionInfo::SpilledReg Spill =
            MFI->getSpilledReg(MF, Index, i);

        if (Spill.VGPR == AMDGPU::NoRegister) {
          LLVMContext &Ctx = MF->getFunction()->getContext();
          Ctx.emitError("Ran out of VGPRs for spilling SGPR");
        }

        BuildMI(*MBB, MI, DL,
                TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
                SubReg)
                .addReg(Spill.VGPR)
                .addImm(Spill.Lane)
                .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
      }

      // TODO: only do this when it is needed
      switch (MF->getSubtarget<AMDGPUSubtarget>().getGeneration()) {
      case AMDGPUSubtarget::SOUTHERN_ISLANDS:
        // "VALU writes SGPR" -> "SMRD reads that SGPR" needs "S_NOP 3" on SI
        TII->insertNOPs(MI, 3);
        break;
      case AMDGPUSubtarget::SEA_ISLANDS:
        break;
      default: // VOLCANIC_ISLANDS and later
        // "VALU writes SGPR -> VMEM reads that SGPR" needs "S_NOP 4" on VI
        // and later. This also applies to VALUs which write VCC, but we're
        // unlikely to see VMEM use VCC.
        TII->insertNOPs(MI, 4);
      }

      MI->eraseFromParent();
      break;
    }

    // VGPR register spill
    case AMDGPU::SI_SPILL_V512_SAVE:
    case AMDGPU::SI_SPILL_V256_SAVE:
    case AMDGPU::SI_SPILL_V128_SAVE:
    case AMDGPU::SI_SPILL_V96_SAVE:
    case AMDGPU::SI_SPILL_V64_SAVE:
    case AMDGPU::SI_SPILL_V32_SAVE:
      buildScratchLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
            TII->getNamedOperand(*MI, AMDGPU::OpName::src)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
            FrameInfo->getObjectOffset(Index), RS);
      MI->eraseFromParent();
      break;
    case AMDGPU::SI_SPILL_V32_RESTORE:
    case AMDGPU::SI_SPILL_V64_RESTORE:
    case AMDGPU::SI_SPILL_V96_RESTORE:
    case AMDGPU::SI_SPILL_V128_RESTORE:
    case AMDGPU::SI_SPILL_V256_RESTORE:
    case AMDGPU::SI_SPILL_V512_RESTORE: {
      buildScratchLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
            TII->getNamedOperand(*MI, AMDGPU::OpName::dst)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
            FrameInfo->getObjectOffset(Index), RS);
      MI->eraseFromParent();
      break;
    }

    default: {
      int64_t Offset = FrameInfo->getObjectOffset(Index);
      FIOp.ChangeToImmediate(Offset);
      if (!TII->isImmOperandLegal(MI, FIOperandNum, FIOp)) {
        unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, SPAdj);
        BuildMI(*MBB, MI, MI->getDebugLoc(),
                TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
                .addImm(Offset);
        FIOp.ChangeToRegister(TmpReg, false, false, true);
      }
    }
  }
}

unsigned SIRegisterInfo::getHWRegIndex(unsigned Reg) const {
  return getEncodingValue(Reg) & 0xff;
}

// FIXME: This is very slow. It might be worth creating a map from physreg to
// register class.
const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
  assert(!TargetRegisterInfo::isVirtualRegister(Reg));

  static const TargetRegisterClass *const BaseClasses[] = {
    &AMDGPU::VGPR_32RegClass,
    &AMDGPU::SReg_32RegClass,
    &AMDGPU::VReg_64RegClass,
    &AMDGPU::SReg_64RegClass,
    &AMDGPU::VReg_96RegClass,
    &AMDGPU::VReg_128RegClass,
    &AMDGPU::SReg_128RegClass,
    &AMDGPU::VReg_256RegClass,
    &AMDGPU::SReg_256RegClass,
    &AMDGPU::VReg_512RegClass,
    &AMDGPU::SReg_512RegClass
  };

  for (const TargetRegisterClass *BaseClass : BaseClasses) {
    if (BaseClass->contains(Reg)) {
      return BaseClass;
    }
  }
  return nullptr;
}

// TODO: It might be helpful to have some target specific flags in
// TargetRegisterClass to mark which classes are VGPRs to make this trivial.
bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
  switch (RC->getSize()) {
  case 4:
    return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) != nullptr;
  case 8:
    return getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) != nullptr;
  case 12:
    return getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) != nullptr;
  case 16:
    return getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) != nullptr;
  case 32:
    return getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) != nullptr;
  case 64:
    return getCommonSubClass(&AMDGPU::VReg_512RegClass, RC) != nullptr;
  default:
    llvm_unreachable("Invalid register class size");
  }
}
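
// Map a scalar register class to the vector register class of the same width
// in bytes, e.g. SReg_64 -> VReg_64.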
const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
                                         const TargetRegisterClass *SRC) const {
  switch (SRC->getSize()) {
  case 4:
    return &AMDGPU::VGPR_32RegClass;
  case 8:
    return &AMDGPU::VReg_64RegClass;
  case 12:
    return &AMDGPU::VReg_96RegClass;
  case 16:
    return &AMDGPU::VReg_128RegClass;
  case 32:
    return &AMDGPU::VReg_256RegClass;
  case 64:
    return &AMDGPU::VReg_512RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
                         const TargetRegisterClass *RC, unsigned SubIdx) const {
  if (SubIdx == AMDGPU::NoSubRegister)
    return RC;

  // If this register has a sub-register, we can safely assume it is a 32-bit
  // register, because all of SI's sub-registers are 32-bit.
  if (isSGPRClass(RC)) {
    return &AMDGPU::SGPR_32RegClass;
  } else {
    return &AMDGPU::VGPR_32RegClass;
  }
}

bool SIRegisterInfo::shouldRewriteCopySrc(
  const TargetRegisterClass *DefRC,
  unsigned DefSubReg,
  const TargetRegisterClass *SrcRC,
  unsigned SrcSubReg) const {
  // We want to prefer the smallest register class possible, so we don't want to
  // stop and rewrite on anything that looks like a subregister
  // extract. Operations mostly don't care about the super register class, so we
  // only want to stop on the most basic of copies between the same register
  // class.
  //
  // e.g. if we have something like
  // vreg0 = ...
  // vreg1 = ...
  // vreg2 = REG_SEQUENCE vreg0, sub0, vreg1, sub1, vreg2, sub2
  // vreg3 = COPY vreg2, sub0
  //
  // We want to look through the COPY to find:
  //  => vreg3 = COPY vreg0

  // Plain copy.
  return getCommonSubClass(DefRC, SrcRC) != nullptr;
}

unsigned SIRegisterInfo::getPhysRegSubReg(unsigned Reg,
                                          const TargetRegisterClass *SubRC,
                                          unsigned Channel) const {

  switch (Reg) {
  case AMDGPU::VCC:
    switch (Channel) {
    case 0: return AMDGPU::VCC_LO;
    case 1: return AMDGPU::VCC_HI;
    default: llvm_unreachable("Invalid SubIdx for VCC");
    }

  case AMDGPU::FLAT_SCR:
    switch (Channel) {
    case 0:
      return AMDGPU::FLAT_SCR_LO;
    case 1:
      return AMDGPU::FLAT_SCR_HI;
    default:
      llvm_unreachable("Invalid SubIdx for FLAT_SCR");
    }
    break;

  case AMDGPU::EXEC:
    switch (Channel) {
    case 0:
      return AMDGPU::EXEC_LO;
    case 1:
      return AMDGPU::EXEC_HI;
    default:
      llvm_unreachable("Invalid SubIdx for EXEC");
    }
    break;
  }

  const TargetRegisterClass *RC = getPhysRegClass(Reg);
  // 32-bit registers don't have sub-registers, so we can just return the
  // Reg. We need to have this check here, because the calculation below
  // using getHWRegIndex() will fail with special 32-bit registers like
  // VCC_LO, VCC_HI, EXEC_LO, EXEC_HI and M0.
  if (RC->getSize() == 4) {
    assert(Channel == 0);
    return Reg;
  }

  unsigned Index = getHWRegIndex(Reg);
  return SubRC->getRegister(Index + Channel);
}
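
// OPERAND_REG_IMM32 operands can encode an arbitrary 32-bit literal in the
// instruction stream; OPERAND_REG_INLINE_C operands only accept the
// hardware's inline constants (small integers and a few common floats).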
bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const {
  return OpType == AMDGPU::OPERAND_REG_IMM32;
}

bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
  if (opCanUseLiteralConstant(OpType))
    return true;

  return OpType == AMDGPU::OPERAND_REG_INLINE_C;
}
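
// Return the physical register that the hardware preloads with the given
// value, based on the number of user SGPRs recorded in SIMachineFunctionInfo.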
unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
                                           enum PreloadedValue Value) const {

  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  switch (Value) {
  case SIRegisterInfo::WORKGROUP_ID_X:
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 0);
  case SIRegisterInfo::WORKGROUP_ID_Y:
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 1);
  case SIRegisterInfo::WORKGROUP_ID_Z:
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 2);
  case SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET:
    if (MFI->getShaderType() != ShaderType::COMPUTE)
      return MFI->ScratchOffsetReg;
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 4);
  case SIRegisterInfo::PRIVATE_SEGMENT_BUFFER:
    llvm_unreachable("currently unused");
  case SIRegisterInfo::KERNARG_SEGMENT_PTR:
    return ST.isAmdHsaOS() ? AMDGPU::SGPR2_SGPR3 : AMDGPU::SGPR0_SGPR1;
  case SIRegisterInfo::DISPATCH_PTR:
    assert(MFI->hasDispatchPtr());
    return AMDGPU::SGPR0_SGPR1;
  case SIRegisterInfo::QUEUE_PTR:
    llvm_unreachable("not implemented");
  case SIRegisterInfo::WORKITEM_ID_X:
    return AMDGPU::VGPR0;
  case SIRegisterInfo::WORKITEM_ID_Y:
    return AMDGPU::VGPR1;
  case SIRegisterInfo::WORKITEM_ID_Z:
    return AMDGPU::VGPR2;
  }
  llvm_unreachable("unexpected preloaded value type");
}

/// \brief Returns a register that is not used at any point in the function.
///        If all registers are used, then this function will return
///        AMDGPU::NoRegister.
unsigned SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
                                           const TargetRegisterClass *RC) const {
  for (unsigned Reg : *RC)
    if (!MRI.isPhysRegUsed(Reg))
      return Reg;
  return AMDGPU::NoRegister;
}
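
// Each SIMD has 256 VGPRs shared by its resident waves, so the per-wave
// budget is roughly 256 / WaveCount, rounded down to the hardware's VGPR
// allocation granularity of four registers.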
unsigned SIRegisterInfo::getNumVGPRsAllowed(unsigned WaveCount) const {
  switch (WaveCount) {
    case 10: return 24;
    case 9:  return 28;
    case 8:  return 32;
    case 7:  return 36;
    case 6:  return 40;
    case 5:  return 48;
    case 4:  return 64;
    case 3:  return 84;
    case 2:  return 128;
    default: return 256;
  }
}

unsigned SIRegisterInfo::getNumSGPRsAllowed(AMDGPUSubtarget::Generation gen,
                                            unsigned WaveCount) const {
  if (gen >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {