1 //===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// \brief SI implementation of the TargetRegisterInfo class.
13 //===----------------------------------------------------------------------===//
16 #include "SIRegisterInfo.h"
17 #include "AMDGPUSubtarget.h"
18 #include "SIInstrInfo.h"
19 #include "SIMachineFunctionInfo.h"
20 #include "llvm/CodeGen/MachineFrameInfo.h"
21 #include "llvm/CodeGen/MachineInstrBuilder.h"
22 #include "llvm/CodeGen/RegisterScavenging.h"
/// Constructor: forwards the subtarget to the AMDGPURegisterInfo base class.
// NOTE(review): this listing elides lines (embedded numbering jumps 27 -> 30),
// so the constructor body is not visible here.
26 SIRegisterInfo::SIRegisterInfo(const AMDGPUSubtarget &st)
27 : AMDGPURegisterInfo(st)
/// \brief Return the set of registers the allocator must never assign.
/// Reserves EXEC (the hardware execution mask) and the pseudo register used
/// as the indirect addressing base.
// NOTE(review): embedded numbering jumps 33 -> 37, so any further
// reservations and the `return Reserved;` are elided from this view.
30 BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
31 BitVector Reserved(getNumRegs());
32 Reserved.set(AMDGPU::EXEC);
33 Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);
/// \brief Register-pressure limit for \p RC in \p MF.
/// Simply allows the full register count of the class — i.e. no artificial
/// pressure cap beyond what the class itself provides.
37 unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
38 MachineFunction &MF) const {
39 return RC->getNumRegs();
/// \brief Enable the register scavenger only when the function has stack
/// objects — eliminateFrameIndex below needs it to materialize frame offsets
/// that are not encodable as immediates.
42 bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
43 return Fn.getFrameInfo()->hasStackObjects();
/// \brief Rewrite the frame-index operand at \p FIOperandNum of \p MI into a
/// concrete value.
///
/// The frame index is first folded to its immediate stack offset. If the
/// target instruction cannot encode that offset as an immediate, a temporary
/// VGPR is scavenged, the offset is materialized into it with V_MOV_B32_e32,
/// and the operand is changed to use that register instead.
46 void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
47 int SPAdj, unsigned FIOperandNum,
48 RegScavenger *RS) const {
49 MachineFunction *MF = MI->getParent()->getParent();
50 MachineFrameInfo *FrameInfo = MF->getFrameInfo();
51 const SIInstrInfo *TII = static_cast<const SIInstrInfo*>(ST.getInstrInfo());
52 MachineOperand &FIOp = MI->getOperand(FIOperandNum);
53 int Index = MI->getOperand(FIOperandNum).getIndex();
54 int64_t Offset = FrameInfo->getObjectOffset(Index);
// Optimistically fold the offset in as an immediate, then check legality.
56 FIOp.ChangeToImmediate(Offset);
57 if (!TII->isImmOperandLegal(MI, FIOperandNum, FIOp)) {
// Scavenge a 32-bit VGPR to hold the offset; requiresRegisterScavenging()
// guarantees RS is available when stack objects exist.
58 unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VReg_32RegClass, MI, SPAdj);
59 BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
60 TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
// NOTE(review): embedded numbering jumps 60 -> 62 here — the BuildMI
// continuation (presumably `.addImm(Offset);`) is elided from this listing.
62 FIOp.ChangeToRegister(TmpReg, false);
/// \brief Register class used by the CFG structurizer for a given value type.
// NOTE(review): heavily elided — the parameter list, the `switch` header, and
// any other cases/default are missing from this listing (numbering jumps
// 66 -> 70). Only the i32 -> VReg_32 mapping is visible.
66 const TargetRegisterClass * SIRegisterInfo::getCFGStructurizerRegClass(
70 case MVT::i32: return &AMDGPU::VReg_32RegClass;
/// \brief Hardware index of physical register \p Reg.
/// The low 8 bits of the TableGen encoding value hold the hardware register
/// number; upper bits are masked off.
74 unsigned SIRegisterInfo::getHWRegIndex(unsigned Reg) const {
75 return getEncodingValue(Reg) & 0xff;
/// \brief Find the smallest listed base register class containing physical
/// register \p Reg.
///
/// Classes are ordered so that narrower classes are checked before wider
/// ones (e.g. VReg_32 before VReg_64), returning the minimal match.
/// Virtual registers are not valid input (asserted below).
78 const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
79 assert(!TargetRegisterInfo::isVirtualRegister(Reg));
81 const TargetRegisterClass *BaseClasses[] = {
82 &AMDGPU::VReg_32RegClass,
83 &AMDGPU::SReg_32RegClass,
84 &AMDGPU::VReg_64RegClass,
85 &AMDGPU::SReg_64RegClass,
86 &AMDGPU::SReg_128RegClass,
87 &AMDGPU::SReg_256RegClass
90 for (const TargetRegisterClass *BaseClass : BaseClasses) {
91 if (BaseClass->contains(Reg)) {
// NOTE(review): numbering jumps 91 -> 98 — the match return and the
// fallthrough result for an unmatched register are elided from this view.
/// \brief True if \p RC is a scalar (SGPR) register class.
/// Defined as the complement of hasVGPRs(): any class with no vector
/// subclass overlap is treated as scalar.
// NOTE(review): lines 99-101 of the original are elided here (numbering
// jumps 98 -> 102); there may be an earlier check before this return.
98 bool SIRegisterInfo::isSGPRClass(const TargetRegisterClass *RC) const {
102 return !hasVGPRs(RC);
/// \brief True if \p RC overlaps any vector (VGPR) register class.
/// Checks for a common subclass with each VReg width from 32 up to 512 bits.
105 bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
106 return getCommonSubClass(&AMDGPU::VReg_32RegClass, RC) ||
107 getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) ||
108 getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) ||
109 getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) ||
110 getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) ||
111 getCommonSubClass(&AMDGPU::VReg_512RegClass, RC);
/// \brief Map a scalar register class \p SRC to the vector register class of
/// the same bit width (SGPR_32 -> VReg_32, SGPR_64 -> VReg_64, etc.), with
/// SCC mapping to VCC.
// NOTE(review): lines 116-117 are elided (numbering jumps 115 -> 118), so the
// first branch of this if/else chain — and the final else/return — are not
// visible in this listing.
114 const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
115 const TargetRegisterClass *SRC) const {
118 } else if (SRC == &AMDGPU::SCCRegRegClass) {
119 return &AMDGPU::VCCRegRegClass;
120 } else if (getCommonSubClass(SRC, &AMDGPU::SGPR_32RegClass)) {
121 return &AMDGPU::VReg_32RegClass;
122 } else if (getCommonSubClass(SRC, &AMDGPU::SGPR_64RegClass)) {
123 return &AMDGPU::VReg_64RegClass;
124 } else if (getCommonSubClass(SRC, &AMDGPU::SReg_128RegClass)) {
125 return &AMDGPU::VReg_128RegClass;
126 } else if (getCommonSubClass(SRC, &AMDGPU::SReg_256RegClass)) {
127 return &AMDGPU::VReg_256RegClass;
128 } else if (getCommonSubClass(SRC, &AMDGPU::SReg_512RegClass)) {
129 return &AMDGPU::VReg_512RegClass;
/// \brief Register class of the sub-register \p SubIdx of a register in \p RC.
/// With no sub-index the class itself applies; otherwise the result is the
/// 32-bit scalar or vector class depending on isSGPRClass(RC).
134 const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
135 const TargetRegisterClass *RC, unsigned SubIdx) const {
136 if (SubIdx == AMDGPU::NoSubRegister)
// NOTE(review): the guarded return (presumably `return RC;`) on original
// line 137 is elided from this listing.
139 // If this register has a sub-register, we can safely assume it is a 32-bit
140 // register, because all of SI's sub-registers are 32-bit.
141 if (isSGPRClass(RC)) {
142 return &AMDGPU::SGPR_32RegClass;
144 return &AMDGPU::VGPR_32RegClass;
/// \brief Physical register for channel \p Channel of \p Reg, taken from
/// sub-register class \p SubRC.
/// Uses the hardware index of \p Reg plus the channel offset as an index
/// into \p SubRC — assumes \p SubRC registers are laid out contiguously by
/// hardware index.
148 unsigned SIRegisterInfo::getPhysRegSubReg(unsigned Reg,
149 const TargetRegisterClass *SubRC,
150 unsigned Channel) const {
151 unsigned Index = getHWRegIndex(Reg);
152 return SubRC->getRegister(Index + Channel);
/// \brief Whether operands constrained to register class ID \p RCID may be
/// replaced with an immediate. Only the SSrc/VSrc source-operand classes
/// qualify; all others return false.
// NOTE(review): the `switch (RCID)` header (original line 156) and the
// shared `return true;` after the case labels are elided from this listing.
155 bool SIRegisterInfo::regClassCanUseImmediate(int RCID) const {
157 default: return false;
158 case AMDGPU::SSrc_32RegClassID:
159 case AMDGPU::SSrc_64RegClassID:
160 case AMDGPU::VSrc_32RegClassID:
161 case AMDGPU::VSrc_64RegClassID:
/// \brief Convenience overload: forwards to the ID-based
/// regClassCanUseImmediate() using \p RC's class ID.
166 bool SIRegisterInfo::regClassCanUseImmediate(
167 const TargetRegisterClass *RC) const {
168 return regClassCanUseImmediate(RC->getID());
/// \brief Physical register holding the hardware-preloaded value \p Value
/// on function entry.
///
/// Workgroup IDs (TGID_X/Y/Z) and the scratch wave offset live in the SGPRs
/// immediately following the user SGPRs (count taken from
/// SIMachineFunctionInfo); the scratch pointer is fixed at SGPR2_SGPR3.
/// Unknown values hit llvm_unreachable.
// NOTE(review): the `switch (Value)` header (original line 175) and closing
// brace are elided from this listing.
171 unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
172 enum PreloadedValue Value) const {
174 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
176 case SIRegisterInfo::TGID_X:
177 return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 0);
178 case SIRegisterInfo::TGID_Y:
179 return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 1);
180 case SIRegisterInfo::TGID_Z:
181 return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 2);
182 case SIRegisterInfo::SCRATCH_WAVE_OFFSET:
// Offset is +4, not +3 — index NumUserSGPRs+3 is skipped; presumably
// reserved for another preloaded value. TODO(review): confirm against the
// AMDGPU hardware-register ABI before relying on the gap.
183 return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 4);
184 case SIRegisterInfo::SCRATCH_PTR:
185 return AMDGPU::SGPR2_SGPR3;
187 llvm_unreachable("unexpected preloaded value type");