1 //===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// \brief SI Implementation of TargetInstrInfo.
13 //===----------------------------------------------------------------------===//
16 #include "SIInstrInfo.h"
17 #include "AMDGPUTargetMachine.h"
18 #include "SIDefines.h"
19 #include "llvm/CodeGen/MachineInstrBuilder.h"
20 #include "llvm/CodeGen/MachineRegisterInfo.h"
21 #include "llvm/MC/MCInstrDesc.h"
// Construct the SI instruction info for the given target machine, chaining
// to the common AMDGPU base class.
// NOTE(review): the initializer list is truncated in this chunk — the
// remaining member initializer(s) and the constructor body are not visible.
26 SIInstrInfo::SIInstrInfo(AMDGPUTargetMachine &tm)
27 : AMDGPUInstrInfo(tm),
31 const SIRegisterInfo &SIInstrInfo::getRegisterInfo() const {
// Emit machine code that copies physical register SrcReg into DestReg at
// insertion point MI.  32/64-bit scalar copies and 32-bit vector copies are
// a single S_MOV/V_MOV; wider register classes are copied one 32-bit
// subregister at a time using the SubIndices tables below.
// NOTE(review): this chunk is missing several original lines — the KillSrc
// parameter line, early `return`/`break`/`continue` statements, closing
// braces, the `Opcode` declaration, and the `SubIndices = ...` assignments
// in most branches.  Comments below describe only what is visible.
36 SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
37 MachineBasicBlock::iterator MI, DebugLoc DL,
38 unsigned DestReg, unsigned SrcReg,
41 // If we are trying to copy to or from SCC, there is a bug somewhere else in
42 // the backend. While it may be theoretically possible to do this, it should
43 // never be necessary.
44 assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);
// Zero-terminated subregister index chains consumed by the per-subreg copy
// loop at the bottom of this function: one entry per 32-bit piece.
46 static const int16_t Sub0_15[] = {
47 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
48 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
49 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
50 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
53 static const int16_t Sub0_7[] = {
54 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
55 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
58 static const int16_t Sub0_3[] = {
59 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
62 static const int16_t Sub0_2[] = {
63 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, 0
66 static const int16_t Sub0_1[] = {
67 AMDGPU::sub0, AMDGPU::sub1, 0
// Selected below per destination register class; walked by the final loop.
71 const int16_t *SubIndices;
// Special case for M0: scan backwards from the insertion point for an
// earlier instruction that already set M0, so a redundant copy can be
// skipped.  The early-exit statements of this scan are not visible here.
73 if (AMDGPU::M0 == DestReg) {
74 // Check if M0 isn't already set to this value
75 for (MachineBasicBlock::reverse_iterator E = MBB.rend(),
76 I = MachineBasicBlock::reverse_iterator(MI); I != E; ++I) {
// Skip instructions that do not define M0 (control statement truncated).
78 if (!I->definesRegister(AMDGPU::M0))
// Only plain COPY / S_MOV_B32 definitions of M0 are recognized here.
81 unsigned Opc = I->getOpcode();
82 if (Opc != TargetOpcode::COPY && Opc != AMDGPU::S_MOV_B32)
85 if (!I->readsRegister(SrcReg))
88 // The copy isn't necessary
// Dispatch on the destination register class.  Single-register classes
// build one move directly; multi-register classes select an opcode (the
// assignments to SubIndices in these branches are not visible here) and
// fall through to the subregister loop below.
93 if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
94 assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
95 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
96 .addReg(SrcReg, getKillRegState(KillSrc));
99 } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
100 assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
101 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
102 .addReg(SrcReg, getKillRegState(KillSrc));
105 } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
106 assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
107 Opcode = AMDGPU::S_MOV_B32;
110 } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
111 assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
112 Opcode = AMDGPU::S_MOV_B32;
115 } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
116 assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
117 Opcode = AMDGPU::S_MOV_B32;
118 SubIndices = Sub0_15;
// Vector destinations: the source may also be a scalar register of the
// same width (the asserts below allow either class).
120 } else if (AMDGPU::VReg_32RegClass.contains(DestReg)) {
121 assert(AMDGPU::VReg_32RegClass.contains(SrcReg) ||
122 AMDGPU::SReg_32RegClass.contains(SrcReg));
123 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
124 .addReg(SrcReg, getKillRegState(KillSrc));
127 } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
128 assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
129 AMDGPU::SReg_64RegClass.contains(SrcReg));
130 Opcode = AMDGPU::V_MOV_B32_e32;
133 } else if (AMDGPU::VReg_96RegClass.contains(DestReg)) {
// Note: 96-bit copies accept only a vector source (no SReg_96 check).
134 assert(AMDGPU::VReg_96RegClass.contains(SrcReg));
135 Opcode = AMDGPU::V_MOV_B32_e32;
138 } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
139 assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
140 AMDGPU::SReg_128RegClass.contains(SrcReg));
141 Opcode = AMDGPU::V_MOV_B32_e32;
144 } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
145 assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
146 AMDGPU::SReg_256RegClass.contains(SrcReg));
147 Opcode = AMDGPU::V_MOV_B32_e32;
150 } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
151 assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
152 AMDGPU::SReg_512RegClass.contains(SrcReg));
153 Opcode = AMDGPU::V_MOV_B32_e32;
154 SubIndices = Sub0_15;
// Unhandled register class combination: hard error.
157 llvm_unreachable("Can't copy register!");
// Multi-subregister copy: one move per 32-bit subregister until the
// 0 terminator of the SubIndices chain is reached.
160 while (unsigned SubIdx = *SubIndices++) {
161 MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
162 get(Opcode), RI.getSubReg(DestReg, SubIdx));
164 Builder.addReg(RI.getSubReg(SrcReg, SubIdx), getKillRegState(KillSrc));
// Adds an implicit def of the full DestReg — presumably on the last piece
// to keep super-register liveness correct; the guarding condition (if any)
// is not visible in this chunk.
167 Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
// Map an opcode to its commuted counterpart (and back) via the
// tablegen-generated getCommuteRev/getCommuteOrig tables.
// NOTE(review): the declaration of NewOpc and the return statements are not
// visible in this chunk; if neither table has an entry, the original opcode
// is presumably returned unchanged — confirm against the full source.
171 unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const {
175 // Try to map original to commuted opcode
176 if ((NewOpc = AMDGPU::getCommuteRev(Opcode)) != -1)
179 // Try to map commuted to original opcode
180 if ((NewOpc = AMDGPU::getCommuteOrig(Opcode)) != -1)
// Commute MI's operands 1 and 2.  Only handled when both are registers;
// otherwise the commute is refused (the early-return line is not visible
// in this chunk).  The operand swap itself is delegated to the generic
// TargetInstrInfo implementation, after which the instruction descriptor is
// replaced with the commuted opcode from commuteOpcode().
186 MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
189 if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg() ||
190 !MI->getOperand(2).isReg())
193 MI = TargetInstrInfo::commuteInstruction(MI, NewMI);
// Guard/return lines around this setDesc call are truncated in this view.
196 MI->setDesc(get(commuteOpcode(MI->getOpcode())));
// Create (but do not insert) a V_MOV_B32_e32 that defines DstReg with an
// immediate.  NOTE(review): the immediate parameter, the .addImm(...) call
// and the return statement are not visible in this chunk.
201 MachineInstr * SIInstrInfo::getMovImmInstr(MachineFunction *MF, unsigned DstReg,
203 MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::V_MOV_B32_e32), DebugLoc());
204 MachineInstrBuilder MIB(*MF, MI);
205 MIB.addReg(DstReg, RegState::Define);
// Return true iff Opcode is one of the four SI move instructions listed
// below.  NOTE(review): the `switch (Opcode)` header and the `return true;`
// for the matching cases are not visible in this chunk.
212 bool SIInstrInfo::isMov(unsigned Opcode) const {
214 default: return false;
215 case AMDGPU::S_MOV_B32:
216 case AMDGPU::S_MOV_B64:
217 case AMDGPU::V_MOV_B32_e32:
218 case AMDGPU::V_MOV_B32_e64:
// Defs of every register class except EXECReg are considered safe to move.
// NOTE(review): presumably EXEC is excluded because it controls execution
// masking, so reordering its defs changes semantics — confirm.
224 SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
225 return RC != &AMDGPU::EXECRegRegClass;
// Nonzero iff the opcode's target-specific flags carry the MIMG bit
// (the instruction is in the MIMG encoding family).
228 int SIInstrInfo::isMIMG(uint16_t Opcode) const {
229 return get(Opcode).TSFlags & SIInstrFlags::MIMG;
232 //===----------------------------------------------------------------------===//
233 // Indirect addressing callbacks
234 //===----------------------------------------------------------------------===//
// Compute the indirect address for a register index.  Only Channel == 0 is
// accepted (asserted below) — presumably SI has no per-channel addressing,
// unlike earlier AMDGPU targets; confirm against the base-class contract.
// NOTE(review): the return statement is not visible in this chunk.
236 unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
237 unsigned Channel) const {
238 assert(Channel == 0);
// Indirect-addressing hook not yet implemented for SI; aborts if called.
243 int SIInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
244 llvm_unreachable("Unimplemented");
// Indirect-addressing hook not yet implemented for SI; aborts if called.
247 int SIInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
248 llvm_unreachable("Unimplemented");
// Indirect-addressing hook not yet implemented for SI; aborts if called.
251 const TargetRegisterClass *SIInstrInfo::getIndirectAddrStoreRegClass(
252 unsigned SourceReg) const {
253 llvm_unreachable("Unimplemented");
// Indirect-addressing hook not yet implemented for SI; aborts if called.
256 const TargetRegisterClass *SIInstrInfo::getIndirectAddrLoadRegClass() const {
257 llvm_unreachable("Unimplemented");
// Indirect-addressing hook not yet implemented for SI; aborts if called.
// NOTE(review): one parameter line (original line 263, likely the value
// register) is missing from this chunk.
260 MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
261 MachineBasicBlock *MBB,
262 MachineBasicBlock::iterator I,
264 unsigned Address, unsigned OffsetReg) const {
265 llvm_unreachable("Unimplemented");
// Indirect-addressing hook not yet implemented for SI; aborts if called.
// NOTE(review): one parameter line (original line 271, likely the value
// register) is missing from this chunk.
268 MachineInstrBuilder SIInstrInfo::buildIndirectRead(
269 MachineBasicBlock *MBB,
270 MachineBasicBlock::iterator I,
272 unsigned Address, unsigned OffsetReg) const {
273 llvm_unreachable("Unimplemented");
// Indirect-addressing hook not yet implemented for SI; aborts if called.
276 const TargetRegisterClass *SIInstrInfo::getSuperIndirectRegClass() const {
277 llvm_unreachable("Unimplemented");