//===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//
#include "SIInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCInstrDesc.h"

using namespace llvm;
SIInstrInfo::SIInstrInfo(AMDGPUTargetMachine &tm)
  : AMDGPUInstrInfo(tm),
    RI(tm)
    { }

const SIRegisterInfo &SIInstrInfo::getRegisterInfo() const {
  return RI;
}
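
// copyPhysReg lowers a generic COPY between physical registers into real SI
// moves: 32- and 64-bit scalar copies become a single S_MOV, 32-bit vector
// copies a single V_MOV_B32, and wider register tuples are split into one
// 32-bit move per sub-register.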
void
SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MI, DebugLoc DL,
                         unsigned DestReg, unsigned SrcReg,
                         bool KillSrc) const {

  // If we are trying to copy to or from SCC, there is a bug somewhere else in
  // the backend. While it may be theoretically possible to do this, it should
  // never be necessary.
  assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);
  const int16_t Sub0_15[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
    AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
  };

  const int16_t Sub0_7[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
  };

  const int16_t Sub0_3[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
  };

  const int16_t Sub0_2[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, 0
  };

  const int16_t Sub0_1[] = {
    AMDGPU::sub0, AMDGPU::sub1, 0
  };
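
  // Zero-terminated lists of sub-register indices. Opcode and SubIndices are
  // set by the register-class checks below and drive the per-sub-register
  // copy loop at the end of the function.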
  unsigned Opcode;
  const int16_t *SubIndices;
  if (AMDGPU::M0 == DestReg) {
    // Check if M0 isn't already set to this value
    for (MachineBasicBlock::reverse_iterator E = MBB.rend(),
         I = MachineBasicBlock::reverse_iterator(MI); I != E; ++I) {

      if (!I->definesRegister(AMDGPU::M0))
        continue;

      unsigned Opc = I->getOpcode();
      if (Opc != TargetOpcode::COPY && Opc != AMDGPU::S_MOV_B32)
        break;

      if (!I->readsRegister(SrcReg))
        break;

      // The copy isn't necessary
      return;
    }
  }
  if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_15;
  } else if (AMDGPU::VReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
           AMDGPU::SReg_64RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_1;

  } else if (AMDGPU::VReg_96RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_96RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_2;

  } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
           AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
           AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
           AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_15;

  } else {
    llvm_unreachable("Can't copy register!");
  }
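
  // Emit one Opcode move per sub-register. For example, a 128-bit SGPR tuple
  // copy expands into four S_MOV_B32 instructions, one for each of
  // sub0..sub3. All but the last move also carry an implicit def of the full
  // DestReg so the partially written tuple is kept live.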
  while (unsigned SubIdx = *SubIndices++) {
    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
                                          get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx), getKillRegState(KillSrc));

    if (*SubIndices)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
  }
}
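
// commuteOpcode translates between an opcode and its commuted form using the
// AMDGPU::getCommuteRev / AMDGPU::getCommuteOrig opcode mapping tables. Both
// directions are tried; an opcode without an entry commutes to itself.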
unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const {

  int NewOpc;

  // Try to map original to commuted opcode
  if ((NewOpc = AMDGPU::getCommuteRev(Opcode)) != -1)
    return NewOpc;

  // Try to map commuted to original opcode
  if ((NewOpc = AMDGPU::getCommuteOrig(Opcode)) != -1)
    return NewOpc;

  return Opcode;
}
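
// commuteInstruction only handles the case where operands 1 and 2 are both
// registers; anything else is rejected and the instruction is left unchanged.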
MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
                                              bool NewMI) const {

  if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg() ||
      !MI->getOperand(2).isReg())
    return 0;

  MI = TargetInstrInfo::commuteInstruction(MI, NewMI);

  if (MI)
    MI->setDesc(get(commuteOpcode(MI->getOpcode())));

  return MI;
}
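
// getMovImmInstr builds a V_MOV_B32 that materializes an immediate into
// DstReg. The instruction is created detached from any basic block; the
// caller is responsible for inserting it.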
MachineInstr *SIInstrInfo::getMovImmInstr(MachineFunction *MF, unsigned DstReg,
                                          int64_t Imm) const {
  MachineInstr *MI = MF->CreateMachineInstr(get(AMDGPU::V_MOV_B32_e32), DebugLoc());
  MachineInstrBuilder MIB(*MF, MI);
  MIB.addReg(DstReg, RegState::Define);
  MIB.addImm(Imm);

  return MI;
}
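
// isMov reports whether an opcode is one of the SI scalar or vector move
// instructions.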
bool SIInstrInfo::isMov(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
    return true;
  }
}
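
// Moving a definition of EXEC (the execution mask) would change which lanes
// execute the surrounding code, so only defs of other register classes are
// reported as safe to move.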
bool
SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  return RC != &AMDGPU::EXECRegRegClass;
}
//===----------------------------------------------------------------------===//
// Indirect addressing callbacks
//===----------------------------------------------------------------------===//
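
// These hooks are required by the shared AMDGPU instruction info interface;
// apart from calculateIndirectAddress they are not implemented for SI yet.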
unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                               unsigned Channel) const {
  assert(Channel == 0);
  return RegIndex;
}
int SIInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  llvm_unreachable("Unimplemented");
}

int SIInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  llvm_unreachable("Unimplemented");
}

const TargetRegisterClass *SIInstrInfo::getIndirectAddrStoreRegClass(
                                                     unsigned SourceReg) const {
  llvm_unreachable("Unimplemented");
}

const TargetRegisterClass *SIInstrInfo::getIndirectAddrLoadRegClass() const {
  llvm_unreachable("Unimplemented");
}
MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
                                   MachineBasicBlock *MBB,
                                   MachineBasicBlock::iterator I,
                                   unsigned ValueReg,
                                   unsigned Address, unsigned OffsetReg) const {
  llvm_unreachable("Unimplemented");
}

MachineInstrBuilder SIInstrInfo::buildIndirectRead(
                                   MachineBasicBlock *MBB,
                                   MachineBasicBlock::iterator I,
                                   unsigned ValueReg,
                                   unsigned Address, unsigned OffsetReg) const {
  llvm_unreachable("Unimplemented");
}

const TargetRegisterClass *SIInstrInfo::getSuperIndirectRegClass() const {
  llvm_unreachable("Unimplemented");
}