//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Implementation of the TargetInstrInfo class that is common to all
/// AMD GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"

// Pin the vtable to this file.
void AMDGPUInstrInfo::anchor() {}

AMDGPUInstrInfo::AMDGPUInstrInfo(const AMDGPUSubtarget &st)
    : AMDGPUGenInstrInfo(-1, -1), ST(st) {}

const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
  return RI;
}

bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                            unsigned &SrcReg, unsigned &DstReg,
                                            unsigned &SubIdx) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
                                                     int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
                                            const MachineMemOperand *&MMO,
                                            int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

MachineInstr *
AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                       MachineBasicBlock::iterator &MBBI,
                                       LiveVariables *LV) const {
  // TODO: Implement this function
  return nullptr;
}

void
AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MI,
                                     unsigned SrcReg, bool isKill,
                                     int FrameIndex,
                                     const TargetRegisterClass *RC,
                                     const TargetRegisterInfo *TRI) const {
  llvm_unreachable("Not Implemented");
}

void
AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned DestReg, int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  llvm_unreachable("Not Implemented");
}

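// Expand the register load/store pseudo instructions after register
// allocation: when the offset operand is the indirect base address, the
// pseudo becomes a plain move to or from the computed indirect address
// register; otherwise it is lowered to a target-specific indirect read or
// write.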
bool AMDGPUInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  int OffsetOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                               AMDGPU::OpName::addr);
  // addr is a custom operand with multiple MI operands, and only the
  // first MI operand is given a name.
  int RegOpIdx = OffsetOpIdx + 1;
  int ChanOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                             AMDGPU::OpName::chan);

  if (isRegisterLoad(*MI)) {
    int DstOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                              AMDGPU::OpName::dst);
    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
      buildMovInstr(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                    getIndirectAddrRegClass()->getRegister(Address));
    } else {
      buildIndirectRead(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                        calculateIndirectAddress(RegIndex, Channel),
                        OffsetReg);
    }
  } else if (isRegisterStore(*MI)) {
    int ValOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                              AMDGPU::OpName::val);
    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
      buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
                    MI->getOperand(ValOpIdx).getReg());
    } else {
      buildIndirectWrite(MBB, MI, MI->getOperand(ValOpIdx).getReg(),
                         calculateIndirectAddress(RegIndex, Channel),
                         OffsetReg);
    }
  } else {
    return false;
  }

  MBB->erase(MI);
  return true;
}

MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
  // TODO: Implement this function
  return nullptr;
}

MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
  // TODO: Implement this function
  return nullptr;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                     unsigned Reg, bool UnfoldLoad,
                                     bool UnfoldStore,
                                     SmallVectorImpl<MachineInstr*> &NewMIs) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                     SmallVectorImpl<SDNode*> &NewNodes) const {
  // TODO: Implement this function
  return false;
}

unsigned
AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                            bool UnfoldLoad, bool UnfoldStore,
                                            unsigned *LoadRegIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::enableClusterLoads() const {
  return true;
}

// FIXME: This behaves strangely. If, for example, you have 32 loads + stores,
// the first 16 loads will be interleaved with the stores, and the next 16 will
// be clustered as expected. It should really split them into two batches of
// 16 stores each.
//
// Loads are clustered until this returns false, rather than trying to schedule
// groups of stores. This also means we have to deal with saying different
// address space loads should be clustered, and ones which might cause bank
// conflicts.
//
// This might be deprecated, so it might not be worth that much effort to fix.
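// For example, with the check below, loads at offsets 0 and 48 in a run of at
// most 16 loads are scheduled near each other (their offsets differ by less
// than 64 bytes), while loads at offsets 0 and 96 are not.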
bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
                                              int64_t Offset0, int64_t Offset1,
                                              unsigned NumLoads) const {
  assert(Offset1 > Offset0 &&
         "Second offset should be larger than first offset!");
  // If we have fewer than 16 loads in a row, and the offsets are within 64
  // bytes, then schedule together.

  // A cacheline is 64 bytes (for global memory).
  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
}

bool
AMDGPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
    const {
  // TODO: Implement this function
  return true;
}

void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  // TODO: Implement this function
}

bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
                                        ArrayRef<MachineOperand> Pred2) const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
                                       std::vector<MachineOperand> &Pred) const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
  // TODO: Implement this function
  return MI->getDesc().isPredicable();
}

bool
AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  // TODO: Implement this function
  return true;
}

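// The register load/store pseudos are marked with target-specific TSFlags
// bits (AMDGPU_FLAG_REGISTER_LOAD/STORE); the helpers below simply test
// those bits.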
bool AMDGPUInstrInfo::isRegisterStore(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
}

bool AMDGPUInstrInfo::isRegisterLoad(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
}

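// Lowest register index available for indirect addressing in this function:
// one past the highest live-in register of the indirect address register
// class, 0 if there are no such live-ins, or -1 if the function has no stack
// objects.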
int AMDGPUInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = -1;

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  if (MRI.livein_empty()) {
    return 0;
  }

  const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
  for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
                                            LE = MRI.livein_end();
       LI != LE; ++LI) {
    unsigned Reg = LI->first;
    if (TargetRegisterInfo::isVirtualRegister(Reg) ||
        !IndirectRC->contains(Reg))
      continue;

    unsigned RegIndex;
    unsigned RegEnd;
    for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
         ++RegIndex) {
      if (IndirectRC->getRegister(RegIndex) == Reg)
        break;
    }
    Offset = std::max(Offset, (int)RegIndex);
  }

  return Offset + 1;
}

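// One past the highest register index used for indirect addressing: the
// begin index extended by the frame-index offset reported by the target
// frame lowering, or -1 if the function has no stack objects.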
int AMDGPUInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  int Offset = 0;
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // Variable sized objects are not supported
  assert(!MFI->hasVarSizedObjects());

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  unsigned IgnoredFrameReg;
  Offset = MF.getSubtarget().getFrameLowering()->getFrameIndexReference(
      MF, -1, IgnoredFrameReg);

  return getIndirectIndexBegin(MF) + Offset;
}

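// Return the MIMG opcode variant that writes only the requested number of
// channels; any channel count without a dedicated variant (e.g. 4) keeps the
// original opcode.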
int AMDGPUInstrInfo::getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const {
  switch (Channels) {
  default: return Opcode;
  case 1: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_1);
  case 2: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_2);
  case 3: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_3);
  }
}

// Wrapper for Tablegen'd function. enum Subtarget is not defined in any
// header files, so we need to wrap it in a function that takes unsigned
// instead.
namespace llvm {
namespace AMDGPU {
static int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcodeGen(Opcode, (enum Subtarget)Gen);
}
} // End namespace AMDGPU
} // End namespace llvm

// This must be kept in sync with the SISubtarget class in SIInstrInfo.td
enum SISubtarget {
  SI = 0,
  VI = 1
};

static enum SISubtarget AMDGPUSubtargetToSISubtarget(unsigned Gen) {
  switch (Gen) {
  default:
    return SISubtarget::SI;
  case AMDGPUSubtarget::VOLCANIC_ISLANDS:
    return SISubtarget::VI;
  }
}

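// Map a pseudo instruction to the MC opcode for the current subtarget
// generation. Opcodes that are already native are returned unchanged; -1 is
// returned for pseudos with no encoding on this generation.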
int AMDGPUInstrInfo::pseudoToMCOpcode(int Opcode) const {
  int MCOp = AMDGPU::getMCOpcode(
      Opcode, AMDGPUSubtargetToSISubtarget(ST.getGeneration()));

  // -1 means that Opcode is already a native instruction.
  if (MCOp == -1)
    return Opcode;

  // (uint16_t)-1 means that Opcode is a pseudo instruction that has
  // no encoding in the given subtarget generation.
  if (MCOp == (uint16_t)-1)
    return -1;

  return MCOp;
}

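// Names for the AMDGPU target-index operands, used when serializing and
// parsing MIR.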
ArrayRef<std::pair<int, const char *>>
AMDGPUInstrInfo::getSerializableTargetIndices() const {
  static const std::pair<int, const char *> TargetIndices[] = {
      {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}};
  return makeArrayRef(TargetIndices);
}