//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Implementation of the TargetInstrInfo class that is common to all
/// AMD GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
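
// The GET_* guards below select which pieces of the TableGen-generated
// AMDGPUGenInstrInfo.inc get compiled into this file: GET_INSTRINFO_CTOR
// emits the AMDGPUGenInstrInfo constructor, GET_INSTRINFO_NAMED_OPS emits the
// named-operand helpers (getNamedOperandIdx), and GET_INSTRMAP_INFO emits the
// instruction-mapping tables behind queries such as AMDGPU::getMaskedMIMGOp,
// used at the bottom of this file.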
#define GET_INSTRINFO_CTOR
#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"

using namespace llvm;

AMDGPUInstrInfo::AMDGPUInstrInfo(TargetMachine &tm)
  : AMDGPUGenInstrInfo(-1, -1), RI(tm), TM(tm) { }

const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
  return RI;
}

bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                            unsigned &SrcReg, unsigned &DstReg,
                                            unsigned &SubIdx) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
                                                     int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
                                            const MachineMemOperand *&MMO,
                                            int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

MachineInstr *
AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                       MachineBasicBlock::iterator &MBBI,
                                       LiveVariables *LV) const {
  // TODO: Implement this function
  return NULL;
}

bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
                                         MachineBasicBlock &MBB) const {
  // Advance iter to the next branch instruction in MBB, if there is one.
  while (iter != MBB.end()) {
    switch (iter->getOpcode()) {
    default:
      break;
    case AMDGPU::BRANCH_COND_i32:
    case AMDGPU::BRANCH_COND_f32:
    case AMDGPU::BRANCH:
      return true;
    }
    ++iter;
  }
  return false;
}

void
AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MI,
                                     unsigned SrcReg, bool isKill,
                                     int FrameIndex,
                                     const TargetRegisterClass *RC,
                                     const TargetRegisterInfo *TRI) const {
  assert(!"Not Implemented");
}

void
AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned DestReg, int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  assert(!"Not Implemented");
}

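// As expandPostRAPseudo reads them below, the register-indirect load/store
// pseudos carry: operand 0 = the value register, operand 1 = the offset
// register, operand 2 = the register index immediate, and operand 3 = the
// channel immediate. When the offset register is the sentinel
// AMDGPU::INDIRECT_BASE_ADDR, the access resolves to a fixed indirect address
// register and degenerates into a plain move.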
bool AMDGPUInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();

  switch (MI->getOpcode()) {
  default:
    if (isRegisterLoad(*MI)) {
      unsigned RegIndex = MI->getOperand(2).getImm();
      unsigned Channel = MI->getOperand(3).getImm();
      unsigned Address = calculateIndirectAddress(RegIndex, Channel);
      unsigned OffsetReg = MI->getOperand(1).getReg();
      if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
        buildMovInstr(MBB, MI, MI->getOperand(0).getReg(),
                      getIndirectAddrRegClass()->getRegister(Address));
      } else {
        buildIndirectRead(MBB, MI, MI->getOperand(0).getReg(),
                          Address, OffsetReg);
      }
    } else if (isRegisterStore(*MI)) {
      unsigned RegIndex = MI->getOperand(2).getImm();
      unsigned Channel = MI->getOperand(3).getImm();
      unsigned Address = calculateIndirectAddress(RegIndex, Channel);
      unsigned OffsetReg = MI->getOperand(1).getReg();
      if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
        buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
                      MI->getOperand(0).getReg());
      } else {
        buildIndirectWrite(MBB, MI, MI->getOperand(0).getReg(),
                           Address, OffsetReg);
      }
    } else {
      return false;
    }
  }

  MBB->getParent()->getRegInfo().clearKillFlags(MI->getOperand(0).getReg());
  MI->eraseFromParent();
  return true;
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       int FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       MachineInstr *LoadMI) const {
  // TODO: Implement this function
  return 0;
}

bool
AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                      const SmallVectorImpl<unsigned> &Ops) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                     unsigned Reg, bool UnfoldLoad,
                                     bool UnfoldStore,
                                     SmallVectorImpl<MachineInstr *> &NewMIs) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                     SmallVectorImpl<SDNode *> &NewNodes) const {
  // TODO: Implement this function
  return false;
}

unsigned
AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                            bool UnfoldLoad, bool UnfoldStore,
                                            unsigned *LoadRegIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                              int64_t Offset1, int64_t Offset2,
                                              unsigned NumLoads) const {
  assert(Offset2 > Offset1 &&
         "Second offset should be larger than first offset!");
  // If we have fewer than 16 loads in a row, and the offsets are within
  // 16 bytes, then schedule them together.
  // TODO: Cluster the loads whenever they fit within a cache line.
  return (NumLoads < 16 && (Offset2 - Offset1) < 16);
}
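
// For example, two loads at offsets 0 and 12 in a run of two are clustered
// (2 < 16 and 12 - 0 < 16), while loads at offsets 0 and 32 are never
// clustered, however short the run.
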
bool
AMDGPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
  const {
  // TODO: Implement this function
  return true;
}

void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  // TODO: Implement this function
}

bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                   const SmallVectorImpl<MachineOperand> &Pred2)
  const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
                                       std::vector<MachineOperand> &Pred) const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
  // TODO: Implement this function
  return MI->getDesc().isPredicable();
}

bool
AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  // TODO: Implement this function
  return true;
}

bool AMDGPUInstrInfo::isRegisterStore(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
}

bool AMDGPUInstrInfo::isRegisterLoad(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
}
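
// AMDGPU_FLAG_REGISTER_LOAD and AMDGPU_FLAG_REGISTER_STORE are target-specific
// TSFlags bits set on the pseudo instructions by their TableGen definitions;
// testing them here is what lets expandPostRAPseudo recognize the
// indirect-addressing pseudos without enumerating their opcodes.
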
void AMDGPUInstrInfo::convertToISA(MachineInstr &MI, MachineFunction &MF,
                                   DebugLoc DL) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AMDGPURegisterInfo &RI = getRegisterInfo();

  for (unsigned i = 0; i < MI.getNumOperands(); i++) {
    MachineOperand &MO = MI.getOperand(i);
    // Convert the dst register class to one that is supported by the ISA.
    if (MO.isReg() && MO.isDef()) {
      if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        const TargetRegisterClass *OldRegClass = MRI.getRegClass(MO.getReg());
        const TargetRegisterClass *NewRegClass = RI.getISARegClass(OldRegClass);

        assert(NewRegClass && "No ISA register class for this register class");

        MRI.setRegClass(MO.getReg(), NewRegClass);
      }
    }
  }
}

int AMDGPUInstrInfo::getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const {
  switch (Channels) {
  default: return Opcode;
  case 1: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_1);
  case 2: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_2);
  case 3: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_3);
  }
}
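
// AMDGPU::getMaskedMIMGOp is the TableGen-generated instruction-map query
// pulled in via GET_INSTRMAP_INFO above. The mapping here assumes the default
// MIMG opcode writes all four channels, so a channel count of 4 (or any other
// value) falls through to the default case and leaves the opcode unchanged.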