//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Implementation of the TargetInstrInfo class that is common to all
/// AMD GPUs.
//
//===----------------------------------------------------------------------===//
16 #include "AMDGPUInstrInfo.h"
17 #include "AMDGPURegisterInfo.h"
18 #include "AMDGPUTargetMachine.h"
20 #include "llvm/CodeGen/MachineFrameInfo.h"
21 #include "llvm/CodeGen/MachineInstrBuilder.h"
22 #include "llvm/CodeGen/MachineRegisterInfo.h"
24 #define GET_INSTRINFO_CTOR
25 #include "AMDGPUGenInstrInfo.inc"
29 AMDGPUInstrInfo::AMDGPUInstrInfo(TargetMachine &tm)
30 : AMDGPUGenInstrInfo(0,0), RI(tm, *this), TM(tm) { }
32 const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
36 bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
37 unsigned &SrcReg, unsigned &DstReg,
38 unsigned &SubIdx) const {
39 // TODO: Implement this function
43 unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
44 int &FrameIndex) const {
45 // TODO: Implement this function
49 unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
50 int &FrameIndex) const {
51 // TODO: Implement this function
55 bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
56 const MachineMemOperand *&MMO,
57 int &FrameIndex) const {
58 // TODO: Implement this function
61 unsigned AMDGPUInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
62 int &FrameIndex) const {
63 // TODO: Implement this function
66 unsigned AMDGPUInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
67 int &FrameIndex) const {
68 // TODO: Implement this function
71 bool AMDGPUInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
72 const MachineMemOperand *&MMO,
73 int &FrameIndex) const {
74 // TODO: Implement this function
79 AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
80 MachineBasicBlock::iterator &MBBI,
81 LiveVariables *LV) const {
82 // TODO: Implement this function
85 bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
86 MachineBasicBlock &MBB) const {
87 while (iter != MBB.end()) {
88 switch (iter->getOpcode()) {
91 case AMDGPU::BRANCH_COND_i32:
92 case AMDGPU::BRANCH_COND_f32:
101 MachineBasicBlock::iterator skipFlowControl(MachineBasicBlock *MBB) {
102 MachineBasicBlock::iterator tmp = MBB->end();
107 if (tmp->getOpcode() == AMDGPU::ENDLOOP
108 || tmp->getOpcode() == AMDGPU::ENDIF
109 || tmp->getOpcode() == AMDGPU::ELSE) {
110 if (tmp == MBB->begin()) {
123 AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
124 MachineBasicBlock::iterator MI,
125 unsigned SrcReg, bool isKill,
127 const TargetRegisterClass *RC,
128 const TargetRegisterInfo *TRI) const {
129 assert(!"Not Implemented");
133 AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
134 MachineBasicBlock::iterator MI,
135 unsigned DestReg, int FrameIndex,
136 const TargetRegisterClass *RC,
137 const TargetRegisterInfo *TRI) const {
138 assert(!"Not Implemented");
142 AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
144 const SmallVectorImpl<unsigned> &Ops,
145 int FrameIndex) const {
146 // TODO: Implement this function
150 AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
152 const SmallVectorImpl<unsigned> &Ops,
153 MachineInstr *LoadMI) const {
154 // TODO: Implement this function
158 AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
159 const SmallVectorImpl<unsigned> &Ops) const {
160 // TODO: Implement this function
164 AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
165 unsigned Reg, bool UnfoldLoad,
167 SmallVectorImpl<MachineInstr*> &NewMIs) const {
168 // TODO: Implement this function
173 AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
174 SmallVectorImpl<SDNode*> &NewNodes) const {
175 // TODO: Implement this function
180 AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
181 bool UnfoldLoad, bool UnfoldStore,
182 unsigned *LoadRegIndex) const {
183 // TODO: Implement this function
187 bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
188 int64_t Offset1, int64_t Offset2,
189 unsigned NumLoads) const {
190 assert(Offset2 > Offset1
191 && "Second offset should be larger than first offset!");
192 // If we have less than 16 loads in a row, and the offsets are within 16,
193 // then schedule together.
194 // TODO: Make the loads schedule near if it fits in a cacheline
195 return (NumLoads < 16 && (Offset2 - Offset1) < 16);
199 AMDGPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
201 // TODO: Implement this function
204 void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
205 MachineBasicBlock::iterator MI) const {
206 // TODO: Implement this function
209 bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
210 // TODO: Implement this function
214 AMDGPUInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
215 const SmallVectorImpl<MachineOperand> &Pred2)
217 // TODO: Implement this function
221 bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
222 std::vector<MachineOperand> &Pred) const {
223 // TODO: Implement this function
227 bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
228 // TODO: Implement this function
229 return MI->getDesc().isPredicable();
233 AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
234 // TODO: Implement this function
238 void AMDGPUInstrInfo::convertToISA(MachineInstr & MI, MachineFunction &MF,
240 MachineRegisterInfo &MRI = MF.getRegInfo();
241 const AMDGPURegisterInfo & RI = getRegisterInfo();
243 for (unsigned i = 0; i < MI.getNumOperands(); i++) {
244 MachineOperand &MO = MI.getOperand(i);
245 // Convert dst regclass to one that is supported by the ISA
246 if (MO.isReg() && MO.isDef()) {
247 if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
248 const TargetRegisterClass * oldRegClass = MRI.getRegClass(MO.getReg());
249 const TargetRegisterClass * newRegClass = RI.getISARegClass(oldRegClass);
253 MRI.setRegClass(MO.getReg(), newRegClass);