//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Implementation of the TargetInstrInfo class that is common to all
/// AMD GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

// Pull in the TableGen-generated constructor and instruction-mapping tables.
#define GET_INSTRINFO_CTOR
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"

using namespace llvm;

AMDGPUInstrInfo::AMDGPUInstrInfo(TargetMachine &tm)
  : AMDGPUGenInstrInfo(0, 0), RI(tm, *this), TM(tm) { }

const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
  return RI;
}

bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                            unsigned &SrcReg, unsigned &DstReg,
                                            unsigned &SubIdx) const {
  // TODO: Implement this function
  return false;
}

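// The stack-slot recognizers below are not implemented yet. They follow the
// usual TargetInstrInfo convention: return 0 (no register) or false when the
// instruction is not known to be a simple stack-slot load or store.
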
unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
                                                     int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
                                            const MachineMemOperand *&MMO,
                                            int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

MachineInstr *
AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                       MachineBasicBlock::iterator &MBBI,
                                       LiveVariables *LV) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
                                         MachineBasicBlock &MBB) const {
  while (iter != MBB.end()) {
    switch (iter->getOpcode()) {
    default:
      break;
    case AMDGPU::BRANCH_COND_i32:
    case AMDGPU::BRANCH_COND_f32:
      return true;
    }
    ++iter;
  }
  return false;
}

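// Walk backwards from the end of MBB past any trailing structured
// flow-control pseudos (ENDLOOP, ENDIF, ELSE) and return the first position
// at which new instructions can be inserted ahead of them.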
MachineBasicBlock::iterator skipFlowControl(MachineBasicBlock *MBB) {
  if (MBB->empty())
    return MBB->end();
  MachineBasicBlock::iterator tmp = MBB->end();
  do {
    --tmp;
    if (tmp->getOpcode() == AMDGPU::ENDLOOP
        || tmp->getOpcode() == AMDGPU::ENDIF
        || tmp->getOpcode() == AMDGPU::ELSE) {
      if (tmp == MBB->begin())
        return tmp;
    } else {
      return ++tmp;
    }
  } while (tmp != MBB->begin());
  return tmp;
}

void
AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MI,
                                     unsigned SrcReg, bool isKill,
                                     int FrameIndex,
                                     const TargetRegisterClass *RC,
                                     const TargetRegisterInfo *TRI) const {
  assert(!"Not Implemented");
}

void
AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned DestReg, int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  assert(!"Not Implemented");
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       int FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       MachineInstr *LoadMI) const {
  // TODO: Implement this function
  return 0;
}

bool
AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                      const SmallVectorImpl<unsigned> &Ops) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                     unsigned Reg, bool UnfoldLoad,
                                     bool UnfoldStore,
                                     SmallVectorImpl<MachineInstr*> &NewMIs) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                     SmallVectorImpl<SDNode*> &NewNodes) const {
  // TODO: Implement this function
  return false;
}

unsigned
AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                            bool UnfoldLoad, bool UnfoldStore,
                                            unsigned *LoadRegIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                              int64_t Offset1, int64_t Offset2,
                                              unsigned NumLoads) const {
  assert(Offset2 > Offset1
         && "Second offset should be larger than first offset!");
  // If there are fewer than 16 loads in a row and the offsets are within 16
  // bytes of each other, schedule the loads together.
  // TODO: Keep loads near when they fit within the same cache line.
  return (NumLoads < 16 && (Offset2 - Offset1) < 16);
}

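// For illustration of the heuristic above (hypothetical values): two loads at
// offsets 0 and 8 with NumLoads == 2 are scheduled together (8 < 16), while
// loads at offsets 0 and 32 are not, regardless of NumLoads.
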
bool
AMDGPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
  const {
  // TODO: Implement this function
  return true;
}

void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  // TODO: Implement this function
}

bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                   const SmallVectorImpl<MachineOperand> &Pred2)
  const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
                                       std::vector<MachineOperand> &Pred) const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
  // TODO: Implement this function; for now defer to the generic instruction
  // description flag.
  return MI->getDesc().isPredicable();
}

bool
AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  // TODO: Implement this function
  return true;
}

bool AMDGPUInstrInfo::isRegisterStore(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
}

bool AMDGPUInstrInfo::isRegisterLoad(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
}

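// Note on the two helpers above: AMDGPU_FLAG_REGISTER_LOAD and
// AMDGPU_FLAG_REGISTER_STORE are assumed to be target-specific TSFlags bits
// set on the relevant instruction definitions; the helpers simply test those
// bits on the opcode's instruction description.
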
void AMDGPUInstrInfo::convertToISA(MachineInstr &MI, MachineFunction &MF,
                                   DebugLoc DL) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AMDGPURegisterInfo &RI = getRegisterInfo();

  for (unsigned i = 0; i < MI.getNumOperands(); i++) {
    MachineOperand &MO = MI.getOperand(i);
    // Convert dst regclass to one that is supported by the ISA
    if (MO.isReg() && MO.isDef()) {
      if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        const TargetRegisterClass *oldRegClass = MRI.getRegClass(MO.getReg());
        const TargetRegisterClass *newRegClass = RI.getISARegClass(oldRegClass);

        assert(newRegClass);
        MRI.setRegClass(MO.getReg(), newRegClass);
      }
    }
  }
}