//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Implementation of the TargetInstrInfo class that is common to all
/// AMD GPUs.
//
//===----------------------------------------------------------------------===//
16 #include "AMDGPUInstrInfo.h"
17 #include "AMDGPURegisterInfo.h"
18 #include "AMDGPUTargetMachine.h"
19 #include "llvm/CodeGen/MachineFrameInfo.h"
20 #include "llvm/CodeGen/MachineInstrBuilder.h"
21 #include "llvm/CodeGen/MachineRegisterInfo.h"
23 #define GET_INSTRINFO_CTOR
24 #define GET_INSTRMAP_INFO
25 #include "AMDGPUGenInstrInfo.inc"
29 AMDGPUInstrInfo::AMDGPUInstrInfo(TargetMachine &tm)
30 : AMDGPUGenInstrInfo(0,0), RI(tm), TM(tm) { }
32 const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
36 bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
37 unsigned &SrcReg, unsigned &DstReg,
38 unsigned &SubIdx) const {
39 // TODO: Implement this function
43 unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
44 int &FrameIndex) const {
45 // TODO: Implement this function
49 unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
50 int &FrameIndex) const {
51 // TODO: Implement this function
55 bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
56 const MachineMemOperand *&MMO,
57 int &FrameIndex) const {
58 // TODO: Implement this function
61 unsigned AMDGPUInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
62 int &FrameIndex) const {
63 // TODO: Implement this function
66 unsigned AMDGPUInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
67 int &FrameIndex) const {
68 // TODO: Implement this function
71 bool AMDGPUInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
72 const MachineMemOperand *&MMO,
73 int &FrameIndex) const {
74 // TODO: Implement this function
79 AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
80 MachineBasicBlock::iterator &MBBI,
81 LiveVariables *LV) const {
82 // TODO: Implement this function
85 bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
86 MachineBasicBlock &MBB) const {
87 while (iter != MBB.end()) {
88 switch (iter->getOpcode()) {
91 case AMDGPU::BRANCH_COND_i32:
92 case AMDGPU::BRANCH_COND_f32:
102 AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
103 MachineBasicBlock::iterator MI,
104 unsigned SrcReg, bool isKill,
106 const TargetRegisterClass *RC,
107 const TargetRegisterInfo *TRI) const {
108 assert(!"Not Implemented");
112 AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
113 MachineBasicBlock::iterator MI,
114 unsigned DestReg, int FrameIndex,
115 const TargetRegisterClass *RC,
116 const TargetRegisterInfo *TRI) const {
117 assert(!"Not Implemented");
121 AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
123 const SmallVectorImpl<unsigned> &Ops,
124 int FrameIndex) const {
125 // TODO: Implement this function
129 AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
131 const SmallVectorImpl<unsigned> &Ops,
132 MachineInstr *LoadMI) const {
133 // TODO: Implement this function
137 AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
138 const SmallVectorImpl<unsigned> &Ops) const {
139 // TODO: Implement this function
143 AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
144 unsigned Reg, bool UnfoldLoad,
146 SmallVectorImpl<MachineInstr*> &NewMIs) const {
147 // TODO: Implement this function
152 AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
153 SmallVectorImpl<SDNode*> &NewNodes) const {
154 // TODO: Implement this function
159 AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
160 bool UnfoldLoad, bool UnfoldStore,
161 unsigned *LoadRegIndex) const {
162 // TODO: Implement this function
166 bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
167 int64_t Offset1, int64_t Offset2,
168 unsigned NumLoads) const {
169 assert(Offset2 > Offset1
170 && "Second offset should be larger than first offset!");
171 // If we have less than 16 loads in a row, and the offsets are within 16,
172 // then schedule together.
173 // TODO: Make the loads schedule near if it fits in a cacheline
174 return (NumLoads < 16 && (Offset2 - Offset1) < 16);
178 AMDGPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
180 // TODO: Implement this function
183 void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
184 MachineBasicBlock::iterator MI) const {
185 // TODO: Implement this function
188 bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
189 // TODO: Implement this function
193 AMDGPUInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
194 const SmallVectorImpl<MachineOperand> &Pred2)
196 // TODO: Implement this function
200 bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
201 std::vector<MachineOperand> &Pred) const {
202 // TODO: Implement this function
206 bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
207 // TODO: Implement this function
208 return MI->getDesc().isPredicable();
212 AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
213 // TODO: Implement this function
217 bool AMDGPUInstrInfo::isRegisterStore(const MachineInstr &MI) const {
218 return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
221 bool AMDGPUInstrInfo::isRegisterLoad(const MachineInstr &MI) const {
222 return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
226 void AMDGPUInstrInfo::convertToISA(MachineInstr & MI, MachineFunction &MF,
// NOTE(review): this listing is truncated — the rest of the parameter list
// and the function's closing braces are not visible in this chunk, so the
// function continues past the last line shown here.
228 MachineRegisterInfo &MRI = MF.getRegInfo();
229 const AMDGPURegisterInfo & RI = getRegisterInfo();
// Walk every operand and, for each def of a virtual register, swap its
// register class for the ISA-supported equivalent from getISARegClass().
// Uses and physical registers are left untouched.
231 for (unsigned i = 0; i < MI.getNumOperands(); i++) {
232 MachineOperand &MO = MI.getOperand(i);
233 // Convert dst regclass to one that is supported by the ISA
234 if (MO.isReg() && MO.isDef()) {
235 if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
236 const TargetRegisterClass * oldRegClass = MRI.getRegClass(MO.getReg());
237 const TargetRegisterClass * newRegClass = RI.getISARegClass(oldRegClass);
// Retarget the virtual register to the ISA-legal class.
241 MRI.setRegClass(MO.getReg(), newRegClass);