1 //===-- R600ControlFlowFinalizer.cpp - Finalize Control Flow Inst----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// This pass turns all control flow pseudo instructions into native ones,
12 /// computing their addresses on the fly; it also sets STACK_SIZE info.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "r600cf"
16 #include "llvm/Support/Debug.h"
17 #include "llvm/Support/raw_ostream.h"
20 #include "R600Defines.h"
21 #include "R600InstrInfo.h"
22 #include "R600MachineFunctionInfo.h"
23 #include "R600RegisterInfo.h"
24 #include "llvm/CodeGen/MachineFunctionPass.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
// Machine pass that lowers control-flow pseudo instructions to the native
// R600/Evergreen CF instructions and records stack-depth information.
// NOTE(review): this listing is sampled — the enum members and access
// specifiers between the visible lines are elided.
30 class R600ControlFlowFinalizer : public MachineFunctionPass {
// Abstract CF opcodes used internally; translated to generation-specific
// hardware opcodes by getHWInstrDesc below.
33 enum ControlFlowInstruction {
// Target instruction info, used to look up MCInstrDescs (TII->get(...)).
46 const R600InstrInfo *TII;
// Upper bound on instructions per fetch clause; compared against the
// running count in MakeFetchClause.
47 unsigned MaxFetchInst;
// Subtarget handle; its device generation selects between the R600
// (<= HD4XXX) and Evergreen opcode variants in getHWInstrDesc.
48 const AMDGPUSubtarget &ST;
// Identifies texture/vertex fetch instructions by opcode — the kind of
// instruction that belongs inside a fetch clause.
// NOTE(review): the switch's return statements and default case are
// elided in this view; presumably the listed cases return true and the
// default returns false — confirm against the full file.
50 bool isFetch(const MachineInstr *MI) const {
51 switch (MI->getOpcode()) {
52 case AMDGPU::TEX_VTX_CONSTBUF:
53 case AMDGPU::TEX_VTX_TEXBUF:
55 case AMDGPU::TEX_GET_TEXTURE_RESINFO:
56 case AMDGPU::TEX_GET_GRADIENTS_H:
57 case AMDGPU::TEX_GET_GRADIENTS_V:
58 case AMDGPU::TEX_SET_GRADIENTS_H:
59 case AMDGPU::TEX_SET_GRADIENTS_V:
60 case AMDGPU::TEX_SAMPLE:
61 case AMDGPU::TEX_SAMPLE_C:
62 case AMDGPU::TEX_SAMPLE_L:
63 case AMDGPU::TEX_SAMPLE_C_L:
64 case AMDGPU::TEX_SAMPLE_LB:
65 case AMDGPU::TEX_SAMPLE_C_LB:
66 case AMDGPU::TEX_SAMPLE_G:
67 case AMDGPU::TEX_SAMPLE_C_G:
69 case AMDGPU::TXD_SHADOW:
// Used by MakeFetchClause when sizing fetch clauses; instructions for
// which this returns true do not count against the clause limit.
// NOTE(review): the matched opcodes and return statements are elided in
// this view — the exact set of "trivial" instructions is not visible here.
76 bool IsTrivialInst(MachineInstr *MI) const {
77 switch (MI->getOpcode()) {
// Map one of this pass's abstract ControlFlowInstruction values to the
// generation-specific hardware opcode: the *_R600 variants for devices up
// to HD4XXX, the *_EG (Evergreen) variants otherwise.
// NOTE(review): most case labels are elided in this view; each visible
// return pairs with a case label that is not shown.
86 const MCInstrDesc &getHWInstrDesc(ControlFlowInstruction CFI) const {
87 if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD4XXX) {
90 return TII->get(AMDGPU::CF_TC_R600);
92 return TII->get(AMDGPU::CF_CALL_FS_R600);
94 return TII->get(AMDGPU::WHILE_LOOP_R600);
96 return TII->get(AMDGPU::END_LOOP_R600);
98 return TII->get(AMDGPU::LOOP_BREAK_R600);
99 case CF_LOOP_CONTINUE:
100 return TII->get(AMDGPU::CF_CONTINUE_R600);
102 return TII->get(AMDGPU::CF_JUMP_R600);
104 return TII->get(AMDGPU::CF_ELSE_R600);
106 return TII->get(AMDGPU::POP_R600);
// Evergreen (and later) variants of the same abstract opcodes.
111 return TII->get(AMDGPU::CF_TC_EG);
113 return TII->get(AMDGPU::CF_CALL_FS_EG);
115 return TII->get(AMDGPU::WHILE_LOOP_EG);
117 return TII->get(AMDGPU::END_LOOP_EG);
119 return TII->get(AMDGPU::LOOP_BREAK_EG);
120 case CF_LOOP_CONTINUE:
121 return TII->get(AMDGPU::CF_CONTINUE_EG);
123 return TII->get(AMDGPU::CF_JUMP_EG);
125 return TII->get(AMDGPU::CF_ELSE_EG);
127 return TII->get(AMDGPU::POP_EG);
// Wrap the run of fetch instructions starting at I into a fetch clause:
// scan forward counting instructions, then insert a CF_TC clause header
// before ClauseHead carrying the clause address and instruction count.
// Returns an iterator past the clause (the exact return is elided here).
132 MachineBasicBlock::iterator
133 MakeFetchClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
134 unsigned CfAddress) const {
135 MachineBasicBlock::iterator ClauseHead = I;
136 unsigned AluInstCount = 0;
137 for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
// Trivial instructions presumably do not count against the clause size
// limit — the statement following this check is elided in this view.
138 if (IsTrivialInst(I))
// A clause may hold at most MaxFetchInst instructions; the action taken
// on overflow (presumably ending the clause) is elided here.
143 if (AluInstCount > MaxFetchInst)
146 BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead),
147 getHWInstrDesc(CF_TC))
148 .addImm(CfAddress) // ADDR
149 .addImm(AluInstCount); // COUNT
// Patch MI's address operand: offsets operand 0's immediate by Addr
// (adds to the existing value rather than overwriting it).
152 void CounterPropagateAddr(MachineInstr *MI, unsigned Addr) const {
153 MI->getOperand(0).setImm(Addr + MI->getOperand(0).getImm());
// Apply the single-instruction CounterPropagateAddr to every instruction
// in MIs.
// NOTE(review): MIs is taken by value, copying the whole std::set on each
// call; passing it by const reference would avoid the copy without
// changing behavior.
155 void CounterPropagateAddr(std::set<MachineInstr *> MIs, unsigned Addr)
157 for (std::set<MachineInstr *>::iterator It = MIs.begin(), E = MIs.end();
159 MachineInstr *MI = *It;
160 CounterPropagateAddr(MI, Addr);
// Construct the pass: cache the target's instruction info and subtarget,
// then select MaxFetchInst based on the hardware generation (the actual
// assignments on both branches are elided in this view).
165 R600ControlFlowFinalizer(TargetMachine &tm) : MachineFunctionPass(ID),
166 TII (static_cast<const R600InstrInfo *>(tm.getInstrInfo())),
167 ST(tm.getSubtarget<AMDGPUSubtarget>()) {
// NOTE(review): this local ST shadows the member reference ST initialized
// just above with the same value — the local is redundant and should be
// removed in favor of the member.
168 const AMDGPUSubtarget &ST = tm.getSubtarget<AMDGPUSubtarget>();
169 if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD4XXX)
// Lower every control-flow pseudo instruction in MF to its native CF
// form, computing branch/loop target addresses on the fly, and emit a
// STACK_SIZE instruction recording the maximum CF stack depth.
// NOTE(review): this listing is sampled — several statements (CfCount /
// CurrentStack increments, operand lists, closing braces) fall between
// the visible lines; comments below describe only what is shown.
175 virtual bool runOnMachineFunction(MachineFunction &MF) {
176 unsigned MaxStack = 0;
177 unsigned CurrentStack = 0;
178 for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME;
180 MachineBasicBlock &MBB = *MB;
// Index of the next CF instruction in this block; used as the address
// value when patching jump/loop targets below.
181 unsigned CfCount = 0;
// Each entry pairs the CF index of a loop's WHILE_LOOP with the set of
// instructions (the loop itself, breaks, continues) whose address
// operand must be patched with the loop-end address at ENDLOOP.
182 std::vector<std::pair<unsigned, std::set<MachineInstr *> > > LoopStack;
// Pending CF_JUMP / CF_ELSE instructions awaiting a target address.
183 std::vector<MachineInstr * > IfThenElseStack;
184 R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
// assumes ShaderType == 1 denotes a shader that must begin with a
// CALL_FS (fetch-shader call) — TODO confirm against
// R600MachineFunctionInfo's ShaderType encoding.
185 if (MFI->ShaderType == 1) {
186 BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
187 getHWInstrDesc(CF_CALL_FS));
190 for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
193 DEBUG(dbgs() << CfCount << ":"; I->dump(););
// Runs of fetch instructions get wrapped into a fetch clause.
194 I = MakeFetchClause(MBB, I, 0);
199 MachineBasicBlock::iterator MI = I;
201 switch (MI->getOpcode()) {
// An ALU clause that pushes the CF stack: track the stack high-water
// mark (the CurrentStack increment itself is not visible here).
202 case AMDGPU::CF_ALU_PUSH_BEFORE:
204 MaxStack = std::max(MaxStack, CurrentStack);
206 case AMDGPU::EG_ExportBuf:
207 case AMDGPU::EG_ExportSwz:
208 case AMDGPU::R600_ExportBuf:
209 case AMDGPU::R600_ExportSwz:
210 DEBUG(dbgs() << CfCount << ":"; MI->dump(););
// Loop entry: emit the native WHILE_LOOP, remember its CF index, and
// register it for end-address patching at the matching ENDLOOP.
213 case AMDGPU::WHILELOOP: {
215 MaxStack = std::max(MaxStack, CurrentStack);
216 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
217 getHWInstrDesc(CF_WHILE_LOOP))
219 std::pair<unsigned, std::set<MachineInstr *> > Pair(CfCount,
220 std::set<MachineInstr *>());
221 Pair.second.insert(MIb);
222 LoopStack.push_back(Pair);
223 MI->eraseFromParent();
// Loop exit: patch every instruction recorded for this loop with the
// loop-end address (CfCount), then emit END_LOOP whose target is the
// CF index just past the loop head (Pair.first + 1).
// NOTE(review): the right-hand side of Pair's initialization (orig line
// 230, presumably LoopStack.back()) is elided between these lines.
227 case AMDGPU::ENDLOOP: {
229 std::pair<unsigned, std::set<MachineInstr *> > Pair =
231 LoopStack.pop_back();
232 CounterPropagateAddr(Pair.second, CfCount);
233 BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END_LOOP))
234 .addImm(Pair.first + 1);
235 MI->eraseFromParent();
// If: emit CF_JUMP with a target to be patched by the matching ELSE or
// ENDIF via CounterPropagateAddr.
239 case AMDGPU::IF_PREDICATE_SET: {
240 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
241 getHWInstrDesc(CF_JUMP))
244 IfThenElseStack.push_back(MIb);
245 DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
246 MI->eraseFromParent();
// Else: resolve the pending CF_JUMP to the current address, then emit
// CF_ELSE, which in turn waits on the ENDIF for its own target.
251 MachineInstr * JumpInst = IfThenElseStack.back();
252 IfThenElseStack.pop_back();
253 CounterPropagateAddr(JumpInst, CfCount);
254 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
255 getHWInstrDesc(CF_ELSE))
258 DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
259 IfThenElseStack.push_back(MIb);
260 MI->eraseFromParent();
// Endif: resolve the pending CF_JUMP/CF_ELSE to just past the POP
// (CfCount + 1) and emit the POP that unwinds one CF stack entry.
264 case AMDGPU::ENDIF: {
266 MachineInstr *IfOrElseInst = IfThenElseStack.back();
267 IfThenElseStack.pop_back();
268 CounterPropagateAddr(IfOrElseInst, CfCount + 1);
269 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
270 getHWInstrDesc(CF_POP))
273 DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
274 MI->eraseFromParent();
// Predicated break lowers to three CF instructions: a JUMP, a
// LOOP_BREAK (registered on the innermost LoopStack entry so ENDLOOP
// patches its target), and a POP.
278 case AMDGPU::PREDICATED_BREAK: {
281 BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_JUMP))
284 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
285 getHWInstrDesc(CF_LOOP_BREAK))
287 BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_POP))
290 LoopStack.back().second.insert(MIb);
291 MI->eraseFromParent();
// Continue: emit CF_CONTINUE; its target (the loop end) is patched when
// the enclosing ENDLOOP is reached.
294 case AMDGPU::CONTINUE: {
295 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
296 getHWInstrDesc(CF_LOOP_CONTINUE))
298 LoopStack.back().second.insert(MIb);
299 MI->eraseFromParent();
// Record the maximum CF stack depth at the top of the block (the
// STACK_SIZE operand list is elided in this view).
307 BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
308 TII->get(AMDGPU::STACK_SIZE))
// Human-readable pass name shown by pass-manager diagnostics.
315 const char *getPassName() const {
316 return "R600 Control Flow Finalizer Pass";
320 char R600ControlFlowFinalizer::ID = 0;
// Factory entry point: allocate a new R600ControlFlowFinalizer for TM.
// Ownership transfers to the caller (the pass manager).
325 llvm::FunctionPass *llvm::createR600ControlFlowFinalizer(TargetMachine &TM) {
326 return new R600ControlFlowFinalizer(TM);