1 //===-- R600ControlFlowFinalizer.cpp - Finalize Control Flow Inst----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// This pass turns all control-flow pseudo instructions into native ones,
12 /// computing their addresses on the fly; it also sets STACK_SIZE info.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "r600cf"
16 #include "llvm/Support/Debug.h"
17 #include "llvm/Support/raw_ostream.h"
20 #include "R600Defines.h"
21 #include "R600InstrInfo.h"
22 #include "R600MachineFunctionInfo.h"
23 #include "R600RegisterInfo.h"
24 #include "llvm/CodeGen/MachineFunctionPass.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
// Machine pass that finalizes R600 control flow: it lowers CF pseudo
// instructions into generation-specific native ones and records stack usage.
// NOTE(review): this listing is elided — the embedded original line numbers
// are non-consecutive, so declarations (e.g. the ControlFlowInstruction
// enumerators, access specifiers) are missing between the lines below.
30 class R600ControlFlowFinalizer : public MachineFunctionPass {
// A clause is the CF instruction heading it plus the instructions it covers.
33 typedef std::pair<MachineInstr *, std::vector<MachineInstr *> > ClauseFile;
35 enum ControlFlowInstruction {
// Target hooks and limits, initialized in the constructor below.
50 const R600InstrInfo *TII;
51 const R600RegisterInfo &TRI;
52 unsigned MaxFetchInst;
53 const AMDGPUSubtarget &ST;
// Returns true for instructions that should not count toward clause limits
// (used by MakeFetchClause/MakeALUClause to skip them).
// NOTE(review): the switch cases and returns are missing from this elided
// listing; the selection criteria cannot be confirmed from here.
55 bool IsTrivialInst(MachineInstr *MI) const {
56 switch (MI->getOpcode()) {
// Maps a ControlFlowInstruction enumerator to the generation-specific native
// opcode (R600 vs. Evergreen encodings, plus a Cayman special case for
// CF_END) and returns its instruction descriptor.
65 const MCInstrDesc &getHWInstrDesc(ControlFlowInstruction CFI) const {
// HD5XXX (Evergreen) and newer use the _EG opcodes.
67 bool isEg = (ST.device()->getGeneration() >= AMDGPUDeviceInfo::HD5XXX);
// NOTE(review): the `switch (CFI)` header and most `case` labels are elided
// from this listing; each assignment below belongs to one case arm.
70 Opcode = isEg ? AMDGPU::CF_TC_EG : AMDGPU::CF_TC_R600;
73 Opcode = isEg ? AMDGPU::CF_VC_EG : AMDGPU::CF_VC_R600;
76 Opcode = isEg ? AMDGPU::CF_CALL_FS_EG : AMDGPU::CF_CALL_FS_R600;
79 Opcode = isEg ? AMDGPU::WHILE_LOOP_EG : AMDGPU::WHILE_LOOP_R600;
82 Opcode = isEg ? AMDGPU::END_LOOP_EG : AMDGPU::END_LOOP_R600;
85 Opcode = isEg ? AMDGPU::LOOP_BREAK_EG : AMDGPU::LOOP_BREAK_R600;
87 case CF_LOOP_CONTINUE:
88 Opcode = isEg ? AMDGPU::CF_CONTINUE_EG : AMDGPU::CF_CONTINUE_R600;
91 Opcode = isEg ? AMDGPU::CF_JUMP_EG : AMDGPU::CF_JUMP_R600;
94 Opcode = isEg ? AMDGPU::CF_ELSE_EG : AMDGPU::CF_ELSE_R600;
97 Opcode = isEg ? AMDGPU::POP_EG : AMDGPU::POP_R600;
// Cayman has its own CF_END encoding, distinct from both R600 and EG.
100 if (ST.device()->getDeviceFlag() == OCL_DEVICE_CAYMAN) {
101 Opcode = AMDGPU::CF_END_CM;
104 Opcode = isEg ? AMDGPU::CF_END_EG : AMDGPU::CF_END_R600;
// Opcode must have been set by one of the arms above; 0 means a CFI value
// fell through unhandled.
107 assert (Opcode && "No opcode selected");
108 return TII->get(Opcode);
// Decides whether MI can be appended to the clause being built without a
// register dependency on instructions already in it: it checks MI's operands
// (widened to their 128-bit super-registers) against the clause's accumulated
// DstRegs/SrcRegs sets, and records MI's registers into those sets on
// success. NOTE(review): lines are elided here (reg/def tests, the DstMI
// computation, and returns are missing), so the exact hazard rule can only
// be partially confirmed from this listing.
111 bool isCompatibleWithClause(const MachineInstr *MI,
112 std::set<unsigned> &DstRegs, std::set<unsigned> &SrcRegs) const {
113 unsigned DstMI, SrcMI;
114 for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
115 E = MI->operands_end(); I != E; ++I) {
116 const MachineOperand &MO = *I;
122 unsigned Reg = MO.getReg();
123 if (AMDGPU::R600_Reg128RegClass.contains(Reg))
// Fold a per-channel register up to its 128-bit super-register so that
// dependencies are tracked at vector-register granularity.
126 SrcMI = TRI.getMatchingSuperReg(Reg,
127 TRI.getSubRegFromChannel(TRI.getHWRegChan(Reg)),
128 &AMDGPU::R600_Reg128RegClass);
// Compatible when MI reads nothing the clause wrote and writes nothing
// the clause read; record its registers for subsequent candidates.
131 if ((DstRegs.find(SrcMI) == DstRegs.end()) &&
132 (SrcRegs.find(DstMI) == SrcRegs.end())) {
133 SrcRegs.insert(SrcMI);
134 DstRegs.insert(DstMI);
// Greedily collects consecutive texture- or vertex-fetch instructions
// starting at I into one clause (capped by MaxFetchInst and by register
// compatibility), then inserts the CF_TC/CF_VC clause-head instruction
// before them. I is advanced past the clause; returns (head, members).
141 MakeFetchClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I)
143 MachineBasicBlock::iterator ClauseHead = I;
144 std::vector<MachineInstr *> ClauseContent;
145 unsigned AluInstCount = 0;
// The first instruction fixes the clause kind: texture vs. vertex cache.
146 bool IsTex = TII->usesTextureCache(ClauseHead);
147 std::set<unsigned> DstRegs, SrcRegs;
148 for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
// Trivial instructions neither end the clause nor count toward its size.
149 if (IsTrivialInst(I))
151 if (AluInstCount > MaxFetchInst)
// Stop at the first instruction of the other fetch kind (or non-fetch).
153 if ((IsTex && !TII->usesTextureCache(I)) ||
154 (!IsTex && !TII->usesVertexCache(I)))
156 if (!isCompatibleWithClause(I, DstRegs, SrcRegs))
159 ClauseContent.push_back(I);
// Emit the clause head in front of the gathered instructions; the COUNT
// field is encoded as (number of instructions - 1).
161 MachineInstr *MIb = BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead),
162 getHWInstrDesc(IsTex?CF_TC:CF_VC))
164 .addImm(AluInstCount - 1); // COUNT
165 return ClauseFile(MIb, ClauseContent);
// Collects the immediate literals MI reads through ALU_LITERAL_X operands
// into Lits (deduplicating), and retargets each such operand to the literal
// channel register (X/Y/Z/W) matching the literal's slot. An instruction
// group may carry at most four distinct literals (asserted below).
168 void getLiteral(MachineInstr *MI, std::vector<int64_t> &Lits) const {
// Slot index -> per-channel literal register.
169 unsigned LiteralRegs[] = {
170 AMDGPU::ALU_LITERAL_X,
171 AMDGPU::ALU_LITERAL_Y,
172 AMDGPU::ALU_LITERAL_Z,
173 AMDGPU::ALU_LITERAL_W
175 for (unsigned i = 0, e = MI->getNumOperands(); i < e; ++i) {
176 MachineOperand &MO = MI->getOperand(i);
// Only operands still pointing at the generic literal register matter;
// NOTE(review): the isReg() guard line is elided from this listing.
179 if (MO.getReg() != AMDGPU::ALU_LITERAL_X)
// The literal's value lives in the instruction's IMM operand.
181 unsigned ImmIdx = TII->getOperandIdx(MI->getOpcode(), R600Operands::IMM);
182 int64_t Imm = MI->getOperand(ImmIdx).getImm();
183 std::vector<int64_t>::iterator It =
184 std::find(Lits.begin(), Lits.end(), Imm);
185 if (It != Lits.end()) {
// Literal already collected: reuse its existing slot.
186 unsigned Index = It - Lits.begin();
187 MO.setReg(LiteralRegs[Index]);
// New literal: append it and point the operand at the next free slot.
189 assert(Lits.size() < 4 && "Too many literals in Instruction Group");
190 MO.setReg(LiteralRegs[Lits.size()]);
// Emits one AMDGPU::LITERALS instruction per pair of literals before
// InsertPos (an odd trailing literal is padded with 0) and returns the
// updated insertion point.
196 MachineBasicBlock::iterator insertLiterals(
197 MachineBasicBlock::iterator InsertPos,
198 const std::vector<unsigned> &Literals) const {
199 MachineBasicBlock *MBB = InsertPos->getParent();
// Literals are packed two per LITERALS instruction.
200 for (unsigned i = 0, e = Literals.size(); i < e; i+=2) {
201 unsigned LiteralPair0 = Literals[i];
202 unsigned LiteralPair1 = (i + 1 < e)?Literals[i + 1]:0;
203 InsertPos = BuildMI(MBB, InsertPos->getDebugLoc(),
204 TII->get(AMDGPU::LITERALS))
205 .addImm(LiteralPair0)
206 .addImm(LiteralPair1);
212 MakeALUClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I)
214 MachineBasicBlock::iterator ClauseHead = I;
215 std::vector<MachineInstr *> ClauseContent;
217 for (MachineBasicBlock::instr_iterator E = MBB.instr_end(); I != E;) {
218 if (IsTrivialInst(I)) {
222 if (!I->isBundle() && !TII->isALUInstr(I->getOpcode()))
224 std::vector<int64_t> Literals;
226 MachineInstr *DeleteMI = I;
227 MachineBasicBlock::instr_iterator BI = I.getInstrIterator();
228 while (++BI != E && BI->isBundledWithPred()) {
229 BI->unbundleFromPred();
230 for (unsigned i = 0, e = BI->getNumOperands(); i != e; ++i) {
231 MachineOperand &MO = BI->getOperand(i);
232 if (MO.isReg() && MO.isInternalRead())
233 MO.setIsInternalRead(false);
235 getLiteral(BI, Literals);
236 ClauseContent.push_back(BI);
239 DeleteMI->eraseFromParent();
241 getLiteral(I, Literals);
242 ClauseContent.push_back(I);
245 for (unsigned i = 0, e = Literals.size(); i < e; i+=2) {
246 unsigned literal0 = Literals[i];
247 unsigned literal2 = (i + 1 < e)?Literals[i + 1]:0;
248 MachineInstr *MILit = BuildMI(MBB, I, I->getDebugLoc(),
249 TII->get(AMDGPU::LITERALS))
252 ClauseContent.push_back(MILit);
255 ClauseHead->getOperand(7).setImm(ClauseContent.size() - 1);
256 return ClauseFile(ClauseHead, ClauseContent);
// Moves a previously built fetch clause to InsertPos: patches the clause
// head's address operand with the running CF counter, emits a FETCH_CLAUSE
// marker, splices the clause members over, and advances CfCount by two
// slots per member (fetch instructions occupy two CF words here —
// NOTE(review): confirm against the ISA encoding).
260 EmitFetchClause(MachineBasicBlock::iterator InsertPos, ClauseFile &Clause,
262 CounterPropagateAddr(Clause.first, CfCount);
263 MachineBasicBlock *BB = Clause.first->getParent();
264 BuildMI(BB, InsertPos->getDebugLoc(), TII->get(AMDGPU::FETCH_CLAUSE))
266 for (unsigned i = 0, e = Clause.second.size(); i < e; ++i) {
267 BB->splice(InsertPos, BB, Clause.second[i]);
269 CfCount += 2 * Clause.second.size();
// Counterpart of EmitFetchClause for ALU clauses: patches the head address,
// emits an ALU_CLAUSE marker, splices the members before InsertPos, and
// advances CfCount by one slot per member.
273 EmitALUClause(MachineBasicBlock::iterator InsertPos, ClauseFile &Clause,
275 CounterPropagateAddr(Clause.first, CfCount);
276 MachineBasicBlock *BB = Clause.first->getParent();
277 BuildMI(BB, InsertPos->getDebugLoc(), TII->get(AMDGPU::ALU_CLAUSE))
279 for (unsigned i = 0, e = Clause.second.size(); i < e; ++i) {
280 BB->splice(InsertPos, BB, Clause.second[i]);
282 CfCount += Clause.second.size();
// Relocates MI's address operand (operand 0) by adding the base address
// Addr to whatever relative offset was stored there earlier.
285 void CounterPropagateAddr(MachineInstr *MI, unsigned Addr) const {
286 MI->getOperand(0).setImm(Addr + MI->getOperand(0).getImm());
// Applies the single-instruction overload above to every instruction in MIs
// (used to patch all break/continue instructions of a loop at once).
// NOTE(review): MIs is taken by value, copying the whole std::set per call;
// passing `const std::set<MachineInstr *> &` would avoid the copy — cannot
// be changed here because intermediate lines of this listing are elided.
288 void CounterPropagateAddr(std::set<MachineInstr *> MIs, unsigned Addr)
290 for (std::set<MachineInstr *>::iterator It = MIs.begin(), E = MIs.end();
292 MachineInstr *MI = *It;
293 CounterPropagateAddr(MI, Addr);
// Converts a count of stack sub-entries into the hardware STACK_SIZE value
// for the current GPU generation; the final division rounds up because four
// sub-entries share one hardware stack entry.
// NOTE(review): the per-generation adjustment bodies (and the use of
// hasPush) are elided from this listing.
297 unsigned getHWStackSize(unsigned StackSubEntry, bool hasPush) const {
298 switch (ST.device()->getGeneration()) {
299 case AMDGPUDeviceInfo::HD4XXX:
303 case AMDGPUDeviceInfo::HD5XXX:
306 case AMDGPUDeviceInfo::HD6XXX:
310 return (StackSubEntry + 3)/4; // Need ceil value of StackSubEntry/4
// Constructor: caches the target's instruction/register info and subtarget,
// then picks MaxFetchInst per generation (body elided in this listing).
314 R600ControlFlowFinalizer(TargetMachine &tm) : MachineFunctionPass(ID),
315 TII (static_cast<const R600InstrInfo *>(tm.getInstrInfo())),
316 TRI(TII->getRegisterInfo()),
317 ST(tm.getSubtarget<AMDGPUSubtarget>()) {
// NOTE(review): this local ST shadows the member ST initialized just above;
// the local is redundant and the shadowing is a latent maintenance hazard.
318 const AMDGPUSubtarget &ST = tm.getSubtarget<AMDGPUSubtarget>();
319 if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD4XXX)
// Main pass entry point: walks every basic block, replaces each CF pseudo
// instruction with its native equivalent (via getHWInstrDesc), maintains a
// running CF-slot counter (CfCount) so forward/backward jump addresses can
// be patched in, tracks loop and if/else nesting on explicit stacks, and
// finally records the hardware stack size in the function info.
// NOTE(review): many lines are elided from this listing (loop increments,
// break/continue statements, closing braces, CfCount updates), so the
// comments below describe only what the visible lines establish.
325 virtual bool runOnMachineFunction(MachineFunction &MF) {
// Stack depth bookkeeping: CurrentStack tracks nesting, MaxStack its peak.
326 unsigned MaxStack = 0;
327 unsigned CurrentStack = 0;
329 for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME;
331 MachineBasicBlock &MBB = *MB;
332 unsigned CfCount = 0;
// Each LoopStack entry: (address of the loop head, set of break/continue
// instructions waiting for the loop-end address).
333 std::vector<std::pair<unsigned, std::set<MachineInstr *> > > LoopStack;
334 std::vector<MachineInstr * > IfThenElseStack;
335 R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
// ShaderType 1: prepend a CALL_FS at the start of the block.
// NOTE(review): presumably this is the vertex-shader type — confirm
// against the ShaderType enum.
336 if (MFI->ShaderType == 1) {
337 BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
338 getHWInstrDesc(CF_CALL_FS));
// Clauses are built first and spliced to their final position at the end.
341 std::vector<ClauseFile> FetchClauses, AluClauses;
342 for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
// Fetch instructions are grouped into TC/VC clauses.
344 if (TII->usesTextureCache(I) || TII->usesVertexCache(I)) {
345 DEBUG(dbgs() << CfCount << ":"; I->dump(););
346 FetchClauses.push_back(MakeFetchClause(MBB, I));
351 MachineBasicBlock::iterator MI = I;
353 switch (MI->getOpcode()) {
// An ALU clause with PUSH grows the stack.
354 case AMDGPU::CF_ALU_PUSH_BEFORE:
356 MaxStack = std::max(MaxStack, CurrentStack);
360 AluClauses.push_back(MakeALUClause(MBB, I));
// Exports and RAT writes consume a CF slot but need no rewriting beyond
// what the elided lines do.
361 case AMDGPU::EG_ExportBuf:
362 case AMDGPU::EG_ExportSwz:
363 case AMDGPU::R600_ExportBuf:
364 case AMDGPU::R600_ExportSwz:
365 case AMDGPU::RAT_WRITE_CACHELESS_32_eg:
366 case AMDGPU::RAT_WRITE_CACHELESS_128_eg:
367 DEBUG(dbgs() << CfCount << ":"; MI->dump(););
// Loop head: emit WHILE_LOOP, open a new LoopStack frame whose address is
// the current CF slot and whose set collects the loop's exit jumps.
370 case AMDGPU::WHILELOOP: {
372 MaxStack = std::max(MaxStack, CurrentStack);
373 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
374 getHWInstrDesc(CF_WHILE_LOOP))
376 std::pair<unsigned, std::set<MachineInstr *> > Pair(CfCount,
377 std::set<MachineInstr *>());
378 Pair.second.insert(MIb);
379 LoopStack.push_back(Pair);
380 MI->eraseFromParent();
// Loop tail: patch every pending jump in the frame with the end address,
// then emit END_LOOP targeting one past the loop head.
384 case AMDGPU::ENDLOOP: {
386 std::pair<unsigned, std::set<MachineInstr *> > Pair =
388 LoopStack.pop_back();
389 CounterPropagateAddr(Pair.second, CfCount);
390 BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END_LOOP))
391 .addImm(Pair.first + 1);
392 MI->eraseFromParent();
// if: emit a JUMP whose target is fixed up when the matching ELSE/ENDIF
// is reached; remember it on the if/else stack.
396 case AMDGPU::IF_PREDICATE_SET: {
397 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
398 getHWInstrDesc(CF_JUMP))
401 IfThenElseStack.push_back(MIb);
402 DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
403 MI->eraseFromParent();
// else: resolve the pending JUMP to here, emit CF_ELSE, and leave it on
// the stack for the ENDIF to resolve.
408 MachineInstr * JumpInst = IfThenElseStack.back();
409 IfThenElseStack.pop_back();
410 CounterPropagateAddr(JumpInst, CfCount);
411 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
412 getHWInstrDesc(CF_ELSE))
415 DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
416 IfThenElseStack.push_back(MIb);
417 MI->eraseFromParent();
// endif: resolve the pending JUMP/ELSE to the slot after the POP, then
// emit the POP itself to unwind one stack entry.
421 case AMDGPU::ENDIF: {
423 MachineInstr *IfOrElseInst = IfThenElseStack.back();
424 IfThenElseStack.pop_back();
425 CounterPropagateAddr(IfOrElseInst, CfCount + 1);
426 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
427 getHWInstrDesc(CF_POP))
431 DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
432 MI->eraseFromParent();
// break: JUMP over the break when the predicate fails, LOOP_BREAK (address
// patched at ENDLOOP via the LoopStack set), then POP.
436 case AMDGPU::PREDICATED_BREAK: {
439 BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_JUMP))
442 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
443 getHWInstrDesc(CF_LOOP_BREAK))
445 BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_POP))
448 LoopStack.back().second.insert(MIb);
449 MI->eraseFromParent();
// continue: like break, the CONTINUE's target is patched at ENDLOOP.
452 case AMDGPU::CONTINUE: {
453 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
454 getHWInstrDesc(CF_LOOP_CONTINUE))
456 LoopStack.back().second.insert(MIb);
457 MI->eraseFromParent();
// return: emit the generation-specific CF_END, pad, then splice all the
// deferred fetch/ALU clauses into place now that addresses are final.
461 case AMDGPU::RETURN: {
462 BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END));
464 MI->eraseFromParent();
466 BuildMI(MBB, I, MBB.findDebugLoc(MI), TII->get(AMDGPU::PAD));
469 for (unsigned i = 0, e = FetchClauses.size(); i < e; i++)
470 EmitFetchClause(I, FetchClauses[i], CfCount);
471 for (unsigned i = 0, e = AluClauses.size(); i < e; i++)
472 EmitALUClause(I, AluClauses[i], CfCount);
// Record the final stack requirement for the driver/runtime.
// NOTE(review): hasPush is declared on a line elided from this listing.
478 MFI->StackSize = getHWStackSize(MaxStack, hasPush);
// Human-readable pass name shown by -debug-pass and similar tooling.
484 const char *getPassName() const {
485 return "R600 Control Flow Finalizer Pass";
489 char R600ControlFlowFinalizer::ID = 0;
// Factory used by the AMDGPU target's pass pipeline setup; ownership of the
// returned pass transfers to the pass manager.
494 llvm::FunctionPass *llvm::createR600ControlFlowFinalizer(TargetMachine &TM) {
495 return new R600ControlFlowFinalizer(TM);