//===-- R600ControlFlowFinalizer.cpp - Finalize Control Flow Inst ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass turns all control flow pseudo instructions into native ones,
/// computing their address on the fly; it also sets the STACK_SIZE info.
//===----------------------------------------------------------------------===//
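//
// Overview (a rough sketch based on the code below): the pass walks each
// basic block keeping a running count of emitted control-flow instructions
// (CfCount), records branch and loop pseudo instructions whose target
// addresses are not yet known, and patches those targets through
// CounterPropagateAddr() once the final positions are known. The CFStack
// helper models the hardware control-flow stack in order to derive the
// STACK_SIZE value stored in R600MachineFunctionInfo.
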
#define DEBUG_TYPE "r600cf"
#include "llvm/Support/Debug.h"
#include "AMDGPU.h"
#include "R600Defines.h"
#include "R600InstrInfo.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {

struct CFStack {

  enum StackItem {
    ENTRY = 0,
    SUB_ENTRY = 1,
    FIRST_NON_WQM_PUSH = 2,
    FIRST_NON_WQM_PUSH_W_FULL_ENTRY = 3
  };

  const AMDGPUSubtarget &ST;
  std::vector<StackItem> BranchStack;
  std::vector<StackItem> LoopStack;
  unsigned MaxStackSize;
  unsigned CurrentEntries;
  unsigned CurrentSubEntries;

  CFStack(const AMDGPUSubtarget &st, unsigned ShaderType) : ST(st),
      // We need to reserve a stack entry for CALL_FS in vertex shaders.
      MaxStackSize(ShaderType == ShaderType::VERTEX ? 1 : 0),
      CurrentEntries(0), CurrentSubEntries(0) { }

  unsigned getLoopDepth();
  bool branchStackContains(CFStack::StackItem);
  bool requiresWorkAroundForInst(unsigned Opcode);
  unsigned getSubEntrySize(CFStack::StackItem Item);
  void updateMaxStackSize();
  void pushBranch(unsigned Opcode, bool isWQM = false);
  void pushLoop();
  void popBranch();
  void popLoop();
};

unsigned CFStack::getLoopDepth() {
  return LoopStack.size();
}

bool CFStack::branchStackContains(CFStack::StackItem Item) {
  for (std::vector<CFStack::StackItem>::const_iterator I = BranchStack.begin(),
       E = BranchStack.end(); I != E; ++I) {
    if (*I == Item)
      return true;
  }
  return false;
}

bool CFStack::requiresWorkAroundForInst(unsigned Opcode) {
  if (Opcode == AMDGPU::CF_ALU_PUSH_BEFORE && ST.hasCaymanISA() &&
      getLoopDepth() > 1)
    return true;

  if (!ST.hasCFAluBug())
    return false;

  switch (Opcode) {
  default: return false;
  case AMDGPU::CF_ALU_PUSH_BEFORE:
  case AMDGPU::CF_ALU_ELSE_AFTER:
  case AMDGPU::CF_ALU_BREAK:
  case AMDGPU::CF_ALU_CONTINUE:
    if (CurrentSubEntries == 0)
      return false;

    if (ST.getWavefrontSize() == 64) {
      // We are being conservative here. We only require this work-around if
      // CurrentSubEntries > 3 &&
      // (CurrentSubEntries % 4 == 3 || CurrentSubEntries % 4 == 0)
      //
      // We have to be conservative, because we don't know for certain that
      // our stack allocation algorithm for Evergreen/NI is correct. Applying
      // this work-around when CurrentSubEntries > 3 allows us to over-allocate
      // stack resources without any problems.
      return CurrentSubEntries > 3;
    } else {
      assert(ST.getWavefrontSize() == 32);
      // We are being conservative here. We only require the work-around if
      // CurrentSubEntries > 7 &&
      // (CurrentSubEntries % 8 == 7 || CurrentSubEntries % 8 == 0)
      // See the comment on the wavefront size == 64 case for why we are
      // being conservative.
      return CurrentSubEntries > 7;
    }
  }
}

unsigned CFStack::getSubEntrySize(CFStack::StackItem Item) {
  switch (Item) {
  default:
    return 0;
  case CFStack::FIRST_NON_WQM_PUSH:
    assert(!ST.hasCaymanISA());
    if (ST.getGeneration() <= AMDGPUSubtarget::R700) {
      // +1 For the push operation.
      // +2 Extra space required.
      return 3;
    } else {
      // Some documentation says that this is not necessary on Evergreen,
      // but experimentation has shown that we need to allocate 1 extra
      // sub-entry for the first non-WQM push.
      // +1 For the push operation.
      // +1 Extra space required.
      return 2;
    }
  case CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY:
    assert(ST.getGeneration() >= AMDGPUSubtarget::EVERGREEN);
    // +1 For the push operation.
    // +1 Extra space required.
    return 2;
  case CFStack::SUB_ENTRY:
    return 1;
  }
}
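
// Note on the arithmetic in updateMaxStackSize() below: four sub-entries
// pack into one full stack entry, so the effective stack size is
// CurrentEntries plus CurrentSubEntries rounded up to a multiple of four,
// divided by four. For example, 2 entries and 5 sub-entries need
// 2 + RoundUpToAlignment(5, 4) / 4 = 2 + 2 = 4 hardware stack entries.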
void CFStack::updateMaxStackSize() {
  unsigned CurrentStackSize = CurrentEntries +
                              (RoundUpToAlignment(CurrentSubEntries, 4) / 4);
  MaxStackSize = std::max(CurrentStackSize, MaxStackSize);
}
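
// pushBranch() below classifies each push: a push issued in whole quad mode
// (WQM) costs a regular ENTRY, while the first non-WQM push on non-Cayman
// parts needs the extra sub-entry space described in getSubEntrySize() above.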
void CFStack::pushBranch(unsigned Opcode, bool isWQM) {
  CFStack::StackItem Item = CFStack::ENTRY;
  switch (Opcode) {
  case AMDGPU::CF_PUSH_EG:
  case AMDGPU::CF_ALU_PUSH_BEFORE:
    if (!isWQM) {
      if (!ST.hasCaymanISA() &&
          !branchStackContains(CFStack::FIRST_NON_WQM_PUSH))
        Item = CFStack::FIRST_NON_WQM_PUSH;  // May not be required on
                                             // Evergreen/NI, see comment in
                                             // CFStack::getSubEntrySize()
      else if (CurrentEntries > 0 &&
               ST.getGeneration() > AMDGPUSubtarget::EVERGREEN &&
               !ST.hasCaymanISA() &&
               !branchStackContains(CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY))
        Item = CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY;
      else
        Item = CFStack::SUB_ENTRY;
    } else
      Item = CFStack::ENTRY;
    break;
  }
  BranchStack.push_back(Item);
  if (Item == CFStack::ENTRY)
    CurrentEntries++;
  else
    CurrentSubEntries += getSubEntrySize(Item);
  updateMaxStackSize();
}

void CFStack::pushLoop() {
  LoopStack.push_back(CFStack::ENTRY);
  CurrentEntries++;
  updateMaxStackSize();
}

void CFStack::popBranch() {
  CFStack::StackItem Top = BranchStack.back();
  if (Top == CFStack::ENTRY)
    CurrentEntries--;
  else
    CurrentSubEntries -= getSubEntrySize(Top);
  BranchStack.pop_back();
}

void CFStack::popLoop() {
  CurrentEntries--;
  LoopStack.pop_back();
}

class R600ControlFlowFinalizer : public MachineFunctionPass {

private:
  typedef std::pair<MachineInstr *, std::vector<MachineInstr *> > ClauseFile;

  enum ControlFlowInstruction {
    CF_TC,
    CF_VC,
    CF_CALL_FS,
    CF_WHILE_LOOP,
    CF_END_LOOP,
    CF_LOOP_BREAK,
    CF_LOOP_CONTINUE,
    CF_JUMP,
    CF_ELSE,
    CF_POP,
    CF_END
  };

  static char ID;
  const R600InstrInfo *TII;
  const R600RegisterInfo *TRI;
  unsigned MaxFetchInst;
  const AMDGPUSubtarget &ST;

  bool IsTrivialInst(MachineInstr *MI) const {
    switch (MI->getOpcode()) {
    case AMDGPU::KILL:
    case AMDGPU::RETURN:
      return true;
    default:
      return false;
    }
  }

  const MCInstrDesc &getHWInstrDesc(ControlFlowInstruction CFI) const {
    unsigned Opcode = 0;
    bool isEg = (ST.getGeneration() >= AMDGPUSubtarget::EVERGREEN);
    switch (CFI) {
    case CF_TC:
      Opcode = isEg ? AMDGPU::CF_TC_EG : AMDGPU::CF_TC_R600;
      break;
    case CF_VC:
      Opcode = isEg ? AMDGPU::CF_VC_EG : AMDGPU::CF_VC_R600;
      break;
    case CF_CALL_FS:
      Opcode = isEg ? AMDGPU::CF_CALL_FS_EG : AMDGPU::CF_CALL_FS_R600;
      break;
    case CF_WHILE_LOOP:
      Opcode = isEg ? AMDGPU::WHILE_LOOP_EG : AMDGPU::WHILE_LOOP_R600;
      break;
    case CF_END_LOOP:
      Opcode = isEg ? AMDGPU::END_LOOP_EG : AMDGPU::END_LOOP_R600;
      break;
    case CF_LOOP_BREAK:
      Opcode = isEg ? AMDGPU::LOOP_BREAK_EG : AMDGPU::LOOP_BREAK_R600;
      break;
    case CF_LOOP_CONTINUE:
      Opcode = isEg ? AMDGPU::CF_CONTINUE_EG : AMDGPU::CF_CONTINUE_R600;
      break;
    case CF_JUMP:
      Opcode = isEg ? AMDGPU::CF_JUMP_EG : AMDGPU::CF_JUMP_R600;
      break;
    case CF_ELSE:
      Opcode = isEg ? AMDGPU::CF_ELSE_EG : AMDGPU::CF_ELSE_R600;
      break;
    case CF_POP:
      Opcode = isEg ? AMDGPU::POP_EG : AMDGPU::POP_R600;
      break;
    case CF_END:
      if (ST.hasCaymanISA()) {
        Opcode = AMDGPU::CF_END_CM;
        break;
      }
      Opcode = isEg ? AMDGPU::CF_END_EG : AMDGPU::CF_END_R600;
      break;
    }
    assert(Opcode && "No opcode selected");
    return TII->get(Opcode);
  }

  bool isCompatibleWithClause(const MachineInstr *MI,
      std::set<unsigned> &DstRegs) const {
    unsigned DstMI, SrcMI;
    for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
        E = MI->operands_end(); I != E; ++I) {
      const MachineOperand &MO = *I;
      if (!MO.isReg())
        continue;
      if (MO.isDef()) {
        unsigned Reg = MO.getReg();
        if (AMDGPU::R600_Reg128RegClass.contains(Reg))
          DstMI = Reg;
        else
          DstMI = TRI->getMatchingSuperReg(Reg,
              TRI->getSubRegFromChannel(TRI->getHWRegChan(Reg)),
              &AMDGPU::R600_Reg128RegClass);
      }
      if (MO.isUse()) {
        unsigned Reg = MO.getReg();
        if (AMDGPU::R600_Reg128RegClass.contains(Reg))
          SrcMI = Reg;
        else
          SrcMI = TRI->getMatchingSuperReg(Reg,
              TRI->getSubRegFromChannel(TRI->getHWRegChan(Reg)),
              &AMDGPU::R600_Reg128RegClass);
      }
    }
    if ((DstRegs.find(SrcMI) == DstRegs.end())) {
      DstRegs.insert(DstMI);
      return true;
    } else
      return false;
  }
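
  // MakeFetchClause() below greedily gathers consecutive fetch instructions
  // of one cache type (texture or vertex) into a clause. It skips trivial
  // instructions and stops after MaxFetchInst instructions, at the first
  // instruction that uses the other cache, or at the first instruction that
  // reads a 128-bit register written earlier in the same clause (see
  // isCompatibleWithClause() above), then prepends a CF_TC/CF_VC instruction
  // whose COUNT operand is the number of gathered instructions minus one.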
  ClauseFile
  MakeFetchClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I)
      const {
    MachineBasicBlock::iterator ClauseHead = I;
    std::vector<MachineInstr *> ClauseContent;
    unsigned AluInstCount = 0;
    bool IsTex = TII->usesTextureCache(ClauseHead);
    std::set<unsigned> DstRegs;
    for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
      if (IsTrivialInst(I))
        continue;
      if (AluInstCount >= MaxFetchInst)
        break;
      if ((IsTex && !TII->usesTextureCache(I)) ||
          (!IsTex && !TII->usesVertexCache(I)))
        break;
      if (!isCompatibleWithClause(I, DstRegs))
        break;
      AluInstCount++;
      ClauseContent.push_back(I);
    }
    MachineInstr *MIb = BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead),
        getHWInstrDesc(IsTex ? CF_TC : CF_VC))
        .addImm(0) // ADDR
        .addImm(AluInstCount - 1); // COUNT
    return ClauseFile(MIb, ClauseContent);
  }
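
  // getLiteral() below collects the literal constants used by an instruction
  // group into Lits: each distinct immediate gets one of the four literal
  // slots (ALU_LITERAL_X..W), identical immediates share a slot, and a group
  // may therefore use at most four distinct literals.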
  void getLiteral(MachineInstr *MI, std::vector<int64_t> &Lits) const {
    static const unsigned LiteralRegs[] = {
      AMDGPU::ALU_LITERAL_X,
      AMDGPU::ALU_LITERAL_Y,
      AMDGPU::ALU_LITERAL_Z,
      AMDGPU::ALU_LITERAL_W
    };
    const SmallVector<std::pair<MachineOperand *, int64_t>, 3> Srcs =
        TII->getSrcs(MI);
    for (unsigned i = 0, e = Srcs.size(); i < e; ++i) {
      if (Srcs[i].first->getReg() != AMDGPU::ALU_LITERAL_X)
        continue;
      int64_t Imm = Srcs[i].second;
      std::vector<int64_t>::iterator It =
          std::find(Lits.begin(), Lits.end(), Imm);
      if (It != Lits.end()) {
        // Reuse the slot of an identical literal already in the group.
        unsigned Index = It - Lits.begin();
        Srcs[i].first->setReg(LiteralRegs[Index]);
      } else {
        assert(Lits.size() < 4 && "Too many literals in Instruction Group");
        Srcs[i].first->setReg(LiteralRegs[Lits.size()]);
        Lits.push_back(Imm);
      }
    }
  }

  MachineBasicBlock::iterator insertLiterals(
      MachineBasicBlock::iterator InsertPos,
      const std::vector<unsigned> &Literals) const {
    MachineBasicBlock *MBB = InsertPos->getParent();
    for (unsigned i = 0, e = Literals.size(); i < e; i += 2) {
      unsigned LiteralPair0 = Literals[i];
      unsigned LiteralPair1 = (i + 1 < e) ? Literals[i + 1] : 0;
      InsertPos = BuildMI(MBB, InsertPos->getDebugLoc(),
          TII->get(AMDGPU::LITERALS))
          .addImm(LiteralPair0)
          .addImm(LiteralPair1);
    }
    return InsertPos;
  }
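
  // MakeALUClause() below gathers the instructions of one ALU clause: bundled
  // instruction groups are unbundled, their internal-read flags cleared, and
  // the literals of each group are materialized right after it as LITERALS
  // pseudo instructions carrying two 32-bit immediates each. The clause
  // head's COUNT operand (operand 7) is then set to the clause size minus
  // one.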
  ClauseFile
  MakeALUClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I)
      const {
    MachineBasicBlock::iterator ClauseHead = I;
    std::vector<MachineInstr *> ClauseContent;
    I++;
    for (MachineBasicBlock::instr_iterator E = MBB.instr_end(); I != E;) {
      if (IsTrivialInst(I)) {
        ++I;
        continue;
      }
      if (!I->isBundle() && !TII->isALUInstr(I->getOpcode()))
        break;
      std::vector<int64_t> Literals;
      if (I->isBundle()) {
        MachineInstr *DeleteMI = I;
        MachineBasicBlock::instr_iterator BI = I.getInstrIterator();
        while (++BI != E && BI->isBundledWithPred()) {
          BI->unbundleFromPred();
          for (unsigned i = 0, e = BI->getNumOperands(); i != e; ++i) {
            MachineOperand &MO = BI->getOperand(i);
            if (MO.isReg() && MO.isInternalRead())
              MO.setIsInternalRead(false);
          }
          getLiteral(BI, Literals);
          ClauseContent.push_back(BI);
        }
        I = BI;
        DeleteMI->eraseFromParent();
      } else {
        getLiteral(I, Literals);
        ClauseContent.push_back(I);
        I++;
      }
      for (unsigned i = 0, e = Literals.size(); i < e; i += 2) {
        unsigned literal0 = Literals[i];
        unsigned literal2 = (i + 1 < e) ? Literals[i + 1] : 0;
        MachineInstr *MILit = BuildMI(MBB, I, I->getDebugLoc(),
            TII->get(AMDGPU::LITERALS))
            .addImm(literal0)
            .addImm(literal2);
        ClauseContent.push_back(MILit);
      }
    }
    assert(ClauseContent.size() < 128 && "ALU clause is too big");
    ClauseHead->getOperand(7).setImm(ClauseContent.size() - 1);
    return ClauseFile(ClauseHead, ClauseContent);
  }
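
  // The two Emit* helpers below splice a previously built clause body behind
  // its clause marker once final addresses are known. Note the asymmetry in
  // how CfCount advances: 2 * size for fetch clauses versus size for ALU
  // clauses, presumably because a fetch instruction occupies two
  // CF-instruction-sized slots in the final binary.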
  void
  EmitFetchClause(MachineBasicBlock::iterator InsertPos, ClauseFile &Clause,
      unsigned &CfCount) {
    CounterPropagateAddr(Clause.first, CfCount);
    MachineBasicBlock *BB = Clause.first->getParent();
    BuildMI(BB, InsertPos->getDebugLoc(), TII->get(AMDGPU::FETCH_CLAUSE))
        .addImm(CfCount);
    for (unsigned i = 0, e = Clause.second.size(); i < e; ++i) {
      BB->splice(InsertPos, BB, Clause.second[i]);
    }
    CfCount += 2 * Clause.second.size();
  }

  void
  EmitALUClause(MachineBasicBlock::iterator InsertPos, ClauseFile &Clause,
      unsigned &CfCount) {
    Clause.first->getOperand(0).setImm(0);
    CounterPropagateAddr(Clause.first, CfCount);
    MachineBasicBlock *BB = Clause.first->getParent();
    BuildMI(BB, InsertPos->getDebugLoc(), TII->get(AMDGPU::ALU_CLAUSE))
        .addImm(CfCount);
    for (unsigned i = 0, e = Clause.second.size(); i < e; ++i) {
      BB->splice(InsertPos, BB, Clause.second[i]);
    }
    CfCount += Clause.second.size();
  }
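
  // Branch targets are first emitted as placeholder or block-relative
  // immediates (see the .addImm(0) calls above); CounterPropagateAddr() below
  // finalizes them by adding the base address once it is known.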
  void CounterPropagateAddr(MachineInstr *MI, unsigned Addr) const {
    MI->getOperand(0).setImm(Addr + MI->getOperand(0).getImm());
  }

  void CounterPropagateAddr(std::set<MachineInstr *> MIs, unsigned Addr)
      const {
    for (std::set<MachineInstr *>::iterator It = MIs.begin(), E = MIs.end();
        It != E; ++It) {
      MachineInstr *MI = *It;
      CounterPropagateAddr(MI, Addr);
    }
  }

public:
  R600ControlFlowFinalizer(TargetMachine &tm) : MachineFunctionPass(ID),
      TII(0), TRI(0), ST(tm.getSubtarget<AMDGPUSubtarget>()) {
    MaxFetchInst = ST.getTexVTXClauseSize();
  }

  virtual bool runOnMachineFunction(MachineFunction &MF) {
    TII = static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo());
    TRI = static_cast<const R600RegisterInfo *>(
        MF.getTarget().getRegisterInfo());
    R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();

    CFStack CFStack(ST, MFI->ShaderType);
    for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME;
        ++MB) {
      MachineBasicBlock &MBB = *MB;
      unsigned CfCount = 0;
      std::vector<std::pair<unsigned, std::set<MachineInstr *> > > LoopStack;
      std::vector<MachineInstr *> IfThenElseStack;
      if (MFI->ShaderType == ShaderType::VERTEX) {
        BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
            getHWInstrDesc(CF_CALL_FS));
        CfCount++;
      }
      std::vector<ClauseFile> FetchClauses, AluClauses;
      std::vector<MachineInstr *> LastAlu(1);
      std::vector<MachineInstr *> ToPopAfter;

      for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
          I != E;) {
        if (TII->usesTextureCache(I) || TII->usesVertexCache(I)) {
          DEBUG(dbgs() << CfCount << ":"; I->dump(););
          FetchClauses.push_back(MakeFetchClause(MBB, I));
          CfCount++;
          LastAlu.back() = 0;
          continue;
        }

        MachineBasicBlock::iterator MI = I;
        if (MI->getOpcode() != AMDGPU::ENDIF)
          LastAlu.back() = 0;
        if (MI->getOpcode() == AMDGPU::CF_ALU)
          LastAlu.back() = MI;
        I++;
        bool RequiresWorkAround =
            CFStack.requiresWorkAroundForInst(MI->getOpcode());
        switch (MI->getOpcode()) {
        case AMDGPU::CF_ALU_PUSH_BEFORE:
          if (RequiresWorkAround) {
            DEBUG(dbgs() << "Applying bug work-around for ALU_PUSH_BEFORE\n");
            BuildMI(MBB, MI, MBB.findDebugLoc(MI),
                TII->get(AMDGPU::CF_PUSH_EG))
                .addImm(CfCount + 1)
                .addImm(1);
            MI->setDesc(TII->get(AMDGPU::CF_ALU));
            CfCount++;
            CFStack.pushBranch(AMDGPU::CF_PUSH_EG);
          } else
            CFStack.pushBranch(AMDGPU::CF_ALU_PUSH_BEFORE);
          // Fall through to CF_ALU: the clause itself still has to be built.

        case AMDGPU::CF_ALU:
          I = MI;
          AluClauses.push_back(MakeALUClause(MBB, I));
          DEBUG(dbgs() << CfCount << ":"; MI->dump(););
          CfCount++;
          break;
        case AMDGPU::WHILELOOP: {
          CFStack.pushLoop();
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_WHILE_LOOP))
              .addImm(1);
          std::pair<unsigned, std::set<MachineInstr *> > Pair(CfCount,
              std::set<MachineInstr *>());
          Pair.second.insert(MIb);
          LoopStack.push_back(Pair);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::ENDLOOP: {
          CFStack.popLoop();
          std::pair<unsigned, std::set<MachineInstr *> > Pair =
              LoopStack.back();
          LoopStack.pop_back();
          CounterPropagateAddr(Pair.second, CfCount);
          BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END_LOOP))
              .addImm(Pair.first + 1);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::IF_PREDICATE_SET: {
          LastAlu.push_back(0);
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_JUMP))
              .addImm(0)
              .addImm(0);
          IfThenElseStack.push_back(MIb);
          DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::ELSE: {
          MachineInstr *JumpInst = IfThenElseStack.back();
          IfThenElseStack.pop_back();
          CounterPropagateAddr(JumpInst, CfCount);
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_ELSE))
              .addImm(0)
              .addImm(0);
          DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
          IfThenElseStack.push_back(MIb);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::ENDIF: {
          CFStack.popBranch();
          if (LastAlu.back()) {
            ToPopAfter.push_back(LastAlu.back());
          } else {
            MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
                getHWInstrDesc(CF_POP))
                .addImm(CfCount + 1)
                .addImm(1);
            (void)MIb;
            DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
            CfCount++;
          }

          MachineInstr *IfOrElseInst = IfThenElseStack.back();
          IfThenElseStack.pop_back();
          CounterPropagateAddr(IfOrElseInst, CfCount);
          IfOrElseInst->getOperand(1).setImm(1);
          LastAlu.pop_back();
          MI->eraseFromParent();
          break;
        }
        case AMDGPU::BREAK: {
          CfCount++;
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_LOOP_BREAK))
              .addImm(0);
          LoopStack.back().second.insert(MIb);
          MI->eraseFromParent();
          break;
        }
        case AMDGPU::CONTINUE: {
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_LOOP_CONTINUE))
              .addImm(0);
          LoopStack.back().second.insert(MIb);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::RETURN: {
          BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END));
          CfCount++;
          MI->eraseFromParent();
          if (CfCount % 2) {
            BuildMI(MBB, I, MBB.findDebugLoc(MI), TII->get(AMDGPU::PAD));
            CfCount++;
          }
          for (unsigned i = 0, e = FetchClauses.size(); i < e; i++)
            EmitFetchClause(I, FetchClauses[i], CfCount);
          for (unsigned i = 0, e = AluClauses.size(); i < e; i++)
            EmitALUClause(I, AluClauses[i], CfCount);
          break;
        }
        default:
          if (TII->isExport(MI->getOpcode())) {
            DEBUG(dbgs() << CfCount << ":"; MI->dump(););
            CfCount++;
          }
          break;
        }
      }
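
      // When the last instruction before an ENDIF is a CF_ALU clause, the
      // ENDIF handler above records it in ToPopAfter instead of emitting a
      // separate CF_POP; each such clause is now rewritten in place into
      // CF_ALU_POP_AFTER, copying over all nine immediate operands.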
      for (unsigned i = 0, e = ToPopAfter.size(); i < e; ++i) {
        MachineInstr *Alu = ToPopAfter[i];
        BuildMI(MBB, Alu, MBB.findDebugLoc((MachineBasicBlock::iterator)Alu),
            TII->get(AMDGPU::CF_ALU_POP_AFTER))
            .addImm(Alu->getOperand(0).getImm())
            .addImm(Alu->getOperand(1).getImm())
            .addImm(Alu->getOperand(2).getImm())
            .addImm(Alu->getOperand(3).getImm())
            .addImm(Alu->getOperand(4).getImm())
            .addImm(Alu->getOperand(5).getImm())
            .addImm(Alu->getOperand(6).getImm())
            .addImm(Alu->getOperand(7).getImm())
            .addImm(Alu->getOperand(8).getImm());
        Alu->eraseFromParent();
      }
      MFI->StackSize = CFStack.MaxStackSize;
    }

    return false;
  }

  const char *getPassName() const {
    return "R600 Control Flow Finalizer Pass";
  }
};

char R600ControlFlowFinalizer::ID = 0;

} // end anonymous namespace

llvm::FunctionPass *llvm::createR600ControlFlowFinalizer(TargetMachine &TM) {
  return new R600ControlFlowFinalizer(TM);
}