//===-- R600ControlFlowFinalizer.cpp - Finalize Control Flow Inst ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass turns all control flow pseudo instructions into native ones,
/// computing their address on the fly; it also sets the STACK_SIZE info.
//===----------------------------------------------------------------------===//

#include "llvm/Support/Debug.h"
#include "AMDGPU.h"
#include "R600Defines.h"
#include "R600InstrInfo.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <set>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "r600cf"

namespace {

struct CFStack {

  enum StackItem {
    ENTRY = 0,
    SUB_ENTRY = 1,
    FIRST_NON_WQM_PUSH = 2,
    FIRST_NON_WQM_PUSH_W_FULL_ENTRY = 3
  };

  const AMDGPUSubtarget &ST;
  std::vector<StackItem> BranchStack;
  std::vector<StackItem> LoopStack;
  unsigned MaxStackSize;
  unsigned CurrentEntries;
  unsigned CurrentSubEntries;

  CFStack(const AMDGPUSubtarget &st, unsigned ShaderType) : ST(st),
      // We need to reserve a stack entry for CALL_FS in vertex shaders.
      MaxStackSize(ShaderType == ShaderType::VERTEX ? 1 : 0),
      CurrentEntries(0), CurrentSubEntries(0) { }

  unsigned getLoopDepth();
  bool branchStackContains(CFStack::StackItem);
  bool requiresWorkAroundForInst(unsigned Opcode);
  unsigned getSubEntrySize(CFStack::StackItem Item);
  void updateMaxStackSize();

  void pushBranch(unsigned Opcode, bool isWQM = false);
  void pushLoop();
  void popBranch();
  void popLoop();
};

unsigned CFStack::getLoopDepth() {
  return LoopStack.size();
}

bool CFStack::branchStackContains(CFStack::StackItem Item) {
  for (std::vector<CFStack::StackItem>::const_iterator I = BranchStack.begin(),
       E = BranchStack.end(); I != E; ++I) {
    if (*I == Item)
      return true;
  }
  return false;
}
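
// Returns true if lowering the given control-flow opcode needs a hardware
// bug work-around on the current subtarget: either ALU_PUSH_BEFORE inside
// nested loops on Cayman, or the CF ALU stack bug on parts that report
// hasCFAluBug().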
bool CFStack::requiresWorkAroundForInst(unsigned Opcode) {
  if (Opcode == AMDGPU::CF_ALU_PUSH_BEFORE && ST.hasCaymanISA() &&
      getLoopDepth() > 1)
    return true;

  if (!ST.hasCFAluBug())
    return false;

  switch (Opcode) {
  default: return false;
  case AMDGPU::CF_ALU_PUSH_BEFORE:
  case AMDGPU::CF_ALU_ELSE_AFTER:
  case AMDGPU::CF_ALU_BREAK:
  case AMDGPU::CF_ALU_CONTINUE:
    if (CurrentSubEntries == 0)
      return false;

    if (ST.getWavefrontSize() == 64) {
      // We are being conservative here.  We only require this work-around if
      // CurrentSubEntries > 3 &&
      // (CurrentSubEntries % 4 == 3 || CurrentSubEntries % 4 == 0)
      //
      // We have to be conservative, because we don't know for certain that
      // our stack allocation algorithm for Evergreen/NI is correct.  Applying
      // this work-around when CurrentSubEntries > 3 allows us to over-allocate
      // stack resources without any problems.
      return CurrentSubEntries > 3;
    } else {
      assert(ST.getWavefrontSize() == 32);
      // We are being conservative here.  We only require the work-around if
      // CurrentSubEntries > 7 &&
      // (CurrentSubEntries % 8 == 7 || CurrentSubEntries % 8 == 0)
      // See the comment on the wavefront size == 64 case for why we are
      // being conservative.
      return CurrentSubEntries > 7;
    }
  }
}
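
// Returns how many stack sub-entries the given item consumes.  Four
// sub-entries are packed into one full hardware stack entry (see
// updateMaxStackSize()).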
unsigned CFStack::getSubEntrySize(CFStack::StackItem Item) {
  switch (Item) {
  default:
    return 0;
  case CFStack::FIRST_NON_WQM_PUSH:
    assert(!ST.hasCaymanISA());
    if (ST.getGeneration() <= AMDGPUSubtarget::R700) {
      // +1 For the push operation.
      // +2 Extra space required.
      return 3;
    } else {
      // Some documentation says that this is not necessary on Evergreen,
      // but experimentation has shown that we need to allocate 1 extra
      // sub-entry for the first non-WQM push.
      // +1 For the push operation.
      // +1 Extra space required.
      return 2;
    }
  case CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY:
    assert(ST.getGeneration() >= AMDGPUSubtarget::EVERGREEN);
    // +1 For the push operation.
    // +1 Extra space required.
    return 2;
  case CFStack::SUB_ENTRY:
    return 1;
  }
}
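
// Recomputes the stack high-water mark: full entries plus sub-entries
// rounded up to whole entries (four sub-entries per full entry).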
void CFStack::updateMaxStackSize() {
  unsigned CurrentStackSize = CurrentEntries +
                              (RoundUpToAlignment(CurrentSubEntries, 4) / 4);
  MaxStackSize = std::max(CurrentStackSize, MaxStackSize);
}

void CFStack::pushBranch(unsigned Opcode, bool isWQM) {
  CFStack::StackItem Item = CFStack::ENTRY;
  switch (Opcode) {
  case AMDGPU::CF_PUSH_EG:
  case AMDGPU::CF_ALU_PUSH_BEFORE:
    if (!isWQM) {
      if (!ST.hasCaymanISA() &&
          !branchStackContains(CFStack::FIRST_NON_WQM_PUSH))
        Item = CFStack::FIRST_NON_WQM_PUSH;  // May not be required on
                                             // Evergreen/NI, see comment in
                                             // CFStack::getSubEntrySize()
      else if (CurrentEntries > 0 &&
               ST.getGeneration() > AMDGPUSubtarget::EVERGREEN &&
               !ST.hasCaymanISA() &&
               !branchStackContains(CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY))
        Item = CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY;
      else
        Item = CFStack::SUB_ENTRY;
    } else
      Item = CFStack::ENTRY;
    break;
  }
  BranchStack.push_back(Item);
  if (Item == CFStack::ENTRY)
    CurrentEntries++;
  else
    CurrentSubEntries += getSubEntrySize(Item);
  updateMaxStackSize();
}

void CFStack::pushLoop() {
  LoopStack.push_back(CFStack::ENTRY);
  CurrentEntries++;
  updateMaxStackSize();
}

void CFStack::popBranch() {
  CFStack::StackItem Top = BranchStack.back();
  if (Top == CFStack::ENTRY)
    CurrentEntries--;
  else
    CurrentSubEntries -= getSubEntrySize(Top);
  BranchStack.pop_back();
}

void CFStack::popLoop() {
  CurrentEntries--;
  LoopStack.pop_back();
}
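
// Lowers control-flow pseudo instructions to native R600/Evergreen CF
// instructions, gathers fetch and ALU instructions into clauses, resolves
// branch addresses, and records the maximum control-flow stack depth in
// R600MachineFunctionInfo::StackSize.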
class R600ControlFlowFinalizer : public MachineFunctionPass {

private:
  typedef std::pair<MachineInstr *, std::vector<MachineInstr *> > ClauseFile;

  enum ControlFlowInstruction {
    CF_TC,
    CF_VC,
    CF_CALL_FS,
    CF_WHILE_LOOP,
    CF_END_LOOP,
    CF_LOOP_BREAK,
    CF_LOOP_CONTINUE,
    CF_JUMP,
    CF_ELSE,
    CF_POP,
    CF_END
  };

  static char ID;
  const R600InstrInfo *TII;
  const R600RegisterInfo *TRI;
  unsigned MaxFetchInst;
  const AMDGPUSubtarget &ST;

  bool IsTrivialInst(MachineInstr *MI) const {
    switch (MI->getOpcode()) {
    case AMDGPU::KILL:
    case AMDGPU::RETURN:
      return true;
    default:
      return false;
    }
  }
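
  // Selects the generation-specific hardware opcode for the given abstract
  // control-flow instruction and returns its instruction descriptor.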
  const MCInstrDesc &getHWInstrDesc(ControlFlowInstruction CFI) const {
    unsigned Opcode = 0;
    bool isEg = (ST.getGeneration() >= AMDGPUSubtarget::EVERGREEN);
    switch (CFI) {
    case CF_TC:
      Opcode = isEg ? AMDGPU::CF_TC_EG : AMDGPU::CF_TC_R600;
      break;
    case CF_VC:
      Opcode = isEg ? AMDGPU::CF_VC_EG : AMDGPU::CF_VC_R600;
      break;
    case CF_CALL_FS:
      Opcode = isEg ? AMDGPU::CF_CALL_FS_EG : AMDGPU::CF_CALL_FS_R600;
      break;
    case CF_WHILE_LOOP:
      Opcode = isEg ? AMDGPU::WHILE_LOOP_EG : AMDGPU::WHILE_LOOP_R600;
      break;
    case CF_END_LOOP:
      Opcode = isEg ? AMDGPU::END_LOOP_EG : AMDGPU::END_LOOP_R600;
      break;
    case CF_LOOP_BREAK:
      Opcode = isEg ? AMDGPU::LOOP_BREAK_EG : AMDGPU::LOOP_BREAK_R600;
      break;
    case CF_LOOP_CONTINUE:
      Opcode = isEg ? AMDGPU::CF_CONTINUE_EG : AMDGPU::CF_CONTINUE_R600;
      break;
    case CF_JUMP:
      Opcode = isEg ? AMDGPU::CF_JUMP_EG : AMDGPU::CF_JUMP_R600;
      break;
    case CF_ELSE:
      Opcode = isEg ? AMDGPU::CF_ELSE_EG : AMDGPU::CF_ELSE_R600;
      break;
    case CF_POP:
      Opcode = isEg ? AMDGPU::POP_EG : AMDGPU::POP_R600;
      break;
    case CF_END:
      if (ST.hasCaymanISA()) {
        Opcode = AMDGPU::CF_END_CM;
        break;
      }
      Opcode = isEg ? AMDGPU::CF_END_EG : AMDGPU::CF_END_R600;
      break;
    }
    assert(Opcode && "No opcode selected");
    return TII->get(Opcode);
  }
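
  // A fetch instruction may only join the current clause if none of the
  // 128-bit registers it reads were written earlier in the same clause;
  // DstRegs accumulates the clause's definitions as a side effect.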
  bool isCompatibleWithClause(const MachineInstr *MI,
                              std::set<unsigned> &DstRegs) const {
    unsigned DstMI, SrcMI;
    for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
         E = MI->operands_end(); I != E; ++I) {
      const MachineOperand &MO = *I;
      if (!MO.isReg())
        continue;
      if (MO.isDef()) {
        unsigned Reg = MO.getReg();
        if (AMDGPU::R600_Reg128RegClass.contains(Reg))
          DstMI = Reg;
        else
          DstMI = TRI->getMatchingSuperReg(Reg,
              TRI->getSubRegFromChannel(TRI->getHWRegChan(Reg)),
              &AMDGPU::R600_Reg128RegClass);
      }
      if (MO.isUse()) {
        unsigned Reg = MO.getReg();
        if (AMDGPU::R600_Reg128RegClass.contains(Reg))
          SrcMI = Reg;
        else
          SrcMI = TRI->getMatchingSuperReg(Reg,
              TRI->getSubRegFromChannel(TRI->getHWRegChan(Reg)),
              &AMDGPU::R600_Reg128RegClass);
      }
    }
    if ((DstRegs.find(SrcMI) == DstRegs.end())) {
      DstRegs.insert(DstMI);
      return true;
    } else
      return false;
  }
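
  // Greedily packs consecutive texture (or vertex) fetch instructions,
  // starting at I, into a single fetch clause of at most MaxFetchInst
  // instructions, and emits the CF_TC/CF_VC clause header before them.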
  ClauseFile
  MakeFetchClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I)
      const {
    MachineBasicBlock::iterator ClauseHead = I;
    std::vector<MachineInstr *> ClauseContent;
    unsigned AluInstCount = 0;
    bool IsTex = TII->usesTextureCache(ClauseHead);
    std::set<unsigned> DstRegs;
    for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
      if (IsTrivialInst(I))
        continue;
      if (AluInstCount >= MaxFetchInst)
        break;
      if ((IsTex && !TII->usesTextureCache(I)) ||
          (!IsTex && !TII->usesVertexCache(I)))
        break;
      if (!isCompatibleWithClause(I, DstRegs))
        break;
      AluInstCount++;
      ClauseContent.push_back(I);
    }
    MachineInstr *MIb = BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead),
        getHWInstrDesc(IsTex ? CF_TC : CF_VC))
        .addImm(0) // ADDR
        .addImm(AluInstCount - 1); // COUNT
    return ClauseFile(MIb, ClauseContent);
  }
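
  // Rewrites the ALU_LITERAL_X placeholder sources of MI to one of the four
  // literal channels (X/Y/Z/W), reusing a channel when the same immediate
  // already appears in Lits and appending the immediate otherwise.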
  void getLiteral(MachineInstr *MI, std::vector<int64_t> &Lits) const {
    static const unsigned LiteralRegs[] = {
      AMDGPU::ALU_LITERAL_X,
      AMDGPU::ALU_LITERAL_Y,
      AMDGPU::ALU_LITERAL_Z,
      AMDGPU::ALU_LITERAL_W
    };
    const SmallVector<std::pair<MachineOperand *, int64_t>, 3> Srcs =
        TII->getSrcs(MI);
    for (unsigned i = 0, e = Srcs.size(); i < e; ++i) {
      if (Srcs[i].first->getReg() != AMDGPU::ALU_LITERAL_X)
        continue;
      int64_t Imm = Srcs[i].second;
      std::vector<int64_t>::iterator It =
          std::find(Lits.begin(), Lits.end(), Imm);
      if (It != Lits.end()) {
        // Reuse the literal channel that already holds this immediate.
        unsigned Index = It - Lits.begin();
        Srcs[i].first->setReg(LiteralRegs[Index]);
      } else {
        // Allocate a fresh literal channel.
        assert(Lits.size() < 4 && "Too many literals in Instruction Group");
        Srcs[i].first->setReg(LiteralRegs[Lits.size()]);
        Lits.push_back(Imm);
      }
    }
  }
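
  // Emits LITERALS instructions at InsertPos; each one encodes a pair of
  // literal values, padding the last pair with zero when the count is odd.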
  MachineBasicBlock::iterator insertLiterals(
      MachineBasicBlock::iterator InsertPos,
      const std::vector<unsigned> &Literals) const {
    MachineBasicBlock *MBB = InsertPos->getParent();
    for (unsigned i = 0, e = Literals.size(); i < e; i += 2) {
      unsigned LiteralPair0 = Literals[i];
      unsigned LiteralPair1 = (i + 1 < e) ? Literals[i + 1] : 0;
      InsertPos = BuildMI(MBB, InsertPos->getDebugLoc(),
          TII->get(AMDGPU::LITERALS))
          .addImm(LiteralPair0)
          .addImm(LiteralPair1);
    }
    return InsertPos;
  }
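
  // Unbundles the ALU instruction groups following the clause head, assigns
  // their literal channels, appends the matching LITERALS instructions, and
  // records the final instruction count in the clause header.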
  ClauseFile
  MakeALUClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I)
      const {
    MachineBasicBlock::iterator ClauseHead = I;
    std::vector<MachineInstr *> ClauseContent;
    I++;
    for (MachineBasicBlock::instr_iterator E = MBB.instr_end(); I != E;) {
      if (IsTrivialInst(I)) {
        ++I;
        continue;
      }
      if (!I->isBundle() && !TII->isALUInstr(I->getOpcode()))
        break;
      std::vector<int64_t> Literals;
      if (I->isBundle()) {
        MachineInstr *DeleteMI = I;
        MachineBasicBlock::instr_iterator BI = I.getInstrIterator();
        while (++BI != E && BI->isBundledWithPred()) {
          BI->unbundleFromPred();
          for (unsigned i = 0, e = BI->getNumOperands(); i != e; ++i) {
            MachineOperand &MO = BI->getOperand(i);
            if (MO.isReg() && MO.isInternalRead())
              MO.setIsInternalRead(false);
          }
          getLiteral(BI, Literals);
          ClauseContent.push_back(BI);
        }
        I = BI;
        DeleteMI->eraseFromParent();
      } else {
        getLiteral(I, Literals);
        ClauseContent.push_back(I);
        I++;
      }
      for (unsigned i = 0, e = Literals.size(); i < e; i += 2) {
        unsigned literal0 = Literals[i];
        unsigned literal2 = (i + 1 < e) ? Literals[i + 1] : 0;
        MachineInstr *MILit = BuildMI(MBB, I, I->getDebugLoc(),
            TII->get(AMDGPU::LITERALS))
            .addImm(literal0)
            .addImm(literal2);
        ClauseContent.push_back(MILit);
      }
    }
    assert(ClauseContent.size() < 128 && "ALU clause is too big");
    ClauseHead->getOperand(7).setImm(ClauseContent.size() - 1);
    return ClauseFile(ClauseHead, ClauseContent);
  }
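
  // Splices a previously built fetch clause to its final position and fixes
  // up the clause address; every fetch instruction occupies two CF slots.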
  void
  EmitFetchClause(MachineBasicBlock::iterator InsertPos, ClauseFile &Clause,
      unsigned &CfCount) {
    CounterPropagateAddr(Clause.first, CfCount);
    MachineBasicBlock *BB = Clause.first->getParent();
    BuildMI(BB, InsertPos->getDebugLoc(), TII->get(AMDGPU::FETCH_CLAUSE))
        .addImm(CfCount);
    for (unsigned i = 0, e = Clause.second.size(); i < e; ++i) {
      BB->splice(InsertPos, BB, Clause.second[i]);
    }
    CfCount += 2 * Clause.second.size();
  }
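
  // Splices a previously built ALU clause to its final position; each ALU
  // instruction occupies one CF slot.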
  void
  EmitALUClause(MachineBasicBlock::iterator InsertPos, ClauseFile &Clause,
      unsigned &CfCount) {
    Clause.first->getOperand(0).setImm(0);
    CounterPropagateAddr(Clause.first, CfCount);
    MachineBasicBlock *BB = Clause.first->getParent();
    BuildMI(BB, InsertPos->getDebugLoc(), TII->get(AMDGPU::ALU_CLAUSE))
        .addImm(CfCount);
    for (unsigned i = 0, e = Clause.second.size(); i < e; ++i) {
      BB->splice(InsertPos, BB, Clause.second[i]);
    }
    CfCount += Clause.second.size();
  }
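
  // Patches the deferred address operand of a CF instruction (or a set of
  // them) once the final CF slot it refers to is known.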
  void CounterPropagateAddr(MachineInstr *MI, unsigned Addr) const {
    MI->getOperand(0).setImm(Addr + MI->getOperand(0).getImm());
  }

  void CounterPropagateAddr(std::set<MachineInstr *> MIs, unsigned Addr)
      const {
    for (std::set<MachineInstr *>::iterator It = MIs.begin(), E = MIs.end();
         It != E; ++It) {
      MachineInstr *MI = *It;
      CounterPropagateAddr(MI, Addr);
    }
  }

public:
  R600ControlFlowFinalizer(TargetMachine &tm) : MachineFunctionPass(ID),
      TII(nullptr), TRI(nullptr),
      ST(tm.getSubtarget<AMDGPUSubtarget>()) {
    MaxFetchInst = ST.getTexVTXClauseSize();
  }

  bool runOnMachineFunction(MachineFunction &MF) override {
    TII = static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo());
    TRI = static_cast<const R600RegisterInfo *>(
        MF.getTarget().getRegisterInfo());
    R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();

    CFStack CFStack(ST, MFI->ShaderType);
    for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME;
         ++MB) {
      MachineBasicBlock &MBB = *MB;
      unsigned CfCount = 0;
      std::vector<std::pair<unsigned, std::set<MachineInstr *> > > LoopStack;
      std::vector<MachineInstr *> IfThenElseStack;
      if (MFI->ShaderType == ShaderType::VERTEX) {
        BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
            getHWInstrDesc(CF_CALL_FS));
        CfCount++;
      }
      std::vector<ClauseFile> FetchClauses, AluClauses;
      std::vector<MachineInstr *> LastAlu(1);
      std::vector<MachineInstr *> ToPopAfter;
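
      // Walk the block: fold fetch instructions into clauses and lower each
      // control-flow pseudo instruction, resolving forward branch addresses
      // through LoopStack/IfThenElseStack once their targets are reached.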
      for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
           I != E;) {
        if (TII->usesTextureCache(I) || TII->usesVertexCache(I)) {
          DEBUG(dbgs() << CfCount << ":"; I->dump(););
          FetchClauses.push_back(MakeFetchClause(MBB, I));
          CfCount++;
          LastAlu.back() = nullptr;
          continue;
        }

        MachineBasicBlock::iterator MI = I;
        if (MI->getOpcode() != AMDGPU::ENDIF)
          LastAlu.back() = nullptr;
        if (MI->getOpcode() == AMDGPU::CF_ALU)
          LastAlu.back() = MI;
        I++;
        bool RequiresWorkAround =
            CFStack.requiresWorkAroundForInst(MI->getOpcode());
        switch (MI->getOpcode()) {
        case AMDGPU::CF_ALU_PUSH_BEFORE:
          if (RequiresWorkAround) {
            DEBUG(dbgs() << "Applying bug work-around for ALU_PUSH_BEFORE\n");
            BuildMI(MBB, MI, MBB.findDebugLoc(MI),
                TII->get(AMDGPU::CF_PUSH_EG))
                .addImm(CfCount + 1)
                .addImm(1);
            MI->setDesc(TII->get(AMDGPU::CF_ALU));
            CfCount++;
            CFStack.pushBranch(AMDGPU::CF_PUSH_EG);
          } else
            CFStack.pushBranch(AMDGPU::CF_ALU_PUSH_BEFORE);
          // Fall through to emit the ALU clause itself.

        case AMDGPU::CF_ALU:
          I = MI;
          AluClauses.push_back(MakeALUClause(MBB, I));
          DEBUG(dbgs() << CfCount << ":"; MI->dump(););
          CfCount++;
          break;
        case AMDGPU::WHILELOOP: {
          CFStack.pushLoop();
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_WHILE_LOOP))
              .addImm(1);
          std::pair<unsigned, std::set<MachineInstr *> > Pair(CfCount,
              std::set<MachineInstr *>());
          Pair.second.insert(MIb);
          LoopStack.push_back(Pair);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::ENDLOOP: {
          CFStack.popLoop();
          std::pair<unsigned, std::set<MachineInstr *> > Pair =
              LoopStack.back();
          LoopStack.pop_back();
          CounterPropagateAddr(Pair.second, CfCount);
          BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END_LOOP))
              .addImm(Pair.first + 1);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::IF_PREDICATE_SET: {
          LastAlu.push_back(nullptr);
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_JUMP))
              .addImm(0)
              .addImm(0);
          IfThenElseStack.push_back(MIb);
          DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::ELSE: {
          MachineInstr *JumpInst = IfThenElseStack.back();
          IfThenElseStack.pop_back();
          CounterPropagateAddr(JumpInst, CfCount);
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_ELSE))
              .addImm(0)
              .addImm(0);
          DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
          IfThenElseStack.push_back(MIb);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::ENDIF: {
          CFStack.popBranch();
          if (LastAlu.back()) {
            // The preceding CF_ALU can pop the stack itself; it is rewritten
            // to CF_ALU_POP_AFTER below instead of emitting a separate POP.
            ToPopAfter.push_back(LastAlu.back());
          } else {
            MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
                getHWInstrDesc(CF_POP))
                .addImm(CfCount + 1)
                .addImm(1);
            (void)MIb;
            DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
            CfCount++;
          }

          MachineInstr *IfOrElseInst = IfThenElseStack.back();
          IfThenElseStack.pop_back();
          CounterPropagateAddr(IfOrElseInst, CfCount);
          IfOrElseInst->getOperand(1).setImm(1);
          LastAlu.pop_back();
          MI->eraseFromParent();
          break;
        }
        case AMDGPU::BREAK: {
          CfCount++;
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_LOOP_BREAK))
              .addImm(0);
          LoopStack.back().second.insert(MIb);
          MI->eraseFromParent();
          break;
        }
        case AMDGPU::CONTINUE: {
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_LOOP_CONTINUE))
              .addImm(0);
          LoopStack.back().second.insert(MIb);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::RETURN: {
          BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END));
          CfCount++;
          MI->eraseFromParent();
          if (CfCount % 2) {
            BuildMI(MBB, I, MBB.findDebugLoc(MI), TII->get(AMDGPU::PAD));
            CfCount++;
          }
          for (unsigned i = 0, e = FetchClauses.size(); i < e; i++)
            EmitFetchClause(I, FetchClauses[i], CfCount);
          for (unsigned i = 0, e = AluClauses.size(); i < e; i++)
            EmitALUClause(I, AluClauses[i], CfCount);
          break;
        }
        default:
          if (TII->isExport(MI->getOpcode())) {
            DEBUG(dbgs() << CfCount << ":"; MI->dump(););
            CfCount++;
          }
          break;
        }
      }

      // Rewrite the CF_ALU preceding each remaining ENDIF so that it pops the
      // stack itself, preserving all of its original operands.
      for (unsigned i = 0, e = ToPopAfter.size(); i < e; ++i) {
        MachineInstr *Alu = ToPopAfter[i];
        BuildMI(MBB, Alu, MBB.findDebugLoc((MachineBasicBlock::iterator)Alu),
            TII->get(AMDGPU::CF_ALU_POP_AFTER))
            .addImm(Alu->getOperand(0).getImm())
            .addImm(Alu->getOperand(1).getImm())
            .addImm(Alu->getOperand(2).getImm())
            .addImm(Alu->getOperand(3).getImm())
            .addImm(Alu->getOperand(4).getImm())
            .addImm(Alu->getOperand(5).getImm())
            .addImm(Alu->getOperand(6).getImm())
            .addImm(Alu->getOperand(7).getImm())
            .addImm(Alu->getOperand(8).getImm());
        Alu->eraseFromParent();
      }
      MFI->StackSize = CFStack.MaxStackSize;
    }

    return false;
  }

  const char *getPassName() const override {
    return "R600 Control Flow Finalizer Pass";
  }
};

char R600ControlFlowFinalizer::ID = 0;

} // end anonymous namespace

llvm::FunctionPass *llvm::createR600ControlFlowFinalizer(TargetMachine &TM) {
  return new R600ControlFlowFinalizer(TM);
}