//===-- R600ControlFlowFinalizer.cpp - Finalize Control Flow Inst----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass turns all control flow pseudo instructions into native ones,
/// computing their address on the fly; it also sets the STACK_SIZE info.
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "r600cf"
#include "llvm/Support/Debug.h"
#include "AMDGPU.h"
#include "R600Defines.h"
#include "R600InstrInfo.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <set>
#include <vector>

using namespace llvm;

namespace {

class R600ControlFlowFinalizer : public MachineFunctionPass {

private:
  typedef std::pair<MachineInstr *, std::vector<MachineInstr *> > ClauseFile;

  enum ControlFlowInstruction {
    CF_TC,
    CF_VC,
    CF_CALL_FS,
    CF_WHILE_LOOP,
    CF_END_LOOP,
    CF_LOOP_BREAK,
    CF_LOOP_CONTINUE,
    CF_JUMP,
    CF_ELSE,
    CF_POP,
    CF_END
  };

  static char ID;
  const R600InstrInfo *TII;
  const R600RegisterInfo *TRI;
  unsigned MaxFetchInst;
  const AMDGPUSubtarget &ST;

  bool IsTrivialInst(MachineInstr *MI) const {
    switch (MI->getOpcode()) {
    case AMDGPU::KILL:
    case AMDGPU::RETURN:
      return true;
    default:
      return false;
    }
  }
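
  /// Return the hardware control-flow instruction descriptor matching the
  /// pseudo opcode \p CFI for the current subtarget generation (R600/R700 vs.
  /// Evergreen and later, with a Cayman-specific encoding for CF_END).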
  const MCInstrDesc &getHWInstrDesc(ControlFlowInstruction CFI) const {
    unsigned Opcode = 0;
    bool isEg = (ST.getGeneration() >= AMDGPUSubtarget::EVERGREEN);
    switch (CFI) {
    case CF_TC:
      Opcode = isEg ? AMDGPU::CF_TC_EG : AMDGPU::CF_TC_R600;
      break;
    case CF_VC:
      Opcode = isEg ? AMDGPU::CF_VC_EG : AMDGPU::CF_VC_R600;
      break;
    case CF_CALL_FS:
      Opcode = isEg ? AMDGPU::CF_CALL_FS_EG : AMDGPU::CF_CALL_FS_R600;
      break;
    case CF_WHILE_LOOP:
      Opcode = isEg ? AMDGPU::WHILE_LOOP_EG : AMDGPU::WHILE_LOOP_R600;
      break;
    case CF_END_LOOP:
      Opcode = isEg ? AMDGPU::END_LOOP_EG : AMDGPU::END_LOOP_R600;
      break;
    case CF_LOOP_BREAK:
      Opcode = isEg ? AMDGPU::LOOP_BREAK_EG : AMDGPU::LOOP_BREAK_R600;
      break;
    case CF_LOOP_CONTINUE:
      Opcode = isEg ? AMDGPU::CF_CONTINUE_EG : AMDGPU::CF_CONTINUE_R600;
      break;
    case CF_JUMP:
      Opcode = isEg ? AMDGPU::CF_JUMP_EG : AMDGPU::CF_JUMP_R600;
      break;
    case CF_ELSE:
      Opcode = isEg ? AMDGPU::CF_ELSE_EG : AMDGPU::CF_ELSE_R600;
      break;
    case CF_POP:
      Opcode = isEg ? AMDGPU::POP_EG : AMDGPU::POP_R600;
      break;
    case CF_END:
      if (ST.hasCaymanISA()) {
        Opcode = AMDGPU::CF_END_CM;
        break;
      }
      Opcode = isEg ? AMDGPU::CF_END_EG : AMDGPU::CF_END_R600;
      break;
    }
    assert(Opcode && "No opcode selected");
    return TII->get(Opcode);
  }
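
  /// Return true if \p MI can be appended to the fetch clause being built: an
  /// instruction must not read a 128-bit register that an earlier instruction
  /// of the same clause has written. On success the destination register is
  /// recorded in \p DstRegs.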
  bool isCompatibleWithClause(const MachineInstr *MI,
      std::set<unsigned> &DstRegs) const {
    unsigned DstMI, SrcMI;
    for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
        E = MI->operands_end(); I != E; ++I) {
      const MachineOperand &MO = *I;
      if (!MO.isReg())
        continue;
      if (MO.isDef()) {
        unsigned Reg = MO.getReg();
        if (AMDGPU::R600_Reg128RegClass.contains(Reg))
          DstMI = Reg;
        else
          DstMI = TRI->getMatchingSuperReg(Reg,
              TRI->getSubRegFromChannel(TRI->getHWRegChan(Reg)),
              &AMDGPU::R600_Reg128RegClass);
      }
      if (MO.isUse()) {
        unsigned Reg = MO.getReg();
        if (AMDGPU::R600_Reg128RegClass.contains(Reg))
          SrcMI = Reg;
        else
          SrcMI = TRI->getMatchingSuperReg(Reg,
              TRI->getSubRegFromChannel(TRI->getHWRegChan(Reg)),
              &AMDGPU::R600_Reg128RegClass);
      }
    }
    if ((DstRegs.find(SrcMI) == DstRegs.end())) {
      DstRegs.insert(DstMI);
      return true;
    } else
      return false;
  }
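
  /// Collect consecutive texture- or vertex-fetch instructions starting at
  /// \p I into a single clause and emit the corresponding CF_TC/CF_VC
  /// clause-head instruction in front of them.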
  ClauseFile
  MakeFetchClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I)
      const {
    MachineBasicBlock::iterator ClauseHead = I;
    std::vector<MachineInstr *> ClauseContent;
    unsigned AluInstCount = 0;
    bool IsTex = TII->usesTextureCache(ClauseHead);
    std::set<unsigned> DstRegs;
    for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
      if (IsTrivialInst(I))
        continue;
      if (AluInstCount >= MaxFetchInst)
        break;
      if ((IsTex && !TII->usesTextureCache(I)) ||
          (!IsTex && !TII->usesVertexCache(I)))
        break;
      if (!isCompatibleWithClause(I, DstRegs))
        break;
      AluInstCount++;
      ClauseContent.push_back(I);
    }
    MachineInstr *MIb = BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead),
        getHWInstrDesc(IsTex ? CF_TC : CF_VC))
        .addImm(0) // ADDR
        .addImm(AluInstCount - 1); // COUNT
    return ClauseFile(MIb, ClauseContent);
  }
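
  /// Rewrite the literal operands of \p MI: identical immediates share one of
  /// the four ALU_LITERAL_{X,Y,Z,W} slots, and each distinct immediate is
  /// appended to \p Lits (at most four per instruction group).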
  void getLiteral(MachineInstr *MI, std::vector<int64_t> &Lits) const {
    static const unsigned LiteralRegs[] = {
      AMDGPU::ALU_LITERAL_X,
      AMDGPU::ALU_LITERAL_Y,
      AMDGPU::ALU_LITERAL_Z,
      AMDGPU::ALU_LITERAL_W
    };
    const SmallVector<std::pair<MachineOperand *, int64_t>, 3> Srcs =
        TII->getSrcs(MI);
    for (unsigned i = 0, e = Srcs.size(); i < e; ++i) {
      if (Srcs[i].first->getReg() != AMDGPU::ALU_LITERAL_X)
        continue;
      int64_t Imm = Srcs[i].second;
      std::vector<int64_t>::iterator It =
          std::find(Lits.begin(), Lits.end(), Imm);
      if (It != Lits.end()) {
        // Reuse the slot of an identical literal already in this group.
        unsigned Index = It - Lits.begin();
        Srcs[i].first->setReg(LiteralRegs[Index]);
      } else {
        assert(Lits.size() < 4 && "Too many literals in Instruction Group");
        Srcs[i].first->setReg(LiteralRegs[Lits.size()]);
        Lits.push_back(Imm);
      }
    }
  }
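
  /// Emit LITERALS pseudo instructions at \p InsertPos, packing two 32-bit
  /// literal values per instruction (the second slot is padded with 0 when the
  /// count is odd).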
  MachineBasicBlock::iterator insertLiterals(
      MachineBasicBlock::iterator InsertPos,
      const std::vector<unsigned> &Literals) const {
    MachineBasicBlock *MBB = InsertPos->getParent();
    for (unsigned i = 0, e = Literals.size(); i < e; i += 2) {
      unsigned LiteralPair0 = Literals[i];
      unsigned LiteralPair1 = (i + 1 < e) ? Literals[i + 1] : 0;
      InsertPos = BuildMI(MBB, InsertPos->getDebugLoc(),
          TII->get(AMDGPU::LITERALS))
          .addImm(LiteralPair0)
          .addImm(LiteralPair1);
    }
    return InsertPos;
  }
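
  /// Gather the ALU instructions following the clause head at \p I into a
  /// clause: bundles are flattened, literals are materialized as LITERALS
  /// instructions, and the clause head's COUNT operand is updated.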
  ClauseFile
  MakeALUClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I)
      const {
    MachineBasicBlock::iterator ClauseHead = I;
    std::vector<MachineInstr *> ClauseContent;
    I++;
    for (MachineBasicBlock::instr_iterator E = MBB.instr_end(); I != E;) {
      if (IsTrivialInst(I)) {
        ++I;
        continue;
      }
      if (!I->isBundle() && !TII->isALUInstr(I->getOpcode()))
        break;
      std::vector<int64_t> Literals;
      if (I->isBundle()) {
        // Flatten the bundle: unbundle each instruction and clear the
        // internal-read flags that only make sense inside a bundle.
        MachineInstr *DeleteMI = I;
        MachineBasicBlock::instr_iterator BI = I.getInstrIterator();
        while (++BI != E && BI->isBundledWithPred()) {
          BI->unbundleFromPred();
          for (unsigned i = 0, e = BI->getNumOperands(); i != e; ++i) {
            MachineOperand &MO = BI->getOperand(i);
            if (MO.isReg() && MO.isInternalRead())
              MO.setIsInternalRead(false);
          }
          getLiteral(BI, Literals);
          ClauseContent.push_back(BI);
        }
        I = BI;
        DeleteMI->eraseFromParent();
      } else {
        getLiteral(I, Literals);
        ClauseContent.push_back(I);
        I++;
      }
      for (unsigned i = 0, e = Literals.size(); i < e; i += 2) {
        unsigned literal0 = Literals[i];
        unsigned literal2 = (i + 1 < e) ? Literals[i + 1] : 0;
        MachineInstr *MILit = BuildMI(MBB, I, I->getDebugLoc(),
            TII->get(AMDGPU::LITERALS))
            .addImm(literal0)
            .addImm(literal2);
        ClauseContent.push_back(MILit);
      }
    }
    assert(ClauseContent.size() < 128 && "ALU clause is too big");
    ClauseHead->getOperand(7).setImm(ClauseContent.size() - 1);
    return ClauseFile(ClauseHead, ClauseContent);
  }
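
  /// Splice a previously built fetch clause in front of \p InsertPos, tag it
  /// with a FETCH_CLAUSE marker, patch the clause-head address, and advance
  /// the control-flow word counter (two CF words per fetch instruction).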
  void
  EmitFetchClause(MachineBasicBlock::iterator InsertPos, ClauseFile &Clause,
      unsigned &CfCount) {
    CounterPropagateAddr(Clause.first, CfCount);
    MachineBasicBlock *BB = Clause.first->getParent();
    BuildMI(BB, InsertPos->getDebugLoc(), TII->get(AMDGPU::FETCH_CLAUSE))
        .addImm(CfCount);
    for (unsigned i = 0, e = Clause.second.size(); i < e; ++i) {
      BB->splice(InsertPos, BB, Clause.second[i]);
    }
    CfCount += 2 * Clause.second.size();
  }
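
  /// Same as EmitFetchClause but for an ALU clause: the clause head's address
  /// operand is reset to the current CF address, an ALU_CLAUSE marker is
  /// emitted, and each spliced instruction accounts for one CF word.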
  void
  EmitALUClause(MachineBasicBlock::iterator InsertPos, ClauseFile &Clause,
      unsigned &CfCount) {
    Clause.first->getOperand(0).setImm(0);
    CounterPropagateAddr(Clause.first, CfCount);
    MachineBasicBlock *BB = Clause.first->getParent();
    BuildMI(BB, InsertPos->getDebugLoc(), TII->get(AMDGPU::ALU_CLAUSE))
        .addImm(CfCount);
    for (unsigned i = 0, e = Clause.second.size(); i < e; ++i) {
      BB->splice(InsertPos, BB, Clause.second[i]);
    }
    CfCount += Clause.second.size();
  }
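
  /// Add \p Addr to the address operand (operand 0) of \p MI; the overload
  /// taking a set applies the same fix-up to every recorded instruction.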
  void CounterPropagateAddr(MachineInstr *MI, unsigned Addr) const {
    MI->getOperand(0).setImm(Addr + MI->getOperand(0).getImm());
  }
  void CounterPropagateAddr(std::set<MachineInstr *> MIs, unsigned Addr)
      const {
    for (std::set<MachineInstr *>::iterator It = MIs.begin(), E = MIs.end();
        It != E; ++It) {
      MachineInstr *MI = *It;
      CounterPropagateAddr(MI, Addr);
    }
  }
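
  /// Translate the number of stack sub-entries used by the program into the
  /// STACK_SIZE value expected by the hardware: sub-entries are rounded up to
  /// whole entries of four, with generation-specific padding when pushes occur.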
  unsigned getHWStackSize(unsigned StackSubEntry, bool hasPush) const {
    switch (ST.getGeneration()) {
    case AMDGPUSubtarget::R600:
    case AMDGPUSubtarget::R700:
      if (hasPush)
        StackSubEntry += 2;
      break;
    case AMDGPUSubtarget::EVERGREEN:
      if (hasPush)
        StackSubEntry += 1;
    case AMDGPUSubtarget::NORTHERN_ISLANDS:
      StackSubEntry += 2;
      break;
    default: llvm_unreachable("Not a VLIW4/VLIW5 GPU");
    }
    return (StackSubEntry + 3)/4; // Need ceil value of StackSubEntry/4
  }

public:
  R600ControlFlowFinalizer(TargetMachine &tm) : MachineFunctionPass(ID),
      TII(0), TRI(0),
      ST(tm.getSubtarget<AMDGPUSubtarget>()) {
    MaxFetchInst = ST.getTexVTXClauseSize();
  }
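
  /// Walk every basic block, group fetch and ALU instructions into clauses,
  /// lower the control-flow pseudo instructions (loops, if/else/endif, break,
  /// continue, return) to native CF instructions with resolved addresses, and
  /// record the maximum hardware stack depth in the function info.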
  virtual bool runOnMachineFunction(MachineFunction &MF) {
    TII = static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo());
    TRI = static_cast<const R600RegisterInfo *>(MF.getTarget().getRegisterInfo());

    unsigned MaxStack = 0;
    unsigned CurrentStack = 0;
    unsigned CurrentLoopDepth = 0;
    bool HasPush = false;
    for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME;
        ++MB) {
      MachineBasicBlock &MBB = *MB;
      unsigned CfCount = 0;
      std::vector<std::pair<unsigned, std::set<MachineInstr *> > > LoopStack;
      std::vector<MachineInstr *> IfThenElseStack;
      R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
      if (MFI->ShaderType == 1) {
        // Vertex shaders start by calling the fetch shader.
        BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
            getHWInstrDesc(CF_CALL_FS));
        CfCount++;
        MaxStack = 1;
      }
      std::vector<ClauseFile> FetchClauses, AluClauses;
      std::vector<MachineInstr *> LastAlu(1);
      std::vector<MachineInstr *> ToPopAfter;

      for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
          I != E;) {
        if (TII->usesTextureCache(I) || TII->usesVertexCache(I)) {
          DEBUG(dbgs() << CfCount << ":"; I->dump(););
          FetchClauses.push_back(MakeFetchClause(MBB, I));
          CfCount++;
          continue;
        }

        MachineBasicBlock::iterator MI = I;
        if (MI->getOpcode() != AMDGPU::ENDIF)
          LastAlu.back() = 0;
        if (MI->getOpcode() == AMDGPU::CF_ALU)
          LastAlu.back() = MI;
        I++;
        switch (MI->getOpcode()) {
        case AMDGPU::CF_ALU_PUSH_BEFORE:
          CurrentStack++;
          MaxStack = std::max(MaxStack, CurrentStack);
          HasPush = true;
          if (ST.hasCaymanISA() && CurrentLoopDepth > 1) {
            // Inside nested loops on Cayman, emit an explicit CF_PUSH_CM and
            // demote the clause head to a plain CF_ALU.
            BuildMI(MBB, MI, MBB.findDebugLoc(MI), TII->get(AMDGPU::CF_PUSH_CM))
                .addImm(CfCount + 1)
                .addImm(1);
            MI->setDesc(TII->get(AMDGPU::CF_ALU));
            CfCount++;
          }
          // Fall through: the clause itself is built like a regular CF_ALU.
        case AMDGPU::CF_ALU:
          I = MI;
          AluClauses.push_back(MakeALUClause(MBB, I));
          DEBUG(dbgs() << CfCount << ":"; MI->dump(););
          CfCount++;
          break;
        case AMDGPU::WHILELOOP: {
          CurrentStack += 4;
          CurrentLoopDepth++;
          MaxStack = std::max(MaxStack, CurrentStack);
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_WHILE_LOOP))
              .addImm(1);
          std::pair<unsigned, std::set<MachineInstr *> > Pair(CfCount,
              std::set<MachineInstr *>());
          Pair.second.insert(MIb);
          LoopStack.push_back(Pair);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::ENDLOOP: {
          CurrentStack -= 4;
          CurrentLoopDepth--;
          std::pair<unsigned, std::set<MachineInstr *> > Pair =
              LoopStack.back();
          LoopStack.pop_back();
          CounterPropagateAddr(Pair.second, CfCount);
          BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END_LOOP))
              .addImm(Pair.first + 1);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::IF_PREDICATE_SET: {
          LastAlu.push_back(0);
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_JUMP))
              .addImm(0)
              .addImm(0);
          IfThenElseStack.push_back(MIb);
          DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::ELSE: {
          MachineInstr *JumpInst = IfThenElseStack.back();
          IfThenElseStack.pop_back();
          CounterPropagateAddr(JumpInst, CfCount);
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_ELSE))
              .addImm(0)
              .addImm(0);
          DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
          IfThenElseStack.push_back(MIb);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::ENDIF: {
          CurrentStack--;
          if (LastAlu.back()) {
            // The last CF_ALU of the region will pop for us (see ToPopAfter).
            ToPopAfter.push_back(LastAlu.back());
          } else {
            MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
                getHWInstrDesc(CF_POP))
                .addImm(CfCount + 1)
                .addImm(1);
            DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
            CfCount++;
          }

          MachineInstr *IfOrElseInst = IfThenElseStack.back();
          IfThenElseStack.pop_back();
          CounterPropagateAddr(IfOrElseInst, CfCount);
          IfOrElseInst->getOperand(1).setImm(1);
          LastAlu.pop_back();
          MI->eraseFromParent();
          break;
        }
        case AMDGPU::BREAK: {
          CfCount++;
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_LOOP_BREAK))
              .addImm(0);
          LoopStack.back().second.insert(MIb);
          MI->eraseFromParent();
          break;
        }
        case AMDGPU::CONTINUE: {
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
              getHWInstrDesc(CF_LOOP_CONTINUE))
              .addImm(0);
          LoopStack.back().second.insert(MIb);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case AMDGPU::RETURN: {
          BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END));
          CfCount++;
          MI->eraseFromParent();
          if (CfCount % 2) {
            // Pad to an even number of CF entries before appending the clauses.
            BuildMI(MBB, I, MBB.findDebugLoc(MI), TII->get(AMDGPU::PAD));
            CfCount++;
          }
          // Now that the end of the program is known, splice the collected
          // clauses behind it and patch their clause-head addresses.
          for (unsigned i = 0, e = FetchClauses.size(); i < e; i++)
            EmitFetchClause(I, FetchClauses[i], CfCount);
          for (unsigned i = 0, e = AluClauses.size(); i < e; i++)
            EmitALUClause(I, AluClauses[i], CfCount);
        }
        default:
          if (TII->isExport(MI->getOpcode())) {
            DEBUG(dbgs() << CfCount << ":"; MI->dump(););
            CfCount++;
          }
          break;
        }
      }
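
      // Where ENDIF recorded a trailing CF_ALU, rewrite it as CF_ALU_POP_AFTER
      // so the pop is folded into the ALU clause itself.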
      for (unsigned i = 0, e = ToPopAfter.size(); i < e; ++i) {
        MachineInstr *Alu = ToPopAfter[i];
        BuildMI(MBB, Alu, MBB.findDebugLoc((MachineBasicBlock::iterator)Alu),
            TII->get(AMDGPU::CF_ALU_POP_AFTER))
            .addImm(Alu->getOperand(0).getImm())
            .addImm(Alu->getOperand(1).getImm())
            .addImm(Alu->getOperand(2).getImm())
            .addImm(Alu->getOperand(3).getImm())
            .addImm(Alu->getOperand(4).getImm())
            .addImm(Alu->getOperand(5).getImm())
            .addImm(Alu->getOperand(6).getImm())
            .addImm(Alu->getOperand(7).getImm())
            .addImm(Alu->getOperand(8).getImm());
        Alu->eraseFromParent();
      }
      MFI->StackSize = getHWStackSize(MaxStack, HasPush);
    }

    return false;
  }

  const char *getPassName() const {
    return "R600 Control Flow Finalizer Pass";
  }
};

char R600ControlFlowFinalizer::ID = 0;

} // end anonymous namespace

llvm::FunctionPass *llvm::createR600ControlFlowFinalizer(TargetMachine &TM) {
  return new R600ControlFlowFinalizer(TM);
}