//===-- SILowerControlFlow.cpp - Use predicates for control flow ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass lowers the pseudo control flow instructions to real
/// machine instructions.
///
/// All control flow is handled using predicated instructions and
/// a predicate stack. Each Scalar ALU controls the operations of 64 Vector
/// ALUs. The Scalar ALU can update the predicate for any of the Vector ALUs
/// by writing to the 64-bit EXEC register (each bit corresponds to a
/// single Vector ALU). Typically, for predicates, a Vector ALU will write
/// to its bit of the VCC register (like EXEC, VCC is 64 bits, one per
/// Vector ALU) and then the Scalar ALU will AND the VCC register with
/// EXEC to update the predicates.
///
/// For example:
/// %VCC = V_CMP_GT_F32 %VGPR1, %VGPR2
/// %SGPR0 = SI_IF %VCC
///   %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0
/// %SGPR0 = SI_ELSE %SGPR0
///   %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0
/// SI_END_CF %SGPR0
///
/// becomes:
///
/// %SGPR0 = S_AND_SAVEEXEC_B64 %VCC  // Save and update the exec mask
/// %SGPR0 = S_XOR_B64 %SGPR0, %EXEC  // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label0            // This instruction is an optional
///                                   // optimization which allows us to
///                                   // branch if all the bits of
///                                   // EXEC are zero.
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0 // Do the IF block of the branch
///
/// label0:
/// %SGPR0 = S_OR_SAVEEXEC_B64 %EXEC  // Restore the exec mask for the ELSE block
/// %EXEC = S_XOR_B64 %SGPR0, %EXEC   // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label1            // Use our branch optimization
///                                   // instruction again.
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0 // Do the ELSE block
/// label1:
/// %EXEC = S_OR_B64 %EXEC, %SGPR0    // Re-enable saved exec mask bits
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"

using namespace llvm;

namespace {

class SILowerControlFlowPass : public MachineFunctionPass {
private:
  static const unsigned SkipThreshold = 12;

  static char ID;
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  bool shouldSkip(MachineBasicBlock *From, MachineBasicBlock *To);

  void Skip(MachineInstr &From, MachineOperand &To);
  void SkipIfDead(MachineInstr &MI);

  void If(MachineInstr &MI);
  void Else(MachineInstr &MI);
  void Break(MachineInstr &MI);
  void IfBreak(MachineInstr &MI);
  void ElseBreak(MachineInstr &MI);
  void Loop(MachineInstr &MI);
  void EndCf(MachineInstr &MI);

  void Kill(MachineInstr &MI);
  void Branch(MachineInstr &MI);

  void LoadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset = 0);
  void computeIndirectRegAndOffset(unsigned VecReg, unsigned &Reg, int &Offset);
  void IndirectSrc(MachineInstr &MI);
  void IndirectDst(MachineInstr &MI);

public:
  SILowerControlFlowPass(TargetMachine &tm) :
    MachineFunctionPass(ID), TRI(nullptr), TII(nullptr) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Lower control flow instructions";
  }
};

} // End anonymous namespace

char SILowerControlFlowPass::ID = 0;

FunctionPass *llvm::createSILowerControlFlowPass(TargetMachine &tm) {
  return new SILowerControlFlowPass(tm);
}

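// Returns true if the path from block From toward block To is long enough
// (at least SkipThreshold instructions) that emitting a conditional branch
// to skip it pays for the cost of the branch itself. The threshold of 12 is
// a heuristic, not a precise cycle model.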
bool SILowerControlFlowPass::shouldSkip(MachineBasicBlock *From,
                                        MachineBasicBlock *To) {

  unsigned NumInstr = 0;

  for (MachineBasicBlock *MBB = From; MBB != To && !MBB->succ_empty();
       MBB = *MBB->succ_begin()) {

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         NumInstr < SkipThreshold && I != E; ++I) {

      if (I->isBundle() || !I->isBundled())
        if (++NumInstr >= SkipThreshold)
          return true;
    }
  }

  return false;
}

void SILowerControlFlowPass::Skip(MachineInstr &From, MachineOperand &To) {

  if (!shouldSkip(*From.getParent()->succ_begin(), To.getMBB()))
    return;

  DebugLoc DL = From.getDebugLoc();
  BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
    .addOperand(To);
}

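// Once all lanes are dead after a kill, the rest of a pixel shader is wasted
// work. When the remaining code is long enough to matter, emit a check that
// exports to the NULL target and ends the program as soon as EXEC is zero.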
void SILowerControlFlowPass::SkipIfDead(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  if (MBB.getParent()->getInfo<SIMachineFunctionInfo>()->getShaderType() !=
      ShaderType::PIXEL ||
      !shouldSkip(&MBB, &MBB.getParent()->back()))
    return;

  MachineBasicBlock::iterator Insert = &MI;
  ++Insert;

  // If the exec mask is non-zero, skip the next two instructions
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addImm(3);

  // Exec mask is zero: Export to NULL target...
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::EXP))
    .addImm(0)
    .addImm(0x09) // V_008DFC_SQ_EXP_NULL
    .addImm(0)
    .addImm(1)
    .addImm(1)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0);

  // ... and terminate wavefront
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));
}

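// Lower SI_IF. S_AND_SAVEEXEC_B64 copies EXEC into Reg and ANDs the branch
// condition into EXEC, so only lanes taking the "then" side stay active; the
// XOR leaves the still-pending "else" lanes in Reg for SI_ELSE / SI_END_CF.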
void SILowerControlFlowPass::If(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), Reg)
          .addReg(Vcc);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), Reg)
          .addReg(AMDGPU::EXEC)
          .addReg(Reg);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}

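// Lower SI_ELSE. The S_OR_SAVEEXEC_B64 goes to the top of the block so it
// runs before anything else: it saves EXEC into Dst and ORs the pending
// "else" lanes into EXEC; the XOR then turns off the lanes that already ran
// the "then" side.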
void SILowerControlFlowPass::Else(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst)
          .addReg(Src); // Saved EXEC

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
          .addReg(AMDGPU::EXEC)
          .addReg(Dst);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}

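// Lower SI_BREAK: OR the currently active lanes into the accumulated "broken
// out" mask that the enclosing SI_LOOP tests at the bottom of the loop.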
void SILowerControlFlowPass::Break(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
          .addReg(AMDGPU::EXEC)
          .addReg(Src);

  MI.eraseFromParent();
}

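// Lower SI_IF_BREAK: OR the lanes whose break condition (in Vcc) is set into
// the loop's accumulated break mask.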
void SILowerControlFlowPass::IfBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
          .addReg(Vcc)
          .addReg(Src);

  MI.eraseFromParent();
}

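// Lower SI_ELSE_BREAK: merge the break mask accumulated before the else
// region with the one produced inside it.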
void SILowerControlFlowPass::ElseBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Saved = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
          .addReg(Saved)
          .addReg(Src);

  MI.eraseFromParent();
}

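// Lower SI_LOOP: turn off the lanes that have broken out of the loop and
// branch back to the header while any lane remains active.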
void SILowerControlFlowPass::Loop(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Src = MI.getOperand(0).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::EXEC)
          .addReg(AMDGPU::EXEC)
          .addReg(Src);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addOperand(MI.getOperand(1));

  MI.eraseFromParent();
}

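// Lower SI_END_CF: re-enable the lanes that were saved when the control-flow
// region was entered. Inserted at the top of the join block so it takes
// effect before any of the block's other instructions.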
void SILowerControlFlowPass::EndCf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
          .addReg(AMDGPU::EXEC)
          .addReg(Reg);

  MI.eraseFromParent();
}

void SILowerControlFlowPass::Branch(MachineInstr &MI) {
  if (MI.getOperand(0).getMBB() == MI.getParent()->getNextNode())
    MI.eraseFromParent();

  // If these aren't equal, this is probably an infinite loop.
}

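// Lower SI_KILL: deactivate lanes whose operand is negative. A constant
// operand is resolved at compile time (clear EXEC or do nothing); otherwise
// V_CMPX_LE_F32 writes the per-lane result of 0 <= Op straight into EXEC.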
void SILowerControlFlowPass::Kill(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  const MachineOperand &Op = MI.getOperand(0);

#ifndef NDEBUG
  const SIMachineFunctionInfo *MFI
    = MBB.getParent()->getInfo<SIMachineFunctionInfo>();
  // Kill is only allowed in pixel / geometry shaders.
  assert(MFI->getShaderType() == ShaderType::PIXEL ||
         MFI->getShaderType() == ShaderType::GEOMETRY);
#endif

  // Clear this thread from the exec mask if the operand is negative
  if (Op.isImm()) {
    // Constant operand: Set exec mask to 0 or do nothing
    if (Op.getImm() & 0x80000000) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
        .addImm(0);
    }
  } else {
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32))
      .addImm(0)
      .addOperand(Op);
  }

  MI.eraseFromParent();
}

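// Write the indirect-addressing index into M0 and insert MovRel next to it.
// A uniform (SGPR) index is a plain copy. A divergent (VGPR) index requires
// a "waterfall" loop: V_READFIRSTLANE_B32 picks the index of the first
// active lane, MovRel runs for every lane sharing that index, and those
// lanes are then masked off until all of them have been handled.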
void SILowerControlFlowPass::LoadM0(MachineInstr &MI, MachineInstr *MovRel, int Offset) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I = MI;

  unsigned Save = MI.getOperand(1).getReg();
  unsigned Idx = MI.getOperand(3).getReg();

  if (AMDGPU::SReg_32RegClass.contains(Idx)) {
    if (Offset) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
              .addReg(Idx)
              .addImm(Offset);
    } else {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
              .addReg(Idx);
    }
    MBB.insert(I, MovRel);
  } else {

    assert(AMDGPU::SReg_64RegClass.contains(Save));
    assert(AMDGPU::VGPR_32RegClass.contains(Idx));

    // Save the EXEC mask
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), Save)
            .addReg(AMDGPU::EXEC);

    // Read the next variant into VCC (lower 32 bits) <- also loop target
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32),
            AMDGPU::VCC_LO)
            .addReg(Idx);

    // Move index from VCC into M0
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
            .addReg(AMDGPU::VCC_LO);

    // Compare the just read M0 value to all possible Idx values
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32))
            .addReg(AMDGPU::M0)
            .addReg(Idx);

    // Update EXEC, save the original EXEC value to VCC
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
            .addReg(AMDGPU::VCC);

    if (Offset) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
              .addReg(AMDGPU::M0)
              .addImm(Offset);
    }

    // Do the actual move
    MBB.insert(I, MovRel);

    // Update EXEC, switch all done bits to 0 and all todo bits to 1
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
            .addReg(AMDGPU::EXEC)
            .addReg(AMDGPU::VCC);

    // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
      .addImm(-7);

    // Restore EXEC
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
            .addReg(Save);
  }
  MI.eraseFromParent();
}

/// \param VecReg   The register which holds element zero of the vector being
///                 addressed into.
/// \param[out] Reg The base register to use in the indirect addressing
///                 instruction.
/// \param[in,out] Offset As an input, this is the constant offset part of the
///                 indirect index. e.g. v0 = v[VecReg + Offset].
///                 As an output, this is a constant value that needs to be
///                 added to the value stored in M0.
void SILowerControlFlowPass::computeIndirectRegAndOffset(unsigned VecReg,
                                                         unsigned &Reg,
                                                         int &Offset) {
  unsigned SubReg = TRI->getSubReg(VecReg, AMDGPU::sub0);
  if (!SubReg)
    SubReg = VecReg;

  const TargetRegisterClass *RC = TRI->getPhysRegClass(SubReg);
  int RegIdx = TRI->getHWRegIndex(SubReg) + Offset;

  if (RegIdx < 0) {
    Offset = RegIdx;
    RegIdx = 0;
  } else {
    Offset = 0;
  }

  Reg = RC->getRegister(RegIdx);
}

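// Lower SI_INDIRECT_SRC: read one element of a vector of VGPRs through
// V_MOVRELS_B32, whose source register number is offset by the value in M0.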
void SILowerControlFlowPass::IndirectSrc(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vec = MI.getOperand(2).getReg();
  int Off = MI.getOperand(4).getImm();
  unsigned Reg;

  computeIndirectRegAndOffset(Vec, Reg, Off);

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
            .addReg(Reg)
            .addReg(AMDGPU::M0, RegState::Implicit)
            .addReg(Vec, RegState::Implicit);

  LoadM0(MI, MovRel, Off);
}

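// Lower SI_INDIRECT_DST_*: write Val into one element of a vector of VGPRs
// through V_MOVRELD_B32, whose destination register number is offset by the
// value in M0.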
void SILowerControlFlowPass::IndirectDst(MachineInstr &MI) {

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  int Off = MI.getOperand(4).getImm();
  unsigned Val = MI.getOperand(5).getReg();
  unsigned Reg;

  computeIndirectRegAndOffset(Dst, Reg, Off);

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32))
            .addReg(Reg, RegState::Define)
            .addReg(Val)
            .addReg(AMDGPU::M0, RegState::Implicit)
            .addReg(Dst, RegState::Implicit);

  LoadM0(MI, MovRel, Off);
}

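// Walk every instruction in the function, lowering the SI_* control-flow
// pseudos in place, then emit the whole-quad-mode and flat-scratch prologues
// if anything in the function needs them.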
bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  TRI =
      static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  bool HaveKill = false;
  bool NeedWQM = false;
  bool NeedFlat = false;
  unsigned Depth = 0;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);

      MachineInstr &MI = *I;
      if (TII->isWQM(MI.getOpcode()) || TII->isDS(MI.getOpcode()))
        NeedWQM = true;

      // Flat uses m0 in case it needs to access LDS.
      if (TII->isFLAT(MI.getOpcode()))
        NeedFlat = true;

      switch (MI.getOpcode()) {
        default: break;
        case AMDGPU::SI_IF:
          ++Depth;
          If(MI);
          break;

        case AMDGPU::SI_ELSE:
          Else(MI);
          break;

        case AMDGPU::SI_BREAK:
          Break(MI);
          break;

        case AMDGPU::SI_IF_BREAK:
          IfBreak(MI);
          break;

        case AMDGPU::SI_ELSE_BREAK:
          ElseBreak(MI);
          break;

        case AMDGPU::SI_LOOP:
          ++Depth;
          Loop(MI);
          break;

        case AMDGPU::SI_END_CF:
          if (--Depth == 0 && HaveKill) {
            SkipIfDead(MI);
            HaveKill = false;
          }
          EndCf(MI);
          break;

        case AMDGPU::SI_KILL:
          if (Depth == 0)
            SkipIfDead(MI);
          else
            HaveKill = true;
          Kill(MI);
          break;

        case AMDGPU::S_BRANCH:
          Branch(MI);
          break;

        case AMDGPU::SI_INDIRECT_SRC:
          IndirectSrc(MI);
          break;

        case AMDGPU::SI_INDIRECT_DST_V1:
        case AMDGPU::SI_INDIRECT_DST_V2:
        case AMDGPU::SI_INDIRECT_DST_V4:
        case AMDGPU::SI_INDIRECT_DST_V8:
        case AMDGPU::SI_INDIRECT_DST_V16:
          IndirectDst(MI);
          break;
      }
    }
  }

  if (NeedWQM && MFI->getShaderType() == ShaderType::PIXEL) {
    MachineBasicBlock &MBB = MF.front();
    BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
            AMDGPU::EXEC).addReg(AMDGPU::EXEC);
  }

  // FIXME: This seems inappropriate to do here.
  if (NeedFlat && MFI->IsKernel) {
    // Insert the prologue initializing the SGPRs pointing to the scratch space
    // for flat accesses.
    const MachineFrameInfo *FrameInfo = MF.getFrameInfo();

    // TODO: What to use with function calls?

    // FIXME: This reports the stack size used by the scratch buffer rather
    // than by registers as well.
    uint64_t StackSizeBytes = FrameInfo->getStackSize();

    int IndirectBegin
      = static_cast<const AMDGPUInstrInfo*>(TII)->getIndirectIndexBegin(MF);
    // Convert register index to 256-byte unit.
    uint64_t StackOffset = IndirectBegin < 0 ? 0 : (4 * IndirectBegin / 256);

    assert((StackSizeBytes < 0xffff) && (StackOffset < 0xffff) &&
           "Stack limits should be smaller than 16 bits");

    // Initialize the flat scratch register pair.
    // TODO: Can we use one s_mov_b64 here?

    // Offset is in units of 256 bytes.
    MachineBasicBlock &MBB = MF.front();
    DebugLoc NoDL;
    MachineBasicBlock::iterator Start = MBB.getFirstNonPHI();
    const MCInstrDesc &SMovK = TII->get(AMDGPU::S_MOVK_I32);

    assert(isInt<16>(StackOffset) && isInt<16>(StackSizeBytes));

    BuildMI(MBB, Start, NoDL, SMovK, AMDGPU::FLAT_SCR_LO)
      .addImm(StackOffset);

    // Documentation says size is "per-thread scratch size in bytes"
    BuildMI(MBB, Start, NoDL, SMovK, AMDGPU::FLAT_SCR_HI)
      .addImm(StackSizeBytes);
  }

  return true;
}