//===-- SILowerControlFlow.cpp - Use predicates for control flow ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass lowers the pseudo control flow instructions to real
/// machine instructions.
///
/// All control flow is handled using predicated instructions and
/// a predicate stack. Each Scalar ALU controls the operations of 64 Vector
/// ALUs. The Scalar ALU can update the predicate for any of the Vector ALUs
/// by writing to the 64-bit EXEC register (each bit corresponds to a
/// single vector ALU). Typically, for predicates, a vector ALU will write
/// to its bit of the VCC register (like EXEC, VCC is 64 bits, one for each
/// Vector ALU) and then the Scalar ALU will AND the VCC register with the
/// EXEC register to update the predicates.
///
/// For example:
/// %VCC = V_CMP_GT_F32 %VGPR1, %VGPR2
/// %SGPR0 = SI_IF %VCC
///   %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0
/// %SGPR0 = SI_ELSE %SGPR0
///   %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0
/// SI_END_CF %SGPR0
///
/// becomes:
///
/// %SGPR0 = S_AND_SAVEEXEC_B64 %VCC  // Save and update the exec mask
/// %SGPR0 = S_XOR_B64 %SGPR0, %EXEC  // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label0            // This instruction is an optional
///                                   // optimization which allows us to
///                                   // branch if all the bits of
///                                   // EXEC are zero.
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0 // Do the IF block of the branch
///
/// label0:
/// %SGPR0 = S_OR_SAVEEXEC_B64 %EXEC  // Restore the exec mask for the ELSE block
/// %EXEC = S_XOR_B64 %SGPR0, %EXEC   // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label1            // Use our branch optimization
///                                   // instruction again.
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0 // Do the ELSE block
/// label1:
/// %EXEC = S_OR_B64 %EXEC, %SGPR0    // Re-enable saved exec mask bits
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"

using namespace llvm;

namespace {

class SILowerControlFlowPass : public MachineFunctionPass {
private:
  static const unsigned SkipThreshold = 12;

  static char ID;
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  bool shouldSkip(MachineBasicBlock *From, MachineBasicBlock *To);

  void Skip(MachineInstr &From, MachineOperand &To);
  void SkipIfDead(MachineInstr &MI);

  void If(MachineInstr &MI);
  void Else(MachineInstr &MI);
  void Break(MachineInstr &MI);
  void IfBreak(MachineInstr &MI);
  void ElseBreak(MachineInstr &MI);
  void Loop(MachineInstr &MI);
  void EndCf(MachineInstr &MI);

  void Kill(MachineInstr &MI);
  void Branch(MachineInstr &MI);

  void InitM0ForLDS(MachineBasicBlock::iterator MI);
  void LoadM0(MachineInstr &MI, MachineInstr *MovRel);
  void IndirectSrc(MachineInstr &MI);
  void IndirectDst(MachineInstr &MI);

public:
  SILowerControlFlowPass(TargetMachine &tm) :
    MachineFunctionPass(ID), TRI(nullptr), TII(nullptr) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Lower control flow instructions";
  }
};

} // End anonymous namespace

char SILowerControlFlowPass::ID = 0;

FunctionPass *llvm::createSILowerControlFlowPass(TargetMachine &tm) {
  return new SILowerControlFlowPass(tm);
}

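/// Count instructions along the single-successor chain from \p From towards
/// \p To. Returns true once SkipThreshold instructions have been seen,
/// i.e. the region is long enough that emitting a skip branch over it pays
/// for itself.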
bool SILowerControlFlowPass::shouldSkip(MachineBasicBlock *From,
                                        MachineBasicBlock *To) {
  unsigned NumInstr = 0;

  for (MachineBasicBlock *MBB = From; MBB != To && !MBB->succ_empty();
       MBB = *MBB->succ_begin()) {

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         NumInstr < SkipThreshold && I != E; ++I) {

      // Count bundle headers and stand-alone instructions, but not the
      // instructions inside a bundle, so each bundle is counted once.
      if (I->isBundle() || !I->isBundled())
        if (++NumInstr >= SkipThreshold)
          return true;
    }
  }

  return false;
}

void SILowerControlFlowPass::Skip(MachineInstr &From, MachineOperand &To) {
  if (!shouldSkip(*From.getParent()->succ_begin(), To.getMBB()))
    return;

  DebugLoc DL = From.getDebugLoc();
  BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
          .addOperand(To)
          .addReg(AMDGPU::EXEC);
}

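/// For pixel shaders, emit code that exports to the NULL target and ends the
/// program as soon as the exec mask becomes zero, rather than running the
/// remainder of the function with every lane disabled.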
void SILowerControlFlowPass::SkipIfDead(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  if (MBB.getParent()->getInfo<SIMachineFunctionInfo>()->getShaderType() !=
      ShaderType::PIXEL ||
      !shouldSkip(&MBB, &MBB.getParent()->back()))
    return;

  MachineBasicBlock::iterator Insert = &MI;
  ++Insert;

  // If the exec mask is non-zero, skip the next two instructions
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
          .addImm(3)
          .addReg(AMDGPU::EXEC);

  // Exec mask is zero: Export to NULL target...
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::EXP))
          .addImm(0)
          .addImm(0x09) // V_008DFC_SQ_EXP_NULL
          .addImm(0)
          .addImm(1)
          .addImm(1)
          .addReg(AMDGPU::VGPR0)
          .addReg(AMDGPU::VGPR0)
          .addReg(AMDGPU::VGPR0)
          .addReg(AMDGPU::VGPR0);

  // ... and terminate wavefront
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));
}

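/// Lower SI_IF: Reg receives the saved exec mask, EXEC is restricted to the
/// lanes where the condition holds, and Reg ends up holding the lanes that
/// still need to run the ELSE side.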
void SILowerControlFlowPass::If(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();

  // Reg = EXEC, then EXEC &= Vcc: only lanes passing the condition stay on
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), Reg)
          .addReg(Vcc);

  // Reg = new EXEC ^ old EXEC: the lanes still owed the ELSE side
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), Reg)
          .addReg(AMDGPU::EXEC)
          .addReg(Reg);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}

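/// Lower SI_ELSE: OR the lanes saved by SI_IF back into EXEC, then XOR away
/// the lanes that already ran the IF side so only the ELSE lanes are active.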
void SILowerControlFlowPass::Else(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst)
          .addReg(Src); // Saved EXEC

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
          .addReg(AMDGPU::EXEC)
          .addReg(Dst);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}

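/// Lower SI_BREAK: every currently active lane has broken out of the loop,
/// so add EXEC to the accumulated break mask.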
void SILowerControlFlowPass::Break(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  // Dst = Src | EXEC: every currently active lane joins the break mask
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
          .addReg(AMDGPU::EXEC)
          .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlowPass::IfBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  // Dst = Src | Vcc: lanes where the condition holds join the break mask
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
          .addReg(Vcc)
          .addReg(Src);

  MI.eraseFromParent();
}

void SILowerControlFlowPass::ElseBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Saved = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  // Dst = Src | Saved: merge the saved ELSE mask into the break mask
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
          .addReg(Saved)
          .addReg(Src);

  MI.eraseFromParent();
}

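/// Lower SI_LOOP: disable the lanes collected in the break mask, then branch
/// back to the loop header while any lane remains active.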
void SILowerControlFlowPass::Loop(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Src = MI.getOperand(0).getReg();

  // Turn off all lanes that have broken out of the loop
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::EXEC)
          .addReg(AMDGPU::EXEC)
          .addReg(Src);

  // Branch back to the loop header while any lane is still active
  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
          .addOperand(MI.getOperand(1))
          .addReg(AMDGPU::EXEC);

  MI.eraseFromParent();
}

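/// Lower SI_END_CF: re-enable the lanes that were masked off for the control
/// flow region that is being closed.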
void SILowerControlFlowPass::EndCf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
          .addReg(AMDGPU::EXEC)
          .addReg(Reg);

  MI.eraseFromParent();
}

void SILowerControlFlowPass::Branch(MachineInstr &MI) {
  // A branch to the block that immediately follows is redundant; drop it.
  if (MI.getOperand(0).getMBB() == MI.getParent()->getNextNode())
    MI.eraseFromParent();

  // If these aren't equal, this is probably an infinite loop.
}

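/// Lower SI_KILL: a negative constant operand clears EXEC outright; for a
/// register operand, V_CMPX_LE_F32 disables every lane holding a negative
/// value.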
void SILowerControlFlowPass::Kill(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  const MachineOperand &Op = MI.getOperand(0);

#ifndef NDEBUG
  const SIMachineFunctionInfo *MFI
    = MBB.getParent()->getInfo<SIMachineFunctionInfo>();
  // Kill is only allowed in pixel / geometry shaders.
  assert(MFI->getShaderType() == ShaderType::PIXEL ||
         MFI->getShaderType() == ShaderType::GEOMETRY);
#endif

  // Clear this thread from the exec mask if the operand is negative
  if (Op.isImm() || Op.isFPImm()) {
    // Constant operand: Set exec mask to 0 or do nothing
    if (Op.isImm() ? (Op.getImm() & 0x80000000) :
        Op.getFPImm()->isNegative()) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
              .addImm(0);
    }
  } else {
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32), AMDGPU::VCC)
            .addImm(0)
            .addOperand(Op);
  }

  MI.eraseFromParent();
}

/// The m0 register stores the maximum allowable address for LDS reads and
/// writes. Its value must be at least the size in bytes of LDS allocated by
/// the shader. For simplicity, we set it to the maximum possible value.
void SILowerControlFlowPass::InitM0ForLDS(MachineBasicBlock::iterator MI) {
  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(AMDGPU::S_MOV_B32),
          AMDGPU::M0).addImm(0xffffffff);
}

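/// Set M0 from the index operand of an indirect access, insert \p MovRel and
/// re-initialize M0 for LDS afterwards. A uniform (SGPR) index needs only an
/// S_MOV_B32; a divergent (VGPR) index is handled with a "waterfall" loop
/// that uses V_READFIRSTLANE_B32 to process each distinct index value with
/// exactly the matching lanes enabled.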
void SILowerControlFlowPass::LoadM0(MachineInstr &MI, MachineInstr *MovRel) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I = MI;

  unsigned Save = MI.getOperand(1).getReg();
  unsigned Idx = MI.getOperand(3).getReg();

  if (AMDGPU::SReg_32RegClass.contains(Idx)) {
    // Uniform (SGPR) index: move it straight into M0
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
            .addReg(Idx);
    MBB.insert(I, MovRel);
  } else {
    assert(AMDGPU::SReg_64RegClass.contains(Save));
    assert(AMDGPU::VReg_32RegClass.contains(Idx));

    // Save the EXEC mask
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), Save)
            .addReg(AMDGPU::EXEC);

    // Read the next variant into VCC (lower 32 bits) <- also loop target
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32),
            AMDGPU::VCC_LO)
            .addReg(Idx);

    // Move index from VCC into M0
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
            .addReg(AMDGPU::VCC_LO);

    // Compare the just read M0 value to all possible Idx values
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32), AMDGPU::VCC)
            .addReg(AMDGPU::M0)
            .addReg(Idx);

    // Update EXEC, save the original EXEC value to VCC
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
            .addReg(AMDGPU::VCC);

    // Do the actual move
    MBB.insert(I, MovRel);

    // Update EXEC, switch all done bits to 0 and all todo bits to 1
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
            .addReg(AMDGPU::EXEC)
            .addReg(AMDGPU::VCC);

    // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
            .addImm(-7)
            .addReg(AMDGPU::EXEC);

    // Restore EXEC
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
            .addReg(Save);
  }

  // FIXME: Are there any values other than the LDS address clamp that need to
  // be stored in the m0 register and may be live for more than a few
  // instructions? If so, we should save the m0 register at the beginning
  // of this function and restore it here.
  // FIXME: Add support for LDS direct loads.
  InitM0ForLDS(&MI);

  MI.eraseFromParent();
}

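/// Lower SI_INDIRECT_SRC: build a V_MOVRELS_B32 that reads the selected
/// element of the source vector and let LoadM0 insert it.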
void SILowerControlFlowPass::IndirectSrc(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vec = MI.getOperand(2).getReg();
  unsigned Off = MI.getOperand(4).getImm();
  unsigned SubReg = TRI->getSubReg(Vec, AMDGPU::sub0);
  if (!SubReg)
    SubReg = Vec;

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
            .addReg(SubReg + Off)
            .addReg(AMDGPU::M0, RegState::Implicit)
            .addReg(Vec, RegState::Implicit);

  LoadM0(MI, MovRel);
}

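/// Lower SI_INDIRECT_DST_V*: build a V_MOVRELD_B32 that writes the selected
/// element of the destination vector and let LoadM0 insert it.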
void SILowerControlFlowPass::IndirectDst(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Off = MI.getOperand(4).getImm();
  unsigned Val = MI.getOperand(5).getReg();
  unsigned SubReg = TRI->getSubReg(Dst, AMDGPU::sub0);
  if (!SubReg)
    SubReg = Dst;

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32))
            .addReg(SubReg + Off, RegState::Define)
            .addReg(Val)
            .addReg(AMDGPU::M0, RegState::Implicit)
            .addReg(Dst, RegState::Implicit);

  LoadM0(MI, MovRel);
}

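/// Lower all SI_* control flow pseudos in the function and record whether M0
/// must be initialized for LDS and whether the pixel shader needs to start in
/// whole quad mode (WQM).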
bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  TRI =
      static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  bool HaveKill = false;
  bool NeedM0 = false;
  bool NeedWQM = false;
  unsigned Depth = 0;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);

      MachineInstr &MI = *I;
      // Any LDS access requires M0 to be initialized, and implies WQM
      if (TII->isDS(MI.getOpcode())) {
        NeedM0 = true;
        NeedWQM = true;
      }

      switch (MI.getOpcode()) {
        default: break;
        case AMDGPU::SI_IF:
          ++Depth;
          If(MI);
          break;

        case AMDGPU::SI_ELSE:
          Else(MI);
          break;

        case AMDGPU::SI_BREAK:
          Break(MI);
          break;

        case AMDGPU::SI_IF_BREAK:
          IfBreak(MI);
          break;

        case AMDGPU::SI_ELSE_BREAK:
          ElseBreak(MI);
          break;

        case AMDGPU::SI_LOOP:
          ++Depth;
          Loop(MI);
          break;

        case AMDGPU::SI_END_CF:
          if (--Depth == 0 && HaveKill) {
            SkipIfDead(MI);
            HaveKill = false;
          }
          EndCf(MI);
          break;

        case AMDGPU::SI_KILL:
          if (Depth == 0)
            SkipIfDead(MI);
          else
            HaveKill = true;
          Kill(MI);
          break;

        case AMDGPU::S_BRANCH:
          Branch(MI);
          break;

        case AMDGPU::SI_INDIRECT_SRC:
          IndirectSrc(MI);
          break;

        case AMDGPU::SI_INDIRECT_DST_V1:
        case AMDGPU::SI_INDIRECT_DST_V2:
        case AMDGPU::SI_INDIRECT_DST_V4:
        case AMDGPU::SI_INDIRECT_DST_V8:
        case AMDGPU::SI_INDIRECT_DST_V16:
          IndirectDst(MI);
          break;

        case AMDGPU::V_INTERP_P1_F32:
        case AMDGPU::V_INTERP_P2_F32:
        case AMDGPU::V_INTERP_MOV_F32:
          NeedWQM = true;
          break;
      }
    }
  }

  if (NeedM0) {
    MachineBasicBlock &MBB = MF.front();
    // Initialize M0 to a value that won't cause LDS access to be discarded
    // due to offset clamping
    InitM0ForLDS(MBB.getFirstNonPHI());
  }

  if (NeedWQM && MFI->getShaderType() == ShaderType::PIXEL) {
    MachineBasicBlock &MBB = MF.front();
    // Pixel shaders that interpolate or use LDS start in whole quad mode
    BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
            AMDGPU::EXEC).addReg(AMDGPU::EXEC);
  }

  return true;
}