class SILowerControlFlowPass : public MachineFunctionPass {
private:
+ static const unsigned SkipThreshold = 12;
+
static char ID;
const TargetInstrInfo *TII;
+ void Skip(MachineInstr &MI, MachineOperand &To);
+
void If(MachineInstr &MI);
void Else(MachineInstr &MI);
void Break(MachineInstr &MI);
void Loop(MachineInstr &MI);
void EndCf(MachineInstr &MI);
+ void Branch(MachineInstr &MI);
+
public:
SILowerControlFlowPass(TargetMachine &tm) :
MachineFunctionPass(ID), TII(tm.getInstrInfo()) { }
return new SILowerControlFlowPass(tm);
}
+void SILowerControlFlowPass::Skip(MachineInstr &From, MachineOperand &To) {
+
+ unsigned NumInstr = 0;
+
+ for (MachineBasicBlock *MBB = *From.getParent()->succ_begin();
+ NumInstr < SkipThreshold && MBB != To.getMBB() && !MBB->succ_empty();
+ MBB = *MBB->succ_begin()) {
+
+ for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
+ NumInstr < SkipThreshold && I != E; ++I) {
+
+ if (I->isBundle() || !I->isBundled())
+ ++NumInstr;
+ }
+ }
+
+ if (NumInstr < SkipThreshold)
+ return;
+
+ DebugLoc DL = From.getDebugLoc();
+ BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
+ .addOperand(To)
+ .addReg(AMDGPU::EXEC);
+}
+
void SILowerControlFlowPass::If(MachineInstr &MI) {
MachineBasicBlock &MBB = *MI.getParent();
.addReg(AMDGPU::EXEC)
.addReg(Reg);
+ Skip(MI, MI.getOperand(2));
+
MI.eraseFromParent();
}
.addReg(AMDGPU::EXEC)
.addReg(Dst);
+ Skip(MI, MI.getOperand(2));
+
MI.eraseFromParent();
}
MI.eraseFromParent();
}
+void SILowerControlFlowPass::Branch(MachineInstr &MI) {
+
+ MachineBasicBlock *Next = MI.getParent()->getNextNode();
+ MachineBasicBlock *Target = MI.getOperand(0).getMBB();
+ if (Target == Next)
+ MI.eraseFromParent();
+ else
+ assert(0);
+}
+
bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
bool HaveCf = false;
HaveCf = true;
EndCf(MI);
break;
+
+ case AMDGPU::S_BRANCH:
+ Branch(MI);
+ break;
}
}
}