//===-- R600MachineScheduler.cpp - R600 Scheduler Interface -*- C++ -*-----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Machine Scheduler interface
// TODO: Scheduling is optimised for VLIW4 arch, modify it to support TRANS slot
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "misched"

#include "R600MachineScheduler.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/PassManager.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
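
// The strategy schedules bottom-up (pickNode always reports IsTopNode =
// false): it alternates between ALU, fetch and "other" clauses, and packs
// ALU instructions into VLIW groups of up to four slots (X, Y, Z, W).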
void R600SchedStrategy::initialize(ScheduleDAGMI *dag) {
  DAG = dag;
  TII = static_cast<const R600InstrInfo*>(DAG->TII);
  TRI = static_cast<const R600RegisterInfo*>(DAG->TRI);
  MRI = &DAG->MRI;
  CurInstKind = IDOther;
  CurEmitted = 0;
  OccupedSlotsMask = 15;
  InstKindLimit[IDAlu] = TII->getMaxAlusPerClause();
  InstKindLimit[IDOther] = 32;

  const AMDGPUSubtarget &ST = DAG->TM.getSubtarget<AMDGPUSubtarget>();
  InstKindLimit[IDFetch] = ST.getTexVTXClauseSize();
}
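
// Append the contents of QSrc to QDst, then empty QSrc.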
void R600SchedStrategy::MoveUnits(std::vector<SUnit *> &QSrc,
                                  std::vector<SUnit *> &QDst)
{
  QDst.insert(QDst.end(), QSrc.begin(), QSrc.end());
  QSrc.clear();
}
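
// Upper bound on the number of in-flight wavefronts when each one needs
// GPRCount registers. The 248 constant is presumably the usable per-SIMD GPR
// budget on R600-family hardware (256 registers minus a few reserved ones);
// that reading is an assumption, the source gives no derivation.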
static
unsigned getWFCountLimitedByGPR(unsigned GPRCount) {
  assert(GPRCount && "GPRCount cannot be 0");
  return 248 / GPRCount;
}

SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
  SUnit *SU = 0;
  NextInstKind = IDOther;

  IsTopNode = false;

  // Check if we might want to switch the current clause type.
  bool AllowSwitchToAlu = (CurEmitted >= InstKindLimit[CurInstKind]) ||
      (Available[CurInstKind].empty());
  bool AllowSwitchFromAlu = (CurEmitted >= InstKindLimit[CurInstKind]) &&
      (!Available[IDFetch].empty() || !Available[IDOther].empty());

  if (CurInstKind == IDAlu && !Available[IDFetch].empty()) {
    // We use the heuristic provided by the AMD Accelerated Parallel Processing
    // OpenCL Programming Guide:
    // The approx. number of WFs that allows TEX insts to hide ALU insts is:
    // 500 (cycles for TEX) / (AluFetchRatio * 8 (cycles for ALU))
    float ALUFetchRatioEstimate =
        (AluInstCount + AvailablesAluCount() + Pending[IDAlu].size()) /
        (float)(FetchInstCount + Available[IDFetch].size());
    unsigned NeededWF = 62.5f / ALUFetchRatioEstimate;
    DEBUG(dbgs() << NeededWF << " approx. Wavefronts Required\n");
    // We assume the local GPR requirements to be "dominated" by the
    // requirement of the TEX clause (which consumes 128-bit regs); ALU insts
    // before and after TEX are indeed likely to consume or generate values
    // from/for the TEX clause.
    // Available[IDFetch].size() * 2 : GPRs required in the Fetch clause.
    // We assume that fetch instructions are either TnXYZW = TEX TnXYZW (need
    // one GPR) or TmXYZW = TEX TnXYZW (need 2 GPRs).
    // (TODO: use RegisterPressure)
    // If we are going to use too many GPRs, we flush Fetch instructions to
    // lower register pressure on 128-bit regs.
    unsigned NearRegisterRequirement = 2 * Available[IDFetch].size();
    if (NeededWF > getWFCountLimitedByGPR(NearRegisterRequirement))
      AllowSwitchFromAlu = true;
  }
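
  // Worked example of the heuristic above (illustrative numbers, not from
  // the guide): with 100 ALU instructions counted against 10 fetch
  // instructions, ALUFetchRatioEstimate = 10 and NeededWF = 62.5 / 10 = 6.
  // The fetch clause would then need about 2 * 10 = 20 GPRs, and
  // getWFCountLimitedByGPR(20) = 248 / 20 = 12 >= 6, so the ALU clause may
  // keep going; were the GPR-limited count below 6, the fetch instructions
  // would be flushed by forcing AllowSwitchFromAlu.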

  // We want to schedule AR defs as soon as possible to make sure they aren't
  // put in a different ALU clause from their uses.
  if (!SU && !UnscheduledARDefs.empty()) {
    SU = UnscheduledARDefs[0];
    UnscheduledARDefs.erase(UnscheduledARDefs.begin());
    NextInstKind = IDAlu;
  }

  if (!SU && ((AllowSwitchToAlu && CurInstKind != IDAlu) ||
      (!AllowSwitchFromAlu && CurInstKind == IDAlu))) {
    // Try to pick an ALU instruction.
    SU = pickAlu();
    if (!SU && !PhysicalRegCopy.empty()) {
      SU = PhysicalRegCopy.front();
      PhysicalRegCopy.erase(PhysicalRegCopy.begin());
    }
    if (SU) {
      if (CurEmitted >= InstKindLimit[IDAlu])
        CurEmitted = 0;
      NextInstKind = IDAlu;
    }
  }

  if (!SU) {
    // Try to pick a fetch instruction.
    SU = pickOther(IDFetch);
    if (SU)
      NextInstKind = IDFetch;
  }

  // Try to pick some other instruction.
  if (!SU) {
    SU = pickOther(IDOther);
    if (SU)
      NextInstKind = IDOther;
  }

  // We want to schedule the AR uses as late as possible to make sure that
  // the AR defs have been released.
  if (!SU && !UnscheduledARUses.empty()) {
    SU = UnscheduledARUses[0];
    UnscheduledARUses.erase(UnscheduledARUses.begin());
    NextInstKind = IDAlu;
  }

  DEBUG(
      if (SU) {
        dbgs() << " ** Pick node **\n";
        SU->dump(DAG);
      } else {
        dbgs() << "NO NODE \n";
        for (unsigned i = 0; i < DAG->SUnits.size(); i++) {
          const SUnit &S = DAG->SUnits[i];
          if (!S.isScheduled)
            S.dump(DAG);
        }
      }
  );

  return SU;
}
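
// Book-keeping once the picked node is scheduled: reset counters on a clause
// type switch and track how many instruction words the clause has used.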
void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
  if (NextInstKind != CurInstKind) {
    DEBUG(dbgs() << "Instruction Type Switch\n");
    if (NextInstKind != IDAlu)
      OccupedSlotsMask = 15;
    CurEmitted = 0;
    CurInstKind = NextInstKind;
  }

  if (CurInstKind == IDAlu) {
    ++AluInstCount;
    switch (getAluKind(SU)) {
    case AluT_XYZW:
      CurEmitted += 4;
      break;
    case AluDiscarded:
      break;
    default: {
      ++CurEmitted;
      for (MachineInstr::mop_iterator It = SU->getInstr()->operands_begin(),
          E = SU->getInstr()->operands_end(); It != E; ++It) {
        MachineOperand &MO = *It;
        // An inline literal occupies an extra word in the instruction group.
        if (MO.isReg() && MO.getReg() == AMDGPU::ALU_LITERAL_X)
          ++CurEmitted;
      }
    }
    }
  } else {
    ++CurEmitted;
  }

  DEBUG(dbgs() << CurEmitted << " Instructions Emitted in this clause\n");

  if (CurInstKind != IDFetch) {
    MoveUnits(Pending[IDFetch], Available[IDFetch]);
  }
}
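
// Returns true for a COPY whose source operand is a physical register. Such
// copies go into a dedicated queue and are only picked when no bundleable ALU
// instruction is available (see pickNode above).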
static bool
isPhysicalRegCopy(MachineInstr *MI) {
  if (MI->getOpcode() != AMDGPU::COPY)
    return false;

  return !TargetRegisterInfo::isVirtualRegister(MI->getOperand(1).getReg());
}

void R600SchedStrategy::releaseTopNode(SUnit *SU) {
  DEBUG(dbgs() << "Top Releasing "; SU->dump(DAG););
}

void R600SchedStrategy::releaseBottomNode(SUnit *SU) {
  DEBUG(dbgs() << "Bottom Releasing "; SU->dump(DAG););
  if (isPhysicalRegCopy(SU->getInstr())) {
    PhysicalRegCopy.push_back(SU);
    return;
  }

  int IK = getInstKind(SU);

  // Check for AR register defines.
  for (MachineInstr::const_mop_iterator I = SU->getInstr()->operands_begin(),
                                        E = SU->getInstr()->operands_end();
                                        I != E; ++I) {
    if (I->isReg() && I->getReg() == AMDGPU::AR_X) {
      if (I->isDef()) {
        UnscheduledARDefs.push_back(SU);
      } else {
        UnscheduledARUses.push_back(SU);
      }
      return;
    }
  }

  // There is no export clause, so we can schedule one as soon as it's ready.
  if (IK == IDOther)
    Available[IDOther].push_back(SU);
  else
    Pending[IK].push_back(SU);
}

bool R600SchedStrategy::regBelongsToClass(unsigned Reg,
                                          const TargetRegisterClass *RC) const {
  if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
    return RC->contains(Reg);
  } else {
    return MRI->getRegClass(Reg) == RC;
  }
}
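
// Classify an ALU instruction by the VLIW slot it has to occupy: a specific
// channel (AluT_X..AluT_W), the whole instruction group (AluT_XYZW), a
// discardable copy, a predicate write (AluPredX), or AluAny when any slot
// will do.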
R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
  MachineInstr *MI = SU->getInstr();

  switch (MI->getOpcode()) {
  case AMDGPU::PRED_X:
    return AluPredX;
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::DOT_4:
    return AluT_XYZW;
  case AMDGPU::COPY:
    if (MI->getOperand(1).isUndef()) {
      // MI will become a KILL; don't consider it in scheduling.
      return AluDiscarded;
    }
    break;
  default:
    break;
  }

  // Does the instruction take a whole instruction group (IG)?
  // XXX: Is it possible to add a helper function in R600InstrInfo that can
  // be used here and in R600PacketizerList::isSoloInstruction()?
  if (TII->isVector(*MI) ||
      TII->isCubeOp(MI->getOpcode()) ||
      TII->isReductionOp(MI->getOpcode()) ||
      MI->getOpcode() == AMDGPU::GROUP_BARRIER) {
    return AluT_XYZW;
  }

  // Is the result already assigned to a channel?
  unsigned DestSubReg = MI->getOperand(0).getSubReg();
  switch (DestSubReg) {
  case AMDGPU::sub0: return AluT_X;
  case AMDGPU::sub1: return AluT_Y;
  case AMDGPU::sub2: return AluT_Z;
  case AMDGPU::sub3: return AluT_W;
  default: break;
  }

  // Is the result already a member of an X/Y/Z/W class?
  unsigned DestReg = MI->getOperand(0).getReg();
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_XRegClass) ||
      regBelongsToClass(DestReg, &AMDGPU::R600_AddrRegClass))
    return AluT_X;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_YRegClass))
    return AluT_Y;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass))
    return AluT_Z;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_WRegClass))
    return AluT_W;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_Reg128RegClass))
    return AluT_XYZW;

  return AluAny;
}

int R600SchedStrategy::getInstKind(SUnit* SU) {
  int Opcode = SU->getInstr()->getOpcode();

  if (TII->usesTextureCache(Opcode) || TII->usesVertexCache(Opcode))
    return IDFetch;

  if (TII->isALUInstr(Opcode)) {
    return IDAlu;
  }

  switch (Opcode) {
  case AMDGPU::PRED_X:
  case AMDGPU::COPY:
  case AMDGPU::CONST_COPY:
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::DOT_4:
    return IDAlu;
  default:
    return IDOther;
  }
}
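
// Pop the most recently released SUnit from Q that can still be bundled with
// the instructions already selected for the current group; returns NULL when
// none fits.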
SUnit *R600SchedStrategy::PopInst(std::vector<SUnit *> &Q) {
  if (Q.empty())
    return NULL;
  for (std::vector<SUnit *>::reverse_iterator It = Q.rbegin(), E = Q.rend();
      It != E; ++It) {
    SUnit *SU = *It;
    InstructionsGroupCandidate.push_back(SU->getInstr());
    if (TII->canBundle(InstructionsGroupCandidate)) {
      InstructionsGroupCandidate.pop_back();
      Q.erase((It + 1).base());
      return SU;
    } else {
      InstructionsGroupCandidate.pop_back();
    }
  }
  return NULL;
}
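
// Move all pending ALU instructions into the per-kind available queues.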
void R600SchedStrategy::LoadAlu() {
  std::vector<SUnit *> &QSrc = Pending[IDAlu];
  for (unsigned i = 0, e = QSrc.size(); i < e; ++i) {
    AluKind AK = getAluKind(QSrc[i]);
    AvailableAlus[AK].push_back(QSrc[i]);
  }
  QSrc.clear();
}

void R600SchedStrategy::PrepareNextSlot() {
  DEBUG(dbgs() << "New Slot\n");
  assert(OccupedSlotsMask && "Slot wasn't filled");
  OccupedSlotsMask = 0;
  InstructionsGroupCandidate.clear();
  LoadAlu();
}

void R600SchedStrategy::AssignSlot(MachineInstr* MI, unsigned Slot) {
  unsigned DestReg = MI->getOperand(0).getReg();
  // PressureRegister crashes if an operand is def and used in the same inst
  // and we try to constrain its regclass.
  for (MachineInstr::mop_iterator It = MI->operands_begin(),
      E = MI->operands_end(); It != E; ++It) {
    MachineOperand &MO = *It;
    if (MO.isReg() && !MO.isDef() &&
        MO.getReg() == DestReg)
      return;
  }
  // Constrain the regclass of DestReg so that it is assigned to Slot.
  switch (Slot) {
  case 0:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_XRegClass);
    break;
  case 1:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_YRegClass);
    break;
  case 2:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass);
    break;
  case 3:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_WRegClass);
    break;
  }
}
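
// Try to fill VLIW slot Slot (0 = X ... 3 = W): prefer an instruction already
// bound to that channel; otherwise take a slot-agnostic one and constrain its
// destination register to the channel's register class via AssignSlot.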
SUnit *R600SchedStrategy::AttemptFillSlot(unsigned Slot) {
  static const AluKind IndexToID[] = {AluT_X, AluT_Y, AluT_Z, AluT_W};
  SUnit *SlotedSU = PopInst(AvailableAlus[IndexToID[Slot]]);
  if (SlotedSU)
    return SlotedSU;
  SUnit *UnslotedSU = PopInst(AvailableAlus[AluAny]);
  if (UnslotedSU)
    AssignSlot(UnslotedSU->getInstr(), Slot);
  return UnslotedSU;
}

unsigned R600SchedStrategy::AvailablesAluCount() const {
  return AvailableAlus[AluAny].size() + AvailableAlus[AluT_XYZW].size() +
      AvailableAlus[AluT_X].size() + AvailableAlus[AluT_Y].size() +
      AvailableAlus[AluT_Z].size() + AvailableAlus[AluT_W].size() +
      AvailableAlus[AluDiscarded].size() + AvailableAlus[AluPredX].size();
}
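
// OccupedSlotsMask keeps one bit per VLIW slot (bit 0 = X ... bit 3 = W);
// a value of 15 means all four slots are taken, which is also used to mark
// instructions that consume a whole instruction group by themselves.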
SUnit* R600SchedStrategy::pickAlu() {
  while (AvailablesAluCount() || !Pending[IDAlu].empty()) {
    if (!OccupedSlotsMask) {
      // Bottom-up scheduling: PRED_X must come first.
      if (!AvailableAlus[AluPredX].empty()) {
        OccupedSlotsMask = 15;
        return PopInst(AvailableAlus[AluPredX]);
      }
      // Flush physical reg copies (RA will discard them).
      if (!AvailableAlus[AluDiscarded].empty()) {
        OccupedSlotsMask = 15;
        return PopInst(AvailableAlus[AluDiscarded]);
      }
      // If there is a T_XYZW alu available, use it.
      if (!AvailableAlus[AluT_XYZW].empty()) {
        OccupedSlotsMask = 15;
        return PopInst(AvailableAlus[AluT_XYZW]);
      }
    }
    for (int Chan = 3; Chan > -1; --Chan) {
      bool isOccupied = OccupedSlotsMask & (1 << Chan);
      if (!isOccupied) {
        SUnit *SU = AttemptFillSlot(Chan);
        if (SU) {
          OccupedSlotsMask |= (1 << Chan);
          InstructionsGroupCandidate.push_back(SU->getInstr());
          return SU;
        }
      }
    }
    PrepareNextSlot();
  }
  return NULL;
}

SUnit* R600SchedStrategy::pickOther(int QID) {
  SUnit *SU = 0;
  std::vector<SUnit *> &AQ = Available[QID];

  if (AQ.empty()) {
    MoveUnits(Pending[QID], AQ);
  }
  if (!AQ.empty()) {
    SU = AQ.back();
    AQ.resize(AQ.size() - 1);
  }
  return SU;
}