//===-- R600MachineScheduler.cpp - R600 Scheduler Interface -*- C++ -*-----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Machine Scheduler interface
//
//===----------------------------------------------------------------------===//
15 #include "R600MachineScheduler.h"
16 #include "AMDGPUSubtarget.h"
17 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
18 #include "llvm/CodeGen/MachineRegisterInfo.h"
19 #include "llvm/Pass.h"
20 #include "llvm/PassManager.h"
21 #include "llvm/Support/raw_ostream.h"
25 #define DEBUG_TYPE "misched"

void R600SchedStrategy::initialize(ScheduleDAGMI *dag) {
  assert(dag->hasVRegLiveness() && "R600SchedStrategy needs vreg liveness");
  DAG = static_cast<ScheduleDAGMILive*>(dag);
  TII = static_cast<const R600InstrInfo*>(DAG->TII);
  TRI = static_cast<const R600RegisterInfo*>(DAG->TRI);
  VLIW5 = !DAG->MF.getTarget().getSubtarget<AMDGPUSubtarget>().hasCaymanISA();
  MRI = &DAG->MRI;
  CurInstKind = IDOther;
  CurEmitted = 0;
  OccupedSlotsMask = 31;
  InstKindLimit[IDAlu] = TII->getMaxAlusPerClause();
  InstKindLimit[IDOther] = 32;

  const AMDGPUSubtarget &ST = DAG->TM.getSubtarget<AMDGPUSubtarget>();
  InstKindLimit[IDFetch] = ST.getTexVTXClauseSize();
  AluInstCount = 0;
  FetchInstCount = 0;
}
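
// Transfer every SUnit in QSrc to the back of QDst, leaving QSrc empty.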
void R600SchedStrategy::MoveUnits(std::vector<SUnit *> &QSrc,
                                  std::vector<SUnit *> &QDst)
{
  QDst.insert(QDst.end(), QSrc.begin(), QSrc.end());
  QSrc.clear();
}
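
// Upper bound on the number of wavefronts that can be in flight when each
// wavefront needs GPRCount registers. The 248 budget is presumably the usable
// per-SIMD GPR pool on R600-family hardware; the source gives it no symbolic
// name.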
static unsigned getWFCountLimitedByGPR(unsigned GPRCount) {
  assert(GPRCount && "GPRCount cannot be 0");
  return 248 / GPRCount;
}
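
// Pick the next instruction to schedule (bottom-up). ALU instructions are
// preferred while the current ALU clause may still grow; the heuristic below
// switches to a fetch clause when doing so should hide latency or lower
// 128-bit register pressure.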
SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
  SUnit *SU = nullptr;
  NextInstKind = IDOther;

  IsTopNode = false;

  // Check if we might want to switch the current clause type.
  bool AllowSwitchToAlu = (CurEmitted >= InstKindLimit[CurInstKind]) ||
      (Available[CurInstKind].empty());
  bool AllowSwitchFromAlu = (CurEmitted >= InstKindLimit[CurInstKind]) &&
      (!Available[IDFetch].empty() || !Available[IDOther].empty());

  if (CurInstKind == IDAlu && !Available[IDFetch].empty()) {
    // We use the heuristic provided by the AMD Accelerated Parallel Processing
    // OpenCL Programming Guide:
    // The approx. number of WF that allows TEX inst to hide ALU inst is:
    // 500 (cycles for TEX) / (AluFetchRatio * 8 (cycles for ALU))
    // The cast avoids integer division truncating the ratio.
    float ALUFetchRatioEstimate =
        static_cast<float>(AluInstCount + AvailablesAluCount() +
                           Pending[IDAlu].size()) /
        (FetchInstCount + Available[IDFetch].size());
    unsigned NeededWF = 62.5f / ALUFetchRatioEstimate;
    DEBUG(dbgs() << NeededWF << " approx. Wavefronts Required\n");
    // We assume the local GPR requirements to be "dominated" by the
    // requirements of the TEX clause (which consumes 128-bit regs); ALU
    // instructions before and after TEX are indeed likely to consume or
    // generate values from/for the TEX clause.
    // Available[IDFetch].size() * 2: GPRs required in the Fetch clause.
    // We assume that fetch instructions are either TnXYZW = TEX TnXYZW (needs
    // one GPR) or TmXYZW = TnXYZW (needs two GPRs).
    // (TODO: use RegisterPressure)
    // If we are going to use too many GPRs, we flush the Fetch instructions
    // to lower register pressure on 128-bit regs.
    unsigned NearRegisterRequirement = 2 * Available[IDFetch].size();
    if (NeededWF > getWFCountLimitedByGPR(NearRegisterRequirement))
      AllowSwitchFromAlu = true;
  }

  if (!SU && ((AllowSwitchToAlu && CurInstKind != IDAlu) ||
      (!AllowSwitchFromAlu && CurInstKind == IDAlu))) {
    // Try to pick an ALU instruction.
    SU = pickAlu();
    if (!SU && !PhysicalRegCopy.empty()) {
      SU = PhysicalRegCopy.front();
      PhysicalRegCopy.erase(PhysicalRegCopy.begin());
    }
    if (SU) {
      if (CurEmitted >= InstKindLimit[IDAlu])
        CurEmitted = 0;
      NextInstKind = IDAlu;
    }
  }

  if (!SU) {
    // Try to pick a fetch instruction.
    SU = pickOther(IDFetch);
    if (SU)
      NextInstKind = IDFetch;
  }

  // Try to pick other.
  if (!SU) {
    SU = pickOther(IDOther);
    if (SU)
      NextInstKind = IDOther;
  }

  DEBUG(
      if (SU) {
        dbgs() << " ** Pick node **\n";
        SU->dump(DAG);
      } else {
        dbgs() << "NO NODE \n";
        for (unsigned i = 0; i < DAG->SUnits.size(); i++) {
          const SUnit &S = DAG->SUnits[i];
          if (!S.isScheduled)
            S.dump(DAG);
        }
      }
  );

  return SU;
}
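
// Bookkeeping after a node has been scheduled: track clause type switches and
// how many instruction slots the picked node consumes in the current clause.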
void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
  if (NextInstKind != CurInstKind) {
    DEBUG(dbgs() << "Instruction Type Switch\n");
    if (NextInstKind != IDAlu)
      OccupedSlotsMask |= 31;
    CurEmitted = 0;
    CurInstKind = NextInstKind;
  }

  if (CurInstKind == IDAlu) {
    AluInstCount++;
    switch (getAluKind(SU)) {
    case AluT_XYZW:
      CurEmitted += 4;
      break;
    case AluDiscarded:
      break;
    default: {
      ++CurEmitted;
      // Each ALU literal operand occupies an extra slot in the clause.
      for (MachineInstr::mop_iterator It = SU->getInstr()->operands_begin(),
          E = SU->getInstr()->operands_end(); It != E; ++It) {
        MachineOperand &MO = *It;
        if (MO.isReg() && MO.getReg() == AMDGPU::ALU_LITERAL_X)
          ++CurEmitted;
      }
    }
    }
  } else {
    ++CurEmitted;
  }

  DEBUG(dbgs() << CurEmitted << " Instructions Emitted in this clause\n");

  if (CurInstKind != IDFetch) {
    MoveUnits(Pending[IDFetch], Available[IDFetch]);
  } else
    FetchInstCount++;
}
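
// Returns true if MI is a COPY whose source is a physical register.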
static bool
isPhysicalRegCopy(MachineInstr *MI) {
  if (MI->getOpcode() != AMDGPU::COPY)
    return false;

  return !TargetRegisterInfo::isVirtualRegister(MI->getOperand(1).getReg());
}

void R600SchedStrategy::releaseTopNode(SUnit *SU) {
  DEBUG(dbgs() << "Top Releasing "; SU->dump(DAG););
}

void R600SchedStrategy::releaseBottomNode(SUnit *SU) {
  DEBUG(dbgs() << "Bottom Releasing "; SU->dump(DAG););
  if (isPhysicalRegCopy(SU->getInstr())) {
    PhysicalRegCopy.push_back(SU);
    return;
  }

  int IK = getInstKind(SU);

  // There is no export clause, so we can schedule one as soon as it's ready.
  if (IK == IDOther)
    Available[IDOther].push_back(SU);
  else
    Pending[IK].push_back(SU);
}
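
// Returns true if Reg belongs to RC: containment for a physical register,
// exact register class match for a virtual one.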
bool R600SchedStrategy::regBelongsToClass(unsigned Reg,
                                          const TargetRegisterClass *RC) const {
  if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
    return RC->contains(Reg);
  } else {
    return MRI->getRegClass(Reg) == RC;
  }
}
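
// Classify an ALU instruction by the VLIW slot(s) it may occupy: a single
// X/Y/Z/W channel, the trans slot, any slot, or all four vector slots.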
R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
  MachineInstr *MI = SU->getInstr();

  if (TII->isTransOnly(MI))
    return AluTrans;

  switch (MI->getOpcode()) {
  case AMDGPU::PRED_X:
    return AluPredX;
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::DOT_4:
    return AluT_XYZW;
  case AMDGPU::COPY:
    if (MI->getOperand(1).isUndef()) {
      // MI will become a KILL; don't consider it in scheduling.
      return AluDiscarded;
    }
    break;
  default:
    break;
  }

  // Does the instruction take a whole IG?
  // XXX: Is it possible to add a helper function in R600InstrInfo that can
  // be used here and in R600PacketizerList::isSoloInstruction()?
  if (TII->isVector(*MI) ||
      TII->isCubeOp(MI->getOpcode()) ||
      TII->isReductionOp(MI->getOpcode()) ||
      MI->getOpcode() == AMDGPU::GROUP_BARRIER) {
    return AluT_XYZW;
  }

  if (TII->isLDSInstr(MI->getOpcode())) {
    return AluT_X;
  }

  // Is the result already assigned to a channel?
  unsigned DestSubReg = MI->getOperand(0).getSubReg();
  switch (DestSubReg) {
  case AMDGPU::sub0:
    return AluT_X;
  case AMDGPU::sub1:
    return AluT_Y;
  case AMDGPU::sub2:
    return AluT_Z;
  case AMDGPU::sub3:
    return AluT_W;
  default:
    break;
  }

  // Is the result already a member of an X/Y/Z/W class?
  unsigned DestReg = MI->getOperand(0).getReg();
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_XRegClass) ||
      regBelongsToClass(DestReg, &AMDGPU::R600_AddrRegClass))
    return AluT_X;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_YRegClass))
    return AluT_Y;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass))
    return AluT_Z;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_WRegClass))
    return AluT_W;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_Reg128RegClass))
    return AluT_XYZW;

  // LDS src registers cannot be used in the Trans slot.
  if (TII->readsLDSSrcReg(MI))
    return AluT_XYZW;

  return AluAny;
}
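
// Bucket an instruction into one of the scheduler's clause kinds: fetch
// (texture/vertex cache), ALU, or other.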
int R600SchedStrategy::getInstKind(SUnit* SU) {
  int Opcode = SU->getInstr()->getOpcode();

  if (TII->usesTextureCache(Opcode) || TII->usesVertexCache(Opcode))
    return IDFetch;

  if (TII->isALUInstr(Opcode)) {
    return IDAlu;
  }

  switch (Opcode) {
  case AMDGPU::PRED_X:
  case AMDGPU::COPY:
  case AMDGPU::CONST_COPY:
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::DOT_4:
    return IDAlu;
  default:
    return IDOther;
  }
}
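
// Pop the most recently released SUnit from Q that still fits the constant
// read limitations of the instruction group under construction; when AnyALU
// is true (the trans slot is a candidate), vector-only instructions are
// skipped as well.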
SUnit *R600SchedStrategy::PopInst(std::vector<SUnit *> &Q, bool AnyALU) {
  if (Q.empty())
    return nullptr;
  for (std::vector<SUnit *>::reverse_iterator It = Q.rbegin(), E = Q.rend();
      It != E; ++It) {
    SUnit *SU = *It;
    InstructionsGroupCandidate.push_back(SU->getInstr());
    if (TII->fitsConstReadLimitations(InstructionsGroupCandidate)
        && (!AnyALU || !TII->isVectorOnly(SU->getInstr()))
    ) {
      InstructionsGroupCandidate.pop_back();
      Q.erase((It + 1).base());
      return SU;
    } else {
      InstructionsGroupCandidate.pop_back();
    }
  }
  return nullptr;
}
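
// Move all pending ALU SUnits into the per-slot-kind buckets they can be
// issued from.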
void R600SchedStrategy::LoadAlu() {
  std::vector<SUnit *> &QSrc = Pending[IDAlu];
  for (unsigned i = 0, e = QSrc.size(); i < e; ++i) {
    AluKind AK = getAluKind(QSrc[i]);
    AvailableAlus[AK].push_back(QSrc[i]);
  }
  QSrc.clear();
}

void R600SchedStrategy::PrepareNextSlot() {
  DEBUG(dbgs() << "New Slot\n");
  assert(OccupedSlotsMask && "Slot wasn't filled");
  OccupedSlotsMask = 0;
//  if (HwGen == AMDGPUSubtarget::NORTHERN_ISLANDS)
//    OccupedSlotsMask |= 16;
  InstructionsGroupCandidate.clear();
  LoadAlu();
}
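
// Constrain the destination register class of MI so its result is allocated
// to the channel matching Slot (0 = X ... 3 = W).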
void R600SchedStrategy::AssignSlot(MachineInstr *MI, unsigned Slot) {
  int DstIndex = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
  if (DstIndex == -1) {
    return;
  }
  unsigned DestReg = MI->getOperand(DstIndex).getReg();
  // PressureRegister crashes if an operand is def and used in the same inst
  // and we try to constrain its regclass.
  for (MachineInstr::mop_iterator It = MI->operands_begin(),
      E = MI->operands_end(); It != E; ++It) {
    MachineOperand &MO = *It;
    if (MO.isReg() && !MO.isDef() &&
        MO.getReg() == DestReg)
      return;
  }
  // Constrain the regclass of DestReg to assign it to Slot.
  switch (Slot) {
  case 0:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_XRegClass);
    break;
  case 1:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_YRegClass);
    break;
  case 2:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass);
    break;
  case 3:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_WRegClass);
    break;
  }
}
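
// Try to fill the vector slot Slot: prefer an instruction already bound to
// that channel, otherwise take an AluAny instruction and constrain its
// destination to the slot.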
SUnit *R600SchedStrategy::AttemptFillSlot(unsigned Slot, bool AnyAlu) {
  static const AluKind IndexToID[] = {AluT_X, AluT_Y, AluT_Z, AluT_W};
  SUnit *SlottedSU = PopInst(AvailableAlus[IndexToID[Slot]], AnyAlu);
  if (SlottedSU)
    return SlottedSU;
  SUnit *UnslottedSU = PopInst(AvailableAlus[AluAny], AnyAlu);
  if (UnslottedSU)
    AssignSlot(UnslottedSU->getInstr(), Slot);
  return UnslottedSU;
}

unsigned R600SchedStrategy::AvailablesAluCount() const {
  return AvailableAlus[AluAny].size() + AvailableAlus[AluT_XYZW].size() +
      AvailableAlus[AluT_X].size() + AvailableAlus[AluT_Y].size() +
      AvailableAlus[AluT_Z].size() + AvailableAlus[AluT_W].size() +
      AvailableAlus[AluTrans].size() + AvailableAlus[AluDiscarded].size() +
      AvailableAlus[AluPredX].size();
}
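
// Build VLIW instruction groups bottom-up. Judging by the masks used below,
// the low four bits of OccupedSlotsMask track the X/Y/Z/W vector slots and
// bit 4 tracks the trans slot; when no candidate fits the current group,
// PrepareNextSlot() starts a new one.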
SUnit* R600SchedStrategy::pickAlu() {
  while (AvailablesAluCount() || !Pending[IDAlu].empty()) {
    if (!OccupedSlotsMask) {
      // Bottom-up scheduling: PRED_X must come first.
      if (!AvailableAlus[AluPredX].empty()) {
        OccupedSlotsMask |= 31;
        return PopInst(AvailableAlus[AluPredX], false);
      }
      // Flush physical reg copies (RA will discard them).
      if (!AvailableAlus[AluDiscarded].empty()) {
        OccupedSlotsMask |= 31;
        return PopInst(AvailableAlus[AluDiscarded], false);
      }
      // If there is a T_XYZW alu available, use it.
      if (!AvailableAlus[AluT_XYZW].empty()) {
        OccupedSlotsMask |= 15;
        return PopInst(AvailableAlus[AluT_XYZW], false);
      }
    }
    bool TransSlotOccupied = OccupedSlotsMask & 16;
    if (!TransSlotOccupied && VLIW5) {
      if (!AvailableAlus[AluTrans].empty()) {
        OccupedSlotsMask |= 16;
        return PopInst(AvailableAlus[AluTrans], false);
      }
      SUnit *SU = AttemptFillSlot(3, true);
      if (SU) {
        OccupedSlotsMask |= 16;
        return SU;
      }
    }
    for (int Chan = 3; Chan > -1; --Chan) {
      bool isOccupied = OccupedSlotsMask & (1 << Chan);
      if (!isOccupied) {
        SUnit *SU = AttemptFillSlot(Chan, false);
        if (SU) {
          OccupedSlotsMask |= (1 << Chan);
          InstructionsGroupCandidate.push_back(SU->getInstr());
          return SU;
        }
      }
    }
    PrepareNextSlot();
  }
  return nullptr;
}
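
// Pop the next SUnit from the Available queue for QID, refilling it from the
// Pending queue when it runs dry.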
SUnit* R600SchedStrategy::pickOther(int QID) {
  SUnit *SU = nullptr;
  std::vector<SUnit *> &AQ = Available[QID];

  if (AQ.empty()) {
    MoveUnits(Pending[QID], AQ);
  }
  if (!AQ.empty()) {
    SU = AQ.back();
    AQ.resize(AQ.size() - 1);
  }
  return SU;
}