//===-- R600MachineScheduler.cpp - R600 Scheduler Interface -*- C++ -*-----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Machine Scheduler interface
//
//===----------------------------------------------------------------------===//

#include "R600MachineScheduler.h"
#include "AMDGPUSubtarget.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/PassManager.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "misched"

void R600SchedStrategy::initialize(ScheduleDAGMI *dag) {
  assert(dag->hasVRegLiveness() && "R600SchedStrategy needs vreg liveness");
  DAG = static_cast<ScheduleDAGMILive*>(dag);
  TII = static_cast<const R600InstrInfo*>(DAG->TII);
  TRI = static_cast<const R600RegisterInfo*>(DAG->TRI);
  VLIW5 = !DAG->MF.getTarget().getSubtarget<AMDGPUSubtarget>().hasCaymanISA();
  MRI = &DAG->MRI;
  CurInstKind = IDOther;
  CurEmitted = 0;
  OccupedSlotsMask = 31;
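  // Note on OccupedSlotsMask (the bit layout follows from pickAlu below):
  // bits 0-3 track the X/Y/Z/W vector slots of the current instruction group
  // and bit 4 tracks the Trans slot. Starting at 31 (0b11111, everything
  // occupied) forces the first pickAlu call to open a fresh group.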
  InstKindLimit[IDAlu] = TII->getMaxAlusPerClause();
  InstKindLimit[IDOther] = 32;

  const AMDGPUSubtarget &ST = DAG->TM.getSubtarget<AMDGPUSubtarget>();
  InstKindLimit[IDFetch] = ST.getTexVTXClauseSize();
  AluInstCount = 0;
  FetchInstCount = 0;
}

void R600SchedStrategy::MoveUnits(std::vector<SUnit *> &QSrc,
                                  std::vector<SUnit *> &QDst)
{
  QDst.insert(QDst.end(), QSrc.begin(), QSrc.end());
  QSrc.clear();
}

static unsigned getWFCountLimitedByGPR(unsigned GPRCount) {
  assert(GPRCount && "GPRCount cannot be 0");
  return 248 / GPRCount;
}
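
// Illustrative reading of the helper above: if a clause is expected to keep
// roughly 16 GPRs live per thread, at most 248 / 16 = 15 wavefronts can be
// resident. The 248 here presumably reflects the per-SIMD GPR pool on
// R600-class hardware.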

SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
  SUnit *SU = nullptr;
  NextInstKind = IDOther;

  IsTopNode = false;

  // Check if we might want to switch the current clause type.
  bool AllowSwitchToAlu = (CurEmitted >= InstKindLimit[CurInstKind]) ||
      (Available[CurInstKind].empty());
  bool AllowSwitchFromAlu = (CurEmitted >= InstKindLimit[CurInstKind]) &&
      (!Available[IDFetch].empty() || !Available[IDOther].empty());

  if (CurInstKind == IDAlu && !Available[IDFetch].empty()) {
    // We use the heuristic provided by the AMD Accelerated Parallel Processing
    // OpenCL Programming Guide:
    // The approximate number of WF that allows TEX instructions to hide ALU
    // instructions is:
    // 500 (cycles for TEX) / (AluFetchRatio * 8 (cycles for ALU))
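    // Worked example (illustrative numbers): with an ALU/fetch ratio of 4,
    // 500 / (4 * 8) = 15.625, i.e. about 15 wavefronts are needed to hide
    // the texture latency. The 62.5f below is the same formula with the
    // constant factors folded: 500 / 8 = 62.5.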
    float AluFetchRatioEstimate =
        static_cast<float>(AluInstCount + AvailablesAluCount() +
                           Pending[IDAlu].size()) /
        (FetchInstCount + Available[IDFetch].size());
    if (AluFetchRatioEstimate == 0) {
      AllowSwitchFromAlu = true;
    } else {
      unsigned NeededWF = 62.5f / AluFetchRatioEstimate;
      DEBUG(dbgs() << NeededWF << " approx. Wavefronts Required\n");
      // We assume the local GPR requirements to be "dominated" by the
      // requirement of the TEX clause (which consumes 128-bit regs); ALU
      // instructions before and after TEX are indeed likely to consume or
      // generate values from/for the TEX clause.
      // Available[IDFetch].size() * 2 : GPRs required in the Fetch clause.
      // We assume that fetch instructions are either TnXYZW = TEX TnXYZW
      // (needs one GPR) or TmXYZW = TnXYZW (needs 2 GPRs).
      // (TODO: use RegisterPressure)
      // If we are going to use too many GPRs, we flush fetch instructions to
      // lower register pressure on 128-bit regs.
      unsigned NearRegisterRequirement = 2 * Available[IDFetch].size();
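      // Illustrative: 8 available fetch instructions give an estimate of 16
      // GPRs, which caps residency at 248 / 16 = 15 wavefronts; if NeededWF
      // exceeds that, it is worth flushing the fetch clause now.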
      if (NeededWF > getWFCountLimitedByGPR(NearRegisterRequirement))
        AllowSwitchFromAlu = true;
    }
  }

  if (!SU && ((AllowSwitchToAlu && CurInstKind != IDAlu) ||
      (!AllowSwitchFromAlu && CurInstKind == IDAlu))) {
    // Try to pick an ALU.
    SU = pickAlu();
    if (!SU && !PhysicalRegCopy.empty()) {
      SU = PhysicalRegCopy.front();
      PhysicalRegCopy.erase(PhysicalRegCopy.begin());
    }

    if (SU) {
      if (CurEmitted >= InstKindLimit[IDAlu])
        CurEmitted = 0;
      NextInstKind = IDAlu;
    }
  }

  if (!SU) {
    // Try to pick a fetch.
    SU = pickOther(IDFetch);
    if (SU)
      NextInstKind = IDFetch;
  }

  // Try to pick other.
  if (!SU) {
    SU = pickOther(IDOther);
    if (SU)
      NextInstKind = IDOther;
  }

  DEBUG(
      if (SU) {
        dbgs() << " ** Pick node **\n";
        SU->dump(DAG);
      } else {
        dbgs() << "NO NODE \n";
        for (unsigned i = 0; i < DAG->SUnits.size(); i++) {
          const SUnit &S = DAG->SUnits[i];
          if (!S.isScheduled)
            S.dump(DAG);
        }
      }
  );

  return SU;
}

void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
  if (NextInstKind != CurInstKind) {
    DEBUG(dbgs() << "Instruction Type Switch\n");
    if (NextInstKind != IDAlu)
      OccupedSlotsMask |= 31;
    CurEmitted = 0;
    CurInstKind = NextInstKind;
  }

  if (CurInstKind == IDAlu) {
    AluInstCount++;
    switch (getAluKind(SU)) {
    case AluT_XYZW:
      CurEmitted += 4;
      break;
    case AluDiscarded:
      break;
    default: {
      ++CurEmitted;
      // Each inline literal occupies an extra slot in the clause.
      for (MachineInstr::mop_iterator It = SU->getInstr()->operands_begin(),
          E = SU->getInstr()->operands_end(); It != E; ++It) {
        MachineOperand &MO = *It;
        if (MO.isReg() && MO.getReg() == AMDGPU::ALU_LITERAL_X)
          ++CurEmitted;
      }
    }
    }
  } else {
    ++CurEmitted;
  }

  DEBUG(dbgs() << CurEmitted << " Instructions Emitted in this clause\n");

  if (CurInstKind != IDFetch) {
    MoveUnits(Pending[IDFetch], Available[IDFetch]);
  } else
    FetchInstCount++;
}

static bool
isPhysicalRegCopy(MachineInstr *MI) {
  if (MI->getOpcode() != AMDGPU::COPY)
    return false;

  return !TargetRegisterInfo::isVirtualRegister(MI->getOperand(1).getReg());
}

void R600SchedStrategy::releaseTopNode(SUnit *SU) {
  DEBUG(dbgs() << "Top Releasing "; SU->dump(DAG););
}

void R600SchedStrategy::releaseBottomNode(SUnit *SU) {
  DEBUG(dbgs() << "Bottom Releasing "; SU->dump(DAG););
  if (isPhysicalRegCopy(SU->getInstr())) {
    PhysicalRegCopy.push_back(SU);
    return;
  }

  int IK = getInstKind(SU);

  // There is no export clause; we can schedule one as soon as it is ready.
  if (IK == IDOther)
    Available[IDOther].push_back(SU);
  else
    Pending[IK].push_back(SU);
}

bool R600SchedStrategy::regBelongsToClass(unsigned Reg,
                                          const TargetRegisterClass *RC) const {
  if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
    return RC->contains(Reg);
  } else {
    return MRI->getRegClass(Reg) == RC;
  }
}

R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
  MachineInstr *MI = SU->getInstr();

  if (TII->isTransOnly(MI))
    return AluTrans;

  switch (MI->getOpcode()) {
  case AMDGPU::PRED_X:
    return AluPredX;
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::DOT_4:
    return AluT_XYZW;
  case AMDGPU::COPY:
    if (MI->getOperand(1).isUndef()) {
      // MI will become a KILL; don't consider it in scheduling.
      return AluDiscarded;
    }
    break;
  default:
    break;
  }

  // Does the instruction take a whole IG?
  // XXX: Is it possible to add a helper function in R600InstrInfo that can
  // be used here and in R600PacketizerList::isSoloInstruction()?
  if (TII->isVector(*MI) ||
      TII->isCubeOp(MI->getOpcode()) ||
      TII->isReductionOp(MI->getOpcode()) ||
      MI->getOpcode() == AMDGPU::GROUP_BARRIER) {
    return AluT_XYZW;
  }

  if (TII->isLDSInstr(MI->getOpcode())) {
    return AluT_X;
  }

  // Is the result already assigned to a channel?
  unsigned DestSubReg = MI->getOperand(0).getSubReg();
  switch (DestSubReg) {
  case AMDGPU::sub0: return AluT_X;
  case AMDGPU::sub1: return AluT_Y;
  case AMDGPU::sub2: return AluT_Z;
  case AMDGPU::sub3: return AluT_W;
  default: break;
  }

  // Is the result already a member of an X/Y/Z/W class?
  unsigned DestReg = MI->getOperand(0).getReg();
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_XRegClass) ||
      regBelongsToClass(DestReg, &AMDGPU::R600_AddrRegClass))
    return AluT_X;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_YRegClass))
    return AluT_Y;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass))
    return AluT_Z;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_WRegClass))
    return AluT_W;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_Reg128RegClass))
    return AluT_XYZW;

  // LDS src registers cannot be used in the Trans slot.
  if (TII->readsLDSSrcReg(MI))
    return AluT_XYZW;

  return AluAny;
}

int R600SchedStrategy::getInstKind(SUnit* SU) {
  int Opcode = SU->getInstr()->getOpcode();

  if (TII->usesTextureCache(Opcode) || TII->usesVertexCache(Opcode))
    return IDFetch;

  if (TII->isALUInstr(Opcode)) {
    return IDAlu;
  }

  switch (Opcode) {
  case AMDGPU::PRED_X:
  case AMDGPU::COPY:
  case AMDGPU::CONST_COPY:
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::DOT_4:
    return IDAlu;
  default:
    return IDOther;
  }
}

SUnit *R600SchedStrategy::PopInst(std::vector<SUnit *> &Q, bool AnyALU) {
  if (Q.empty())
    return nullptr;
  for (std::vector<SUnit *>::reverse_iterator It = Q.rbegin(), E = Q.rend();
      It != E; ++It) {
    SUnit *SU = *It;
    InstructionsGroupCandidate.push_back(SU->getInstr());
    if (TII->fitsConstReadLimitations(InstructionsGroupCandidate) &&
        (!AnyALU || !TII->isVectorOnly(SU->getInstr()))) {
      InstructionsGroupCandidate.pop_back();
      // (It + 1).base() yields the forward iterator that refers to the same
      // element as the reverse iterator It, which is what erase() expects.
      Q.erase((It + 1).base());
      return SU;
    }
    InstructionsGroupCandidate.pop_back();
  }
  return nullptr;
}

void R600SchedStrategy::LoadAlu() {
  std::vector<SUnit *> &QSrc = Pending[IDAlu];
  for (unsigned i = 0, e = QSrc.size(); i < e; ++i) {
    AluKind AK = getAluKind(QSrc[i]);
    AvailableAlus[AK].push_back(QSrc[i]);
  }
  QSrc.clear();
}

void R600SchedStrategy::PrepareNextSlot() {
  DEBUG(dbgs() << "New Slot\n");
  assert(OccupedSlotsMask && "Slot wasn't filled");
  OccupedSlotsMask = 0;
//  if (HwGen == AMDGPUSubtarget::NORTHERN_ISLANDS)
//    OccupedSlotsMask |= 16;
  InstructionsGroupCandidate.clear();
  LoadAlu();
}

void R600SchedStrategy::AssignSlot(MachineInstr *MI, unsigned Slot) {
  int DstIndex = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
  if (DstIndex == -1) {
    return;
  }
  unsigned DestReg = MI->getOperand(DstIndex).getReg();
  // PressureRegister crashes if an operand is both defined and used in the
  // same instruction and we try to constrain its regclass.
  for (MachineInstr::mop_iterator It = MI->operands_begin(),
      E = MI->operands_end(); It != E; ++It) {
    MachineOperand &MO = *It;
    if (MO.isReg() && !MO.isDef() &&
        MO.getReg() == DestReg)
      return;
  }
  // Constrain the regclass of DestReg to assign it to Slot.
  switch (Slot) {
  case 0:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_XRegClass);
    break;
  case 1:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_YRegClass);
    break;
  case 2:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass);
    break;
  case 3:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_WRegClass);
    break;
  }
}

SUnit *R600SchedStrategy::AttemptFillSlot(unsigned Slot, bool AnyAlu) {
  static const AluKind IndexToID[] = {AluT_X, AluT_Y, AluT_Z, AluT_W};
  SUnit *SlotedSU = PopInst(AvailableAlus[IndexToID[Slot]], AnyAlu);
  if (SlotedSU)
    return SlotedSU;
  SUnit *UnslotedSU = PopInst(AvailableAlus[AluAny], AnyAlu);
  if (UnslotedSU)
    AssignSlot(UnslotedSU->getInstr(), Slot);
  return UnslotedSU;
}

unsigned R600SchedStrategy::AvailablesAluCount() const {
  return AvailableAlus[AluAny].size() + AvailableAlus[AluT_XYZW].size() +
      AvailableAlus[AluT_X].size() + AvailableAlus[AluT_Y].size() +
      AvailableAlus[AluT_Z].size() + AvailableAlus[AluT_W].size() +
      AvailableAlus[AluTrans].size() + AvailableAlus[AluDiscarded].size() +
      AvailableAlus[AluPredX].size();
}

SUnit* R600SchedStrategy::pickAlu() {
  while (AvailablesAluCount() || !Pending[IDAlu].empty()) {
    if (!OccupedSlotsMask) {
      // Bottom-up scheduling: PRED_X must come first.
      if (!AvailableAlus[AluPredX].empty()) {
        OccupedSlotsMask |= 31;
        return PopInst(AvailableAlus[AluPredX], false);
      }
      // Flush physical reg copies (RA will discard them).
      if (!AvailableAlus[AluDiscarded].empty()) {
        OccupedSlotsMask |= 31;
        return PopInst(AvailableAlus[AluDiscarded], false);
      }
      // If there is a T_XYZW alu available, use it.
      if (!AvailableAlus[AluT_XYZW].empty()) {
        OccupedSlotsMask |= 15;
        return PopInst(AvailableAlus[AluT_XYZW], false);
      }
    }
    // On VLIW5 hardware (pre-Cayman) the fifth (Trans) slot can also be
    // filled; Cayman is VLIW4 and has no Trans slot.
    bool TransSlotOccuped = OccupedSlotsMask & 16;
    if (!TransSlotOccuped && VLIW5) {
      if (!AvailableAlus[AluTrans].empty()) {
        OccupedSlotsMask |= 16;
        return PopInst(AvailableAlus[AluTrans], false);
      }
      SUnit *SU = AttemptFillSlot(3, true);
      if (SU) {
        OccupedSlotsMask |= 16;
        return SU;
      }
    }
    for (int Chan = 3; Chan > -1; --Chan) {
      bool isOccupied = OccupedSlotsMask & (1 << Chan);
      if (!isOccupied) {
        SUnit *SU = AttemptFillSlot(Chan, false);
        if (SU) {
          OccupedSlotsMask |= (1 << Chan);
          InstructionsGroupCandidate.push_back(SU->getInstr());
          return SU;
        }
      }
    }
    PrepareNextSlot();
  }
  return nullptr;
}

SUnit* R600SchedStrategy::pickOther(int QID) {
  SUnit *SU = nullptr;
  std::vector<SUnit *> &AQ = Available[QID];

  if (AQ.empty()) {
    MoveUnits(Pending[QID], AQ);
  }
  if (!AQ.empty()) {
    SU = AQ.back();
    AQ.resize(AQ.size() - 1);
  }
  return SU;
}