//===-- R600MachineScheduler.cpp - R600 Scheduler Interface -*- C++ -*-----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Machine Scheduler interface
//
// TODO: Scheduling is optimised for VLIW4 arch, modify it to support TRANS slot
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "misched"

#include "R600MachineScheduler.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/Pass.h"
#include "llvm/PassManager.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
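
// Called once per scheduling region: cache the target info and reset the
// per-clause state (ready queues, current clause kind, slot mask and
// per-kind instruction limits).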
void R600SchedStrategy::initialize(ScheduleDAGMI *dag) {
  DAG = dag;
  TII = static_cast<const R600InstrInfo*>(DAG->TII);
  TRI = static_cast<const R600RegisterInfo*>(DAG->TRI);
  MRI = &DAG->MRI;
  Available[IDAlu]->clear();
  Available[IDFetch]->clear();
  Available[IDOther]->clear();
  CurInstKind = IDOther;
  CurEmitted = 0;
  OccupedSlotsMask = 15;
  InstKindLimit[IDAlu] = TII->getMaxAlusPerClause();
  InstKindLimit[IDOther] = 32;

  const AMDGPUSubtarget &ST = DAG->TM.getSubtarget<AMDGPUSubtarget>();
  InstKindLimit[IDFetch] = ST.getTexVTXClauseSize();
}
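
// Transfer every SUnit from QSrc to QDst, clearing QSrc's queue id bit on
// each unit as it moves.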
void R600SchedStrategy::MoveUnits(ReadyQueue *QSrc, ReadyQueue *QDst) {
  if (QSrc->empty())
    return;
  for (ReadyQueue::iterator I = QSrc->begin(),
      E = QSrc->end(); I != E; ++I) {
    (*I)->NodeQueueId &= ~QSrc->getID();
    QDst->push(*I);
  }
  QSrc->clear();
}
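
// Pick the next instruction to schedule. Prefer to stay within the current
// clause kind; switch between ALU and FETCH/other clauses only when the
// current clause has hit its size limit or run out of candidates.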
SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
  SUnit *SU = 0;
  IsTopNode = true;
  NextInstKind = IDOther;

  // Check whether we might want to switch the current clause type.
  bool AllowSwitchToAlu = (CurInstKind == IDOther) ||
      (CurEmitted >= InstKindLimit[CurInstKind]) ||
      (Available[CurInstKind]->empty());
  bool AllowSwitchFromAlu = (CurEmitted >= InstKindLimit[CurInstKind]) &&
      (!Available[IDFetch]->empty() || !Available[IDOther]->empty());

  if ((AllowSwitchToAlu && CurInstKind != IDAlu) ||
      (!AllowSwitchFromAlu && CurInstKind == IDAlu)) {
    // Try to pick an ALU instruction.
    SU = pickAlu();
    if (SU) {
      if (CurEmitted >= InstKindLimit[IDAlu])
        CurEmitted = 0;
      NextInstKind = IDAlu;
    }
  }

  if (!SU) {
    // Try to pick a FETCH instruction.
    SU = pickOther(IDFetch);
    if (SU)
      NextInstKind = IDFetch;
  }

  if (!SU) {
    // Try to pick any other instruction.
    SU = pickOther(IDOther);
    if (SU)
      NextInstKind = IDOther;
  }

  DEBUG(
      if (SU) {
        dbgs() << "picked node: ";
        SU->dump(DAG);
      } else {
        dbgs() << "NO NODE ";
        for (int i = 0; i < IDLast; ++i) {
          Available[i]->dump();
          Pending[i]->dump();
        }
        for (unsigned i = 0; i < DAG->SUnits.size(); i++) {
          const SUnit &S = DAG->SUnits[i];
          if (!S.isScheduled)
            S.dump(DAG);
        }
      }
  );

  return SU;
}
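
// Notify the strategy that SU was scheduled: update the emitted-instruction
// count for the current clause (each ALU literal operand costs an extra
// slot) and refill the available queues from the pending ones.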
void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
  DEBUG(dbgs() << "scheduled: ");
  DEBUG(SU->dump(DAG));

  if (NextInstKind != CurInstKind) {
    DEBUG(dbgs() << "Instruction Type Switch\n");
    if (NextInstKind != IDAlu)
      OccupedSlotsMask = 15;
    CurEmitted = 0;
    CurInstKind = NextInstKind;
  }

  if (CurInstKind == IDAlu) {
    switch (getAluKind(SU)) {
    case AluT_XYZW:
      CurEmitted += 4;
      break;
    case AluDiscarded:
      break;
    default: {
      ++CurEmitted;
      for (MachineInstr::mop_iterator It = SU->getInstr()->operands_begin(),
          E = SU->getInstr()->operands_end(); It != E; ++It) {
        MachineOperand &MO = *It;
        if (MO.isReg() && MO.getReg() == AMDGPU::ALU_LITERAL_X)
          ++CurEmitted;
      }
    }
    }
  } else {
    ++CurEmitted;
  }

  DEBUG(dbgs() << CurEmitted << " Instructions Emitted in this clause\n");

  if (CurInstKind != IDFetch) {
    MoveUnits(Pending[IDFetch], Available[IDFetch]);
  }
  MoveUnits(Pending[IDOther], Available[IDOther]);
}
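
// A node just became ready at the top of the DAG; classify it and park it
// in the matching pending queue.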
void R600SchedStrategy::releaseTopNode(SUnit *SU) {
  int IK = getInstKind(SU);

  DEBUG(dbgs() << IK << " <= ");
  DEBUG(SU->dump(DAG));

  Pending[IK]->push(SU);
}
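
// This strategy schedules top-down only, so bottom-up releases need no work.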
void R600SchedStrategy::releaseBottomNode(SUnit *SU) {
}
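
// Check whether Reg belongs to RC, handling both physical and virtual
// registers.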
bool R600SchedStrategy::regBelongsToClass(unsigned Reg,
                                          const TargetRegisterClass *RC) const {
  if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
    return RC->contains(Reg);
  } else {
    return MRI->getRegClass(Reg) == RC;
  }
}
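
// Classify an ALU instruction by the vector channel (X/Y/Z/W) it must
// occupy, or mark it as taking a whole instruction group, as discardable,
// or as free to go in any slot.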
R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
  MachineInstr *MI = SU->getInstr();

  switch (MI->getOpcode()) {
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
    return AluT_XYZW;
  case AMDGPU::COPY:
    if (TargetRegisterInfo::isPhysicalRegister(MI->getOperand(1).getReg())) {
      // %vregX = COPY Tn_X is likely to be discarded in favor of an
      // assignment of Tn_X to %vregX; don't consider it in scheduling.
      return AluDiscarded;
    } else if (MI->getOperand(1).isUndef()) {
      // MI will become a KILL; don't consider it in scheduling.
      return AluDiscarded;
    }
    break;
  default:
    break;
  }

  // Does the instruction take a whole instruction group?
  if (TII->isVector(*MI) ||
      TII->isCubeOp(MI->getOpcode()) ||
      TII->isReductionOp(MI->getOpcode()))
    return AluT_XYZW;

  // Is the result already assigned to a channel?
  unsigned DestSubReg = MI->getOperand(0).getSubReg();
  switch (DestSubReg) {
  case AMDGPU::sub0:
    return AluT_X;
  case AMDGPU::sub1:
    return AluT_Y;
  case AMDGPU::sub2:
    return AluT_Z;
  case AMDGPU::sub3:
    return AluT_W;
  default:
    break;
  }

  // Is the result already a member of an X/Y/Z/W register class?
  unsigned DestReg = MI->getOperand(0).getReg();
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_XRegClass) ||
      regBelongsToClass(DestReg, &AMDGPU::R600_AddrRegClass))
    return AluT_X;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_YRegClass))
    return AluT_Y;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass))
    return AluT_Z;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_WRegClass))
    return AluT_W;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_Reg128RegClass))
    return AluT_XYZW;

  return AluAny;
}
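
// Map an SUnit onto the clause kind (ALU, FETCH or other) it belongs to.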
int R600SchedStrategy::getInstKind(SUnit* SU) {
  int Opcode = SU->getInstr()->getOpcode();

  if (TII->isALUInstr(Opcode)) {
    return IDAlu;
  }

  switch (Opcode) {
  case AMDGPU::COPY:
  case AMDGPU::CONST_COPY:
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::DOT4_eg_pseudo:
  case AMDGPU::DOT4_r600_pseudo:
    return IDAlu;
  case AMDGPU::TEX_VTX_CONSTBUF:
  case AMDGPU::TEX_VTX_TEXBUF:
  case AMDGPU::TEX_LD:
  case AMDGPU::TEX_GET_TEXTURE_RESINFO:
  case AMDGPU::TEX_GET_GRADIENTS_H:
  case AMDGPU::TEX_GET_GRADIENTS_V:
  case AMDGPU::TEX_SET_GRADIENTS_H:
  case AMDGPU::TEX_SET_GRADIENTS_V:
  case AMDGPU::TEX_SAMPLE:
  case AMDGPU::TEX_SAMPLE_C:
  case AMDGPU::TEX_SAMPLE_L:
  case AMDGPU::TEX_SAMPLE_C_L:
  case AMDGPU::TEX_SAMPLE_LB:
  case AMDGPU::TEX_SAMPLE_C_LB:
  case AMDGPU::TEX_SAMPLE_G:
  case AMDGPU::TEX_SAMPLE_C_G:
  case AMDGPU::TXD:
  case AMDGPU::TXD_SHADOW:
    return IDFetch;
  default:
    DEBUG(
        dbgs() << "other inst: ";
        SU->dump(DAG);
    );
    return IDOther;
  }
}
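
// Pop the first SUnit of Q that can legally be bundled with the instruction
// group currently being built; return NULL if none fits.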
SUnit *R600SchedStrategy::PopInst(std::multiset<SUnit *, CompareSUnit> &Q) {
  if (Q.empty())
    return NULL;
  for (std::multiset<SUnit *, CompareSUnit>::iterator It = Q.begin(),
      E = Q.end(); It != E; ++It) {
    SUnit *SU = *It;
    InstructionsGroupCandidate.push_back(SU->getInstr());
    if (TII->canBundle(InstructionsGroupCandidate)) {
      InstructionsGroupCandidate.pop_back();
      Q.erase(It);
      return SU;
    } else {
      InstructionsGroupCandidate.pop_back();
    }
  }
  return NULL;
}
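
// Drain the pending ALU queue, classifying each unit by its ALU kind.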
void R600SchedStrategy::LoadAlu() {
  ReadyQueue *QSrc = Pending[IDAlu];
  for (ReadyQueue::iterator I = QSrc->begin(),
      E = QSrc->end(); I != E; ++I) {
    (*I)->NodeQueueId &= ~QSrc->getID();
    AluKind AK = getAluKind(*I);
    AvailableAlus[AK].insert(*I);
  }
  QSrc->clear();
}
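
// Start a new instruction group: reset the slot mask, drop the current
// group candidate and reload the ALU queues.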
void R600SchedStrategy::PrepareNextSlot() {
  DEBUG(dbgs() << "New Slot\n");
  assert(OccupedSlotsMask && "Slot wasn't filled");
  OccupedSlotsMask = 0;
  InstructionsGroupCandidate.clear();
  LoadAlu();
}
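
// Constrain MI's destination register class so that the result lands in the
// channel matching Slot; bails out early when the destination register is
// also read by MI.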
void R600SchedStrategy::AssignSlot(MachineInstr* MI, unsigned Slot) {
  unsigned DestReg = MI->getOperand(0).getReg();
  // PressureRegister crashes if an operand is def and used in the same inst
  // and we try to constrain its register class.
  for (MachineInstr::mop_iterator It = MI->operands_begin(),
      E = MI->operands_end(); It != E; ++It) {
    MachineOperand &MO = *It;
    if (MO.isReg() && !MO.isDef() &&
        MO.getReg() == DestReg)
      return;
  }
  // Constrain the register class of DestReg to assign it to Slot.
  switch (Slot) {
  case 0:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_XRegClass);
    break;
  case 1:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_YRegClass);
    break;
  case 2:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass);
    break;
  case 3:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_WRegClass);
    break;
  }
}
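
// Try to fill Slot, choosing between an instruction already bound to that
// channel and a channel-agnostic (AluAny) one by scheduling order.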
SUnit *R600SchedStrategy::AttemptFillSlot(unsigned Slot) {
  static const AluKind IndexToID[] = {AluT_X, AluT_Y, AluT_Z, AluT_W};
  SUnit *SlotedSU = PopInst(AvailableAlus[IndexToID[Slot]]);
  SUnit *UnslotedSU = PopInst(AvailableAlus[AluAny]);
  if (!UnslotedSU) {
    return SlotedSU;
  } else if (!SlotedSU) {
    AssignSlot(UnslotedSU->getInstr(), Slot);
    return UnslotedSU;
  } else {
    // Determine which one to pick (the lesser one).
    if (CompareSUnit()(SlotedSU, UnslotedSU)) {
      AvailableAlus[AluAny].insert(UnslotedSU);
      return SlotedSU;
    } else {
      AvailableAlus[IndexToID[Slot]].insert(SlotedSU);
      AssignSlot(UnslotedSU->getInstr(), Slot);
      return UnslotedSU;
    }
  }
}
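
// True when no ALU instruction is pending or available in any slot bucket.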
bool R600SchedStrategy::isAvailablesAluEmpty() const {
  return Pending[IDAlu]->empty() && AvailableAlus[AluAny].empty() &&
      AvailableAlus[AluT_XYZW].empty() && AvailableAlus[AluT_X].empty() &&
      AvailableAlus[AluT_Y].empty() && AvailableAlus[AluT_Z].empty() &&
      AvailableAlus[AluT_W].empty() && AvailableAlus[AluDiscarded].empty();
}
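
// Pick the next ALU instruction: flush discardable copies and whole-group
// instructions first, then fill the free X/Y/Z/W slots of the current
// instruction group, opening a new group when this one is full.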
SUnit* R600SchedStrategy::pickAlu() {
  while (!isAvailablesAluEmpty()) {
    if (!OccupedSlotsMask) {
      // Flush physical reg copies (RA will discard them).
      if (!AvailableAlus[AluDiscarded].empty()) {
        OccupedSlotsMask = 15;
        return PopInst(AvailableAlus[AluDiscarded]);
      }
      // If there is a T_XYZW alu available, use it.
      if (!AvailableAlus[AluT_XYZW].empty()) {
        OccupedSlotsMask = 15;
        return PopInst(AvailableAlus[AluT_XYZW]);
      }
    }
    for (unsigned Chan = 0; Chan < 4; ++Chan) {
      bool isOccupied = OccupedSlotsMask & (1 << Chan);
      if (!isOccupied) {
        SUnit *SU = AttemptFillSlot(Chan);
        if (SU) {
          OccupedSlotsMask |= (1 << Chan);
          InstructionsGroupCandidate.push_back(SU->getInstr());
          return SU;
        }
      }
    }
    PrepareNextSlot();
  }
  return NULL;
}
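
// Pick the next instruction from the given non-ALU queue in ready order,
// refilling it from the pending queue if needed.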
SUnit* R600SchedStrategy::pickOther(int QID) {
  SUnit *SU = 0;
  ReadyQueue *AQ = Available[QID];

  if (AQ->empty()) {
    MoveUnits(Pending[QID], AQ);
  }
  if (!AQ->empty()) {
    SU = *AQ->begin();
    AQ->remove(AQ->begin());
  }
  return SU;
}