//===-- R600MachineScheduler.cpp - R600 Scheduler Interface -*- C++ -*-----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Machine Scheduler interface
// TODO: Scheduling is optimised for VLIW4 arch, modify it to support TRANS slot
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "misched"

#include "R600MachineScheduler.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/PassManager.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
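
// The strategy below schedules bottom-up, packing ALU instructions into
// VLIW instruction groups of four slots (X, Y, Z, W) and partitioning the
// schedule into ALU, fetch and "other" clauses, each with a per-clause
// instruction limit.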
void R600SchedStrategy::initialize(ScheduleDAGMI *dag) {

  DAG = dag;
  TII = static_cast<const R600InstrInfo*>(DAG->TII);
  TRI = static_cast<const R600RegisterInfo*>(DAG->TRI);
  MRI = &DAG->MRI;
  CurInstKind = IDOther;
  CurEmitted = 0;
  // Mark all four slots occupied so the first pickAlu() opens a fresh group.
  OccupedSlotsMask = 15;
  InstKindLimit[IDAlu] = TII->getMaxAlusPerClause();
  InstKindLimit[IDOther] = 32;

  const AMDGPUSubtarget &ST = DAG->TM.getSubtarget<AMDGPUSubtarget>();
  InstKindLimit[IDFetch] = ST.getTexVTXClauseSize();
}
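
// Appends the contents of QSrc to QDst and empties QSrc.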
void R600SchedStrategy::MoveUnits(std::vector<SUnit *> &QSrc,
                                  std::vector<SUnit *> &QDst)
{
  QDst.insert(QDst.end(), QSrc.begin(), QSrc.end());
  QSrc.clear();
}
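
// Picks the next node to schedule. The order of preference is: pending AR
// defs first, then staying in (or switching to) an ALU clause, then fetch
// instructions, then "other" instructions, with AR uses last.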
SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
  SUnit *SU = 0;
  NextInstKind = IDOther;

  IsTopNode = false;

  // check if we might want to switch current clause type
  bool AllowSwitchToAlu = (CurEmitted >= InstKindLimit[CurInstKind]) ||
      (Available[CurInstKind].empty());
  bool AllowSwitchFromAlu = (CurEmitted >= InstKindLimit[CurInstKind]) &&
      (!Available[IDFetch].empty() || !Available[IDOther].empty());

  // We want to schedule AR defs as soon as possible to make sure they aren't
  // put in a different ALU clause from their uses.
  if (!SU && !UnscheduledARDefs.empty()) {
      SU = UnscheduledARDefs[0];
      UnscheduledARDefs.erase(UnscheduledARDefs.begin());
      NextInstKind = IDAlu;
  }

  if (!SU && ((AllowSwitchToAlu && CurInstKind != IDAlu) ||
      (!AllowSwitchFromAlu && CurInstKind == IDAlu))) {
    // try to pick ALU
    SU = pickAlu();
    if (SU) {
      if (CurEmitted >= InstKindLimit[IDAlu])
        CurEmitted = 0;
      NextInstKind = IDAlu;
    }
  }

  if (!SU) {
    // try to pick FETCH
    SU = pickOther(IDFetch);
    if (SU)
      NextInstKind = IDFetch;
  }

  // try to pick other
  if (!SU) {
    SU = pickOther(IDOther);
    if (SU)
      NextInstKind = IDOther;
  }

  // We want to schedule the AR uses as late as possible to make sure that
  // the AR defs have been released.
  if (!SU && !UnscheduledARUses.empty()) {
      SU = UnscheduledARUses[0];
      UnscheduledARUses.erase(UnscheduledARUses.begin());
      NextInstKind = IDAlu;
  }

  DEBUG(
      if (SU) {
        dbgs() << " ** Pick node **\n";
        SU->dump(DAG);
      } else {
        dbgs() << "NO NODE \n";
        for (unsigned i = 0; i < DAG->SUnits.size(); i++) {
          const SUnit &S = DAG->SUnits[i];
          if (!S.isScheduled)
            S.dump(DAG);
        }
      }
  );

  return SU;
}
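
// Updates the clause-tracking state after SU has been scheduled. ALU
// literals consume an instruction slot of their own, so they are counted
// against the clause limit as well.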
void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {

  if (NextInstKind != CurInstKind) {
    DEBUG(dbgs() << "Instruction Type Switch\n");
    if (NextInstKind != IDAlu)
      OccupedSlotsMask = 15;
    CurEmitted = 0;
    CurInstKind = NextInstKind;
  }

  if (CurInstKind == IDAlu) {
    switch (getAluKind(SU)) {
    case AluT_XYZW:
      CurEmitted += 4;
      break;
    case AluDiscarded:
      break;
    default: {
      ++CurEmitted;
      for (MachineInstr::mop_iterator It = SU->getInstr()->operands_begin(),
          E = SU->getInstr()->operands_end(); It != E; ++It) {
        MachineOperand &MO = *It;
        if (MO.isReg() && MO.getReg() == AMDGPU::ALU_LITERAL_X)
          ++CurEmitted;
      }
    }
    }
  } else {
    ++CurEmitted;
  }

  DEBUG(dbgs() << CurEmitted << " Instructions Emitted in this clause\n");

  if (CurInstKind != IDFetch) {
    MoveUnits(Pending[IDFetch], Available[IDFetch]);
  }
}
void R600SchedStrategy::releaseTopNode(SUnit *SU) {
  DEBUG(dbgs() << "Top Releasing "; SU->dump(DAG););
}

void R600SchedStrategy::releaseBottomNode(SUnit *SU) {
  DEBUG(dbgs() << "Bottom Releasing "; SU->dump(DAG););

  int IK = getInstKind(SU);

  // Check for AR register defines
  for (MachineInstr::const_mop_iterator I = SU->getInstr()->operands_begin(),
                                        E = SU->getInstr()->operands_end();
                                        I != E; ++I) {
    if (I->isReg() && I->getReg() == AMDGPU::AR_X) {
      if (I->isDef()) {
        UnscheduledARDefs.push_back(SU);
      } else {
        UnscheduledARUses.push_back(SU);
      }
      return;
    }
  }

  // There is no export clause, we can schedule one as soon as it's ready
  if (IK == IDOther)
    Available[IDOther].push_back(SU);
  else
    Pending[IK].push_back(SU);
}

bool R600SchedStrategy::regBelongsToClass(unsigned Reg,
                                          const TargetRegisterClass *RC) const {
  if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
    return RC->contains(Reg);
  } else {
    return MRI->getRegClass(Reg) == RC;
  }
}
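
// Classifies an ALU instruction by the slot(s) it can occupy: a fixed
// X/Y/Z/W channel, a whole instruction group (AluT_XYZW), a predicate
// (AluPredX), a copy that will be discarded, or any free slot (AluAny).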
R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
  MachineInstr *MI = SU->getInstr();

  switch (MI->getOpcode()) {
  case AMDGPU::PRED_X:
    return AluPredX;
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
    return AluT_XYZW;
  case AMDGPU::COPY:
    if (MI->getOperand(1).isUndef()) {
      // MI will become a KILL, don't consider it in scheduling
      return AluDiscarded;
    }
  default:
    break;
  }

  // Does the instruction take a whole IG ?
  if (TII->isVector(*MI) ||
      TII->isCubeOp(MI->getOpcode()) ||
      TII->isReductionOp(MI->getOpcode()))
    return AluT_XYZW;

  // Is the result already assigned to a channel ?
  unsigned DestSubReg = MI->getOperand(0).getSubReg();
  switch (DestSubReg) {
  case AMDGPU::sub0:
    return AluT_X;
  case AMDGPU::sub1:
    return AluT_Y;
  case AMDGPU::sub2:
    return AluT_Z;
  case AMDGPU::sub3:
    return AluT_W;
  default:
    break;
  }

  // Is the result already a member of an X/Y/Z/W class ?
  unsigned DestReg = MI->getOperand(0).getReg();
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_XRegClass) ||
      regBelongsToClass(DestReg, &AMDGPU::R600_AddrRegClass))
    return AluT_X;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_YRegClass))
    return AluT_Y;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass))
    return AluT_Z;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_WRegClass))
    return AluT_W;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_Reg128RegClass))
    return AluT_XYZW;

  return AluAny;
}
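
// Maps an SUnit onto one of the coarse clause kinds: fetch, ALU or other.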
int R600SchedStrategy::getInstKind(SUnit* SU) {
  int Opcode = SU->getInstr()->getOpcode();

  if (TII->usesTextureCache(Opcode) || TII->usesVertexCache(Opcode))
    return IDFetch;

  if (TII->isALUInstr(Opcode)) {
    return IDAlu;
  }

  switch (Opcode) {
  case AMDGPU::PRED_X:
  case AMDGPU::COPY:
  case AMDGPU::CONST_COPY:
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
    return IDAlu;
  default:
    return IDOther;
  }
}
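
// Pops the most recently released SUnit from Q that can still be bundled
// with the instructions already picked for the current instruction group.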
SUnit *R600SchedStrategy::PopInst(std::vector<SUnit *> &Q) {
  if (Q.empty())
    return NULL;
  for (std::vector<SUnit *>::reverse_iterator It = Q.rbegin(), E = Q.rend();
      It != E; ++It) {
    SUnit *SU = *It;
    InstructionsGroupCandidate.push_back(SU->getInstr());
    if (TII->canBundle(InstructionsGroupCandidate)) {
      InstructionsGroupCandidate.pop_back();
      Q.erase((It + 1).base());
      return SU;
    } else {
      InstructionsGroupCandidate.pop_back();
    }
  }
  return NULL;
}
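
// Distributes pending ALU instructions into the per-AluKind available queues.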
void R600SchedStrategy::LoadAlu() {
  std::vector<SUnit *> &QSrc = Pending[IDAlu];
  for (unsigned i = 0, e = QSrc.size(); i < e; ++i) {
    AluKind AK = getAluKind(QSrc[i]);
    AvailableAlus[AK].push_back(QSrc[i]);
  }
  QSrc.clear();
}
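
// Opens a new instruction group: resets the slot mask and the bundle
// candidate list, then refills the per-kind ALU queues.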
void R600SchedStrategy::PrepareNextSlot() {
  DEBUG(dbgs() << "New Slot\n");
  assert(OccupedSlotsMask && "Slot wasn't filled");
  OccupedSlotsMask = 0;
  InstructionsGroupCandidate.clear();
  LoadAlu();
}
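
// Constrains the register class of MI's destination so that it gets
// allocated to the channel matching Slot (0 = X ... 3 = W).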
void R600SchedStrategy::AssignSlot(MachineInstr* MI, unsigned Slot) {
  unsigned DestReg = MI->getOperand(0).getReg();
  // PressureRegister crashes if an operand is def and used in the same inst
  // and we try to constrain its regclass
  for (MachineInstr::mop_iterator It = MI->operands_begin(),
      E = MI->operands_end(); It != E; ++It) {
    MachineOperand &MO = *It;
    if (MO.isReg() && !MO.isDef() &&
        MO.getReg() == DestReg)
      return;
  }
  // Constrain the regclass of DestReg to assign it to Slot
  switch (Slot) {
  case 0:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_XRegClass);
    break;
  case 1:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_YRegClass);
    break;
  case 2:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass);
    break;
  case 3:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_WRegClass);
    break;
  }
}
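
// Tries to fill Slot with an instruction already bound to that channel,
// falling back to an any-slot instruction, which then gets constrained to
// the channel via AssignSlot.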
SUnit *R600SchedStrategy::AttemptFillSlot(unsigned Slot) {
  static const AluKind IndexToID[] = {AluT_X, AluT_Y, AluT_Z, AluT_W};
  SUnit *SlotedSU = PopInst(AvailableAlus[IndexToID[Slot]]);
  if (SlotedSU)
    return SlotedSU;
  SUnit *UnslotedSU = PopInst(AvailableAlus[AluAny]);
  if (UnslotedSU)
    AssignSlot(UnslotedSU->getInstr(), Slot);
  return UnslotedSU;
}

bool R600SchedStrategy::isAvailablesAluEmpty() const {
  return Pending[IDAlu].empty() && AvailableAlus[AluAny].empty() &&
      AvailableAlus[AluT_XYZW].empty() && AvailableAlus[AluT_X].empty() &&
      AvailableAlus[AluT_Y].empty() && AvailableAlus[AluT_Z].empty() &&
      AvailableAlus[AluT_W].empty() && AvailableAlus[AluDiscarded].empty() &&
      AvailableAlus[AluPredX].empty();
}
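
// Picks the next ALU instruction, opening a new instruction group when all
// four slots are occupied. Whole-group instructions (PRED_X, discarded
// copies, T_XYZW) are emitted alone; otherwise slots are filled from W down
// to X, since scheduling is bottom-up.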
SUnit* R600SchedStrategy::pickAlu() {
  while (!isAvailablesAluEmpty()) {
    if (!OccupedSlotsMask) {
      // Bottom up scheduling: PRED_X must come first
      if (!AvailableAlus[AluPredX].empty()) {
        OccupedSlotsMask = 15;
        return PopInst(AvailableAlus[AluPredX]);
      }
      // Flush physical reg copies (RA will discard them)
      if (!AvailableAlus[AluDiscarded].empty()) {
        OccupedSlotsMask = 15;
        return PopInst(AvailableAlus[AluDiscarded]);
      }
      // If there is a T_XYZW alu available, use it
      if (!AvailableAlus[AluT_XYZW].empty()) {
        OccupedSlotsMask = 15;
        return PopInst(AvailableAlus[AluT_XYZW]);
      }
    }
    for (int Chan = 3; Chan > -1; --Chan) {
      bool isOccupied = OccupedSlotsMask & (1 << Chan);
      if (!isOccupied) {
        SUnit *SU = AttemptFillSlot(Chan);
        if (SU) {
          OccupedSlotsMask |= (1 << Chan);
          InstructionsGroupCandidate.push_back(SU->getInstr());
          return SU;
        }
      }
    }
    PrepareNextSlot();
  }
  return NULL;
}
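
// Picks the next node from the non-ALU queue QID, refilling the available
// queue from the pending one when it runs dry.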
SUnit* R600SchedStrategy::pickOther(int QID) {
  SUnit *SU = 0;
  std::vector<SUnit *> &AQ = Available[QID];

  if (AQ.empty()) {
    MoveUnits(Pending[QID], AQ);
  }
  if (!AQ.empty()) {
    SU = AQ.back();
    AQ.resize(AQ.size() - 1);
  }
  return SU;
}