#define DEBUG_TYPE "misched"
#include "R600MachineScheduler.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/PassManager.h"
#include "llvm/Support/raw_ostream.h"
-#include <set>
using namespace llvm;
TII = static_cast<const R600InstrInfo*>(DAG->TII);
TRI = static_cast<const R600RegisterInfo*>(DAG->TRI);
MRI = &DAG->MRI;
- Available[IDAlu]->clear();
- Available[IDFetch]->clear();
- Available[IDOther]->clear();
CurInstKind = IDOther;
CurEmitted = 0;
- OccupedSlotsMask = 15;
- memset(InstructionsGroupCandidate, 0, sizeof(InstructionsGroupCandidate));
- InstKindLimit[IDAlu] = 120; // 120 minus 8 for security
-
+ OccupedSlotsMask = 31;
+ InstKindLimit[IDAlu] = TII->getMaxAlusPerClause();
+ InstKindLimit[IDOther] = 32;
const AMDGPUSubtarget &ST = DAG->TM.getSubtarget<AMDGPUSubtarget>();
- if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD5XXX) {
- InstKindLimit[IDFetch] = 7; // 8 minus 1 for security
- } else {
- InstKindLimit[IDFetch] = 15; // 16 minus 1 for security
- }
+ InstKindLimit[IDFetch] = ST.getTexVTXClauseSize();
+ AluInstCount = 0;
+ FetchInstCount = 0;
}
-void R600SchedStrategy::MoveUnits(ReadyQueue *QSrc, ReadyQueue *QDst)
+void R600SchedStrategy::MoveUnits(std::vector<SUnit *> &QSrc,
+ std::vector<SUnit *> &QDst)
{
- if (QSrc->empty())
- return;
- for (ReadyQueue::iterator I = QSrc->begin(),
- E = QSrc->end(); I != E; ++I) {
- (*I)->NodeQueueId &= ~QSrc->getID();
- QDst->push(*I);
- }
- QSrc->clear();
+ QDst.insert(QDst.end(), QSrc.begin(), QSrc.end());
+ QSrc.clear();
+}
+
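+// Estimate how many wavefronts fit on a SIMD given the per-wavefront GPR
+// requirement. The 248 budget is taken here as the usable per-SIMD GPR
+// count (assumption: 256 physical GPRs minus a few reserved ones).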
+static
+unsigned getWFCountLimitedByGPR(unsigned GPRCount) {
+  assert(GPRCount && "GPRCount cannot be 0");
+ return 248 / GPRCount;
}
SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
SUnit *SU = 0;
- IsTopNode = true;
NextInstKind = IDOther;
+ IsTopNode = false;
+
// check if we might want to switch current clause type
- bool AllowSwitchToAlu = (CurInstKind == IDOther) ||
- (CurEmitted > InstKindLimit[CurInstKind]) ||
- (Available[CurInstKind]->empty());
- bool AllowSwitchFromAlu = (CurEmitted > InstKindLimit[CurInstKind]) &&
- (!Available[IDFetch]->empty() || !Available[IDOther]->empty());
-
- if ((AllowSwitchToAlu && CurInstKind != IDAlu) ||
- (!AllowSwitchFromAlu && CurInstKind == IDAlu)) {
+ bool AllowSwitchToAlu = (CurEmitted >= InstKindLimit[CurInstKind]) ||
+ (Available[CurInstKind].empty());
+ bool AllowSwitchFromAlu = (CurEmitted >= InstKindLimit[CurInstKind]) &&
+ (!Available[IDFetch].empty() || !Available[IDOther].empty());
+
+ if (CurInstKind == IDAlu && !Available[IDFetch].empty()) {
+    // We use the heuristic provided by the AMD Accelerated Parallel
+    // Processing OpenCL Programming Guide:
+    // The approximate number of WF needed for TEX instructions to hide
+    // ALU instructions is:
+    // 500 (cycles per TEX) / (AluFetchRatio * 8 (cycles per ALU)),
+    // i.e. 62.5 / AluFetchRatio.
+    float ALUFetchRatioEstimate =
+        (AluInstCount + AvailablesAluCount() + Pending[IDAlu].size()) /
+        (float)(FetchInstCount + Available[IDFetch].size());
+    unsigned NeededWF = 62.5f / ALUFetchRatioEstimate;
+    DEBUG(dbgs() << NeededWF << " approx. Wavefronts Required\n");
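+    // Worked example (illustrative numbers only): 120 ALU instructions
+    // against 4 fetches gives a ratio of ~30, so NeededWF = 62.5 / 30 ~ 2.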
+    // We assume the local GPR requirements to be "dominated" by the
+    // requirements of the TEX clause (which consumes 128-bit regs); ALU
+    // instructions before and after the TEX clause are indeed likely to
+    // consume or produce values used by it.
+    // Available[IDFetch].size() * 2: GPRs required by the Fetch clause.
+    // We assume each fetch instruction is either TnXYZW = TEX TnXYZW
+    // (needing one GPR) or TmXYZW = TEX TnXYZW (needing two GPRs).
+    // (TODO: use RegisterPressure.)
+    // If we would use too many GPRs, flush the Fetch instructions to lower
+    // register pressure on the 128-bit regs.
+ unsigned NearRegisterRequirement = 2 * Available[IDFetch].size();
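+    // e.g. 8 pending fetches give NearRegisterRequirement = 16 GPRs, so at
+    // most 248 / 16 = 15 wavefronts fit (illustrative numbers only).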
+ if (NeededWF > getWFCountLimitedByGPR(NearRegisterRequirement))
+ AllowSwitchFromAlu = true;
+ }
+
+
+  // We want to schedule AR defs as soon as possible to make sure they aren't
+ // put in a different ALU clause from their uses.
+ if (!SU && !UnscheduledARDefs.empty()) {
+ SU = UnscheduledARDefs[0];
+ UnscheduledARDefs.erase(UnscheduledARDefs.begin());
+ NextInstKind = IDAlu;
+ }
+
+ if (!SU && ((AllowSwitchToAlu && CurInstKind != IDAlu) ||
+ (!AllowSwitchFromAlu && CurInstKind == IDAlu))) {
// try to pick ALU
SU = pickAlu();
+ if (!SU && !PhysicalRegCopy.empty()) {
+ SU = PhysicalRegCopy.front();
+ PhysicalRegCopy.erase(PhysicalRegCopy.begin());
+ }
if (SU) {
- if (CurEmitted > InstKindLimit[IDAlu])
+ if (CurEmitted >= InstKindLimit[IDAlu])
CurEmitted = 0;
NextInstKind = IDAlu;
}
NextInstKind = IDOther;
}
+ // We want to schedule the AR uses as late as possible to make sure that
+ // the AR defs have been released.
+ if (!SU && !UnscheduledARUses.empty()) {
+ SU = UnscheduledARUses[0];
+ UnscheduledARUses.erase(UnscheduledARUses.begin());
+ NextInstKind = IDAlu;
+ }
+
+
DEBUG(
if (SU) {
- dbgs() << "picked node: ";
+ dbgs() << " ** Pick node **\n";
SU->dump(DAG);
} else {
- dbgs() << "NO NODE ";
- for (int i = 0; i < IDLast; ++i) {
- Available[i]->dump();
- Pending[i]->dump();
- }
+ dbgs() << "NO NODE \n";
for (unsigned i = 0; i < DAG->SUnits.size(); i++) {
const SUnit &S = DAG->SUnits[i];
      if (!S.isScheduled)
        S.dump(DAG);
}
void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
-
- DEBUG(dbgs() << "scheduled: ");
- DEBUG(SU->dump(DAG));
-
if (NextInstKind != CurInstKind) {
DEBUG(dbgs() << "Instruction Type Switch\n");
if (NextInstKind != IDAlu)
- OccupedSlotsMask = 15;
+ OccupedSlotsMask |= 31;
CurEmitted = 0;
CurInstKind = NextInstKind;
}
if (CurInstKind == IDAlu) {
+    AluInstCount++;
switch (getAluKind(SU)) {
case AluT_XYZW:
CurEmitted += 4;
if (CurInstKind != IDFetch) {
MoveUnits(Pending[IDFetch], Available[IDFetch]);
- }
- MoveUnits(Pending[IDOther], Available[IDOther]);
+ } else
+ FetchInstCount++;
}
-void R600SchedStrategy::releaseTopNode(SUnit *SU) {
- int IK = getInstKind(SU);
+static bool
+isPhysicalRegCopy(MachineInstr *MI) {
+ if (MI->getOpcode() != AMDGPU::COPY)
+ return false;
- DEBUG(dbgs() << IK << " <= ");
- DEBUG(SU->dump(DAG));
+ return !TargetRegisterInfo::isVirtualRegister(MI->getOperand(1).getReg());
+}
- Pending[IK]->push(SU);
+void R600SchedStrategy::releaseTopNode(SUnit *SU) {
+ DEBUG(dbgs() << "Top Releasing ";SU->dump(DAG););
}
void R600SchedStrategy::releaseBottomNode(SUnit *SU) {
+ DEBUG(dbgs() << "Bottom Releasing ";SU->dump(DAG););
+ if (isPhysicalRegCopy(SU->getInstr())) {
+ PhysicalRegCopy.push_back(SU);
+ return;
+ }
+
+ int IK = getInstKind(SU);
+
+ // Check for AR register defines
+ for (MachineInstr::const_mop_iterator I = SU->getInstr()->operands_begin(),
+ E = SU->getInstr()->operands_end();
+ I != E; ++I) {
+ if (I->isReg() && I->getReg() == AMDGPU::AR_X) {
+ if (I->isDef()) {
+ UnscheduledARDefs.push_back(SU);
+ } else {
+ UnscheduledARUses.push_back(SU);
+ }
+ return;
+ }
+ }
+
+  // There is no clause for "other" instructions (e.g. exports), so one
+  // can be scheduled as soon as it is ready.
+ if (IK == IDOther)
+ Available[IDOther].push_back(SU);
+ else
+ Pending[IK].push_back(SU);
+
}
bool R600SchedStrategy::regBelongsToClass(unsigned Reg,
R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
MachineInstr *MI = SU->getInstr();
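+  // Trans-only instructions can only be issued from the trans slot;
+  // pickAlu() handles them separately.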
+ if (TII->isTransOnly(MI))
+ return AluTrans;
+
switch (MI->getOpcode()) {
+ case AMDGPU::PRED_X:
+ return AluPredX;
case AMDGPU::INTERP_PAIR_XY:
case AMDGPU::INTERP_PAIR_ZW:
case AMDGPU::INTERP_VEC_LOAD:
+ case AMDGPU::DOT_4:
return AluT_XYZW;
case AMDGPU::COPY:
- if (TargetRegisterInfo::isPhysicalRegister(MI->getOperand(1).getReg())) {
- // %vregX = COPY Tn_X is likely to be discarded in favor of an
- // assignement of Tn_X to %vregX, don't considers it in scheduling
- return AluDiscarded;
- }
- else if (MI->getOperand(1).isUndef()) {
+ if (MI->getOperand(1).isUndef()) {
      // MI will become a KILL, don't consider it in scheduling
return AluDiscarded;
}
}
  // Does the instruction take a whole IG?
+ // XXX: Is it possible to add a helper function in R600InstrInfo that can
+ // be used here and in R600PacketizerList::isSoloInstruction() ?
if(TII->isVector(*MI) ||
TII->isCubeOp(MI->getOpcode()) ||
- TII->isReductionOp(MI->getOpcode()))
+ TII->isReductionOp(MI->getOpcode()) ||
+ MI->getOpcode() == AMDGPU::GROUP_BARRIER) {
return AluT_XYZW;
+ }
+
+ if (TII->isLDSInstr(MI->getOpcode())) {
+ return AluT_X;
+ }
  // Is the result already assigned to a channel?
unsigned DestSubReg = MI->getOperand(0).getSubReg();
int R600SchedStrategy::getInstKind(SUnit* SU) {
int Opcode = SU->getInstr()->getOpcode();
+ if (TII->usesTextureCache(Opcode) || TII->usesVertexCache(Opcode))
+ return IDFetch;
+
if (TII->isALUInstr(Opcode)) {
return IDAlu;
}
switch (Opcode) {
+ case AMDGPU::PRED_X:
case AMDGPU::COPY:
case AMDGPU::CONST_COPY:
case AMDGPU::INTERP_PAIR_XY:
case AMDGPU::INTERP_PAIR_ZW:
case AMDGPU::INTERP_VEC_LOAD:
- case AMDGPU::DOT4_eg_pseudo:
- case AMDGPU::DOT4_r600_pseudo:
+ case AMDGPU::DOT_4:
return IDAlu;
- case AMDGPU::TEX_VTX_CONSTBUF:
- case AMDGPU::TEX_VTX_TEXBUF:
- case AMDGPU::TEX_LD:
- case AMDGPU::TEX_GET_TEXTURE_RESINFO:
- case AMDGPU::TEX_GET_GRADIENTS_H:
- case AMDGPU::TEX_GET_GRADIENTS_V:
- case AMDGPU::TEX_SET_GRADIENTS_H:
- case AMDGPU::TEX_SET_GRADIENTS_V:
- case AMDGPU::TEX_SAMPLE:
- case AMDGPU::TEX_SAMPLE_C:
- case AMDGPU::TEX_SAMPLE_L:
- case AMDGPU::TEX_SAMPLE_C_L:
- case AMDGPU::TEX_SAMPLE_LB:
- case AMDGPU::TEX_SAMPLE_C_LB:
- case AMDGPU::TEX_SAMPLE_G:
- case AMDGPU::TEX_SAMPLE_C_G:
- case AMDGPU::TXD:
- case AMDGPU::TXD_SHADOW:
- return IDFetch;
default:
- DEBUG(
- dbgs() << "other inst: ";
- SU->dump(DAG);
- );
return IDOther;
}
}
-class ConstPairs {
-private:
- unsigned XYPair;
- unsigned ZWPair;
-public:
- ConstPairs(unsigned ReadConst[3]) : XYPair(0), ZWPair(0) {
- for (unsigned i = 0; i < 3; i++) {
- unsigned ReadConstChan = ReadConst[i] & 3;
- unsigned ReadConstIndex = ReadConst[i] & (~3);
- if (ReadConstChan < 2) {
- if (!XYPair) {
- XYPair = ReadConstIndex;
- }
- } else {
- if (!ZWPair) {
- ZWPair = ReadConstIndex;
- }
- }
- }
- }
-
- bool isCompatibleWith(const ConstPairs& CP) const {
- return (!XYPair || !CP.XYPair || CP.XYPair == XYPair) &&
- (!ZWPair || !CP.ZWPair || CP.ZWPair == ZWPair);
- }
-};
-
-static
-const ConstPairs getPairs(const R600InstrInfo *TII, const MachineInstr& MI) {
- unsigned ReadConsts[3] = {0, 0, 0};
- R600Operands::Ops OpTable[3][2] = {
- {R600Operands::SRC0, R600Operands::SRC0_SEL},
- {R600Operands::SRC1, R600Operands::SRC1_SEL},
- {R600Operands::SRC2, R600Operands::SRC2_SEL},
- };
-
- if (!TII->isALUInstr(MI.getOpcode()))
- return ConstPairs(ReadConsts);
-
- for (unsigned i = 0; i < 3; i++) {
- int SrcIdx = TII->getOperandIdx(MI.getOpcode(), OpTable[i][0]);
- if (SrcIdx < 0)
- break;
- if (MI.getOperand(SrcIdx).getReg() == AMDGPU::ALU_CONST)
- ReadConsts[i] =MI.getOperand(
- TII->getOperandIdx(MI.getOpcode(), OpTable[i][1])).getImm();
- }
- return ConstPairs(ReadConsts);
-}
-
-bool
-R600SchedStrategy::isBundleable(const MachineInstr& MI) {
- const ConstPairs &MIPair = getPairs(TII, MI);
- for (unsigned i = 0; i < 4; i++) {
- if (!InstructionsGroupCandidate[i])
- continue;
- const ConstPairs &IGPair = getPairs(TII,
- *InstructionsGroupCandidate[i]->getInstr());
- if (!IGPair.isCompatibleWith(MIPair))
- return false;
- }
- return true;
-}
-
-SUnit *R600SchedStrategy::PopInst(std::multiset<SUnit *, CompareSUnit> &Q) {
+SUnit *R600SchedStrategy::PopInst(std::vector<SUnit *> &Q) {
if (Q.empty())
return NULL;
- for (std::set<SUnit *, CompareSUnit>::iterator It = Q.begin(), E = Q.end();
+ for (std::vector<SUnit *>::reverse_iterator It = Q.rbegin(), E = Q.rend();
It != E; ++It) {
SUnit *SU = *It;
- if (isBundleable(*SU->getInstr())) {
- Q.erase(It);
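+    // Tentatively add the instruction to the group being formed and keep
+    // it only if the group still fits the constant-read limitations.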
+ InstructionsGroupCandidate.push_back(SU->getInstr());
+ if (TII->fitsConstReadLimitations(InstructionsGroupCandidate)) {
+ InstructionsGroupCandidate.pop_back();
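+      // (It + 1).base() yields the forward iterator pointing at *It, as
+      // required by vector::erase().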
+ Q.erase((It + 1).base());
return SU;
+ } else {
+ InstructionsGroupCandidate.pop_back();
}
}
return NULL;
}
void R600SchedStrategy::LoadAlu() {
- ReadyQueue *QSrc = Pending[IDAlu];
- for (ReadyQueue::iterator I = QSrc->begin(),
- E = QSrc->end(); I != E; ++I) {
- (*I)->NodeQueueId &= ~QSrc->getID();
- AluKind AK = getAluKind(*I);
- AvailableAlus[AK].insert(*I);
- }
- QSrc->clear();
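+  // Sort each pending ALU instruction into the bucket matching its slot
+  // kind; pickAlu() then fills the X/Y/Z/W and trans slots from them.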
+ std::vector<SUnit *> &QSrc = Pending[IDAlu];
+ for (unsigned i = 0, e = QSrc.size(); i < e; ++i) {
+ AluKind AK = getAluKind(QSrc[i]);
+ AvailableAlus[AK].push_back(QSrc[i]);
+ }
+ QSrc.clear();
}
void R600SchedStrategy::PrepareNextSlot() {
DEBUG(dbgs() << "New Slot\n");
assert (OccupedSlotsMask && "Slot wasn't filled");
OccupedSlotsMask = 0;
- memset(InstructionsGroupCandidate, 0, sizeof(InstructionsGroupCandidate));
+ InstructionsGroupCandidate.clear();
LoadAlu();
}
void R600SchedStrategy::AssignSlot(MachineInstr* MI, unsigned Slot) {
- unsigned DestReg = MI->getOperand(0).getReg();
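+  // Instructions without a dst operand cannot be constrained to a slot.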
+ int DstIndex = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
+ if (DstIndex == -1) {
+ return;
+ }
+ unsigned DestReg = MI->getOperand(DstIndex).getReg();
  // PressureRegister crashes if an operand is both defined and used in the
  // same instruction and we try to constrain its regclass.
for (MachineInstr::mop_iterator It = MI->operands_begin(),
E = MI->operands_end(); It != E; ++It) {
MachineOperand &MO = *It;
if (MO.isReg() && !MO.isDef() &&
- MO.getReg() == MI->getOperand(0).getReg())
+ MO.getReg() == DestReg)
return;
}
// Constrains the regclass of DestReg to assign it to Slot
SUnit *R600SchedStrategy::AttemptFillSlot(unsigned Slot) {
static const AluKind IndexToID[] = {AluT_X, AluT_Y, AluT_Z, AluT_W};
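+  // Prefer an instruction already restricted to this slot; otherwise take
+  // a slot-agnostic one and constrain its register class to the slot.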
SUnit *SlotedSU = PopInst(AvailableAlus[IndexToID[Slot]]);
- SUnit *UnslotedSU = PopInst(AvailableAlus[AluAny]);
- if (!UnslotedSU) {
+ if (SlotedSU)
return SlotedSU;
- } else if (!SlotedSU) {
+ SUnit *UnslotedSU = PopInst(AvailableAlus[AluAny]);
+ if (UnslotedSU)
AssignSlot(UnslotedSU->getInstr(), Slot);
- return UnslotedSU;
- } else {
- //Determine which one to pick (the lesser one)
- if (CompareSUnit()(SlotedSU, UnslotedSU)) {
- AvailableAlus[AluAny].insert(UnslotedSU);
- return SlotedSU;
- } else {
- AvailableAlus[IndexToID[Slot]].insert(SlotedSU);
- AssignSlot(UnslotedSU->getInstr(), Slot);
- return UnslotedSU;
- }
- }
+ return UnslotedSU;
}
-bool R600SchedStrategy::isAvailablesAluEmpty() const {
- return Pending[IDAlu]->empty() && AvailableAlus[AluAny].empty() &&
- AvailableAlus[AluT_XYZW].empty() && AvailableAlus[AluT_X].empty() &&
- AvailableAlus[AluT_Y].empty() && AvailableAlus[AluT_Z].empty() &&
- AvailableAlus[AluT_W].empty() && AvailableAlus[AluDiscarded].empty();
+unsigned R600SchedStrategy::AvailablesAluCount() const {
+ return AvailableAlus[AluAny].size() + AvailableAlus[AluT_XYZW].size() +
+ AvailableAlus[AluT_X].size() + AvailableAlus[AluT_Y].size() +
+ AvailableAlus[AluT_Z].size() + AvailableAlus[AluT_W].size() +
+ AvailableAlus[AluTrans].size() + AvailableAlus[AluDiscarded].size() +
+ AvailableAlus[AluPredX].size();
}
SUnit* R600SchedStrategy::pickAlu() {
- while (!isAvailablesAluEmpty()) {
+ while (AvailablesAluCount() || !Pending[IDAlu].empty()) {
if (!OccupedSlotsMask) {
+      // Bottom-up scheduling: PRED_X must come first.
+ if (!AvailableAlus[AluPredX].empty()) {
+ OccupedSlotsMask |= 31;
+ return PopInst(AvailableAlus[AluPredX]);
+ }
// Flush physical reg copies (RA will discard them)
if (!AvailableAlus[AluDiscarded].empty()) {
- OccupedSlotsMask = 15;
+ OccupedSlotsMask |= 31;
return PopInst(AvailableAlus[AluDiscarded]);
}
// If there is a T_XYZW alu available, use it
if (!AvailableAlus[AluT_XYZW].empty()) {
- OccupedSlotsMask = 15;
+ OccupedSlotsMask |= 15;
return PopInst(AvailableAlus[AluT_XYZW]);
}
}
- for (unsigned Chan = 0; Chan < 4; ++Chan) {
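+  // Bits 0-3 of OccupedSlotsMask track the X/Y/Z/W vector slots; bit 4
+  // (16) tracks the trans slot, so 31 means all five slots are occupied.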
+ bool TransSlotOccuped = OccupedSlotsMask & 16;
+ if (!TransSlotOccuped) {
+ if (!AvailableAlus[AluTrans].empty()) {
+ OccupedSlotsMask |= 16;
+ return PopInst(AvailableAlus[AluTrans]);
+ }
+ }
+ for (int Chan = 3; Chan > -1; --Chan) {
bool isOccupied = OccupedSlotsMask & (1 << Chan);
if (!isOccupied) {
SUnit *SU = AttemptFillSlot(Chan);
if (SU) {
OccupedSlotsMask |= (1 << Chan);
- InstructionsGroupCandidate[Chan] = SU;
+ InstructionsGroupCandidate.push_back(SU->getInstr());
return SU;
}
}
SUnit* R600SchedStrategy::pickOther(int QID) {
SUnit *SU = 0;
- ReadyQueue *AQ = Available[QID];
+ std::vector<SUnit *> &AQ = Available[QID];
- if (AQ->empty()) {
+ if (AQ.empty()) {
MoveUnits(Pending[QID], AQ);
}
- if (!AQ->empty()) {
- SU = *AQ->begin();
- AQ->remove(AQ->begin());
+ if (!AQ.empty()) {
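+    // Pop the most recently released node from the back of the vector.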
+ SU = AQ.back();
+ AQ.resize(AQ.size() - 1);
}
return SU;
}