unsigned NumSuccs; // # of SDep::Data succs.
unsigned NumPredsLeft; // # of preds not scheduled.
unsigned NumSuccsLeft; // # of succs not scheduled.
+ bool isCall : 1; // Is a function call.
bool isTwoAddress : 1; // Is a two-address instruction.
bool isCommutable : 1; // Is a commutable instruction.
bool hasPhysRegDefs : 1; // Has physreg defs that are being used.
SUnit(SDNode *node, unsigned nodenum)
: Node(node), Instr(0), OrigNode(0), NodeNum(nodenum),
NodeQueueId(0), Latency(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
- NumSuccsLeft(0), isTwoAddress(false), isCommutable(false),
+ NumSuccsLeft(0),
+ isCall(false), isTwoAddress(false), isCommutable(false),
hasPhysRegDefs(false), hasPhysRegClobbers(false),
isPending(false), isAvailable(false), isScheduled(false),
isScheduleHigh(false), isCloned(false),
SUnit(MachineInstr *instr, unsigned nodenum)
: Node(0), Instr(instr), OrigNode(0), NodeNum(nodenum),
NodeQueueId(0), Latency(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
- NumSuccsLeft(0), isTwoAddress(false), isCommutable(false),
+ NumSuccsLeft(0),
+ isCall(false), isTwoAddress(false), isCommutable(false),
hasPhysRegDefs(false), hasPhysRegClobbers(false),
isPending(false), isAvailable(false), isScheduled(false),
isScheduleHigh(false), isCloned(false),
SUnit()
: Node(0), Instr(0), OrigNode(0), NodeNum(~0u),
NodeQueueId(0), Latency(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
- NumSuccsLeft(0), isTwoAddress(false), isCommutable(false),
+ NumSuccsLeft(0),
+ isCall(false), isTwoAddress(false), isCommutable(false),
hasPhysRegDefs(false), hasPhysRegClobbers(false),
isPending(false), isAvailable(false), isScheduled(false),
isScheduleHigh(false), isCloned(false),
return true;
}
- /// isProfitableToIfCvt - Return true if it's profitable to first "NumInstrs"
+ /// isProfitableToIfCvt - Return true if it's profitable to predicate the
+ /// instructions, with accumulated instruction latency "NumCycles",
/// of the specified basic block, where the probability of the instructions
/// being executed is given by Probability, and Confidence is a measure
/// of our confidence that it will be properly predicted.
virtual
- bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumInstrs,
+ bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
+ unsigned ExtraPredCycles,
float Probability, float Confidence) const {
return false;
}
/// by Probability, and Confidence is a measure of our confidence that it
/// will be properly predicted.
virtual bool
- isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTInstrs,
- MachineBasicBlock &FMBB, unsigned NumFInstrs,
+ isProfitableToIfCvt(MachineBasicBlock &TMBB,
+ unsigned NumTCycles, unsigned ExtraTCycles,
+ MachineBasicBlock &FMBB,
+ unsigned NumFCycles, unsigned ExtraFCycles,
float Probability, float Confidence) const {
return false;
}
/// isProfitableToDupForIfCvt - Return true if it's profitable for
- /// if-converter to duplicate a specific number of instructions in the
- /// specified MBB to enable if-conversion, where the probability of the
- /// instructions being executed is given by Probability, and Confidence is
- /// a measure of our confidence that it will be properly predicted.
+ /// if-converter to duplicate instructions whose accumulated latency is
+ /// "NumCycles" in the specified MBB to enable if-conversion.
+ /// The probability of the instructions being executed is given by
+ /// Probability, and Confidence is a measure of our confidence that it
+ /// will be properly predicted.
virtual bool
- isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumInstrs,
+ isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
float Probability, float Confidence) const {
return false;
}
/// getNumMicroOps - Return the number of micro-operations the given machine
/// instruction will be decoded to on the target CPU.
- virtual unsigned getNumMicroOps(const MachineInstr *MI,
- const InstrItineraryData *ItinData) const;
+ virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
+ const MachineInstr *MI) const;
/// getOperandLatency - Compute and return the use operand latency of a given
- /// itinerary class and operand index if the value is produced by an
- /// instruction of the specified itinerary class and def operand index.
+ /// pair of def and use operands.
/// In most cases, the static scheduling itinerary is enough to determine the
/// operand latency. But it may not be possible for instructions with a
/// variable number of defs / uses.
- virtual
- int getOperandLatency(const InstrItineraryData *ItinData,
- const MachineInstr *DefMI, unsigned DefIdx,
- const MachineInstr *UseMI, unsigned UseIdx) const;
-
- virtual
- int getOperandLatency(const InstrItineraryData *ItinData,
- SDNode *DefNode, unsigned DefIdx,
- SDNode *UseNode, unsigned UseIdx) const;
+ virtual int getOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI, unsigned UseIdx) const;
+
+ virtual int getOperandLatency(const InstrItineraryData *ItinData,
+ SDNode *DefNode, unsigned DefIdx,
+ SDNode *UseNode, unsigned UseIdx) const;
+
+ /// getInstrLatency - Compute the latency of a given instruction.
+ /// If the instruction is more expensive when predicated, the extra cost is
+ /// returned via PredCost.
+ virtual int getInstrLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *MI,
+ unsigned *PredCost = 0) const;
+
+ virtual int getInstrLatency(const InstrItineraryData *ItinData,
+ SDNode *Node) const;
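As a usage reference, a minimal sketch of how a client is expected to consume the new PredCost out-parameter; it mirrors the per-instruction accounting the if-converter below performs, and "BBI" is a hypothetical stand-in for the if-converter's BBInfo record:

    unsigned ExtraPredCost = 0;
    unsigned NumCycles = TII->getInstrLatency(InstrItins, MI, &ExtraPredCost);
    if (NumCycles > 1)
      BBI.ExtraCost += NumCycles - 1; // multi-cycle overhead vs. one cycle
    BBI.ExtraCost2 += ExtraPredCost;  // additional cost when predicated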
/// hasHighOperandLatency - Compute operand latency between a def of 'Reg'
/// and a use in the current loop; return true if the target considered
/// ClobbersPred - True if BB could modify predicates (e.g. has
/// cmp, call, etc.)
/// NonPredSize - Number of non-predicated instructions.
- /// ExtraCost - Extra cost for microcoded instructions.
+ /// ExtraCost - Extra cost for multi-cycle instructions.
+ /// ExtraCost2 - Extra cost of instructions that are slower when predicated.
/// BB - Corresponding MachineBasicBlock.
/// TrueBB / FalseBB- See AnalyzeBranch().
/// BrCond - Conditions for end of block conditional branches.
bool ClobbersPred : 1;
unsigned NonPredSize;
unsigned ExtraCost;
+ unsigned ExtraCost2;
MachineBasicBlock *BB;
MachineBasicBlock *TrueBB;
MachineBasicBlock *FalseBB;
IsAnalyzed(false), IsEnqueued(false), IsBrAnalyzable(false),
HasFallThrough(false), IsUnpredicable(false),
CannotBeCopied(false), ClobbersPred(false), NonPredSize(0),
- ExtraCost(0), BB(0), TrueBB(0), FalseBB(0) {}
+ ExtraCost(0), ExtraCost2(0), BB(0), TrueBB(0), FalseBB(0) {}
};
/// IfcvtToken - Record information about pending if-conversions to attempt:
bool IgnoreBr = false);
void MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI, bool AddEdges = true);
- bool MeetIfcvtSizeLimit(MachineBasicBlock &BB, unsigned Size,
+ bool MeetIfcvtSizeLimit(MachineBasicBlock &BB,
+ unsigned Cycle, unsigned Extra,
float Prediction, float Confidence) const {
- return Size > 0 && TII->isProfitableToIfCvt(BB, Size,
- Prediction, Confidence);
+ return Cycle > 0 && TII->isProfitableToIfCvt(BB, Cycle, Extra,
+ Prediction, Confidence);
}
- bool MeetIfcvtSizeLimit(MachineBasicBlock &TBB, unsigned TSize,
- MachineBasicBlock &FBB, unsigned FSize,
+ bool MeetIfcvtSizeLimit(MachineBasicBlock &TBB,
+ unsigned TCycle, unsigned TExtra,
+ MachineBasicBlock &FBB,
+ unsigned FCycle, unsigned FExtra,
float Prediction, float Confidence) const {
- return TSize > 0 && FSize > 0 &&
- TII->isProfitableToIfCvt(TBB, TSize, FBB, FSize,
+ return TCycle > 0 && FCycle > 0 &&
+ TII->isProfitableToIfCvt(TBB, TCycle, TExtra, FBB, FCycle, FExtra,
Prediction, Confidence);
}
// Then scan all the instructions.
BBI.NonPredSize = 0;
BBI.ExtraCost = 0;
+ BBI.ExtraCost2 = 0;
BBI.ClobbersPred = false;
for (MachineBasicBlock::iterator I = BBI.BB->begin(), E = BBI.BB->end();
I != E; ++I) {
if (!isCondBr) {
if (!isPredicated) {
BBI.NonPredSize++;
- unsigned NumOps = TII->getNumMicroOps(&*I, InstrItins);
- if (NumOps > 1)
- BBI.ExtraCost += NumOps-1;
+ unsigned ExtraPredCost = 0;
+ unsigned NumCycles = TII->getInstrLatency(InstrItins, &*I,
+ &ExtraPredCost);
+ if (NumCycles > 1)
+ BBI.ExtraCost += NumCycles-1;
+ BBI.ExtraCost2 += ExtraPredCost;
} else if (!AlreadyPredicated) {
// FIXME: This instruction is already predicated before the
// if-conversion pass. It's probably something like a conditional move.
if (CanRevCond && ValidDiamond(TrueBBI, FalseBBI, Dups, Dups2) &&
MeetIfcvtSizeLimit(*TrueBBI.BB, (TrueBBI.NonPredSize - (Dups + Dups2) +
- TrueBBI.ExtraCost),
+ TrueBBI.ExtraCost), TrueBBI.ExtraCost2,
*FalseBBI.BB, (FalseBBI.NonPredSize - (Dups + Dups2) +
- FalseBBI.ExtraCost),
+ FalseBBI.ExtraCost), FalseBBI.ExtraCost2,
Prediction, Confidence) &&
FeasibilityAnalysis(TrueBBI, BBI.BrCond) &&
FeasibilityAnalysis(FalseBBI, RevCond)) {
if (ValidTriangle(TrueBBI, FalseBBI, false, Dups, Prediction, Confidence) &&
MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize + TrueBBI.ExtraCost,
- Prediction, Confidence) &&
+ TrueBBI.ExtraCost2, Prediction, Confidence) &&
FeasibilityAnalysis(TrueBBI, BBI.BrCond, true)) {
// Triangle:
// EBB
if (ValidTriangle(TrueBBI, FalseBBI, true, Dups, Prediction, Confidence) &&
MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize + TrueBBI.ExtraCost,
- Prediction, Confidence) &&
+ TrueBBI.ExtraCost2, Prediction, Confidence) &&
FeasibilityAnalysis(TrueBBI, BBI.BrCond, true, true)) {
Tokens.push_back(new IfcvtToken(BBI, ICTriangleRev, TNeedSub, Dups));
Enqueued = true;
if (ValidSimple(TrueBBI, Dups, Prediction, Confidence) &&
MeetIfcvtSizeLimit(*TrueBBI.BB, TrueBBI.NonPredSize + TrueBBI.ExtraCost,
- Prediction, Confidence) &&
+ TrueBBI.ExtraCost2, Prediction, Confidence) &&
FeasibilityAnalysis(TrueBBI, BBI.BrCond)) {
// Simple (split, no rejoin):
// EBB
1.0-Prediction, Confidence) &&
MeetIfcvtSizeLimit(*FalseBBI.BB,
FalseBBI.NonPredSize + FalseBBI.ExtraCost,
- 1.0-Prediction, Confidence) &&
+ FalseBBI.ExtraCost2, 1.0-Prediction, Confidence) &&
FeasibilityAnalysis(FalseBBI, RevCond, true)) {
Tokens.push_back(new IfcvtToken(BBI, ICTriangleFalse, FNeedSub, Dups));
Enqueued = true;
1.0-Prediction, Confidence) &&
MeetIfcvtSizeLimit(*FalseBBI.BB,
FalseBBI.NonPredSize + FalseBBI.ExtraCost,
- 1.0-Prediction, Confidence) &&
+ FalseBBI.ExtraCost2, 1.0-Prediction, Confidence) &&
FeasibilityAnalysis(FalseBBI, RevCond, true, true)) {
Tokens.push_back(new IfcvtToken(BBI, ICTriangleFRev, FNeedSub, Dups));
Enqueued = true;
if (ValidSimple(FalseBBI, Dups, 1.0-Prediction, Confidence) &&
MeetIfcvtSizeLimit(*FalseBBI.BB,
FalseBBI.NonPredSize + FalseBBI.ExtraCost,
- 1.0-Prediction, Confidence) &&
+ FalseBBI.ExtraCost2, 1.0-Prediction, Confidence) &&
FeasibilityAnalysis(FalseBBI, RevCond)) {
Tokens.push_back(new IfcvtToken(BBI, ICSimpleFalse, FNeedSub, Dups));
Enqueued = true;
MachineInstr *MI = MF.CloneMachineInstr(I);
ToBBI.BB->insert(ToBBI.BB->end(), MI);
ToBBI.NonPredSize++;
- unsigned NumOps = TII->getNumMicroOps(MI, InstrItins);
- if (NumOps > 1)
- ToBBI.ExtraCost += NumOps-1;
+ unsigned ExtraPredCost = 0;
+ unsigned NumCycles = TII->getInstrLatency(InstrItins, &*I, &ExtraPredCost);
+ if (NumCycles > 1)
+ ToBBI.ExtraCost += NumCycles-1;
+ ToBBI.ExtraCost2 += ExtraPredCost;
if (!TII->isPredicated(I) && !MI->isDebugValue()) {
if (!TII->PredicateInstruction(MI, Cond)) {
ToBBI.NonPredSize += FromBBI.NonPredSize;
ToBBI.ExtraCost += FromBBI.ExtraCost;
+ ToBBI.ExtraCost2 += FromBBI.ExtraCost2;
FromBBI.NonPredSize = 0;
FromBBI.ExtraCost = 0;
+ FromBBI.ExtraCost2 = 0;
ToBBI.ClobbersPred |= FromBBI.ClobbersPred;
ToBBI.HasFallThrough = FromBBI.HasFallThrough;
"Cannot schedule terminators or labels!");
// Create the SUnit for this MI.
SUnit *SU = NewSUnit(MI);
+ SU->isCall = TID.isCall();
+ SU->isCommutable = TID.isCommutable();
// Assign the Latency field of SU using target-provided information.
if (UnitLatencies)
// extra time.
if (SU->getInstr()->getDesc().mayLoad())
SU->Latency += 2;
- } else
- SU->Latency =
- InstrItins->getStageLatency(SU->getInstr()->getDesc().getSchedClass());
+ } else {
+ SU->Latency = TII->getInstrLatency(InstrItins, SU->getInstr());
+ }
}
void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
}
bool hybrid_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const{
+ if (left->isCall || right->isCall)
+ // No way to compute latency of calls.
+ return BURRSort(left, right, SPQ);
+
bool LHigh = SPQ->HighRegPressure(left);
bool RHigh = SPQ->HighRegPressure(right);
// Avoid causing spills. If register pressure is high, schedule for
bool ilp_ls_rr_sort::operator()(const SUnit *left,
const SUnit *right) const {
+ if (left->isCall || right->isCall)
+ // No way to compute latency of calls.
+ return BURRSort(left, right, SPQ);
+
bool LHigh = SPQ->HighRegPressure(left);
bool RHigh = SPQ->HighRegPressure(right);
// Avoid causing spills. If register pressure is high, schedule for
SUnit *SU = NewSUnit(Old->getNode());
SU->OrigNode = Old->OrigNode;
SU->Latency = Old->Latency;
+ SU->isCall = Old->isCall;
SU->isTwoAddress = Old->isTwoAddress;
SU->isCommutable = Old->isCommutable;
SU->hasPhysRegDefs = Old->hasPhysRegDefs;
N = N->getOperand(N->getNumOperands()-1).getNode();
assert(N->getNodeId() == -1 && "Node already inserted!");
N->setNodeId(NodeSUnit->NodeNum);
+ if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
+ NodeSUnit->isCall = true;
}
// Scan down to find any flagged succs.
assert(N->getNodeId() == -1 && "Node already inserted!");
N->setNodeId(NodeSUnit->NodeNum);
N = *UI;
+ if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
+ NodeSUnit->isCall = true;
break;
}
if (!HasFlagUse) break;
// all nodes flagged together into this SUnit.
SU->Latency = 0;
for (SDNode *N = SU->getNode(); N; N = N->getFlaggedNode())
- if (N->isMachineOpcode()) {
- SU->Latency += InstrItins->
- getStageLatency(TII->get(N->getMachineOpcode()).getSchedClass());
- }
+ if (N->isMachineOpcode())
+ SU->Latency += TII->getInstrLatency(InstrItins, N);
}
void ScheduleDAGSDNodes::ComputeOperandLatency(SDNode *Def, SDNode *Use,
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
cl::desc("Enable ARM 2-addr to 3-addr conv"));
-static cl::opt<bool>
-OldARMIfCvt("old-arm-ifcvt", cl::Hidden,
- cl::desc("Use old-style ARM if-conversion heuristics"));
-
ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
: TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)),
Subtarget(STI) {
}
bool ARMBaseInstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
- unsigned NumInstrs,
+ unsigned NumCycles,
+ unsigned ExtraPredCycles,
float Probability,
float Confidence) const {
- if (!NumInstrs)
+ if (!NumCycles)
return false;
- // Use old-style heuristics
- if (OldARMIfCvt) {
- if (Subtarget.getCPUString() == "generic")
- // Generic (and overly aggressive) if-conversion limits for testing.
- return NumInstrs <= 10;
- if (Subtarget.hasV7Ops())
- return NumInstrs <= 3;
- return NumInstrs <= 2;
- }
-
// Attempt to estimate the relative costs of predication versus branching.
- float UnpredCost = Probability * NumInstrs;
+ float UnpredCost = Probability * NumCycles;
UnpredCost += 1.0; // The branch itself
UnpredCost += (1.0 - Confidence) * Subtarget.getMispredictionPenalty();
- float PredCost = NumInstrs;
-
- return PredCost < UnpredCost;
-
+ return (float)(NumCycles + ExtraPredCycles) < UnpredCost;
}
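As a rough numerical illustration of this heuristic, assuming a hypothetical misprediction penalty of 10 cycles (the real value comes from Subtarget.getMispredictionPenalty()):

    // 3-cycle block, no extra predication cost, 50/50 branch, 90% confidence:
    //   UnpredCost = 0.5 * 3 + 1.0 + (1.0 - 0.9) * 10 = 3.5
    //   PredCost   = 3 + 0 = 3.0
    // 3.0 < 3.5, so predicating the block is deemed profitable.

The comparison is now made in cycles rather than instruction counts, so multi-cycle and predication-hostile instructions can tip the balance against if-conversion.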
bool ARMBaseInstrInfo::
-isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumT,
- MachineBasicBlock &FMBB, unsigned NumF,
+isProfitableToIfCvt(MachineBasicBlock &TMBB,
+ unsigned TCycles, unsigned TExtra,
+ MachineBasicBlock &FMBB,
+ unsigned FCycles, unsigned FExtra,
float Probability, float Confidence) const {
- // Use old-style if-conversion heuristics
- if (OldARMIfCvt) {
- return NumT && NumF && NumT <= 2 && NumF <= 2;
- }
-
- if (!NumT || !NumF)
+ if (!TCycles || !FCycles)
return false;
// Attempt to estimate the relative costs of predication versus branching.
- float UnpredCost = Probability * NumT + (1.0 - Probability) * NumF;
+ float UnpredCost = Probability * TCycles + (1.0 - Probability) * FCycles;
UnpredCost += 1.0; // The branch itself
UnpredCost += (1.0 - Confidence) * Subtarget.getMispredictionPenalty();
- float PredCost = NumT + NumF;
-
- return PredCost < UnpredCost;
+ return (float)(TCycles + FCycles + TExtra + FExtra) < UnpredCost;
}
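The diamond case is analogous, except both halves are predicated and both contribute their extra predication cost: with TCycles = FCycles = 2, no extra cycles, Probability = 0.5, Confidence = 0.9, and the same assumed 10-cycle penalty, UnpredCost = 0.5 * 2 + 0.5 * 2 + 1.0 + 0.1 * 10 = 4.0, while the predicated cost is 2 + 2 = 4.0, so the conversion is rejected because it is not strictly cheaper.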
/// getInstrPredicate - If instruction is predicated, returns its predicate
}
unsigned
-ARMBaseInstrInfo::getNumMicroOps(const MachineInstr *MI,
- const InstrItineraryData *ItinData) const {
+ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
+ const MachineInstr *MI) const {
if (!ItinData || ItinData->isEmpty())
return 1;
case ARM::t2STM_UPD: {
unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands() + 1;
if (Subtarget.isCortexA8()) {
- // 4 registers would be issued: 1, 2, 1.
- // 5 registers would be issued: 1, 2, 2.
- return 1 + (NumRegs / 2);
+ if (NumRegs < 4)
+ return 2;
+ // 4 registers would be issued: 2, 2.
+ // 5 registers would be issued: 2, 2, 1.
+ UOps = (NumRegs / 2);
+ if (NumRegs % 2)
+ ++UOps;
+ return UOps;
} else if (Subtarget.isCortexA9()) {
UOps = (NumRegs / 2);
// If there are odd number of registers or if it's not 64-bit aligned,
return Latency;
}
+int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *MI,
+ unsigned *PredCost) const {
+ if (MI->isCopyLike() || MI->isInsertSubreg() ||
+ MI->isRegSequence() || MI->isImplicitDef())
+ return 1;
+
+ if (!ItinData || ItinData->isEmpty())
+ return 1;
+
+ const TargetInstrDesc &TID = MI->getDesc();
+ unsigned Class = TID.getSchedClass();
+ unsigned UOps = ItinData->Itineraries[Class].NumMicroOps;
+ if (PredCost && TID.hasImplicitDefOfPhysReg(ARM::CPSR))
+ // When predicated, CPSR is an additional source operand for CPSR-updating
+ // instructions, which apparently increases their latencies.
+ *PredCost = 1;
+ if (UOps)
+ return ItinData->getStageLatency(Class);
+ // A NumMicroOps value of zero marks the micro-op count as dynamic; for
+ // such instructions (e.g. load / store multiples) fall back on the
+ // computed micro-op count as a rough latency estimate.
+ return getNumMicroOps(ItinData, MI);
+}
+
+int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
+ SDNode *Node) const {
+ if (!Node->isMachineOpcode())
+ return 1;
+
+ if (!ItinData || ItinData->isEmpty())
+ return 1;
+
+ unsigned Opcode = Node->getMachineOpcode();
+ switch (Opcode) {
+ default:
+ return ItinData->getStageLatency(get(Opcode).getSchedClass());
+ case ARM::VLDMQ:
+ case ARM::VSTMQ:
+ return 2;
+ }
+}
+
bool ARMBaseInstrInfo::
hasHighOperandLatency(const InstrItineraryData *ItinData,
const MachineRegisterInfo *MRI,
const MachineFunction &MF) const;
virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB,
- unsigned NumInstrs,
+ unsigned NumCycles, unsigned ExtraPredCycles,
float Prob, float Confidence) const;
- virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB,unsigned NumT,
- MachineBasicBlock &FMBB,unsigned NumF,
+ virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB,
+ unsigned NumT, unsigned ExtraT,
+ MachineBasicBlock &FMBB,
+ unsigned NumF, unsigned ExtraF,
float Probability, float Confidence) const;
virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
- unsigned NumInstrs,
+ unsigned NumCycles,
float Probability,
float Confidence) const {
- return NumInstrs == 1;
+ // Only a single-cycle instruction (e.g. a register move) is considered
+ // cheap enough to duplicate for the sake of if-conversion.
+ return NumCycles == 1;
}
/// AnalyzeCompare - For a comparison instruction, return the source register
const MachineRegisterInfo *MRI,
MachineBasicBlock::iterator &MII) const;
- virtual unsigned getNumMicroOps(const MachineInstr *MI,
- const InstrItineraryData *ItinData) const;
+ virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
+ const MachineInstr *MI) const;
virtual
int getOperandLatency(const InstrItineraryData *ItinData,
const TargetInstrDesc &UseTID,
unsigned UseIdx, unsigned UseAlign) const;
+ int getInstrLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *MI, unsigned *PredCost = 0) const;
+
+ int getInstrLatency(const InstrItineraryData *ItinData,
+ SDNode *Node) const;
+
bool hasHighOperandLatency(const InstrItineraryData *ItinData,
const MachineRegisterInfo *MRI,
const MachineInstr *DefMI, unsigned DefIdx,
return 0;
}
-bool Thumb2InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
- unsigned NumInstrs,
- float Prediction,
- float Confidence) const {
- if (!OldT2IfCvt)
- return ARMBaseInstrInfo::isProfitableToIfCvt(MBB, NumInstrs,
- Prediction, Confidence);
- return NumInstrs && NumInstrs <= 3;
-}
-
-bool Thumb2InstrInfo::
-isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumT,
- MachineBasicBlock &FMBB, unsigned NumF,
- float Prediction, float Confidence) const {
- if (!OldT2IfCvt)
- return ARMBaseInstrInfo::isProfitableToIfCvt(TMBB, NumT,
- FMBB, NumF,
- Prediction, Confidence);
-
- // FIXME: Catch optimization such as:
- // r0 = movne
- // r0 = moveq
- return NumT && NumF &&
- NumT <= 3 && NumF <= 3;
-}
-
-
void
Thumb2InstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
MachineBasicBlock *NewDest) const {
bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI) const;
- bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumInstrs,
- float Prediction, float Confidence) const;
- bool isProfitableToIfCvt(MachineBasicBlock &TMBB, unsigned NumTInstrs,
- MachineBasicBlock &FMBB, unsigned NumFInstrs,
- float Prediction, float Confidence) const;
-
void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
}
unsigned
-TargetInstrInfo::getNumMicroOps(const MachineInstr *MI,
- const InstrItineraryData *ItinData) const {
+TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
+ const MachineInstr *MI) const {
if (!ItinData || ItinData->isEmpty())
return 1;
return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}
+int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *MI,
+ unsigned *PredCost) const {
+ if (!ItinData || ItinData->isEmpty())
+ return 1;
+
+ return ItinData->getStageLatency(MI->getDesc().getSchedClass());
+}
+
+int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
+ SDNode *N) const {
+ if (!ItinData || ItinData->isEmpty())
+ return 1;
+
+ if (!N->isMachineOpcode())
+ return 1;
+
+ return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
+}
+
bool TargetInstrInfo::hasLowDefLatency(const InstrItineraryData *ItinData,
const MachineInstr *DefMI,
unsigned DefIdx) const {
define fastcc i32 @dct_luma_sp(i32 %block_x, i32 %block_y, i32* %coeff_cost) {
entry:
; Make sure to use base-updating stores for saving callee-saved registers.
+; CHECK: push
; CHECK-NOT: sub sp
-; CHECK: vpush
+; CHECK: push
%predicted_block = alloca [4 x [4 x i32]], align 4 ; <[4 x [4 x i32]]*> [#uses=1]
br label %cond_next489
; micro-coded and would have long issue latency even if predicated on
; a false predicate.
-%0 = type { float, float, float, float }
-%pln = type { %vec, float }
-%vec = type { [4 x float] }
-
-define arm_aapcs_vfpcc float @aaa(%vec* nocapture %ustart, %vec* nocapture %udir, %vec* nocapture %vstart, %vec* nocapture %vdir, %vec* %upoint, %vec* %vpoint) {
-; CHECK: aaa:
-; CHECK: vldr.32
-; CHECK-NOT: vldrne
-; CHECK-NOT: vpopne
-; CHECK-NOT: popne
-; CHECK: vpop
-; CHECK: pop
+define void @t(double %a, double %b, double %c, double %d, i32* nocapture %solutions, double* nocapture %x) nounwind {
entry:
- br i1 undef, label %bb81, label %bb48
+; CHECK: t:
+; CHECK: vpop {d8}
+; CHECK-NOT: vpopne
+; CHECK: ldmia sp!, {r7, pc}
+; CHECK: vpop {d8}
+; CHECK: ldmia sp!, {r7, pc}
+ br i1 undef, label %if.else, label %if.then
-bb48: ; preds = %entry
- %0 = call arm_aapcs_vfpcc %0 @bbb(%pln* undef, %vec* %vstart, %vec* undef) nounwind ; <%0> [#uses=0]
- ret float 0.000000e+00
+if.then: ; preds = %entry
+ %mul73 = fmul double undef, 0.000000e+00
+ %sub76 = fsub double %mul73, undef
+ store double %sub76, double* undef, align 4
+ %call88 = tail call double @cos(double 0.000000e+00) nounwind
+ %mul89 = fmul double undef, %call88
+ %sub92 = fsub double %mul89, undef
+ store double %sub92, double* undef, align 4
+ ret void
-bb81: ; preds = %entry
- ret float 0.000000e+00
+if.else: ; preds = %entry
+ %tmp101 = tail call double @llvm.pow.f64(double undef, double 0x3FD5555555555555)
+ %add112 = fadd double %tmp101, undef
+ %mul118 = fmul double %add112, undef
+ store double 0.000000e+00, double* %x, align 4
+ ret void
}
-declare arm_aapcs_vfpcc %0 @bbb(%pln* nocapture, %vec* nocapture, %vec* nocapture) nounwind
+declare double @acos(double)
+
+declare double @sqrt(double) readnone
+
+declare double @cos(double) readnone
+
+declare double @fabs(double)
+
+declare double @llvm.pow.f64(double, double) nounwind readonly
--- /dev/null
+; RUN: llc < %s -mtriple=arm-apple-darwin -mcpu=cortex-a8 | FileCheck %s
+; rdar://8598427
+; Adjust if-converter heuristics to avoid predicating vmrs which can cause
+; significant regression.
+
+%struct.xyz_t = type { double, double, double }
+
+define i32 @effie(i32 %tsets, %struct.xyz_t* nocapture %p, i32 %a, i32 %b, i32 %c) nounwind readonly noinline {
+; CHECK: effie:
+entry:
+ %0 = icmp sgt i32 %tsets, 0
+ br i1 %0, label %bb.nph, label %bb6
+
+bb.nph: ; preds = %entry
+ %1 = add nsw i32 %b, %a
+ %2 = add nsw i32 %1, %c
+ br label %bb
+
+bb: ; preds = %bb4, %bb.nph
+; CHECK: vcmpe.f64
+; CHECK: vmrs apsr_nzcv, fpscr
+ %r.19 = phi i32 [ 0, %bb.nph ], [ %r.0, %bb4 ]
+ %n.08 = phi i32 [ 0, %bb.nph ], [ %10, %bb4 ]
+ %scevgep10 = getelementptr inbounds %struct.xyz_t* %p, i32 %n.08, i32 0
+ %scevgep11 = getelementptr %struct.xyz_t* %p, i32 %n.08, i32 1
+ %3 = load double* %scevgep10, align 4
+ %4 = load double* %scevgep11, align 4
+ %5 = fcmp uge double %3, %4
+ br i1 %5, label %bb3, label %bb1
+
+bb1: ; preds = %bb
+; CHECK-NOT: it
+; CHECK-NOT: vcmpemi
+; CHECK-NOT: vmrsmi
+; CHECK: vcmpe.f64
+; CHECK: vmrs apsr_nzcv, fpscr
+ %scevgep12 = getelementptr %struct.xyz_t* %p, i32 %n.08, i32 2
+ %6 = load double* %scevgep12, align 4
+ %7 = fcmp uge double %3, %6
+ br i1 %7, label %bb3, label %bb2
+
+bb2: ; preds = %bb1
+ %8 = add nsw i32 %2, %r.19
+ br label %bb4
+
+bb3: ; preds = %bb1, %bb
+ %9 = add nsw i32 %r.19, 1
+ br label %bb4
+
+bb4: ; preds = %bb3, %bb2
+ %r.0 = phi i32 [ %9, %bb3 ], [ %8, %bb2 ]
+ %10 = add nsw i32 %n.08, 1
+ %exitcond = icmp eq i32 %10, %tsets
+ br i1 %exitcond, label %bb6, label %bb
+
+bb6: ; preds = %bb4, %entry
+ %r.1.lcssa = phi i32 [ 0, %entry ], [ %r.0, %bb4 ]
+ ret i32 %r.1.lcssa
+}
; constant offset addressing, so that each of the following stores
; uses the same register.
-; CHECK: vstr.32 s{{.*}}, [r{{.*}}, #-128]
-; CHECK: vstr.32 s{{.*}}, [r{{.*}}, #-96]
-; CHECK: vstr.32 s{{.*}}, [r{{.*}}, #-64]
-; CHECK: vstr.32 s{{.*}}, [r{{.*}}, #-32]
-; CHECK: vstr.32 s{{.*}}, [r{{.*}}]
-; CHECK: vstr.32 s{{.*}}, [r{{.*}}, #32]
-; CHECK: vstr.32 s{{.*}}, [r{{.*}}, #64]
-; CHECK: vstr.32 s{{.*}}, [r{{.*}}, #96]
+; CHECK: vstr.32 s{{.*}}, [lr, #-128]
+; CHECK: vstr.32 s{{.*}}, [lr, #-96]
+; CHECK: vstr.32 s{{.*}}, [lr, #-64]
+; CHECK: vstr.32 s{{.*}}, [lr, #-32]
+; CHECK: vstr.32 s{{.*}}, [lr]
+; CHECK: vstr.32 s{{.*}}, [lr, #32]
+; CHECK: vstr.32 s{{.*}}, [lr, #64]
+; CHECK: vstr.32 s{{.*}}, [lr, #96]
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32-n32"
entry:
; CHECK: t10:
; CHECK: vmov.i32 q9, #0x3F000000
-; CHECK: vmov d0, d17
; CHECK: vmla.f32 q8, q8, d0[0]
%0 = shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
%1 = insertelement <4 x float> %0, float undef, i32 1 ; <<4 x float>> [#uses=1]
%4 = insertelement <2 x double> %2, double %V.0.ph, i32 1 ; <<2 x double>> [#uses=2]
; Constant pool load followed by add.
; Then clobber the loaded register, not the sum.
-; CHECK: vldr.64
-; CHECK: vadd.f64
; CHECK: vldr.64 [[LDR:d.*]],
; CHECK: LPC0_0:
; CHECK: vadd.f64 [[ADD:d.*]], [[LDR]], [[LDR]]