MachineTraceMetrics::Trace BlockTrace);
bool
improvesCriticalPathLen(MachineBasicBlock *MBB, MachineInstr *Root,
- MachineTraceMetrics::Trace BlockTrace,
- SmallVectorImpl<MachineInstr *> &InsInstrs,
- DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
- bool NewCodeHasLessInsts);
+ MachineTraceMetrics::Trace BlockTrace,
+ SmallVectorImpl<MachineInstr *> &InsInstrs,
+ DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
+ MachineCombinerPattern Pattern);
bool preservesResourceLen(MachineBasicBlock *MBB,
MachineTraceMetrics::Trace BlockTrace,
SmallVectorImpl<MachineInstr *> &InsInstrs,
return NewRootLatency;
}
-/// True when the new instruction sequence does not lengthen the critical path
-/// and the new sequence has less instructions or the new sequence improves the
-/// critical path.
+/// The combiner's goal may differ based on which pattern it is attempting
+/// to optimize.
+enum class CombinerObjective {
+ MustReduceDepth, // The data dependency chain must be improved.
+ Default // The critical path must not be lengthened.
+};
+
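+/// Return the objective the combiner should pursue for pattern \p P.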
+static CombinerObjective getCombinerObjective(MachineCombinerPattern P) {
+ // TODO: If C++ ever gets a real enum class, make this part of the
+ // MachineCombinerPattern class.
+ switch (P) {
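+ // Reassociation patterns do not reduce the instruction count, so they
+ // are profitable only if they reduce the depth of the dependency chain.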
+ case MachineCombinerPattern::REASSOC_AX_BY:
+ case MachineCombinerPattern::REASSOC_AX_YB:
+ case MachineCombinerPattern::REASSOC_XA_BY:
+ case MachineCombinerPattern::REASSOC_XA_YB:
+ return CombinerObjective::MustReduceDepth;
+ default:
+ return CombinerObjective::Default;
+ }
+}
+
/// The DAGCombine code sequence ends in MI (Machine Instruction) Root.
/// The new code sequence ends in MI NewRoot. A necessary condition for the new
/// sequence to replace the old sequence is that it cannot lengthen the critical
-/// path. This is decided by the formula:
-/// (NewRootDepth + NewRootLatency) <= (RootDepth + RootLatency + RootSlack)).
-/// If the new sequence has an equal length critical path but does not reduce
-/// the number of instructions (NewCodeHasLessInsts is false), then it is not
-/// considered an improvement. The slack is the number of cycles Root can be
-/// delayed before the critical patch becomes longer.
+/// path. The definition of "improve" may be restricted by specifying that the
+/// new path improves the data dependency chain (MustReduceDepth).
bool MachineCombiner::improvesCriticalPathLen(
MachineBasicBlock *MBB, MachineInstr *Root,
MachineTraceMetrics::Trace BlockTrace,
SmallVectorImpl<MachineInstr *> &InsInstrs,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
- bool NewCodeHasLessInsts) {
+ MachineCombinerPattern Pattern) {
assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
"Missing machine model\n");
// NewRoot is the last instruction in the \p InsInstrs vector.
- // Get depth and latency of NewRoot.
unsigned NewRootIdx = InsInstrs.size() - 1;
MachineInstr *NewRoot = InsInstrs[NewRootIdx];
- unsigned NewRootDepth = getDepth(InsInstrs, InstrIdxForVirtReg, BlockTrace);
- unsigned NewRootLatency = getLatency(Root, NewRoot, BlockTrace);
- // Get depth, latency and slack of Root.
+ // Get depth and latency of NewRoot and Root.
+ unsigned NewRootDepth = getDepth(InsInstrs, InstrIdxForVirtReg, BlockTrace);
unsigned RootDepth = BlockTrace.getInstrCycles(Root).Depth;
+
+ DEBUG(dbgs() << "DEPENDENCE DATA FOR " << Root << "\n";
+ dbgs() << " NewRootDepth: " << NewRootDepth << "\n";
+ dbgs() << " RootDepth: " << RootDepth << "\n");
+
+ // For a transform such as reassociation, the cost equation is
+ // conservatively calculated so that we must improve the depth (data
+ // dependency cycles) in the critical path to proceed with the transform.
+ // Being conservative also protects against inaccuracies in the underlying
+ // machine trace metrics and CPU models.
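+ // For example, reassociating ((a + b) + c) + d into (a + b) + (c + d)
+ // shortens the chain of dependent adds from three to two.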
+ if (getCombinerObjective(Pattern) == CombinerObjective::MustReduceDepth)
+ return NewRootDepth < RootDepth;
+
+ // A more flexible cost calculation for the critical path includes the slack
+ // of the original code sequence. This may allow the transform to proceed
+ // even if the instruction depths (data dependency cycles) become worse.
+ unsigned NewRootLatency = getLatency(Root, NewRoot, BlockTrace);
unsigned RootLatency = TSchedModel.computeInstrLatency(Root);
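+ // Slack is the number of cycles Root can be delayed before the critical
+ // path becomes longer.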
unsigned RootSlack = BlockTrace.getInstrSlack(Root);
- DEBUG(dbgs() << "DEPENDENCE DATA FOR " << Root << "\n";
- dbgs() << " NewRootDepth: " << NewRootDepth
- << " NewRootLatency: " << NewRootLatency << "\n";
- dbgs() << " RootDepth: " << RootDepth << " RootLatency: " << RootLatency
- << " RootSlack: " << RootSlack << "\n";
+ DEBUG(dbgs() << " NewRootLatency: " << NewRootLatency << "\n";
+ dbgs() << " RootLatency: " << RootLatency << "\n";
+ dbgs() << " RootSlack: " << RootSlack << "\n";
dbgs() << " NewRootDepth + NewRootLatency = "
<< NewRootDepth + NewRootLatency << "\n";
dbgs() << " RootDepth + RootLatency + RootSlack = "
unsigned NewCycleCount = NewRootDepth + NewRootLatency;
unsigned OldCycleCount = RootDepth + RootLatency + RootSlack;
- if (NewCodeHasLessInsts)
- return NewCycleCount <= OldCycleCount;
- else
- return NewCycleCount < OldCycleCount;
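+ // Accept the transform when it does not lengthen the critical path, i.e.
+ // NewRootDepth + NewRootLatency <= RootDepth + RootLatency + RootSlack.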
+ return NewCycleCount <= OldCycleCount;
}
/// helper routine to convert instructions into SC
// in a single instruction.
if (!NewInstCount)
continue;
+
// Substitute when we optimize for codesize and the new sequence has
// fewer instructions OR
// the new sequence neither lengthens the critical path nor increases
// resource pressure.
if (doSubstitute(NewInstCount, OldInstCount) ||
(improvesCriticalPathLen(MBB, &MI, BlockTrace, InsInstrs,
- InstrIdxForVirtReg,
- NewInstCount < OldInstCount) &&
+ InstrIdxForVirtReg, P) &&
preservesResourceLen(MBB, BlockTrace, InsInstrs, DelInstrs))) {
for (auto *InstrPtr : InsInstrs)
MBB->insert((MachineBasicBlock::iterator) &MI, InstrPtr);