//===- BlockFrequencyInfoImpl.cpp - Block Frequency Info Implementation --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Loops should be simplified before this analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::bfi_detail;

#define DEBUG_TYPE "block-freq"

//===----------------------------------------------------------------------===//
//
// BlockMass implementation.
//
//===----------------------------------------------------------------------===//
ScaledNumber<uint64_t> BlockMass::toScaled() const {
  if (isFull())
    return ScaledNumber<uint64_t>(1, 0);
  return ScaledNumber<uint64_t>(getMass() + 1, -64);
}

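// Note: BlockMass is a 64-bit fixed-point fraction of the entry mass, with
// UINT64_MAX representing full mass (1.0). toScaled() maps a mass M onto
// (M + 1) * 2^-64; the isFull() special case exists because getMass() + 1
// would overflow for the full mass.
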
void BlockMass::dump() const { print(dbgs()); }

static char getHexDigit(int N) {
  assert(N < 16);
  if (N < 10)
    return '0' + N;
  return 'a' + N - 10;
}
raw_ostream &BlockMass::print(raw_ostream &OS) const {
  for (int Digits = 0; Digits < 16; ++Digits)
    OS << getHexDigit(Mass >> (60 - Digits * 4) & 0xf);
  return OS;
}

//===----------------------------------------------------------------------===//
//
// BlockFrequencyInfoImpl implementation.
//
//===----------------------------------------------------------------------===//
namespace {

typedef BlockFrequencyInfoImplBase::BlockNode BlockNode;
typedef BlockFrequencyInfoImplBase::Distribution Distribution;
typedef BlockFrequencyInfoImplBase::Distribution::WeightList WeightList;
typedef BlockFrequencyInfoImplBase::Scaled64 Scaled64;
typedef BlockFrequencyInfoImplBase::LoopData LoopData;
typedef BlockFrequencyInfoImplBase::Weight Weight;
typedef BlockFrequencyInfoImplBase::FrequencyData FrequencyData;

/// \brief Dithering mass distributer.
///
/// This class splits up a single mass into portions by weight, dithering to
/// spread out error. No mass is lost. The dithering precision depends on the
/// precision of the product of \a BlockMass and \a BranchProbability.
///
/// The distribution algorithm follows.
///
///  1. Initialize by saving the sum of the weights in \a RemWeight and the
///     mass to distribute in \a RemMass.
///
///  2. For each portion:
///
///      1. Construct a branch probability, P, as the portion's weight divided
///         by the current value of \a RemWeight.
///      2. Calculate the portion's mass as \a RemMass times P.
///      3. Update \a RemWeight and \a RemMass at each portion by subtracting
///         the current portion's weight and mass.
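///
/// For example, splitting a full mass across weights {3, 1}: the first
/// portion takes 3/4 of the remaining mass, and the second takes 1/1 of
/// whatever is left, so any rounding error from the first portion is
/// absorbed by the last and the portions always sum to the original mass.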
struct DitheringDistributer {
  uint32_t RemWeight;
  BlockMass RemMass;

  DitheringDistributer(Distribution &Dist, const BlockMass &Mass);

  BlockMass takeMass(uint32_t Weight);
};

} // end namespace

DitheringDistributer::DitheringDistributer(Distribution &Dist,
                                           const BlockMass &Mass) {
  Dist.normalize();
  RemWeight = Dist.Total;
  RemMass = Mass;
}

BlockMass DitheringDistributer::takeMass(uint32_t Weight) {
  assert(Weight && "invalid weight");
  assert(Weight <= RemWeight);
  BlockMass Mass = RemMass * BranchProbability(Weight, RemWeight);

  // Decrement totals (dither).
  RemWeight -= Weight;
  RemMass -= Mass;
  return Mass;
}

void Distribution::add(const BlockNode &Node, uint64_t Amount,
                       Weight::DistType Type) {
  assert(Amount && "invalid weight of 0");
  uint64_t NewTotal = Total + Amount;

  // Check for overflow. It should be impossible to overflow twice.
  bool IsOverflow = NewTotal < Total;
  assert(!(DidOverflow && IsOverflow) && "unexpected repeated overflow");
  DidOverflow |= IsOverflow;

  // Update the total.
  Total = NewTotal;

  // Save the weight.
  Weight W;
  W.TargetNode = Node;
  W.Amount = Amount;
  W.Type = Type;
  Weights.push_back(W);
}

static void combineWeight(Weight &W, const Weight &OtherW) {
  assert(OtherW.TargetNode.isValid());
  if (!W.Amount) {
    W = OtherW;
    return;
  }
  assert(W.Type == OtherW.Type);
  assert(W.TargetNode == OtherW.TargetNode);
  assert(W.Amount < W.Amount + OtherW.Amount && "Unexpected overflow");
  W.Amount += OtherW.Amount;
}

static void combineWeightsBySorting(WeightList &Weights) {
  // Sort so edges to the same node are adjacent.
  std::sort(Weights.begin(), Weights.end(),
            [](const Weight &L,
               const Weight &R) { return L.TargetNode < R.TargetNode; });

  // Combine adjacent edges.
  WeightList::iterator O = Weights.begin();
  for (WeightList::const_iterator I = O, L = O, E = Weights.end(); I != E;
       ++O, (I = L)) {
    *O = *I;

    // Find the adjacent weights to the same node.
    for (++L; L != E && I->TargetNode == L->TargetNode; ++L)
      combineWeight(*O, *L);
  }

  // Erase extra entries.
  Weights.erase(O, Weights.end());
}

static void combineWeightsByHashing(WeightList &Weights) {
  // Collect weights into a DenseMap.
  typedef DenseMap<BlockNode::IndexType, Weight> HashTable;
  HashTable Combined(NextPowerOf2(2 * Weights.size()));
  for (const Weight &W : Weights)
    combineWeight(Combined[W.TargetNode.Index], W);

  // Check whether anything changed.
  if (Weights.size() == Combined.size())
    return;

  // Fill in the new weights.
  Weights.clear();
  Weights.reserve(Combined.size());
  for (const auto &I : Combined)
    Weights.push_back(I.second);
}

static void combineWeights(WeightList &Weights) {
  // Use a hash table for many successors to keep this linear.
  if (Weights.size() > 128) {
    combineWeightsByHashing(Weights);
    return;
  }

  combineWeightsBySorting(Weights);
}

static uint64_t shiftRightAndRound(uint64_t N, int Shift) {
  assert(Shift >= 0);
  assert(Shift < 64);
  if (!Shift)
    return N;
  return (N >> Shift) + (UINT64_C(1) & N >> (Shift - 1));
}

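// The second term rounds to nearest: it adds 1 exactly when the highest bit
// shifted out was set. For example, shiftRightAndRound(11, 2) yields
// (11 >> 2) + 1 == 3, since 11/4 = 2.75 rounds up.
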
void Distribution::normalize() {
  // Early exit for termination nodes.
  if (Weights.empty())
    return;

  // Only bother if there are multiple successors.
  if (Weights.size() > 1)
    combineWeights(Weights);

  // Early exit when combined into a single successor.
  if (Weights.size() == 1) {
    Total = 1;
    Weights.front().Amount = 1;
    return;
  }

  // Determine how much to shift right so that the total fits into 32-bits.
  //
  // If we shift at all, shift by 1 extra. Otherwise, the lower limit of 1
  // for each weight can cause a 32-bit overflow.
  int Shift = 0;
  if (DidOverflow)
    Shift = 33;
  else if (Total > UINT32_MAX)
    Shift = 33 - countLeadingZeros(Total);

  // Early exit if nothing needs to be scaled.
  if (!Shift)
    return;

  // Recompute the total through accumulation (rather than shifting it) so
  // that it's accurate after shifting.
  Total = 0;

  // Sum the weights to each node and shift right if necessary.
  for (Weight &W : Weights) {
    // Scale down below UINT32_MAX. Since Shift is larger than necessary, we
    // can round here without concern about overflow.
    assert(W.TargetNode.isValid());
    W.Amount = std::max(UINT64_C(1), shiftRightAndRound(W.Amount, Shift));
    assert(W.Amount <= UINT32_MAX);

    // Update the total.
    Total += W.Amount;
  }
  assert(Total <= UINT32_MAX);
}

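// After normalization, Total and every Weight::Amount fit in 32 bits, which
// is what lets takeMass() build a BranchProbability(Weight, RemWeight) from
// them directly.
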
void BlockFrequencyInfoImplBase::clear() {
  // Swap with a default-constructed std::vector, since std::vector<>::clear()
  // does not actually clear heap storage.
  std::vector<FrequencyData>().swap(Freqs);
  std::vector<WorkingData>().swap(Working);
  Loops.clear();
}

/// \brief Clear all memory not needed downstream.
///
/// Releases all memory not used downstream. In particular, saves Freqs.
static void cleanup(BlockFrequencyInfoImplBase &BFI) {
  std::vector<FrequencyData> SavedFreqs(std::move(BFI.Freqs));
  BFI.clear();
  BFI.Freqs = std::move(SavedFreqs);
}

bool BlockFrequencyInfoImplBase::addToDist(Distribution &Dist,
                                           const LoopData *OuterLoop,
                                           const BlockNode &Pred,
                                           const BlockNode &Succ,
                                           uint64_t Weight) {
  if (!Weight)
    Weight = 1;

  auto isLoopHeader = [&OuterLoop](const BlockNode &Node) {
    return OuterLoop && OuterLoop->isHeader(Node);
  };

  BlockNode Resolved = Working[Succ.Index].getResolvedNode();

#ifndef NDEBUG
  auto debugSuccessor = [&](const char *Type) {
    dbgs() << " =>"
           << " [" << Type << "] weight = " << Weight;
    if (!isLoopHeader(Resolved))
      dbgs() << ", succ = " << getBlockName(Succ);
    if (Resolved != Succ)
      dbgs() << ", resolved = " << getBlockName(Resolved);
    dbgs() << "\n";
  };
  (void)debugSuccessor;
#endif

  if (isLoopHeader(Resolved)) {
    DEBUG(debugSuccessor("backedge"));
    Dist.addBackedge(OuterLoop->getHeader(), Weight);
    return true;
  }

  if (Working[Resolved.Index].getContainingLoop() != OuterLoop) {
    DEBUG(debugSuccessor(" exit "));
    Dist.addExit(Resolved, Weight);
    return true;
  }

  if (Resolved < Pred) {
    if (!isLoopHeader(Pred)) {
      // If OuterLoop is an irreducible loop, we can't actually handle this.
      assert((!OuterLoop || !OuterLoop->isIrreducible()) &&
             "unhandled irreducible control flow");

      // Irreducible backedge. Abort.
      DEBUG(debugSuccessor("abort!!!"));
      return false;
    }

    // If "Pred" is a loop header, then this isn't really a backedge; rather,
    // OuterLoop must be irreducible. These false backedges can come only from
    // secondary loop headers.
    assert(OuterLoop && OuterLoop->isIrreducible() && !isLoopHeader(Resolved) &&
           "unhandled irreducible control flow");
  }

  DEBUG(debugSuccessor(" local "));
  Dist.addLocal(Resolved, Weight);
  return true;
}

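// Note that successors inside already-packaged sub-loops are resolved to the
// sub-loop's pseudo-node (its header) by getResolvedNode(), so mass flows to
// the package rather than to individual member blocks.
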
bool BlockFrequencyInfoImplBase::addLoopSuccessorsToDist(
    const LoopData *OuterLoop, LoopData &Loop, Distribution &Dist) {
  // Copy the exit map into Dist.
  for (const auto &I : Loop.Exits)
    if (!addToDist(Dist, OuterLoop, Loop.getHeader(), I.first,
                   I.second.getMass()))
      // Irreducible backedge.
      return false;

  return true;
}

/// \brief Get the maximum allowed loop scale.
///
/// Gives the maximum number of estimated iterations allowed for a loop. Very
/// large numbers cause problems downstream (even within 64-bits).
static Scaled64 getMaxLoopScale() { return Scaled64(1, 12); }

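// Scaled64(1, 12) is 1 * 2^12, so loops are capped at 4096 estimated
// iterations.
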
/// \brief Compute the loop scale for a loop.
void BlockFrequencyInfoImplBase::computeLoopScale(LoopData &Loop) {
  // Compute loop scale.
  DEBUG(dbgs() << "compute-loop-scale: " << getLoopName(Loop) << "\n");

  // LoopScale == 1 / ExitMass
  // ExitMass == HeadMass - BackedgeMass
  BlockMass ExitMass = BlockMass::getFull() - Loop.BackedgeMass;

  // Block scale stores the inverse of the scale.
  Loop.Scale = ExitMass.toScaled().inverse();

  DEBUG(dbgs() << " - exit-mass = " << ExitMass << " (" << BlockMass::getFull()
               << " - " << Loop.BackedgeMass << ")\n"
               << " - scale = " << Loop.Scale << "\n");

  if (Loop.Scale > getMaxLoopScale()) {
    Loop.Scale = getMaxLoopScale();
    DEBUG(dbgs() << " - reduced-to-max-scale: " << getMaxLoopScale() << "\n");
  }
}

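// For example, if 3/4 of the header's mass returns along backedges, ExitMass
// is 1/4 of full mass and the loop scale is 4: the loop body is expected to
// execute about four times per entry.
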
/// \brief Package up a loop.
void BlockFrequencyInfoImplBase::packageLoop(LoopData &Loop) {
  DEBUG(dbgs() << "packaging-loop: " << getLoopName(Loop) << "\n");

  // Clear the subloop exits to prevent quadratic memory usage.
  for (const BlockNode &M : Loop.Nodes) {
    if (auto *Loop = Working[M.Index].getPackagedLoop())
      Loop->Exits.clear();
    DEBUG(dbgs() << " - node: " << getBlockName(M.Index) << "\n");
  }
  Loop.IsPackaged = true;
}

void BlockFrequencyInfoImplBase::distributeMass(const BlockNode &Source,
                                                LoopData *OuterLoop,
                                                Distribution &Dist) {
  BlockMass Mass = Working[Source.Index].getMass();
  DEBUG(dbgs() << " => mass: " << Mass << "\n");

  // Distribute mass to successors as laid out in Dist.
  DitheringDistributer D(Dist, Mass);

#ifndef NDEBUG
  auto debugAssign = [&](const BlockNode &T, const BlockMass &M,
                         const char *Desc) {
    dbgs() << " => assign " << M << " (" << D.RemMass << ")";
    if (Desc)
      dbgs() << " [" << Desc << "]";
    if (T.isValid())
      dbgs() << " to " << getBlockName(T);
    dbgs() << "\n";
  };
  (void)debugAssign;
#endif

  for (const Weight &W : Dist.Weights) {
    // Check for a local edge (non-backedge and non-exit).
    BlockMass Taken = D.takeMass(W.Amount);
    if (W.Type == Weight::Local) {
      Working[W.TargetNode.Index].getMass() += Taken;
      DEBUG(debugAssign(W.TargetNode, Taken, nullptr));
      continue;
    }

    // Backedges and exits only make sense if we're processing a loop.
    assert(OuterLoop && "backedge or exit outside of loop");

    // Check for a backedge.
    if (W.Type == Weight::Backedge) {
      OuterLoop->BackedgeMass += Taken;
      DEBUG(debugAssign(BlockNode(), Taken, "back"));
      continue;
    }

    // This must be an exit.
    assert(W.Type == Weight::Exit);
    OuterLoop->Exits.push_back(std::make_pair(W.TargetNode, Taken));
    DEBUG(debugAssign(W.TargetNode, Taken, "exit"));
  }
}

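// The BackedgeMass accumulated here feeds computeLoopScale() above, and the
// Exits list is replayed into the enclosing loop's distribution by
// addLoopSuccessorsToDist().
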
static void convertFloatingToInteger(BlockFrequencyInfoImplBase &BFI,
                                     const Scaled64 &Min, const Scaled64 &Max) {
  // Scale the Factor to a size that creates integers. Ideally, integers would
  // be scaled so that Max == UINT64_MAX so that they can be best
  // differentiated. However, the register allocator currently deals poorly
  // with large numbers. Instead, push Min up a little from 1 to give some
  // room to differentiate small, unequal numbers.
  //
  // TODO: fix issues downstream so that ScalingFactor can be
  // Scaled64(1,64)/Max.
  Scaled64 ScalingFactor = Min.inverse();
  if ((Max / Min).lg() < 60)
    ScalingFactor <<= 3;

  // Translate the floats to integers.
  DEBUG(dbgs() << "float-to-int: min = " << Min << ", max = " << Max
               << ", factor = " << ScalingFactor << "\n");
  for (size_t Index = 0; Index < BFI.Freqs.size(); ++Index) {
    Scaled64 Scaled = BFI.Freqs[Index].Scaled * ScalingFactor;
    BFI.Freqs[Index].Integer = std::max(UINT64_C(1), Scaled.toInt<uint64_t>());
    DEBUG(dbgs() << " - " << BFI.getBlockName(Index) << ": float = "
                 << BFI.Freqs[Index].Scaled << ", scaled = " << Scaled
                 << ", int = " << BFI.Freqs[Index].Integer << "\n");
  }
}

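// For example, with Min = 2^-3 and a dynamic range under 2^60, the factor is
// 2^3 * 2^3 = 2^6, so the coldest block gets an integer frequency of 8 rather
// than 1, leaving room to distinguish nearly-cold blocks.
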
/// \brief Unwrap a loop package.
///
/// Visits all the members of a loop, adjusting their BlockData according to
/// the loop's pseudo-node.
static void unwrapLoop(BlockFrequencyInfoImplBase &BFI, LoopData &Loop) {
  DEBUG(dbgs() << "unwrap-loop-package: " << BFI.getLoopName(Loop)
               << ": mass = " << Loop.Mass << ", scale = " << Loop.Scale
               << "\n");
  Loop.Scale *= Loop.Mass.toScaled();
  Loop.IsPackaged = false;
  DEBUG(dbgs() << " => combined-scale = " << Loop.Scale << "\n");

  // Propagate the head scale through the loop. Since members are visited in
  // RPO, the head scale will be updated by the loop scale first, and then the
  // final head scale will be used to update the rest of the members.
  for (const BlockNode &N : Loop.Nodes) {
    const auto &Working = BFI.Working[N.Index];
    Scaled64 &F = Working.isAPackage() ? Working.getPackagedLoop()->Scale
                                       : BFI.Freqs[N.Index].Scaled;
    Scaled64 New = Loop.Scale * F;
    DEBUG(dbgs() << " - " << BFI.getBlockName(N) << ": " << F << " => " << New
                 << "\n");
    F = New;
  }
}

void BlockFrequencyInfoImplBase::unwrapLoops() {
  // Set initial frequencies from loop-local masses.
  for (size_t Index = 0; Index < Working.size(); ++Index)
    Freqs[Index].Scaled = Working[Index].Mass.toScaled();

  for (LoopData &Loop : Loops)
    unwrapLoop(*this, Loop);
}

void BlockFrequencyInfoImplBase::finalizeMetrics() {
  // Unwrap loop packages in reverse post-order, tracking min and max
  // frequencies.
  auto Min = Scaled64::getLargest();
  auto Max = Scaled64::getZero();
  for (size_t Index = 0; Index < Working.size(); ++Index) {
    // Update min/max scale.
    Min = std::min(Min, Freqs[Index].Scaled);
    Max = std::max(Max, Freqs[Index].Scaled);
  }

  // Convert to integers.
  convertFloatingToInteger(*this, Min, Max);

  // Clean up data structures.
  cleanup(*this);

  // Print out the final stats.
  DEBUG(dump());
}

BlockFrequency
BlockFrequencyInfoImplBase::getBlockFreq(const BlockNode &Node) const {
  if (!Node.isValid())
    return 0;
  return Freqs[Node.Index].Integer;
}

Scaled64
BlockFrequencyInfoImplBase::getFloatingBlockFreq(const BlockNode &Node) const {
  if (!Node.isValid())
    return Scaled64::getZero();
  return Freqs[Node.Index].Scaled;
}

std::string
BlockFrequencyInfoImplBase::getBlockName(const BlockNode &Node) const {
  return std::string();
}

std::string
BlockFrequencyInfoImplBase::getLoopName(const LoopData &Loop) const {
  return getBlockName(Loop.getHeader()) + (Loop.isIrreducible() ? "**" : "*");
}

raw_ostream &
BlockFrequencyInfoImplBase::printBlockFreq(raw_ostream &OS,
                                           const BlockNode &Node) const {
  return OS << getFloatingBlockFreq(Node);
}

raw_ostream &
BlockFrequencyInfoImplBase::printBlockFreq(raw_ostream &OS,
                                           const BlockFrequency &Freq) const {
  Scaled64 Block(Freq.getFrequency(), 0);
  Scaled64 Entry(getEntryFreq(), 0);

  return OS << Block / Entry;
}

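// Frequencies print relative to the entry frequency: a block executed twice
// per function entry prints as 2.0.
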
void IrreducibleGraph::addNodesInLoop(const BFIBase::LoopData &OuterLoop) {
  Start = OuterLoop.getHeader();
  Nodes.reserve(OuterLoop.Nodes.size());
  for (auto N : OuterLoop.Nodes)
    addNode(N);
  indexNodes();
}
void IrreducibleGraph::addNodesInFunction() {
  Start = 0;
  for (uint32_t Index = 0; Index < BFI.Working.size(); ++Index)
    if (!BFI.Working[Index].isPackaged())
      addNode(Index);
  indexNodes();
}
void IrreducibleGraph::indexNodes() {
  for (auto &I : Nodes)
    Lookup[I.Node.Index] = &I;
}
void IrreducibleGraph::addEdge(IrrNode &Irr, const BlockNode &Succ,
                               const BFIBase::LoopData *OuterLoop) {
  if (OuterLoop && OuterLoop->isHeader(Succ))
    return;
  auto L = Lookup.find(Succ.Index);
  if (L == Lookup.end())
    return;
  IrrNode &SuccIrr = *L->second;
  Irr.Edges.push_back(&SuccIrr);
  SuccIrr.Edges.push_front(&Irr);
  ++SuccIrr.NumIn;
}

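// Each IrrNode keeps its predecessors at the front of a single edge list and
// its successors at the back; NumIn records how many are predecessors so the
// pred/succ iterator ranges can split that one list.
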
namespace llvm {
template <> struct GraphTraits<IrreducibleGraph> {
  typedef bfi_detail::IrreducibleGraph GraphT;

  typedef const GraphT::IrrNode NodeType;
  typedef GraphT::IrrNode::iterator ChildIteratorType;

  static const NodeType *getEntryNode(const GraphT &G) {
    return G.StartIrr;
  }
  static ChildIteratorType child_begin(NodeType *N) { return N->succ_begin(); }
  static ChildIteratorType child_end(NodeType *N) { return N->succ_end(); }
};
}

/// \brief Find extra irreducible headers.
///
/// Find entry blocks and other blocks with backedges, which exist when \c G
/// contains irreducible sub-SCCs.
static void findIrreducibleHeaders(
    const BlockFrequencyInfoImplBase &BFI,
    const IrreducibleGraph &G,
    const std::vector<const IrreducibleGraph::IrrNode *> &SCC,
    LoopData::NodeList &Headers, LoopData::NodeList &Others) {
  // Map from nodes in the SCC to whether each is an entry block.
  SmallDenseMap<const IrreducibleGraph::IrrNode *, bool, 8> InSCC;

  // InSCC also acts as the set of nodes in the graph. Seed it.
  for (const auto *I : SCC)
    InSCC[I] = false;

  for (auto I = InSCC.begin(), E = InSCC.end(); I != E; ++I) {
    auto &Irr = *I->first;
    for (const auto *P : make_range(Irr.pred_begin(), Irr.pred_end())) {
      if (InSCC.count(P))
        continue;

      // This is an entry block.
      I->second = true;
      Headers.push_back(Irr.Node);
      DEBUG(dbgs() << " => entry = " << BFI.getBlockName(Irr.Node) << "\n");
      break;
    }
  }
  assert(Headers.size() >= 2 && "Should be irreducible");
  if (Headers.size() == InSCC.size()) {
    // Every block is a header.
    std::sort(Headers.begin(), Headers.end());
    return;
  }

  // Look for extra headers from irreducible sub-SCCs.
  for (const auto &I : InSCC) {
    // Entry blocks are already headers.
    if (I.second)
      continue;

    auto &Irr = *I.first;
    for (const auto *P : make_range(Irr.pred_begin(), Irr.pred_end())) {
      // Skip forward edges.
      if (P->Node < Irr.Node)
        continue;

      // Skip predecessors from entry blocks. These can have inverted
      // ordering.
      if (InSCC.lookup(P))
        continue;

      // Store the extra header.
      Headers.push_back(Irr.Node);
      DEBUG(dbgs() << " => extra = " << BFI.getBlockName(Irr.Node) << "\n");
      break;
    }
    if (Headers.back() == Irr.Node)
      // Added this as a header.
      continue;

    // This is not a header.
    Others.push_back(Irr.Node);
    DEBUG(dbgs() << " => other = " << BFI.getBlockName(Irr.Node) << "\n");
  }
  std::sort(Headers.begin(), Headers.end());
  std::sort(Others.begin(), Others.end());
}

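// In the classic two-entry irreducible region, where blocks A and B branch to
// each other and each is also reachable from outside the SCC, both A and B
// are recorded as headers of the synthetic loop.
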
static void createIrreducibleLoop(
    BlockFrequencyInfoImplBase &BFI, const IrreducibleGraph &G,
    LoopData *OuterLoop, std::list<LoopData>::iterator Insert,
    const std::vector<const IrreducibleGraph::IrrNode *> &SCC) {
  // Translate the SCC into RPO.
  DEBUG(dbgs() << " - found-scc\n");

  LoopData::NodeList Headers;
  LoopData::NodeList Others;
  findIrreducibleHeaders(BFI, G, SCC, Headers, Others);

  auto Loop = BFI.Loops.emplace(Insert, OuterLoop, Headers.begin(),
                                Headers.end(), Others.begin(), Others.end());

  // Update loop hierarchy.
  for (const auto &N : Loop->Nodes)
    if (BFI.Working[N.Index].isLoopHeader())
      BFI.Working[N.Index].Loop->Parent = &*Loop;
    else
      BFI.Working[N.Index].Loop = &*Loop;
}

iterator_range<std::list<LoopData>::iterator>
BlockFrequencyInfoImplBase::analyzeIrreducible(
    const IrreducibleGraph &G, LoopData *OuterLoop,
    std::list<LoopData>::iterator Insert) {
  assert((OuterLoop == nullptr) == (Insert == Loops.begin()));
  auto Prev = OuterLoop ? std::prev(Insert) : Loops.end();

  for (auto I = scc_begin(G); !I.isAtEnd(); ++I) {
    if (I->size() < 2)
      continue;

    // Translate the SCC into RPO.
    createIrreducibleLoop(*this, G, OuterLoop, Insert, *I);
  }

  if (OuterLoop)
    return make_range(std::next(Prev), Insert);
  return make_range(Loops.begin(), Insert);
}

void
BlockFrequencyInfoImplBase::updateLoopWithIrreducible(LoopData &OuterLoop) {
  OuterLoop.Exits.clear();
  OuterLoop.BackedgeMass = BlockMass::getEmpty();
  auto O = OuterLoop.Nodes.begin() + 1;
  for (auto I = O, E = OuterLoop.Nodes.end(); I != E; ++I)
    if (!Working[I->Index].isPackaged())
      *O++ = *I;
  OuterLoop.Nodes.erase(O, OuterLoop.Nodes.end());
}