//===- BlockFrequencyInfoImpl.cpp - Block Frequency Info Implementation --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Loops should be simplified before this analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::bfi_detail;

#define DEBUG_TYPE "block-freq"
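
// A BlockMass is a fixed-point fraction of one: a full mass converts to the
// scaled number 1.0, and any other mass M converts to (M + 1) / 2^64.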
ScaledNumber<uint64_t> BlockMass::toScaled() const {
  if (isFull())
    return ScaledNumber<uint64_t>(1, 0);
  return ScaledNumber<uint64_t>(getMass() + 1, -64);
}

void BlockMass::dump() const { print(dbgs()); }
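
// Masses print as 16 hex digits, most significant nibble first, so relative
// magnitudes are easy to compare; a full mass prints as sixteen 'f's.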
static char getHexDigit(int N) {
  assert(N < 16);
  if (N < 10)
    return '0' + N;
  return 'a' + N - 10;
}

raw_ostream &BlockMass::print(raw_ostream &OS) const {
  for (int Digits = 0; Digits < 16; ++Digits)
    OS << getHexDigit(Mass >> (60 - Digits * 4) & 0xf);
  return OS;
}

namespace {

typedef BlockFrequencyInfoImplBase::BlockNode BlockNode;
typedef BlockFrequencyInfoImplBase::Distribution Distribution;
typedef BlockFrequencyInfoImplBase::Distribution::WeightList WeightList;
typedef BlockFrequencyInfoImplBase::Scaled64 Scaled64;
typedef BlockFrequencyInfoImplBase::LoopData LoopData;
typedef BlockFrequencyInfoImplBase::Weight Weight;
typedef BlockFrequencyInfoImplBase::FrequencyData FrequencyData;

/// \brief Dithering mass distributer.
///
/// This class splits up a single mass into portions by weight, dithering to
/// spread out error.  No mass is lost.  The dithering precision depends on the
/// precision of the product of \a BlockMass and \a BranchProbability.
///
/// The distribution algorithm follows.
///
///  1. Initialize by saving the sum of the weights in \a RemWeight and the
///     mass to distribute in \a RemMass.
///
///  2. For each portion:
///
///      1. Construct a branch probability, P, as the portion's weight divided
///         by the current value of \a RemWeight.
///      2. Calculate the portion's mass as \a RemMass times P.
///      3. Update \a RemWeight and \a RemMass at each portion by subtracting
///         the current portion's weight and mass.
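///
/// For example (illustrative numbers): distributing a full mass across
/// weights {3, 1}, the first portion takes 3/4 of the mass; the second takes
/// everything that remains (weight 1 over remaining weight 1), so rounding
/// error cannot accumulate and no mass is lost.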
struct DitheringDistributer {
  uint32_t RemWeight;
  BlockMass RemMass;

  DitheringDistributer(Distribution &Dist, const BlockMass &Mass);

  BlockMass takeMass(uint32_t Weight);
};

} // end namespace

DitheringDistributer::DitheringDistributer(Distribution &Dist,
                                           const BlockMass &Mass) {
  Dist.normalize();
  RemWeight = Dist.Total;
  RemMass = Mass;
}

BlockMass DitheringDistributer::takeMass(uint32_t Weight) {
  assert(Weight && "invalid weight");
  assert(Weight <= RemWeight);
  BlockMass Mass = RemMass * BranchProbability(Weight, RemWeight);

  // Decrement totals (dither).
  RemWeight -= Weight;
  RemMass -= Mass;
  return Mass;
}

void Distribution::add(const BlockNode &Node, uint64_t Amount,
                       Weight::DistType Type) {
  assert(Amount && "invalid weight of 0");
  uint64_t NewTotal = Total + Amount;

  // Check for overflow.  It should be impossible to overflow twice.
  bool IsOverflow = NewTotal < Total;
  assert(!(DidOverflow && IsOverflow) && "unexpected repeated overflow");
  DidOverflow |= IsOverflow;

  // Update the total.
  Total = NewTotal;

  // Save the weight.
  Weights.push_back(Weight(Type, Node, Amount));
}

static void combineWeight(Weight &W, const Weight &OtherW) {
  assert(OtherW.TargetNode.isValid());
  if (!W.Amount) {
    W = OtherW;
    return;
  }
  assert(W.Type == OtherW.Type);
  assert(W.TargetNode == OtherW.TargetNode);
  assert(W.Amount < W.Amount + OtherW.Amount && "Unexpected overflow");
  W.Amount += OtherW.Amount;
}

static void combineWeightsBySorting(WeightList &Weights) {
  // Sort so edges to the same node are adjacent.
  std::sort(Weights.begin(), Weights.end(),
            [](const Weight &L,
               const Weight &R) { return L.TargetNode < R.TargetNode; });

  // Combine adjacent edges.
  WeightList::iterator O = Weights.begin();
  for (WeightList::const_iterator I = O, L = O, E = Weights.end(); I != E;
       ++O, (I = L)) {
    *O = *I;

    // Find the adjacent weights to the same node.
    for (++L; L != E && I->TargetNode == L->TargetNode; ++L)
      combineWeight(*O, *L);
  }

  // Erase extra entries.
  Weights.erase(O, Weights.end());
}

static void combineWeightsByHashing(WeightList &Weights) {
  // Collect weights into a DenseMap.
  typedef DenseMap<BlockNode::IndexType, Weight> HashTable;
  HashTable Combined(NextPowerOf2(2 * Weights.size()));
  for (const Weight &W : Weights)
    combineWeight(Combined[W.TargetNode.Index], W);

  // Check whether anything changed.
  if (Weights.size() == Combined.size())
    return;

  // Fill in the new weights.
  Weights.clear();
  Weights.reserve(Combined.size());
  for (const auto &I : Combined)
    Weights.push_back(I.second);
}

static void combineWeights(WeightList &Weights) {
  // Use a hash table for many successors to keep this linear.
  if (Weights.size() > 128) {
    combineWeightsByHashing(Weights);
    return;
  }

  combineWeightsBySorting(Weights);
}
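
// Shift right, rounding half and above up: the highest dropped bit is added
// back in.  For instance, shiftRightAndRound(11, 2) gives (11 >> 2) + 1 = 3,
// since 11/4 = 2.75 rounds up.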
static uint64_t shiftRightAndRound(uint64_t N, int Shift) {
  assert(Shift >= 0);
  assert(Shift < 64);
  if (!Shift)
    return N;
  return (N >> Shift) + (UINT64_C(1) & N >> (Shift - 1));
}

void Distribution::normalize() {
  // Early exit for termination nodes.
  if (Weights.empty())
    return;

  // Only bother if there are multiple successors.
  if (Weights.size() > 1)
    combineWeights(Weights);

  // Early exit when combined into a single successor.
  if (Weights.size() == 1) {
    Total = 1;
    Weights.front().Amount = 1;
    return;
  }

  // Determine how much to shift right so that the total fits into 32 bits.
  //
  // If we shift at all, shift by 1 extra.  Otherwise, the lower limit of 1
  // for each weight can cause a 32-bit overflow.
  int Shift = 0;
  if (DidOverflow)
    Shift = 33;
  else if (Total > UINT32_MAX)
    Shift = 33 - countLeadingZeros(Total);
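
  // For example, a Total of 2^33 has 30 leading zeros, so Shift is 3: the
  // shifted total (2^30) sits comfortably below UINT32_MAX, leaving room for
  // rounding and the per-weight minimum of 1.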

  // Early exit if nothing needs to be scaled.
  if (!Shift)
    return;

  // Recompute the total through accumulation (rather than shifting it) so that
  // it's accurate after shifting.
  Total = 0;

  // Sum the weights to each node and shift right if necessary.
  for (Weight &W : Weights) {
    // Scale down below UINT32_MAX.  Since Shift is larger than necessary, we
    // can round here without concern about overflow.
    assert(W.TargetNode.isValid());
    W.Amount = std::max(UINT64_C(1), shiftRightAndRound(W.Amount, Shift));
    assert(W.Amount <= UINT32_MAX);

    // Update the total.
    Total += W.Amount;
  }
  assert(Total <= UINT32_MAX);
}

void BlockFrequencyInfoImplBase::clear() {
  // Swap with a default-constructed std::vector, since std::vector<>::clear()
  // does not actually clear heap storage.
  std::vector<FrequencyData>().swap(Freqs);
  std::vector<WorkingData>().swap(Working);
  Loops.clear();
}

/// \brief Clear all memory not needed downstream.
///
/// Releases all memory not used downstream.  In particular, it preserves
/// Freqs.
static void cleanup(BlockFrequencyInfoImplBase &BFI) {
  std::vector<FrequencyData> SavedFreqs(std::move(BFI.Freqs));
  BFI.clear();
  BFI.Freqs = std::move(SavedFreqs);
}

bool BlockFrequencyInfoImplBase::addToDist(Distribution &Dist,
                                           const LoopData *OuterLoop,
                                           const BlockNode &Pred,
                                           const BlockNode &Succ,
                                           uint64_t Weight) {
  if (!Weight)
    Weight = 1;

  auto isLoopHeader = [&OuterLoop](const BlockNode &Node) {
    return OuterLoop && OuterLoop->isHeader(Node);
  };

  BlockNode Resolved = Working[Succ.Index].getResolvedNode();

#ifndef NDEBUG
  auto debugSuccessor = [&](const char *Type) {
    dbgs() << "  =>"
           << " [" << Type << "] weight = " << Weight;
    if (!isLoopHeader(Resolved))
      dbgs() << ", succ = " << getBlockName(Succ);
    if (Resolved != Succ)
      dbgs() << ", resolved = " << getBlockName(Resolved);
    dbgs() << "\n";
  };
  (void)debugSuccessor;
#endif

  if (isLoopHeader(Resolved)) {
    DEBUG(debugSuccessor("backedge"));
    Dist.addBackedge(OuterLoop->getHeader(), Weight);
    return true;
  }

  if (Working[Resolved.Index].getContainingLoop() != OuterLoop) {
    DEBUG(debugSuccessor("  exit  "));
    Dist.addExit(Resolved, Weight);
    return true;
  }

  if (Resolved < Pred) {
    if (!isLoopHeader(Pred)) {
      // If OuterLoop is an irreducible loop, we can't actually handle this.
      assert((!OuterLoop || !OuterLoop->isIrreducible()) &&
             "unhandled irreducible control flow");

      // Irreducible backedge.  Abort.
      DEBUG(debugSuccessor("abort!!!"));
      return false;
    }

    // If "Pred" is a loop header, then this isn't really a backedge; rather,
    // OuterLoop must be irreducible.  These false backedges can come only from
    // secondary loop headers.
    assert(OuterLoop && OuterLoop->isIrreducible() && !isLoopHeader(Resolved) &&
           "unhandled irreducible control flow");
  }

  DEBUG(debugSuccessor(" local  "));
  Dist.addLocal(Resolved, Weight);
  return true;
}

bool BlockFrequencyInfoImplBase::addLoopSuccessorsToDist(
    const LoopData *OuterLoop, LoopData &Loop, Distribution &Dist) {
  // Copy the exit map into Dist.
  for (const auto &I : Loop.Exits)
    if (!addToDist(Dist, OuterLoop, Loop.getHeader(), I.first,
                   I.second.getMass()))
      // Irreducible backedge.
      return false;

  return true;
}

/// \brief Get the maximum allowed loop scale.
///
/// Gives the maximum number of estimated iterations allowed for a loop.  Very
/// large numbers cause problems downstream (even within 64 bits).
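/// The current limit, Scaled64(1, 12), represents 1 * 2^12 = 4096 iterations.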
static Scaled64 getMaxLoopScale() { return Scaled64(1, 12); }

/// \brief Compute the loop scale for a loop.
void BlockFrequencyInfoImplBase::computeLoopScale(LoopData &Loop) {
  // Compute loop scale.
  DEBUG(dbgs() << "compute-loop-scale: " << getLoopName(Loop) << "\n");

  // LoopScale == 1 / ExitMass
  // ExitMass == HeadMass - BackedgeMass
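  //
  // For example, if 3/4 of the header's mass flows back along backedges, then
  // ExitMass is 1/4 and the loop scale is 4: an estimate of four iterations
  // per entry into the loop.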
  BlockMass ExitMass = BlockMass::getFull() - Loop.BackedgeMass;

  // Block scale stores the inverse of the scale.
  Loop.Scale = ExitMass.toScaled().inverse();

  DEBUG(dbgs() << " - exit-mass = " << ExitMass << " (" << BlockMass::getFull()
               << " - " << Loop.BackedgeMass << ")\n"
               << " - scale = " << Loop.Scale << "\n");

  if (Loop.Scale > getMaxLoopScale()) {
    Loop.Scale = getMaxLoopScale();
    DEBUG(dbgs() << " - reduced-to-max-scale: " << getMaxLoopScale() << "\n");
  }
}

/// \brief Package up a loop.
void BlockFrequencyInfoImplBase::packageLoop(LoopData &Loop) {
  DEBUG(dbgs() << "packaging-loop: " << getLoopName(Loop) << "\n");

  // Clear the subloop exits to prevent quadratic memory usage.
  for (const BlockNode &M : Loop.Nodes) {
    if (auto *Loop = Working[M.Index].getPackagedLoop())
      Loop->Exits.clear();
    DEBUG(dbgs() << " - node: " << getBlockName(M.Index) << "\n");
  }
  Loop.IsPackaged = true;
}

void BlockFrequencyInfoImplBase::distributeMass(const BlockNode &Source,
                                                LoopData *OuterLoop,
                                                Distribution &Dist) {
  BlockMass Mass = Working[Source.Index].getMass();
  DEBUG(dbgs() << "  => mass:  " << Mass << "\n");

  // Distribute mass to successors as laid out in Dist.
  DitheringDistributer D(Dist, Mass);

#ifndef NDEBUG
  auto debugAssign = [&](const BlockNode &T, const BlockMass &M,
                         const char *Desc) {
    dbgs() << "  => assign " << M << " (" << D.RemMass << ")";
    if (Desc)
      dbgs() << " [" << Desc << "]";
    if (T.isValid())
      dbgs() << " to " << getBlockName(T);
    dbgs() << "\n";
  };
  (void)debugAssign;
#endif

  for (const Weight &W : Dist.Weights) {
    // Check for a local edge (non-backedge and non-exit).
    BlockMass Taken = D.takeMass(W.Amount);
    if (W.Type == Weight::Local) {
      Working[W.TargetNode.Index].getMass() += Taken;
      DEBUG(debugAssign(W.TargetNode, Taken, nullptr));
      continue;
    }

    // Backedges and exits only make sense if we're processing a loop.
    assert(OuterLoop && "backedge or exit outside of loop");

    // Check for a backedge.
    if (W.Type == Weight::Backedge) {
      OuterLoop->BackedgeMass += Taken;
      DEBUG(debugAssign(BlockNode(), Taken, "back"));
      continue;
    }

    // This must be an exit.
    assert(W.Type == Weight::Exit);
    OuterLoop->Exits.push_back(std::make_pair(W.TargetNode, Taken));
    DEBUG(debugAssign(W.TargetNode, Taken, "exit"));
  }
}

static void convertFloatingToInteger(BlockFrequencyInfoImplBase &BFI,
                                     const Scaled64 &Min, const Scaled64 &Max) {
  // Scale the Factor to a size that creates integers.  Ideally, integers would
  // be scaled so that Max == UINT64_MAX so that they can be best
  // differentiated.  However, the register allocator currently deals poorly
  // with large numbers.  Instead, push Min up a little from 1 to give some
  // room to differentiate small, unequal numbers.
  //
  // TODO: fix issues downstream so that ScalingFactor can be
  // Scaled64(1,64)/Max.
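  //
  // For example, if Min is 2^-4 and the dynamic range is small, ScalingFactor
  // becomes 2^4 << 3 = 2^7, so the least frequent block maps to the integer 8
  // rather than 1.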
  Scaled64 ScalingFactor = Min.inverse();
  if ((Max / Min).lg() < 60)
    ScalingFactor <<= 3;

  // Translate the floats to integers.
  DEBUG(dbgs() << "float-to-int: min = " << Min << ", max = " << Max
               << ", factor = " << ScalingFactor << "\n");
  for (size_t Index = 0; Index < BFI.Freqs.size(); ++Index) {
    Scaled64 Scaled = BFI.Freqs[Index].Scaled * ScalingFactor;
    BFI.Freqs[Index].Integer = std::max(UINT64_C(1), Scaled.toInt<uint64_t>());
    DEBUG(dbgs() << " - " << BFI.getBlockName(Index) << ": float = "
                 << BFI.Freqs[Index].Scaled << ", scaled = " << Scaled
                 << ", int = " << BFI.Freqs[Index].Integer << "\n");
  }
}

/// \brief Unwrap a loop package.
///
/// Visits all the members of a loop, adjusting their BlockData according to
/// the loop's pseudo-node.
static void unwrapLoop(BlockFrequencyInfoImplBase &BFI, LoopData &Loop) {
  DEBUG(dbgs() << "unwrap-loop-package: " << BFI.getLoopName(Loop)
               << ": mass = " << Loop.Mass << ", scale = " << Loop.Scale
               << "\n");
  Loop.Scale *= Loop.Mass.toScaled();
  Loop.IsPackaged = false;
  DEBUG(dbgs() << "  => combined-scale = " << Loop.Scale << "\n");

  // Propagate the head scale through the loop.  Since members are visited in
  // RPO, the head scale will be updated by the loop scale first, and then the
  // final head scale will be used to update the rest of the members.
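  //
  // For example, a member whose loop-local frequency is 2.0 inside a loop
  // whose combined scale is 8.0 ends up with a global frequency of 16.0.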
  for (const BlockNode &N : Loop.Nodes) {
    const auto &Working = BFI.Working[N.Index];
    Scaled64 &F = Working.isAPackage() ? Working.getPackagedLoop()->Scale
                                       : BFI.Freqs[N.Index].Scaled;
    Scaled64 New = Loop.Scale * F;
    DEBUG(dbgs() << " - " << BFI.getBlockName(N) << ": " << F << " => " << New
                 << "\n");
    F = New;
  }
}

void BlockFrequencyInfoImplBase::unwrapLoops() {
  // Set initial frequencies from loop-local masses.
  for (size_t Index = 0; Index < Working.size(); ++Index)
    Freqs[Index].Scaled = Working[Index].Mass.toScaled();

  for (LoopData &Loop : Loops)
    unwrapLoop(*this, Loop);
}

void BlockFrequencyInfoImplBase::finalizeMetrics() {
  // Unwrap loop packages in reverse post-order, tracking min and max
  // frequencies.
  auto Min = Scaled64::getLargest();
  auto Max = Scaled64::getZero();
  for (size_t Index = 0; Index < Working.size(); ++Index) {
    // Update min/max scale.
    Min = std::min(Min, Freqs[Index].Scaled);
    Max = std::max(Max, Freqs[Index].Scaled);
  }

  // Convert to integers.
  convertFloatingToInteger(*this, Min, Max);

  // Clean up data structures.
  cleanup(*this);

  // Print out the final stats.
  DEBUG(dump());
}

BlockFrequency
BlockFrequencyInfoImplBase::getBlockFreq(const BlockNode &Node) const {
  if (!Node.isValid())
    return 0;
  return Freqs[Node.Index].Integer;
}

Scaled64
BlockFrequencyInfoImplBase::getFloatingBlockFreq(const BlockNode &Node) const {
  if (!Node.isValid())
    return Scaled64::getZero();
  return Freqs[Node.Index].Scaled;
}

std::string
BlockFrequencyInfoImplBase::getBlockName(const BlockNode &Node) const {
  return std::string();
}

std::string
BlockFrequencyInfoImplBase::getLoopName(const LoopData &Loop) const {
  return getBlockName(Loop.getHeader()) + (Loop.isIrreducible() ? "**" : "*");
}

raw_ostream &
BlockFrequencyInfoImplBase::printBlockFreq(raw_ostream &OS,
                                           const BlockNode &Node) const {
  return OS << getFloatingBlockFreq(Node);
}

raw_ostream &
BlockFrequencyInfoImplBase::printBlockFreq(raw_ostream &OS,
                                           const BlockFrequency &Freq) const {
  Scaled64 Block(Freq.getFrequency(), 0);
  Scaled64 Entry(getEntryFreq(), 0);
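
  // The printed value is the block's frequency as a multiple of the entry
  // frequency: a block executed twice per function entry prints as 2.0.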
  return OS << Block / Entry;
}

void IrreducibleGraph::addNodesInLoop(const BFIBase::LoopData &OuterLoop) {
  Start = OuterLoop.getHeader();
  Nodes.reserve(OuterLoop.Nodes.size());
  for (auto N : OuterLoop.Nodes)
    addNode(N);
  indexNodes();
}

void IrreducibleGraph::addNodesInFunction() {
  Start = 0;
  for (uint32_t Index = 0; Index < BFI.Working.size(); ++Index)
    if (!BFI.Working[Index].isPackaged())
      addNode(Index);
  indexNodes();
}

void IrreducibleGraph::indexNodes() {
  for (auto &I : Nodes)
    Lookup[I.Node.Index] = &I;
}

void IrreducibleGraph::addEdge(IrrNode &Irr, const BlockNode &Succ,
                               const BFIBase::LoopData *OuterLoop) {
  if (OuterLoop && OuterLoop->isHeader(Succ))
    return;
  auto L = Lookup.find(Succ.Index);
  if (L == Lookup.end())
    return;
  IrrNode &SuccIrr = *L->second;
  Irr.Edges.push_back(&SuccIrr);
  SuccIrr.Edges.push_front(&Irr);
  ++SuccIrr.NumIn;
}
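
// Specialize GraphTraits so the generic SCC machinery (scc_begin, used by
// analyzeIrreducible below) can walk an IrreducibleGraph.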
namespace llvm {
template <> struct GraphTraits<IrreducibleGraph> {
  typedef bfi_detail::IrreducibleGraph GraphT;

  typedef const GraphT::IrrNode NodeType;
  typedef GraphT::IrrNode::iterator ChildIteratorType;

  static const NodeType *getEntryNode(const GraphT &G) {
    return G.StartIrr;
  }
  static ChildIteratorType child_begin(NodeType *N) { return N->succ_begin(); }
  static ChildIteratorType child_end(NodeType *N) { return N->succ_end(); }
};
} // end namespace llvm

/// \brief Find extra irreducible headers.
///
/// Find entry blocks and other blocks with backedges, which exist when \c G
/// contains irreducible sub-SCCs.
static void findIrreducibleHeaders(
    const BlockFrequencyInfoImplBase &BFI,
    const IrreducibleGraph &G,
    const std::vector<const IrreducibleGraph::IrrNode *> &SCC,
    LoopData::NodeList &Headers, LoopData::NodeList &Others) {
  // Map from each node in the SCC to whether it is an entry block.
  SmallDenseMap<const IrreducibleGraph::IrrNode *, bool, 8> InSCC;

  // InSCC also acts as the set of nodes in the graph.  Seed it.
  for (const auto *I : SCC)
    InSCC[I] = false;

  for (auto I = InSCC.begin(), E = InSCC.end(); I != E; ++I) {
    auto &Irr = *I->first;
    for (const auto *P : make_range(Irr.pred_begin(), Irr.pred_end())) {
      if (InSCC.count(P))
        continue;

      // This is an entry block.
      I->second = true;
      Headers.push_back(Irr.Node);
      DEBUG(dbgs() << "  => entry = " << BFI.getBlockName(Irr.Node) << "\n");
      break;
    }
  }
  assert(Headers.size() >= 2 && "Should be irreducible");
  if (Headers.size() == InSCC.size()) {
    // Every block is a header.
    std::sort(Headers.begin(), Headers.end());
    return;
  }

  // Look for extra headers from irreducible sub-SCCs.
  for (const auto &I : InSCC) {
    // Entry blocks are already headers.
    if (I.second)
      continue;

    auto &Irr = *I.first;
    for (const auto *P : make_range(Irr.pred_begin(), Irr.pred_end())) {
      // Skip forward edges.
      if (P->Node < Irr.Node)
        continue;

      // Skip predecessors from entry blocks.  These can have inverted
      // ordering.
      if (InSCC.lookup(P))
        continue;

      // Store the extra header.
      Headers.push_back(Irr.Node);
      DEBUG(dbgs() << "  => extra = " << BFI.getBlockName(Irr.Node) << "\n");
      break;
    }
    if (Headers.back() == Irr.Node)
      // Added this as a header.
      continue;

    // This is not a header.
    Others.push_back(Irr.Node);
    DEBUG(dbgs() << "  => other = " << BFI.getBlockName(Irr.Node) << "\n");
  }
  std::sort(Headers.begin(), Headers.end());
  std::sort(Others.begin(), Others.end());
}

static void createIrreducibleLoop(
    BlockFrequencyInfoImplBase &BFI, const IrreducibleGraph &G,
    LoopData *OuterLoop, std::list<LoopData>::iterator Insert,
    const std::vector<const IrreducibleGraph::IrrNode *> &SCC) {
  // Translate the SCC into RPO.
  DEBUG(dbgs() << " - found-scc\n");

  LoopData::NodeList Headers;
  LoopData::NodeList Others;
  findIrreducibleHeaders(BFI, G, SCC, Headers, Others);

  auto Loop = BFI.Loops.emplace(Insert, OuterLoop, Headers.begin(),
                                Headers.end(), Others.begin(), Others.end());

  // Update loop hierarchy.
  for (const auto &N : Loop->Nodes)
    if (BFI.Working[N.Index].isLoopHeader())
      BFI.Working[N.Index].Loop->Parent = &*Loop;
    else
      BFI.Working[N.Index].Loop = &*Loop;
}

iterator_range<std::list<LoopData>::iterator>
BlockFrequencyInfoImplBase::analyzeIrreducible(
    const IrreducibleGraph &G, LoopData *OuterLoop,
    std::list<LoopData>::iterator Insert) {
  assert((OuterLoop == nullptr) == (Insert == Loops.begin()));
  auto Prev = OuterLoop ? std::prev(Insert) : Loops.end();

  for (auto I = scc_begin(G); !I.isAtEnd(); ++I) {
    if (I->size() < 2)
      continue;

    // Translate the SCC into RPO.
    createIrreducibleLoop(*this, G, OuterLoop, Insert, *I);
  }

  if (OuterLoop)
    return make_range(std::next(Prev), Insert);
  return make_range(Loops.begin(), Insert);
}

void
BlockFrequencyInfoImplBase::updateLoopWithIrreducible(LoopData &OuterLoop) {
  OuterLoop.Exits.clear();
  OuterLoop.BackedgeMass = BlockMass::getEmpty();
  auto O = OuterLoop.Nodes.begin() + 1;
  for (auto I = O, E = OuterLoop.Nodes.end(); I != E; ++I)
    if (!Working[I->Index].isPackaged())
      *O++ = *I;
  OuterLoop.Nodes.erase(O, OuterLoop.Nodes.end());
}