1 //==- BlockFrequencyInfoImpl.h - Block Frequency Implementation -*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // Shared implementation of BlockFrequency for IR and Machine Instructions.
11 // See the documentation below for BlockFrequencyInfoImpl for details.
13 //===----------------------------------------------------------------------===//
15 #ifndef LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H
16 #define LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H
18 #include "llvm/ADT/DenseMap.h"
19 #include "llvm/ADT/PostOrderIterator.h"
20 #include "llvm/ADT/SCCIterator.h"
21 #include "llvm/ADT/iterator_range.h"
22 #include "llvm/IR/BasicBlock.h"
23 #include "llvm/Support/BlockFrequency.h"
24 #include "llvm/Support/BranchProbability.h"
25 #include "llvm/Support/Debug.h"
26 #include "llvm/Support/raw_ostream.h"
31 #define DEBUG_TYPE "block-freq"
33 //===----------------------------------------------------------------------===//
35 // UnsignedFloat definition.
37 // TODO: Make this private to BlockFrequencyInfoImpl or delete.
39 //===----------------------------------------------------------------------===//
/// Non-template helpers shared by every UnsignedFloat<DigitsT> instantiation.
/// Keeps the printing/division/multiplication machinery out of the template.
class UnsignedFloatBase {
  // Exponent range matches x87 long double, which makes conversion to
  // APFloat (used for printing) trivial.
  static const int32_t MaxExponent = 16383;
  static const int32_t MinExponent = -16382;
  static const int DefaultPrecision = 10;

  // Out-of-line printing/dumping helpers (defined in the .cpp).
  static void dump(uint64_t D, int16_t E, int Width);
  static raw_ostream &print(raw_ostream &OS, uint64_t D, int16_t E, int Width,
  static std::string toString(uint64_t D, int16_t E, int Width,

  // Thin wrappers over llvm::countLeadingZeros for each digit width.
  static int countLeadingZeros32(uint32_t N) { return countLeadingZeros(N); }
  static int countLeadingZeros64(uint64_t N) { return countLeadingZeros(N); }

  // Half of N, rounded up (the +(N & 1) rounds odd values upward).
  static uint64_t getHalf(uint64_t N) { return (N >> 1) + (N & 1); }

  // Split a signed value into (magnitude, is-negative). INT64_MIN is handled
  // explicitly since -INT64_MIN overflows.
  static std::pair<uint64_t, bool> splitSigned(int64_t N) {
    return std::make_pair(N, false);
    uint64_t Unsigned = N == INT64_MIN ? UINT64_C(1) << 63 : uint64_t(-N);
    return std::make_pair(Unsigned, true);

  // Rejoin magnitude and sign, saturating at INT64_MIN/INT64_MAX.
  static int64_t joinSigned(uint64_t U, bool IsNeg) {
    if (U > uint64_t(INT64_MAX))
      return IsNeg ? INT64_MIN : INT64_MAX;
    return IsNeg ? -int64_t(U) : int64_t(U);

  // Extract lg from a (rounded-lg, round-direction) pair produced by lgImpl().
  static int32_t extractLg(const std::pair<int32_t, int> &Lg) {
  // Undo upward rounding (Lg.second > 0) to get the floor.
  static int32_t extractLgFloor(const std::pair<int32_t, int> &Lg) {
    return Lg.first - (Lg.second > 0);
  // Undo downward rounding (Lg.second < 0) to get the ceiling.
  static int32_t extractLgCeiling(const std::pair<int32_t, int> &Lg) {
    return Lg.first + (Lg.second < 0);

  // Full-precision 64x64 division/multiplication returning (digits, exponent).
  static std::pair<uint64_t, int16_t> divide64(uint64_t L, uint64_t R);
  static std::pair<uint64_t, int16_t> multiply64(uint64_t L, uint64_t R);

  // Compare L to R where R is pre-shifted right by Shift.
  static int compare(uint64_t L, uint64_t R, int Shift) {
    uint64_t L_adjusted = L >> Shift;
    // Digits were equal after shifting; L is greater iff shifting lost bits.
    return L > L_adjusted << Shift ? 1 : 0;
96 /// \brief Simple representation of an unsigned floating point.
98 /// UnsignedFloat is an unsigned floating point number. It uses simple
99 /// saturation arithmetic, and every operation is well-defined for every value.
101 /// The number is split into a signed exponent and unsigned digits. The number
102 /// represented is \c getDigits()*2^getExponent(). In this way, the digits are
103 /// much like the mantissa in the x87 long double, but there is no canonical
104 /// form, so the same number can be represented by many bit representations
105 /// (it's always in "denormal" mode).
107 /// UnsignedFloat is templated on the underlying integer type for digits, which
108 /// is expected to be one of uint64_t, uint32_t, uint16_t or uint8_t.
110 /// Unlike builtin floating point types, UnsignedFloat is portable.
112 /// Unlike APFloat, UnsignedFloat does not model architecture floating point
113 /// behaviour (this should make it a little faster), and implements most
114 /// operators (this makes it usable).
116 /// UnsignedFloat is totally ordered. However, there is no canonical form, so
117 /// there are multiple representations of most scalars. E.g.:
119 /// UnsignedFloat(8u, 0) == UnsignedFloat(4u, 1)
120 /// UnsignedFloat(4u, 1) == UnsignedFloat(2u, 2)
121 /// UnsignedFloat(2u, 2) == UnsignedFloat(1u, 3)
123 /// UnsignedFloat implements most arithmetic operations. Precision is kept
124 /// where possible. Uses simple saturation arithmetic, so that operations
125 /// saturate to 0.0 or getLargest() rather than under or overflowing. It has
126 /// some extra arithmetic for unit inversion. 0.0/0.0 is defined to be 0.0.
127 /// Any other division by 0.0 is defined to be getLargest().
129 /// As a convenience for modifying the exponent, left and right shifting are
130 /// both implemented, and both interpret negative shifts as positive shifts in
131 /// the opposite direction.
133 /// Exponents are limited to the range accepted by x87 long double. This makes
134 /// it trivial to add functionality to convert to APFloat (this is already
135 /// relied on for the implementation of printing).
137 /// The current plan is to gut this and make the necessary parts of it (even
138 /// more) private to BlockFrequencyInfo.
template <class DigitsT> class UnsignedFloat : UnsignedFloatBase {
  static_assert(!std::numeric_limits<DigitsT>::is_signed,
                "only unsigned floats supported");

  typedef DigitsT DigitsType;
  typedef std::numeric_limits<DigitsType> DigitsLimits;

  // Number of bits in the digits (mantissa) type; must fit in 64 bits.
  static const int Width = sizeof(DigitsType) * 8;
  static_assert(Width <= 64, "invalid integer width for digits");

  // Default-construct as zero (0 * 2^0).
  UnsignedFloat() : Digits(0), Exponent(0) {}

  UnsignedFloat(DigitsType Digits, int16_t Exponent)
      : Digits(Digits), Exponent(Exponent) {}

  // Construct from a (digits, exponent) pair, e.g. from divide64/multiply64.
  UnsignedFloat(const std::pair<uint64_t, int16_t> &X)
      : Digits(X.first), Exponent(X.second) {}

  // Named constants and factory functions.
  static UnsignedFloat getZero() { return UnsignedFloat(0, 0); }
  static UnsignedFloat getOne() { return UnsignedFloat(1, 0); }
  static UnsignedFloat getLargest() {
    return UnsignedFloat(DigitsLimits::max(), MaxExponent);
  // Convert an integer, narrowing (with rounding) to Width if needed.
  static UnsignedFloat getFloat(uint64_t N) { return adjustToWidth(N, 0); }
  // 1/N as a float.
  static UnsignedFloat getInverseFloat(uint64_t N) {
    return getFloat(N).invert();
  // N/D as a float.
  static UnsignedFloat getFraction(DigitsType N, DigitsType D) {
    return getQuotient(N, D);
  int16_t getExponent() const { return Exponent; }
  DigitsType getDigits() const { return Digits; }

  /// \brief Convert to the given integer type.
  ///
  /// Convert to \c IntT using simple saturating arithmetic, truncating if
  template <class IntT> IntT toInt() const;

  bool isZero() const { return !Digits; }
  bool isLargest() const { return *this == getLargest(); }
    // A value of one requires a single set bit exactly cancelling Exponent.
    if (Exponent > 0 || Exponent <= -Width)
    return Digits == DigitsType(1) << -Exponent;

  /// \brief The log base 2, rounded.
  ///
  /// Get the lg of the scalar. lg 0 is defined to be INT32_MIN.
  int32_t lg() const { return extractLg(lgImpl()); }

  /// \brief The log base 2, rounded towards INT32_MIN.
  ///
  /// Get the lg floor. lg 0 is defined to be INT32_MIN.
  int32_t lgFloor() const { return extractLgFloor(lgImpl()); }

  /// \brief The log base 2, rounded towards INT32_MAX.
  ///
  /// Get the lg ceiling. lg 0 is defined to be INT32_MIN.
  int32_t lgCeiling() const { return extractLgCeiling(lgImpl()); }

  // Total ordering via the three-way compare() below.
  bool operator==(const UnsignedFloat &X) const { return compare(X) == 0; }
  bool operator<(const UnsignedFloat &X) const { return compare(X) < 0; }
  bool operator!=(const UnsignedFloat &X) const { return compare(X) != 0; }
  bool operator>(const UnsignedFloat &X) const { return compare(X) > 0; }
  bool operator<=(const UnsignedFloat &X) const { return compare(X) <= 0; }
  bool operator>=(const UnsignedFloat &X) const { return compare(X) >= 0; }

  bool operator!() const { return isZero(); }
  /// \brief Convert to a decimal representation in a string.
  ///
  /// Convert to a string. Uses scientific notation for very large/small
  /// numbers. Scientific notation is used roughly for numbers outside of the
  /// range 2^-64 through 2^64.
  ///
  /// \c Precision indicates the number of decimal digits of precision to use;
  /// 0 requests the maximum available.
  ///
  /// As a special case to make debugging easier, if the number is small enough
  /// to convert without scientific notation and has more than \c Precision
  /// digits before the decimal place, it's printed accurately to the first
  /// digit past zero. E.g., assuming 10 digits of precision:
  ///
  ///     98765432198.7654... => 98765432198.8
  ///      8765432198.7654... =>  8765432198.8
  ///       765432198.7654... =>   765432198.8
  ///        65432198.7654... =>    65432198.77
  ///         5432198.7654... =>     5432198.765
  std::string toString(unsigned Precision = DefaultPrecision) {
    return UnsignedFloatBase::toString(Digits, Exponent, Width, Precision);

  /// \brief Print a decimal representation.
  ///
  /// Print a string. See toString for documentation.
  raw_ostream &print(raw_ostream &OS,
                     unsigned Precision = DefaultPrecision) const {
    return UnsignedFloatBase::print(OS, Digits, Exponent, Width, Precision);
  void dump() const { return UnsignedFloatBase::dump(Digits, Exponent, Width); }
  // Saturating arithmetic; definitions follow the class out-of-line.
  UnsignedFloat &operator+=(const UnsignedFloat &X);
  UnsignedFloat &operator-=(const UnsignedFloat &X);
  UnsignedFloat &operator*=(const UnsignedFloat &X);
  UnsignedFloat &operator/=(const UnsignedFloat &X);
  UnsignedFloat &operator<<=(int16_t Shift) { shiftLeft(Shift); return *this; }
  UnsignedFloat &operator>>=(int16_t Shift) { shiftRight(Shift); return *this; }

  // Shift helpers; negative shifts go in the opposite direction.
  void shiftLeft(int32_t Shift);
  void shiftRight(int32_t Shift);

  /// \brief Adjust two floats to have matching exponents.
  ///
  /// Adjust \c this and \c X to have matching exponents. Returns the new \c X
  /// by value. Does nothing if \a isZero() for either.
  ///
  /// The value that compares smaller will lose precision, and possibly become
  UnsignedFloat matchExponents(UnsignedFloat X);

  /// \brief Increase exponent to match another float.
  ///
  /// Increases \c this to have an exponent matching \c X. May decrease the
  /// exponent of \c X in the process, and \c this may possibly become \a
  void increaseExponentToMatch(UnsignedFloat &X, int32_t ExponentDiff);
  /// \brief Scale a large number accurately.
  ///
  /// Scale N (multiply it by this). Uses full precision multiplication, even
  /// if Width is smaller than 64, so information is not lost.
  uint64_t scale(uint64_t N) const;
  uint64_t scaleByInverse(uint64_t N) const {
    // TODO: implement directly, rather than relying on inverse. Inverse is
    return inverse().scale(N);
  // Signed variants: split off the sign, scale the magnitude, rejoin.
  int64_t scale(int64_t N) const {
    std::pair<uint64_t, bool> Unsigned = splitSigned(N);
    return joinSigned(scale(Unsigned.first), Unsigned.second);
  int64_t scaleByInverse(int64_t N) const {
    std::pair<uint64_t, bool> Unsigned = splitSigned(N);
    return joinSigned(scaleByInverse(Unsigned.first), Unsigned.second);

  // Three-way comparison against another float or a plain integer.
  int compare(const UnsignedFloat &X) const;
  int compareTo(uint64_t N) const {
    UnsignedFloat Float = getFloat(N);
    int Compare = compare(Float);
    if (Width == 64 || Compare != 0)

    // Check for precision loss. We know *this == RoundTrip.
    uint64_t RoundTrip = Float.template toInt<uint64_t>();
    return N == RoundTrip ? 0 : RoundTrip < N ? -1 : 1;
  // Unsigned floats always compare greater than negative numbers.
  int compareTo(int64_t N) const { return N < 0 ? 1 : compareTo(uint64_t(N)); }

  // Reciprocal, in place and by value.
  UnsignedFloat &invert() { return *this = UnsignedFloat::getFloat(1) / *this; }
  UnsignedFloat inverse() const { return UnsignedFloat(*this).invert(); }
  // Shared implementation of lg/lgFloor/lgCeiling: returns (rounded lg,
  // round direction).
  std::pair<int32_t, int> lgImpl() const;

  // countLeadingZeros relative to Width rather than the underlying 32/64 bits.
  static int countLeadingZerosWidth(DigitsType Digits) {
      return countLeadingZeros64(Digits);
      return countLeadingZeros32(Digits);
    return countLeadingZeros32(Digits) + Width - 32;

  // Build N*2^S, rounding N into DigitsType when it does not fit.
  static UnsignedFloat adjustToWidth(uint64_t N, int32_t S) {
    assert(S >= MinExponent);
    assert(S <= MaxExponent);
    if (Width == 64 || N <= DigitsLimits::max())
      return UnsignedFloat(N, S);

    // Shift right so the digits fit, moving the lost bits into the exponent.
    int Shift = 64 - Width - countLeadingZeros64(N);
    DigitsType Shifted = N >> Shift;
    assert(S + Shift <= MaxExponent);
    // Round based on the highest bit shifted out.
    return getRounded(UnsignedFloat(Shifted, S + Shift),
                      N & UINT64_C(1) << (Shift - 1));

  // Round P up by one ulp when Round is set.
  static UnsignedFloat getRounded(UnsignedFloat P, bool Round) {
    if (P.Digits == DigitsLimits::max())
      // Careful of overflow in the exponent.
      return UnsignedFloat(1, P.Exponent) <<= Width;
    return UnsignedFloat(P.Digits + 1, P.Exponent);
// Define the binary operators (+, -, *, /, <<, >>) in terms of the
// corresponding compound-assignment operators on a copy of the left operand.
#define UNSIGNED_FLOAT_BOP(op, base)                                           \
  template <class DigitsT>                                                     \
  UnsignedFloat<DigitsT> operator op(const UnsignedFloat<DigitsT> &L,          \
                                     const UnsignedFloat<DigitsT> &R) {        \
    return UnsignedFloat<DigitsT>(L) base R;                                   \
UNSIGNED_FLOAT_BOP(+, += )
UNSIGNED_FLOAT_BOP(-, -= )
UNSIGNED_FLOAT_BOP(*, *= )
UNSIGNED_FLOAT_BOP(/, /= )
UNSIGNED_FLOAT_BOP(<<, <<= )
UNSIGNED_FLOAT_BOP(>>, >>= )
#undef UNSIGNED_FLOAT_BOP
369 template <class DigitsT>
370 raw_ostream &operator<<(raw_ostream &OS, const UnsignedFloat<DigitsT> &X) {
371 return X.print(OS, 10);
// Define heterogeneous comparisons between UnsignedFloat and the four common
// integer types, in both argument orders, via UnsignedFloat::compareTo().
// T2 is the (wider) type the raw integer is promoted to before comparing.
#define UNSIGNED_FLOAT_COMPARE_TO_TYPE(op, T1, T2)                             \
  template <class DigitsT>                                                     \
  bool operator op(const UnsignedFloat<DigitsT> &L, T1 R) {                    \
    return L.compareTo(T2(R)) op 0;                                            \
  template <class DigitsT>                                                     \
  bool operator op(T1 L, const UnsignedFloat<DigitsT> &R) {                    \
    return 0 op R.compareTo(T2(L));                                            \
#define UNSIGNED_FLOAT_COMPARE_TO(op)                                          \
  UNSIGNED_FLOAT_COMPARE_TO_TYPE(op, uint64_t, uint64_t)                       \
  UNSIGNED_FLOAT_COMPARE_TO_TYPE(op, uint32_t, uint64_t)                       \
  UNSIGNED_FLOAT_COMPARE_TO_TYPE(op, int64_t, int64_t)                         \
  UNSIGNED_FLOAT_COMPARE_TO_TYPE(op, int32_t, int64_t)
UNSIGNED_FLOAT_COMPARE_TO(< )
UNSIGNED_FLOAT_COMPARE_TO(> )
UNSIGNED_FLOAT_COMPARE_TO(== )
UNSIGNED_FLOAT_COMPARE_TO(!= )
UNSIGNED_FLOAT_COMPARE_TO(<= )
UNSIGNED_FLOAT_COMPARE_TO(>= )
#undef UNSIGNED_FLOAT_COMPARE_TO
#undef UNSIGNED_FLOAT_COMPARE_TO_TYPE
397 template <class DigitsT>
398 uint64_t UnsignedFloat<DigitsT>::scale(uint64_t N) const {
399 if (Width == 64 || N <= DigitsLimits::max())
400 return (getFloat(N) * *this).template toInt<uint64_t>();
402 // Defer to the 64-bit version.
403 return UnsignedFloat<uint64_t>(Digits, Exponent).scale(N);
// Full-precision product of two digit values (exponent 0), rounding into
// DigitsType as necessary.
template <class DigitsT>
UnsignedFloat<DigitsT> UnsignedFloat<DigitsT>::getProduct(DigitsType L,
  // Check for numbers that we can compute with 64-bit math.
  if (Width <= 32 || (L <= UINT32_MAX && R <= UINT32_MAX))
    return adjustToWidth(uint64_t(L) * uint64_t(R), 0);

  // Do the full thing.
  return UnsignedFloat(multiply64(L, R));
// Full-precision quotient Dividend/Divisor as a float.
template <class DigitsT>
UnsignedFloat<DigitsT> UnsignedFloat<DigitsT>::getQuotient(DigitsType Dividend,
                                                           DigitsType Divisor) {
    return UnsignedFloat(divide64(Dividend, Divisor));

  // We can compute this with 64-bit math.
  // Shift the dividend as far left as possible to maximize precision, and
  // compensate in the exponent.
  int Shift = countLeadingZeros64(Dividend);
  uint64_t Shifted = uint64_t(Dividend) << Shift;
  uint64_t Quotient = Shifted / Divisor;

  // If Quotient needs to be shifted, then adjustToWidth will round.
  if (Quotient > DigitsLimits::max())
    return adjustToWidth(Quotient, -Shift);

  // Round based on the value of the next bit.
  return getRounded(UnsignedFloat(Quotient, -Shift),
                    Shifted % Divisor >= getHalf(Divisor));
// Saturating conversion to IntT: values at or above IntT's max clamp to max;
// otherwise the digits are shifted by the exponent (truncating fractions).
template <class DigitsT>
template <class IntT>
IntT UnsignedFloat<DigitsT>::toInt() const {
  typedef std::numeric_limits<IntT> Limits;
  if (*this >= Limits::max())
    return Limits::max();

    // Positive exponent: shift the digits up into place.
    assert(size_t(Exponent) < sizeof(IntT) * 8);
    return N << Exponent;
    // Negative exponent: shift down, discarding fractional bits.
    assert(size_t(-Exponent) < sizeof(IntT) * 8);
    return N >> -Exponent;
// Compute the rounded lg and the direction it was rounded: 0 for exact
// (power of two), 1 for rounded up, -1 for rounded down. lg 0 is INT32_MIN.
template <class DigitsT>
std::pair<int32_t, int> UnsignedFloat<DigitsT>::lgImpl() const {
    return std::make_pair(INT32_MIN, 0);

  // Get the floor of the lg of Digits.
  int32_t LocalFloor = Width - countLeadingZerosWidth(Digits) - 1;

  // Get the floor of the lg of this.
  int32_t Floor = Exponent + LocalFloor;
  if (Digits == UINT64_C(1) << LocalFloor)
    return std::make_pair(Floor, 0);

  // Round based on the next digit.
  assert(LocalFloor >= 1);
  bool Round = Digits & UINT64_C(1) << (LocalFloor - 1);
  return std::make_pair(Floor + Round, Round ? 1 : -1);
// Bring *this and X to a common exponent, increasing whichever has the
// smaller exponent; the adjusted copy of X is returned by value.
template <class DigitsT>
UnsignedFloat<DigitsT> UnsignedFloat<DigitsT>::matchExponents(UnsignedFloat X) {
  if (isZero() || X.isZero() || Exponent == X.Exponent)

  int32_t Diff = int32_t(X.Exponent) - int32_t(Exponent);
    // X has the larger exponent: raise ours toward it.
    increaseExponentToMatch(X, Diff);
    // We have the larger exponent: raise X toward ours.
    X.increaseExponentToMatch(*this, -Diff);
// Raise this->Exponent by ExponentDiff so it matches X's, first consuming
// X's leading-zero headroom (lossless), then right-shifting our digits
// (lossy).
template <class DigitsT>
void UnsignedFloat<DigitsT>::increaseExponentToMatch(UnsignedFloat &X,
                                                     int32_t ExponentDiff) {
  assert(ExponentDiff > 0);
  // Gap too wide for the digits to overlap at all.
  if (ExponentDiff >= 2 * Width) {

  // Use up any leading zeros on X, and then shift this.
  int32_t ShiftX = std::min(countLeadingZerosWidth(X.Digits), ExponentDiff);
  assert(ShiftX < Width);

  int32_t ShiftThis = ExponentDiff - ShiftX;
  if (ShiftThis >= Width) {

  // X's digits move up (exponent down); our digits move down (exponent up).
  X.Exponent -= ShiftX;
  Digits >>= ShiftThis;
  Exponent += ShiftThis;
// Saturating addition: results never exceed getLargest().
template <class DigitsT>
UnsignedFloat<DigitsT> &UnsignedFloat<DigitsT>::
operator+=(const UnsignedFloat &X) {
  // Fast paths: adding zero, or already saturated.
  if (isLargest() || X.isZero())
  if (isZero() || X.isLargest())

  // Normalize exponents.
  UnsignedFloat Scaled = matchExponents(X);

  // Check for zero again.
    return *this = Scaled;

  // Compute sum; unsigned wrap-around signals overflow.
  DigitsType Sum = Digits + Scaled.Digits;
  bool DidOverflow = Sum < Digits;
  if (Exponent == MaxExponent)
    return *this = getLargest();

  // Overflowed: re-normalize by setting the top bit and halving the digits.
  Digits = UINT64_C(1) << (Width - 1) | Digits >> 1;
// Saturating subtraction: results never go below zero.
template <class DigitsT>
UnsignedFloat<DigitsT> &UnsignedFloat<DigitsT>::
operator-=(const UnsignedFloat &X) {
    return *this = getZero();

  // Normalize exponents.
  UnsignedFloat Scaled = matchExponents(X);
  assert(Digits >= Scaled.Digits);

  // Compute difference.
  if (!Scaled.isZero()) {
    Digits -= Scaled.Digits;

  // Check if X just barely lost its last bit. E.g., for 32-bit:
  //
  //   1*2^32 - 1*2^0 == 0xffffffff != 1*2^32
  if (*this == UnsignedFloat(1, X.lgFloor() + Width)) {
    // All-ones digits: DigitsType(0) - 1 wraps to the type's maximum.
    Digits = DigitsType(0) - 1;
// Multiplication: multiply digits at full precision, then fold the summed
// exponents back in via a (saturating) left shift.
template <class DigitsT>
UnsignedFloat<DigitsT> &UnsignedFloat<DigitsT>::
operator*=(const UnsignedFloat &X) {
  // Save the exponents.
  int32_t Exponents = int32_t(Exponent) + int32_t(X.Exponent);

  // Get the raw product.
  *this = getProduct(Digits, X.Digits);

  // Combine with exponents.
  return *this <<= Exponents;
// Division: divide digits at full precision, then fold the exponent
// difference back in via a (saturating) left shift.
template <class DigitsT>
UnsignedFloat<DigitsT> &UnsignedFloat<DigitsT>::
operator/=(const UnsignedFloat &X) {
    // Division by zero saturates to the largest value.
    return *this = getLargest();

  // Save the exponents.
  int32_t Exponents = int32_t(Exponent) - int32_t(X.Exponent);

  // Get the raw quotient.
  *this = getQuotient(Digits, X.Digits);

  // Combine with exponents.
  return *this <<= Exponents;
// Multiply by 2^Shift, preferring to grow the exponent; only shifts the
// digits (risking saturation) once the exponent is pinned at MaxExponent.
template <class DigitsT>
void UnsignedFloat<DigitsT>::shiftLeft(int32_t Shift) {
  if (!Shift || isZero())
  // INT32_MIN cannot be negated to forward to shiftRight.
  assert(Shift != INT32_MIN);

  // Shift as much as we can in the exponent.
  int32_t ExponentShift = std::min(Shift, MaxExponent - Exponent);
  Exponent += ExponentShift;
  if (ExponentShift == Shift)

  // Check this late, since it's rare.

  // Shift the digits themselves.
  Shift -= ExponentShift;
  if (Shift > countLeadingZerosWidth(Digits)) {
    // Shifting would overflow the digits: saturate.
    *this = getLargest();
// Divide by 2^Shift, preferring to shrink the exponent; only shifts the
// digits (losing precision) once the exponent is pinned at MinExponent.
template <class DigitsT>
void UnsignedFloat<DigitsT>::shiftRight(int32_t Shift) {
  if (!Shift || isZero())
  // INT32_MIN cannot be negated to forward to shiftLeft.
  assert(Shift != INT32_MIN);

  // Shift as much as we can in the exponent.
  int32_t ExponentShift = std::min(Shift, Exponent - MinExponent);
  Exponent -= ExponentShift;
  if (ExponentShift == Shift)

  // Shift the digits themselves.
  Shift -= ExponentShift;
  if (Shift >= Width) {
// Three-way comparison: returns negative, zero, or positive.
template <class DigitsT>
int UnsignedFloat<DigitsT>::compare(const UnsignedFloat &X) const {
    return X.isZero() ? 0 : -1;

  // Check for the scale. Use lgFloor to be sure that the exponent difference
  // is always lower than 64.
  int32_t lgL = lgFloor(), lgR = X.lgFloor();
    return lgL < lgR ? -1 : 1;

  // Same magnitude class: compare digits, normalizing by the smaller
  // exponent (negate when the operand order is swapped).
  if (Exponent < X.Exponent)
    return UnsignedFloatBase::compare(Digits, X.Digits, X.Exponent - Exponent);
  return -UnsignedFloatBase::compare(X.Digits, Digits, Exponent - X.Exponent);
// Tell LLVM's isPodLike trait that UnsignedFloat<T> may be treated as plain
// data (it holds only an integer digits/exponent pair).
template <class T> struct isPodLike<UnsignedFloat<T>> {
  static const bool value = true;
703 //===----------------------------------------------------------------------===//
705 // BlockMass definition.
707 // TODO: Make this private to BlockFrequencyInfoImpl or delete.
709 //===----------------------------------------------------------------------===//
712 /// \brief Mass of a block.
714 /// This class implements a sort of fixed-point fraction always between 0.0 and
715 /// 1.0. getMass() == UINT64_MAX indicates a value of 1.0.
717 /// Masses can be added and subtracted. Simple saturation arithmetic is used,
718 /// so arithmetic operations never overflow or underflow.
720 /// Masses can be multiplied. Multiplication treats full mass as 1.0 and uses
721 /// an inexpensive floating-point algorithm that's off-by-one (almost, but not
722 /// quite, maximum precision).
724 /// Masses can be scaled by \a BranchProbability at maximum precision.
  // Default-construct as empty (mass of 0).
  BlockMass() : Mass(0) {}
  explicit BlockMass(uint64_t Mass) : Mass(Mass) {}

  // Named constructors: empty is 0, full is UINT64_MAX (interpreted as 1.0).
  static BlockMass getEmpty() { return BlockMass(); }
  static BlockMass getFull() { return BlockMass(UINT64_MAX); }

  uint64_t getMass() const { return Mass; }

  bool isFull() const { return Mass == UINT64_MAX; }
  bool isEmpty() const { return !Mass; }

  bool operator!() const { return isEmpty(); }

  /// \brief Add another mass.
  ///
  /// Adds another mass, saturating at \a isFull() rather than overflowing.
  BlockMass &operator+=(const BlockMass &X) {
    uint64_t Sum = Mass + X.Mass;
    // Unsigned wrap-around means the sum overflowed; clamp to full.
    Mass = Sum < Mass ? UINT64_MAX : Sum;

  /// \brief Subtract another mass.
  ///
  /// Subtracts another mass, saturating at \a isEmpty() rather than
  BlockMass &operator-=(const BlockMass &X) {
    uint64_t Diff = Mass - X.Mass;
    // Unsigned wrap-around means the difference underflowed; clamp to empty.
    Mass = Diff > Mass ? 0 : Diff;
  /// \brief Multiply by a branch probability.
  ///
  /// Multiply by P. Guarantees full precision.
  ///
  /// This could be naively implemented by multiplying by the numerator and
  /// dividing by the denominator, but in what order? Multiplying first can
  /// overflow, while dividing first will lose precision (potentially, changing
  /// a non-zero mass to zero).
  ///
  /// The implementation mixes the two methods. Since \a BranchProbability
  /// uses 32-bits and \a BlockMass 64-bits, shift the mass as far to the left
  /// as there is room, then divide by the denominator to get a quotient.
  /// Multiplying by the numerator and right shifting gives a first
  ///
  /// Calculate the error in this first approximation by calculating the
  /// opposite mass (multiply by the opposite numerator and shift) and
  /// subtracting both from the original mass.
  ///
  /// Add to the first approximation the correct fraction of this error value.
  /// This time, multiply first and then divide, since there is no danger of
  ///
  /// \pre P represents a fraction between 0.0 and 1.0.
  BlockMass &operator*=(const BranchProbability &P);

  // Comparisons are simply comparisons of the underlying fixed-point value.
  bool operator==(const BlockMass &X) const { return Mass == X.Mass; }
  bool operator!=(const BlockMass &X) const { return Mass != X.Mass; }
  bool operator<=(const BlockMass &X) const { return Mass <= X.Mass; }
  bool operator>=(const BlockMass &X) const { return Mass >= X.Mass; }
  bool operator<(const BlockMass &X) const { return Mass < X.Mass; }
  bool operator>(const BlockMass &X) const { return Mass > X.Mass; }

  /// \brief Convert to floating point.
  ///
  /// Convert to a float. \a isFull() gives 1.0, while \a isEmpty() gives
  /// slightly above 0.0.
  UnsignedFloat<uint64_t> toFloat() const;

  raw_ostream &print(raw_ostream &OS) const;
804 inline BlockMass operator+(const BlockMass &L, const BlockMass &R) {
805 return BlockMass(L) += R;
807 inline BlockMass operator-(const BlockMass &L, const BlockMass &R) {
808 return BlockMass(L) -= R;
810 inline BlockMass operator*(const BlockMass &L, const BranchProbability &R) {
811 return BlockMass(L) *= R;
813 inline BlockMass operator*(const BranchProbability &L, const BlockMass &R) {
814 return BlockMass(R) *= L;
// Stream insertion for BlockMass.
inline raw_ostream &operator<<(raw_ostream &OS, const BlockMass &X) {
// Tell LLVM's isPodLike trait that BlockMass may be treated as plain data
// (it wraps a single uint64_t).
template <> struct isPodLike<BlockMass> {
  static const bool value = true;
826 //===----------------------------------------------------------------------===//
828 // BlockFrequencyInfoImpl definition.
830 //===----------------------------------------------------------------------===//
// Forward declarations for the IR and MachineInstr specializations of the
// shared implementation below.
class BranchProbabilityInfo;
class MachineBasicBlock;
class MachineBranchProbabilityInfo;
class MachineFunction;
class MachineLoopInfo;

namespace bfi_detail {
struct IrreducibleGraph;

// This is part of a workaround for a GCC 4.7 crash on lambdas.
template <class BT> struct BlockEdgesAdder;
851 /// \brief Base class for BlockFrequencyInfoImpl
853 /// BlockFrequencyInfoImplBase has supporting data structures and some
854 /// algorithms for BlockFrequencyInfoImplBase. Only algorithms that depend on
855 /// the block type (or that call such algorithms) are skipped here.
857 /// Nevertheless, the majority of the overall algorithm documentation lives with
858 /// BlockFrequencyInfoImpl. See there for details.
class BlockFrequencyInfoImplBase {
  // The working floating-point type for frequencies.
  typedef UnsignedFloat<uint64_t> Float;

  /// \brief Representative of a block.
  ///
  /// This is a simple wrapper around an index into the reverse-post-order
  /// traversal of the blocks.
  ///
  /// Unlike a block pointer, its order has meaning (location in the
  /// topological sort) and its class is the same regardless of block type.
    typedef uint32_t IndexType;

    // Order follows the underlying RPO index.
    bool operator==(const BlockNode &X) const { return Index == X.Index; }
    bool operator!=(const BlockNode &X) const { return Index != X.Index; }
    bool operator<=(const BlockNode &X) const { return Index <= X.Index; }
    bool operator>=(const BlockNode &X) const { return Index >= X.Index; }
    bool operator<(const BlockNode &X) const { return Index < X.Index; }
    bool operator>(const BlockNode &X) const { return Index > X.Index; }

    // Default-constructed nodes use UINT32_MAX as an invalid sentinel.
    BlockNode() : Index(UINT32_MAX) {}
    BlockNode(IndexType Index) : Index(Index) {}

    bool isValid() const { return Index <= getMaxIndex(); }
    static size_t getMaxIndex() { return UINT32_MAX - 1; }

  /// \brief Stats about a block itself.
  struct FrequencyData {
  /// \brief Data about a loop.
  ///
  /// Contains the data necessary to represent a loop as a
  /// pseudo-node once it's packaged.
    typedef SmallVector<std::pair<BlockNode, BlockMass>, 4> ExitMap;
    typedef SmallVector<BlockNode, 4> NodeList;
    LoopData *Parent;       ///< The parent loop.
    bool IsPackaged;        ///< Whether this has been packaged.
    uint32_t NumHeaders;    ///< Number of headers.
    ExitMap Exits;          ///< Successor edges (and weights).
    NodeList Nodes;         ///< Header and the members of the loop.
    BlockMass BackedgeMass; ///< Mass returned to loop header.

    // Reducible loop: exactly one header, stored first in Nodes.
    LoopData(LoopData *Parent, const BlockNode &Header)
        : Parent(Parent), IsPackaged(false), NumHeaders(1), Nodes(1, Header) {}
    // Irreducible loop: multiple headers first, then the remaining members.
    template <class It1, class It2>
    LoopData(LoopData *Parent, It1 FirstHeader, It1 LastHeader, It2 FirstOther,
        : Parent(Parent), IsPackaged(false), Nodes(FirstHeader, LastHeader) {
      NumHeaders = Nodes.size();
      Nodes.insert(Nodes.end(), FirstOther, LastOther);
    bool isHeader(const BlockNode &Node) const {
        // Irreducible: headers are the sorted prefix of Nodes.
        return std::binary_search(Nodes.begin(), Nodes.begin() + NumHeaders,
      return Node == Nodes[0];
    BlockNode getHeader() const { return Nodes[0]; }
    bool isIrreducible() const { return NumHeaders > 1; }

    // Iterate over the non-header members of the loop.
    NodeList::const_iterator members_begin() const {
      return Nodes.begin() + NumHeaders;
    NodeList::const_iterator members_end() const { return Nodes.end(); }
    iterator_range<NodeList::const_iterator> members() const {
      return make_range(members_begin(), members_end());
  /// \brief Index of loop information.
    BlockNode Node;       ///< This node.
    LoopData *Loop;       ///< The loop this block is inside.
    BlockMass Mass;       ///< Mass distribution from the entry block.

    WorkingData(const BlockNode &Node) : Node(Node), Loop(nullptr) {}

    bool isLoopHeader() const { return Loop && Loop->isHeader(Node); }
    // Header of both Loop and an enclosing irreducible parent loop.
    bool isDoubleLoopHeader() const {
      return isLoopHeader() && Loop->Parent && Loop->Parent->isIrreducible() &&
             Loop->Parent->isHeader(Node);

    LoopData *getContainingLoop() const {
      if (!isDoubleLoopHeader())
      // Double-header: skip past both loops this node heads.
      return Loop->Parent->Parent;

    /// \brief Resolve a node to its representative.
    ///
    /// Get the node currently representing Node, which could be a containing
    ///
    /// This function should only be called when distributing mass. As long as
    /// there are no irreducible edges to Node, then it will have complexity
    /// O(1) in this context.
    ///
    /// In general, the complexity is O(L), where L is the number of loop
    /// headers Node has been packaged into. Since this method is called in
    /// the context of distributing mass, L will be the number of loop headers
    /// an early exit edge jumps out of.
    BlockNode getResolvedNode() const {
      auto L = getPackagedLoop();
      return L ? L->getHeader() : Node;
    LoopData *getPackagedLoop() const {
      if (!Loop || !Loop->IsPackaged)
      // Walk up to the outermost packaged ancestor.
      while (L->Parent && L->Parent->IsPackaged)

    /// \brief Get the appropriate mass for a node.
    ///
    /// Get appropriate mass for Node. If Node is a loop-header (whose loop
    /// has been packaged), returns the mass of its pseudo-node. If it's a
    /// node inside a packaged loop, it returns the loop's mass.
    BlockMass &getMass() {
      if (!isADoublePackage())
      return Loop->Parent->Mass;

    /// \brief Has ContainingLoop been packaged up?
    bool isPackaged() const { return getResolvedNode() != Node; }
    /// \brief Has Loop been packaged up?
    bool isAPackage() const { return isLoopHeader() && Loop->IsPackaged; }
    /// \brief Has Loop been packaged up twice?
    bool isADoublePackage() const {
      return isDoubleLoopHeader() && Loop->Parent->IsPackaged;
  /// \brief Unscaled probability weight.
  ///
  /// Probability weight for an edge in the graph (including the
  /// successor/target node).
  ///
  /// All edges in the original function are 32-bit. However, exit edges from
  /// loop packages are taken from 64-bit exit masses, so we need 64-bits of
  /// space in general.
  ///
  /// In addition to the raw weight amount, Weight stores the type of the edge
  /// in the current context (i.e., the context of the loop being processed).
  /// Is this a local edge within the loop, an exit from the loop, or a
  /// backedge to the loop header?
    enum DistType { Local, Exit, Backedge };
    BlockNode TargetNode;
    // Default: zero-weight local edge.
    Weight() : Type(Local), Amount(0) {}
  /// \brief Distribution of unscaled probability weight.
  ///
  /// Distribution of unscaled probability weight to a set of successors.
  ///
  /// This class collates the successor edge weights for later processing.
  ///
  /// \a DidOverflow indicates whether \a Total did overflow while adding to
  /// the distribution. It should never overflow twice.
  struct Distribution {
    typedef SmallVector<Weight, 4> WeightList;
    WeightList Weights;    ///< Individual successor weights.
    uint64_t Total;        ///< Sum of all weights.
    bool DidOverflow;      ///< Whether \a Total did overflow.

    Distribution() : Total(0), DidOverflow(false) {}
    // Convenience wrappers around add() for each edge type.
    void addLocal(const BlockNode &Node, uint64_t Amount) {
      add(Node, Amount, Weight::Local);
    void addExit(const BlockNode &Node, uint64_t Amount) {
      add(Node, Amount, Weight::Exit);
    void addBackedge(const BlockNode &Node, uint64_t Amount) {
      add(Node, Amount, Weight::Backedge);

    /// \brief Normalize the distribution.
    ///
    /// Combines multiple edges to the same \a Weight::TargetNode and scales
    /// down so that \a Total fits into 32-bits.
    ///
    /// This is linear in the size of \a Weights. For the vast majority of
    /// cases, adjacent edge weights are combined by sorting WeightList and
    /// combining adjacent weights. However, for very large edge lists an
    /// auxiliary hash table is used.
    void add(const BlockNode &Node, uint64_t Amount, Weight::DistType Type);
1069 /// \brief Data about each block. This is used downstream.
1070 std::vector<FrequencyData> Freqs;
1072 /// \brief Loop data: see initializeLoops().
1073 std::vector<WorkingData> Working;
1075 /// \brief Indexed information about loops.
1076 std::list<LoopData> Loops;
1078 /// \brief Add all edges out of a packaged loop to the distribution.
1080 /// Adds all edges from LocalLoopHead to Dist. Calls addToDist() to add each
1083 /// \return \c true unless there's an irreducible backedge.
1084 bool addLoopSuccessorsToDist(const LoopData *OuterLoop, LoopData &Loop,
1085 Distribution &Dist);
1087 /// \brief Add an edge to the distribution.
1089 /// Adds an edge to Succ to Dist. If \c LoopHead.isValid(), then whether the
1090 /// edge is local/exit/backedge is in the context of LoopHead. Otherwise,
1091 /// every edge should be a local edge (since all the loops are packaged up).
1093 /// \return \c true unless aborted due to an irreducible backedge.
1094 bool addToDist(Distribution &Dist, const LoopData *OuterLoop,
1095 const BlockNode &Pred, const BlockNode &Succ, uint64_t Weight);
/// \brief Get the loop package for \p Head.
///
/// \pre \p Head is a valid index into \a Working and is a loop header.
1097 LoopData &getLoopPackage(const BlockNode &Head) {
1098 assert(Head.Index < Working.size());
1099 assert(Working[Head.Index].isLoopHeader());
1100 return *Working[Head.Index].Loop;
1103 /// \brief Analyze irreducible SCCs.
1105 /// Separate irreducible SCCs from \c G, which is an explict graph of \c
1106 /// OuterLoop (or the top-level function, if \c OuterLoop is \c nullptr).
1107 /// Insert them into \a Loops before \c Insert.
1109 /// \return the \c LoopData nodes representing the irreducible SCCs.
1110 iterator_range<std::list<LoopData>::iterator>
1111 analyzeIrreducible(const bfi_detail::IrreducibleGraph &G, LoopData *OuterLoop,
1112 std::list<LoopData>::iterator Insert);
1114 /// \brief Update a loop after packaging irreducible SCCs inside of it.
1116 /// Update \c OuterLoop. Before finding irreducible control flow, it was
1117 /// partway through \a computeMassInLoop(), so \a LoopData::Exits and \a
1118 /// LoopData::BackedgeMass need to be reset. Also, nodes that were packaged
1119 /// up need to be removed from \a OuterLoop::Nodes.
1120 void updateLoopWithIrreducible(LoopData &OuterLoop);
1122 /// \brief Distribute mass according to a distribution.
1124 /// Distributes the mass in Source according to Dist. If LoopHead.isValid(),
1125 /// backedges and exits are stored in its entry in Loops.
1127 /// Mass is distributed in parallel from two copies of the source mass.
1128 void distributeMass(const BlockNode &Source, LoopData *OuterLoop,
1129 Distribution &Dist);
1131 /// \brief Compute the loop scale for a loop.
1132 void computeLoopScale(LoopData &Loop);
1134 /// \brief Package up a loop.
1135 void packageLoop(LoopData &Loop);
1137 /// \brief Unwrap loops.
1140 /// \brief Finalize frequency metrics.
1142 /// Calculates final frequencies and cleans up no-longer-needed data
1144 void finalizeMetrics();
1146 /// \brief Clear all memory.
/// \brief Get a printable name for \p Node (overridden by the typed impl).
1149 virtual std::string getBlockName(const BlockNode &Node) const;
/// \brief Get a printable name for \p Loop.
1150 std::string getLoopName(const LoopData &Loop) const;
/// \brief Print all frequencies; base implementation prints nothing.
1152 virtual raw_ostream &print(raw_ostream &OS) const { return OS; }
/// \brief Dump to the debug stream.
1153 void dump() const { print(dbgs()); }
/// \brief Get the floating-point frequency for \p Node.
1155 Float getFloatingBlockFreq(const BlockNode &Node) const;
/// \brief Get the integer frequency for \p Node.
1157 BlockFrequency getBlockFreq(const BlockNode &Node) const;
1159 raw_ostream &printBlockFreq(raw_ostream &OS, const BlockNode &Node) const;
1160 raw_ostream &printBlockFreq(raw_ostream &OS,
1161 const BlockFrequency &Freq) const;
/// \brief Get the entry block's scaled frequency (Freqs[0], the first node
/// in reverse post-order).
1163 uint64_t getEntryFreq() const {
1164 assert(!Freqs.empty());
1165 return Freqs[0].Integer;
1167 /// \brief Virtual destructor.
1169 /// Need a virtual destructor to mask the compiler warning about
1171 virtual ~BlockFrequencyInfoImplBase() {}
1174 namespace bfi_detail {
// Maps a block type (BasicBlock or MachineBasicBlock) to the matching
// function/loop/branch-probability types, so BlockFrequencyInfoImpl can be
// instantiated for either IR level.
1175 template <class BlockT> struct TypeMap {};
1176 template <> struct TypeMap<BasicBlock> {
1177 typedef BasicBlock BlockT;
1178 typedef Function FunctionT;
1179 typedef BranchProbabilityInfo BranchProbabilityInfoT;
1181 typedef LoopInfo LoopInfoT;
1183 template <> struct TypeMap<MachineBasicBlock> {
1184 typedef MachineBasicBlock BlockT;
1185 typedef MachineFunction FunctionT;
1186 typedef MachineBranchProbabilityInfo BranchProbabilityInfoT;
1187 typedef MachineLoop LoopT;
1188 typedef MachineLoopInfo LoopInfoT;
1191 /// \brief Get the name of a MachineBasicBlock.
1193 /// Get the name of a MachineBasicBlock. It's templated so that including from
1194 /// CodeGen is unnecessary (that would be a layering issue).
1196 /// This is used mainly for debug output. The name is similar to
1197 /// MachineBasicBlock::getFullName(), but skips the name of the function.
1198 template <class BlockT> std::string getBlockName(const BlockT *BB) {
1199 assert(BB && "Unexpected nullptr");
// "BB<number>", plus "[<ir-name>]" when the MBB still tracks its IR block.
1200 auto MachineName = "BB" + Twine(BB->getNumber());
1201 if (BB->getBasicBlock())
1202 return (MachineName + "[" + BB->getName() + "]").str();
1203 return MachineName.str();
1205 /// \brief Get the name of a BasicBlock.
///
/// Specialization for IR blocks: just the block's own name.
1206 template <> inline std::string getBlockName(const BasicBlock *BB) {
1207 assert(BB && "Unexpected nullptr");
1208 return BB->getName().str();
1211 /// \brief Graph of irreducible control flow.
1213 /// This graph is used for determining the SCCs in a loop (or top-level
1214 /// function) that has irreducible control flow.
1216 /// During the block frequency algorithm, the local graphs are defined in a
1217 /// light-weight way, deferring to the \a BasicBlock or \a MachineBasicBlock
1218 /// graphs for most edges, but getting others from \a LoopData::ExitMap. The
1219 /// latter only has successor information.
1221 /// \a IrreducibleGraph makes this graph explicit. It's in a form that can use
1222 /// \a GraphTraits (so that \a analyzeIrreducible() can use \a scc_iterator),
1223 /// and it explicitly lists predecessors and successors. The initialization
1224 /// that relies on \c MachineBasicBlock is defined in the header.
1225 struct IrreducibleGraph {
1226 typedef BlockFrequencyInfoImplBase BFIBase;
1230 typedef BFIBase::BlockNode BlockNode;
// Predecessors and successors share one deque: preds occupy [0, NumIn),
// successors follow (see succ_begin() below).
1234 std::deque<const IrrNode *> Edges;
1235 IrrNode(const BlockNode &Node) : Node(Node), NumIn(0) {}
1237 typedef std::deque<const IrrNode *>::const_iterator iterator;
1238 iterator pred_begin() const { return Edges.begin(); }
1239 iterator succ_begin() const { return Edges.begin() + NumIn; }
1240 iterator pred_end() const { return succ_begin(); }
1241 iterator succ_end() const { return Edges.end(); }
// Entry node of the explicit graph, looked up via Lookup at the end of
// initialize().
1244 const IrrNode *StartIrr;
1245 std::vector<IrrNode> Nodes;
1246 SmallDenseMap<uint32_t, IrrNode *, 4> Lookup;
1248 /// \brief Construct an explicit graph containing irreducible control flow.
1250 /// Construct an explicit graph of the control flow in \c OuterLoop (or the
1251 /// top-level function, if \c OuterLoop is \c nullptr). Uses \c
1252 /// addBlockEdges to add block successors that have not been packaged into
1255 /// \a BlockFrequencyInfoImpl::computeIrreducibleMass() is the only expected
1257 template <class BlockEdgesAdder>
1258 IrreducibleGraph(BFIBase &BFI, const BFIBase::LoopData *OuterLoop,
1259 BlockEdgesAdder addBlockEdges)
1260 : BFI(BFI), StartIrr(nullptr) {
1261 initialize(OuterLoop, addBlockEdges);
1264 template <class BlockEdgesAdder>
1265 void initialize(const BFIBase::LoopData *OuterLoop,
1266 BlockEdgesAdder addBlockEdges);
1267 void addNodesInLoop(const BFIBase::LoopData &OuterLoop);
1268 void addNodesInFunction();
/// \brief Add \p Node to the graph and reset its working mass to empty.
1269 void addNode(const BlockNode &Node) {
1270 Nodes.emplace_back(Node);
1271 BFI.Working[Node.Index].getMass() = BlockMass::getEmpty();
1274 template <class BlockEdgesAdder>
1275 void addEdges(const BlockNode &Node, const BFIBase::LoopData *OuterLoop,
1276 BlockEdgesAdder addBlockEdges);
1277 void addEdge(IrrNode &Irr, const BlockNode &Succ,
1278 const BFIBase::LoopData *OuterLoop);
// Populate Nodes/Edges from either OuterLoop's member list or the whole
// function, then resolve the start node. NOTE(review): the branch selecting
// between the loop and function paths is elided in this view.
1280 template <class BlockEdgesAdder>
1281 void IrreducibleGraph::initialize(const BFIBase::LoopData *OuterLoop,
1282 BlockEdgesAdder addBlockEdges) {
1284 addNodesInLoop(*OuterLoop);
1285 for (auto N : OuterLoop->Nodes)
1286 addEdges(N, OuterLoop, addBlockEdges);
1288 addNodesInFunction();
1289 for (uint32_t Index = 0; Index < BFI.Working.size(); ++Index)
1290 addEdges(Index, OuterLoop, addBlockEdges);
1292 StartIrr = Lookup[Start.Index];
// Add the out-edges of Node. Packaged sub-loops contribute their recorded
// exit edges; ordinary blocks defer to addBlockEdges (the caller-supplied
// functor that reads the underlying CFG).
1294 template <class BlockEdgesAdder>
1295 void IrreducibleGraph::addEdges(const BlockNode &Node,
1296 const BFIBase::LoopData *OuterLoop,
1297 BlockEdgesAdder addBlockEdges) {
// Nodes outside the graph (not in Lookup) are skipped.
1298 auto L = Lookup.find(Node.Index);
1299 if (L == Lookup.end())
1301 IrrNode &Irr = *L->second;
1302 const auto &Working = BFI.Working[Node.Index];
1304 if (Working.isAPackage())
1305 for (const auto &I : Working.Loop->Exits)
1306 addEdge(Irr, I.first, OuterLoop);
1308 addBlockEdges(*this, Irr, OuterLoop);
1312 /// \brief Shared implementation for block frequency analysis.
1314 /// This is a shared implementation of BlockFrequencyInfo and
1315 /// MachineBlockFrequencyInfo, and calculates the relative frequencies of
1318 /// LoopInfo defines a loop as a "non-trivial" SCC dominated by a single block,
1319 /// which is called the header. A given loop, L, can have sub-loops, which are
1320 /// loops within the subgraph of L that exclude its header. (A "trivial" SCC
1321 /// consists of a single block that does not have a self-edge.)
1323 /// In addition to loops, this algorithm has limited support for irreducible
1324 /// SCCs, which are SCCs with multiple entry blocks. Irreducible SCCs are
1325 /// discovered on the fly, and modelled as loops with multiple headers.
1327 /// The headers of irreducible sub-SCCs consist of its entry blocks and all
1328 /// nodes that are targets of a backedge within it (excluding backedges within
1329 /// true sub-loops). Block frequency calculations act as if a block is
1330 /// inserted that intercepts all the edges to the headers. All backedges and
1331 /// entries point to this block. Its successors are the headers, which split
1332 /// the frequency evenly.
1334 /// This algorithm leverages BlockMass and UnsignedFloat to maintain precision,
1335 /// separates mass distribution from loop scaling, and dithers to eliminate
1336 /// probability mass loss.
1338 /// The implementation is split between BlockFrequencyInfoImpl, which knows the
1339 /// type of graph being modelled (BasicBlock vs. MachineBasicBlock), and
1340 /// BlockFrequencyInfoImplBase, which doesn't. The base class uses \a
1341 /// BlockNode, a wrapper around a uint32_t. BlockNode is numbered from 0 in
1342 /// reverse-post order. This gives two advantages: it's easy to compare the
1343 /// relative ordering of two nodes, and maps keyed on BlockT can be represented
1346 /// This algorithm is O(V+E), unless there is irreducible control flow, in
1347 /// which case it's O(V*E) in the worst case.
1349 /// These are the main stages:
1351 /// 0. Reverse post-order traversal (\a initializeRPOT()).
1353 /// Run a single post-order traversal and save it (in reverse) in RPOT.
1354 /// All other stages make use of this ordering. Save a lookup from BlockT
1355 /// to BlockNode (the index into RPOT) in Nodes.
1357 /// 1. Loop initialization (\a initializeLoops()).
1359 /// Translate LoopInfo/MachineLoopInfo into a form suitable for the rest of
1360 /// the algorithm. In particular, store the immediate members of each loop
1361 /// in reverse post-order.
1363 /// 2. Calculate mass and scale in loops (\a computeMassInLoops()).
1365 /// For each loop (bottom-up), distribute mass through the DAG resulting
1366 /// from ignoring backedges and treating sub-loops as a single pseudo-node.
1367 /// Track the backedge mass distributed to the loop header, and use it to
1368 /// calculate the loop scale (number of loop iterations). Immediate
1369 /// members that represent sub-loops will already have been visited and
1370 /// packaged into a pseudo-node.
1372 /// Distributing mass in a loop is a reverse-post-order traversal through
1373 /// the loop. Start by assigning full mass to the Loop header. For each
1374 /// node in the loop:
1376 /// - Fetch and categorize the weight distribution for its successors.
1377 /// If this is a packaged-subloop, the weight distribution is stored
1378 /// in \a LoopData::Exits. Otherwise, fetch it from
1379 /// BranchProbabilityInfo.
1381 /// - Each successor is categorized as \a Weight::Local, a local edge
1382 /// within the current loop, \a Weight::Backedge, a backedge to the
1383 /// loop header, or \a Weight::Exit, any successor outside the loop.
1384 /// The weight, the successor, and its category are stored in \a
1385 /// Distribution. There can be multiple edges to each successor.
1387 /// - If there's a backedge to a non-header, there's an irreducible SCC.
1388 /// The usual flow is temporarily aborted. \a
1389 /// computeIrreducibleMass() finds the irreducible SCCs within the
1390 /// loop, packages them up, and restarts the flow.
1392 /// - Normalize the distribution: scale weights down so that their sum
1393 /// is 32-bits, and coalesce multiple edges to the same node.
1395 /// - Distribute the mass accordingly, dithering to minimize mass loss,
1396 /// as described in \a distributeMass().
1398 /// Finally, calculate the loop scale from the accumulated backedge mass.
1400 /// 3. Distribute mass in the function (\a computeMassInFunction()).
1402 /// Finally, distribute mass through the DAG resulting from packaging all
1403 /// loops in the function. This uses the same algorithm as distributing
1404 /// mass in a loop, except that there are no exit or backedge edges.
1406 /// 4. Unpackage loops (\a unwrapLoops()).
1408 /// Initialize each block's frequency to a floating point representation of
1411 /// Visit loops top-down, scaling the frequencies of its immediate members
1412 /// by the loop's pseudo-node's frequency.
1414 /// 5. Convert frequencies to a 64-bit range (\a finalizeMetrics()).
1416 /// Using the min and max frequencies as a guide, translate floating point
1417 /// frequencies to an appropriate range in uint64_t.
1419 /// It has some known flaws.
1421 /// - Loop scale is limited to 4096 per loop (2^12) to avoid exhausting
1422 /// BlockFrequency's 64-bit integer precision.
1424 /// - The model of irreducible control flow is a rough approximation.
1426 /// Modelling irreducible control flow exactly involves setting up and
1427 /// solving a group of infinite geometric series. Such precision is
1428 /// unlikely to be worthwhile, since most of our algorithms give up on
1429 /// irreducible control flow anyway.
1431 /// Nevertheless, we might find that we need to get closer. Here's a sort
1432 /// of TODO list for the model with diminishing returns, to be completed as
1435 /// - The headers for the \a LoopData representing an irreducible SCC
1436 /// include non-entry blocks. When these extra blocks exist, they
1437 /// indicate a self-contained irreducible sub-SCC. We could treat them
1438 /// as sub-loops, rather than arbitrarily shoving the problematic
1439 /// blocks into the headers of the main irreducible SCC.
1441 /// - Backedge frequencies are assumed to be evenly split between the
1442 /// headers of a given irreducible SCC. Instead, we could track the
1443 /// backedge mass separately for each header, and adjust their relative
1446 /// - Entry frequencies are assumed to be evenly split between the
1447 /// headers of a given irreducible SCC, which is the only option if we
1448 /// need to compute mass in the SCC before its parent loop. Instead,
1449 /// we could partially compute mass in the parent loop, and stop when
1450 /// we get to the SCC. Here, we have the correct ratio of entry
1451 /// masses, which we can use to adjust their relative frequencies.
1452 /// Compute mass in the SCC, and then continue propagation in the
1455 /// - We can propagate mass iteratively through the SCC, for some fixed
1456 /// number of iterations. Each iteration starts by assigning the entry
1457 /// blocks their backedge mass from the prior iteration. The final
1458 /// mass for each block (and each exit, and the total backedge mass
1459 /// used for computing loop scale) is the sum of all iterations.
1460 /// (Running this until fixed point would "solve" the geometric
1461 /// series by simulation.)
1462 template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
// Resolve the concrete IR-level types (IR vs. Machine) via TypeMap.
1463 typedef typename bfi_detail::TypeMap<BT>::BlockT BlockT;
1464 typedef typename bfi_detail::TypeMap<BT>::FunctionT FunctionT;
1465 typedef typename bfi_detail::TypeMap<BT>::BranchProbabilityInfoT
1466 BranchProbabilityInfoT;
1467 typedef typename bfi_detail::TypeMap<BT>::LoopT LoopT;
1468 typedef typename bfi_detail::TypeMap<BT>::LoopInfoT LoopInfoT;
1470 // This is part of a workaround for a GCC 4.7 crash on lambdas.
1471 friend struct bfi_detail::BlockEdgesAdder<BT>;
1473 typedef GraphTraits<const BlockT *> Successor;
1474 typedef GraphTraits<Inverse<const BlockT *>> Predecessor;
1476 const BranchProbabilityInfoT *BPI;
1477 const LoopInfoT *LI;
1480 // All blocks in reverse postorder.
1481 std::vector<const BlockT *> RPOT;
// Inverse of RPOT: block pointer -> its index (BlockNode) in RPOT.
1482 DenseMap<const BlockT *, BlockNode> Nodes;
1484 typedef typename std::vector<const BlockT *>::const_iterator rpot_iterator;
1486 rpot_iterator rpot_begin() const { return RPOT.begin(); }
1487 rpot_iterator rpot_end() const { return RPOT.end(); }
1489 size_t getIndex(const rpot_iterator &I) const { return I - rpot_begin(); }
1491 BlockNode getNode(const rpot_iterator &I) const {
1492 return BlockNode(getIndex(I));
1494 BlockNode getNode(const BlockT *BB) const { return Nodes.lookup(BB); }
1496 const BlockT *getBlock(const BlockNode &Node) const {
1497 assert(Node.Index < RPOT.size());
1498 return RPOT[Node.Index];
1501 /// \brief Run (and save) a post-order traversal.
1503 /// Saves a reverse post-order traversal of all the nodes in \a F.
1504 void initializeRPOT();
1506 /// \brief Initialize loop data.
1508 /// Build up \a Loops using \a LoopInfo. \a LoopInfo gives us a mapping from
1509 /// each block to the deepest loop it's in, but we need the inverse. For each
1510 /// loop, we store in reverse post-order its "immediate" members, defined as
1511 /// the header, the headers of immediate sub-loops, and all other blocks in
1512 /// the loop that are not in sub-loops.
1513 void initializeLoops();
1515 /// \brief Propagate to a block's successors.
1517 /// In the context of distributing mass through \c OuterLoop, divide the mass
1518 /// currently assigned to \c Node between its successors.
1520 /// \return \c true unless there's an irreducible backedge.
1521 bool propagateMassToSuccessors(LoopData *OuterLoop, const BlockNode &Node);
1523 /// \brief Compute mass in a particular loop.
1525 /// Assign mass to \c Loop's header, and then for each block in \c Loop in
1526 /// reverse post-order, distribute mass to its successors. Only visits nodes
1527 /// that have not been packaged into sub-loops.
1529 /// \pre \a computeMassInLoop() has been called for each subloop of \c Loop.
1530 /// \return \c true unless there's an irreducible backedge.
1531 bool computeMassInLoop(LoopData &Loop);
1533 /// \brief Try to compute mass in the top-level function.
1535 /// Assign mass to the entry block, and then for each block in reverse
1536 /// post-order, distribute mass to its successors. Skips nodes that have
1537 /// been packaged into loops.
1539 /// \pre \a computeMassInLoops() has been called.
1540 /// \return \c true unless there's an irreducible backedge.
1541 bool tryToComputeMassInFunction();
1543 /// \brief Compute mass in (and package up) irreducible SCCs.
1545 /// Find the irreducible SCCs in \c OuterLoop, add them to \a Loops (in front
1546 /// of \c Insert), and call \a computeMassInLoop() on each of them.
1548 /// If \c OuterLoop is \c nullptr, it refers to the top-level function.
1550 /// \pre \a computeMassInLoop() has been called for each subloop of \c
1552 /// \pre \c Insert points at the last loop successfully processed by \a
1553 /// computeMassInLoop().
1554 /// \pre \c OuterLoop has irreducible SCCs.
1555 void computeIrreducibleMass(LoopData *OuterLoop,
1556 std::list<LoopData>::iterator Insert);
1558 /// \brief Compute mass in all loops.
1560 /// For each loop bottom-up, call \a computeMassInLoop().
1562 /// \a computeMassInLoop() aborts (and returns \c false) on loops that
1563 /// contain irreducible sub-SCCs. Use \a computeIrreducibleMass() and then
1564 /// re-enter \a computeMassInLoop().
1566 /// \post \a computeMassInLoop() has returned \c true for every loop.
1567 void computeMassInLoops();
1569 /// \brief Compute mass in the top-level function.
1571 /// Uses \a tryToComputeMassInFunction() and \a computeIrreducibleMass() to
1572 /// compute mass in the top-level function.
1574 /// \post \a tryToComputeMassInFunction() has returned \c true.
1575 void computeMassInFunction();
1577 std::string getBlockName(const BlockNode &Node) const override {
1578 return bfi_detail::getBlockName(getBlock(Node));
1582 const FunctionT *getFunction() const { return F; }
1584 void doFunction(const FunctionT *F, const BranchProbabilityInfoT *BPI,
1585 const LoopInfoT *LI);
1586 BlockFrequencyInfoImpl() : BPI(nullptr), LI(nullptr), F(nullptr) {}
1588 using BlockFrequencyInfoImplBase::getEntryFreq;
1589 BlockFrequency getBlockFreq(const BlockT *BB) const {
1590 return BlockFrequencyInfoImplBase::getBlockFreq(getNode(BB));
1592 Float getFloatingBlockFreq(const BlockT *BB) const {
1593 return BlockFrequencyInfoImplBase::getFloatingBlockFreq(getNode(BB));
1596 /// \brief Print the frequencies for the current function.
1598 /// Prints the frequencies for the blocks in the current function.
1600 /// Blocks are printed in the natural iteration order of the function, rather
1601 /// than reverse post-order. This provides two advantages: writing -analyze
1602 /// tests is easier (since blocks come out in source order), and even
1603 /// unreachable blocks are printed.
1605 /// \a BlockFrequencyInfoImplBase::print() only knows reverse post-order, so
1606 /// we need to override it here.
1607 raw_ostream &print(raw_ostream &OS) const override;
1608 using BlockFrequencyInfoImplBase::dump;
1610 using BlockFrequencyInfoImplBase::printBlockFreq;
1611 raw_ostream &printBlockFreq(raw_ostream &OS, const BlockT *BB) const {
1612 return BlockFrequencyInfoImplBase::printBlockFreq(OS, getNode(BB));
/// \brief Run the full analysis on function \p F.
///
/// Entry point: saves the inputs, resets state from any previous run, then
/// executes the mass-distribution stages described in the class comment.
1617 void BlockFrequencyInfoImpl<BT>::doFunction(const FunctionT *F,
1618 const BranchProbabilityInfoT *BPI,
1619 const LoopInfoT *LI) {
1620 // Save the parameters.
1625 // Clean up left-over data structures.
1626 BlockFrequencyInfoImplBase::clear();
1631 DEBUG(dbgs() << "\nblock-frequency: " << F->getName() << "\n================="
1632 << std::string(F->getName().size(), '=') << "\n");
1636 // Visit loops in post-order to find the local mass distribution, and then do
1637 // the full function.
1638 computeMassInLoops();
1639 computeMassInFunction();
// Build RPOT by running a post-order walk from the entry block and reversing
// it, then size Working/Freqs to match.
1644 template <class BT> void BlockFrequencyInfoImpl<BT>::initializeRPOT() {
1645 const BlockT *Entry = F->begin();
1646 RPOT.reserve(F->size());
1647 std::copy(po_begin(Entry), po_end(Entry), std::back_inserter(RPOT));
1648 std::reverse(RPOT.begin(), RPOT.end());
// BlockNode is a uint32_t wrapper, so the node count must fit its index.
1650 assert(RPOT.size() - 1 <= BlockNode::getMaxIndex() &&
1651 "More nodes in function than Block Frequency Info supports");
1653 DEBUG(dbgs() << "reverse-post-order-traversal\n");
1654 for (rpot_iterator I = rpot_begin(), E = rpot_end(); I != E; ++I) {
1655 BlockNode Node = getNode(I);
1656 DEBUG(dbgs() << " - " << getIndex(I) << ": " << getBlockName(Node) << "\n");
// One WorkingData per RPOT index; Freqs is filled in later stages.
1660 Working.reserve(RPOT.size());
1661 for (size_t Index = 0; Index < RPOT.size(); ++Index)
1662 Working.emplace_back(Index);
1663 Freqs.resize(RPOT.size());
// Translate LoopInfo into this pass's Loops list: first create a LoopData per
// loop (top-down, so parents precede children), then assign every block to
// its deepest containing loop's member list in reverse post-order.
1666 template <class BT> void BlockFrequencyInfoImpl<BT>::initializeLoops() {
1667 DEBUG(dbgs() << "loop-detection\n");
1671 // Visit loops top down and assign them an index.
1672 std::deque<std::pair<const LoopT *, LoopData *>> Q;
1673 for (const LoopT *L : *LI)
1674 Q.emplace_back(L, nullptr);
1675 while (!Q.empty()) {
1676 const LoopT *Loop = Q.front().first;
1677 LoopData *Parent = Q.front().second;
1680 BlockNode Header = getNode(Loop->getHeader());
1681 assert(Header.isValid());
// The header's WorkingData points at the new LoopData, marking it a
// loop header.
1683 Loops.emplace_back(Parent, Header);
1684 Working[Header.Index].Loop = &Loops.back();
1685 DEBUG(dbgs() << " - loop = " << getBlockName(Header) << "\n");
// Queue sub-loops with the just-created LoopData as their parent.
1687 for (const LoopT *L : *Loop)
1688 Q.emplace_back(L, &Loops.back());
1691 // Visit nodes in reverse post-order and add them to their deepest containing
1693 for (size_t Index = 0; Index < RPOT.size(); ++Index) {
1694 // Loop headers have already been mostly mapped.
1695 if (Working[Index].isLoopHeader()) {
1696 LoopData *ContainingLoop = Working[Index].getContainingLoop();
1698 ContainingLoop->Nodes.push_back(Index);
1702 const LoopT *Loop = LI->getLoopFor(RPOT[Index]);
1706 // Add this node to its containing loop's member list.
1707 BlockNode Header = getNode(Loop->getHeader());
1708 assert(Header.isValid());
1709 const auto &HeaderData = Working[Header.Index];
1710 assert(HeaderData.isLoopHeader());
1712 Working[Index].Loop = HeaderData.Loop;
1713 HeaderData.Loop->Nodes.push_back(Index);
1714 DEBUG(dbgs() << " - loop = " << getBlockName(Header)
1715 << ": member = " << getBlockName(Index) << "\n");
// Process loops innermost-first (Loops was built top-down, so iterate in
// reverse). If a loop has irreducible control flow, computeMassInLoop()
// returns false; package the irreducible SCCs and retry once.
1719 template <class BT> void BlockFrequencyInfoImpl<BT>::computeMassInLoops() {
1720 // Visit loops with the deepest first, and the top-level loops last.
1721 for (auto L = Loops.rbegin(), E = Loops.rend(); L != E; ++L) {
1722 if (computeMassInLoop(*L))
// Save the successor before computeIrreducibleMass() inserts new
// LoopData entries, then restore the iterator position.
1724 auto Next = std::next(L);
1725 computeIrreducibleMass(&*L, L.base());
1726 L = std::prev(Next);
1727 if (computeMassInLoop(*L))
// A second failure is a bug: irreducibility was just packaged away.
1729 llvm_unreachable("unhandled irreducible control flow");
// Distribute a full unit of mass through one loop's DAG. For an irreducible
// "loop" (multi-header pseudo-loop) the full mass is split evenly across the
// headers; for a natural loop it all starts at the single header.
1734 bool BlockFrequencyInfoImpl<BT>::computeMassInLoop(LoopData &Loop) {
1735 // Compute mass in loop.
1736 DEBUG(dbgs() << "compute-mass-in-loop: " << getLoopName(Loop) << "\n");
1738 if (Loop.isIrreducible()) {
1739 BlockMass Remaining = BlockMass::getFull();
1740 for (uint32_t H = 0; H < Loop.NumHeaders; ++H) {
// Give header H a 1/(NumHeaders - H) share of what's left, which
// works out to an even split with dithering of the remainder.
1741 auto &Mass = Working[Loop.Nodes[H].Index].getMass();
1742 Mass = Remaining * BranchProbability(1, Loop.NumHeaders - H);
1745 for (const BlockNode &M : Loop.Nodes)
1746 if (!propagateMassToSuccessors(&Loop, M))
// Sub-SCCs were already packaged, so this cannot recur.
1747 llvm_unreachable("unhandled irreducible control flow");
1749 Working[Loop.getHeader().Index].getMass() = BlockMass::getFull();
1750 if (!propagateMassToSuccessors(&Loop, Loop.getHeader()))
1751 llvm_unreachable("irreducible control flow to loop header!?");
1752 for (const BlockNode &M : Loop.members())
1753 if (!propagateMassToSuccessors(&Loop, M))
1754 // Irreducible backedge.
1758 computeLoopScale(Loop);
// Distribute mass through the whole function's DAG (all loops packaged).
// Starts with full mass on the entry block (RPOT index 0) and sweeps forward
// in reverse post-order.
1764 bool BlockFrequencyInfoImpl<BT>::tryToComputeMassInFunction() {
1765 // Compute mass in function.
1766 DEBUG(dbgs() << "compute-mass-in-function\n");
1767 assert(!Working.empty() && "no blocks in function");
1768 assert(!Working[0].isLoopHeader() && "entry block is a loop header");
1770 Working[0].getMass() = BlockMass::getFull();
1771 for (rpot_iterator I = rpot_begin(), IE = rpot_end(); I != IE; ++I) {
1772 // Check for nodes that have been packaged.
1773 BlockNode Node = getNode(I);
1774 if (Working[Node.Index].isPackaged())
1777 if (!propagateMassToSuccessors(nullptr, Node))
// Top-level driver: try the straight-line computation; on failure (an
// irreducible backedge), package the function-level irreducible SCCs and
// retry once.
1783 template <class BT> void BlockFrequencyInfoImpl<BT>::computeMassInFunction() {
1784 if (tryToComputeMassInFunction())
1786 computeIrreducibleMass(nullptr, Loops.begin());
1787 if (tryToComputeMassInFunction())
1789 llvm_unreachable("unhandled irreducible control flow");
1792 /// \note This should be a lambda, but that crashes GCC 4.7.
1793 namespace bfi_detail {
// Functor that feeds a block's CFG successors into an IrreducibleGraph;
// passed to the IrreducibleGraph constructor in computeIrreducibleMass().
1794 template <class BT> struct BlockEdgesAdder {
1796 typedef BlockFrequencyInfoImplBase::LoopData LoopData;
1797 typedef GraphTraits<const BlockT *> Successor;
1799 const BlockFrequencyInfoImpl<BT> &BFI;
1800 explicit BlockEdgesAdder(const BlockFrequencyInfoImpl<BT> &BFI)
1802 void operator()(IrreducibleGraph &G, IrreducibleGraph::IrrNode &Irr,
1803 const LoopData *OuterLoop) {
1804 const BlockT *BB = BFI.RPOT[Irr.Node.Index];
1805 for (auto I = Successor::child_begin(BB), E = Successor::child_end(BB);
1807 G.addEdge(Irr, BFI.getNode(*I), OuterLoop);
// Build an explicit graph for OuterLoop (or the whole function when
// OuterLoop is null), split out its irreducible SCCs as new LoopData entries
// before Insert, compute mass in each, and patch up the outer loop.
1812 void BlockFrequencyInfoImpl<BT>::computeIrreducibleMass(
1813 LoopData *OuterLoop, std::list<LoopData>::iterator Insert) {
1814 DEBUG(dbgs() << "analyze-irreducible-in-";
1815 if (OuterLoop) dbgs() << "loop: " << getLoopName(*OuterLoop) << "\n";
1816 else dbgs() << "function\n");
1818 using namespace bfi_detail;
1819 // Ideally, addBlockEdges() would be declared here as a lambda, but that
1821 BlockEdgesAdder<BT> addBlockEdges(*this);
1822 IrreducibleGraph G(*this, OuterLoop, addBlockEdges);
1824 for (auto &L : analyzeIrreducible(G, OuterLoop, Insert))
1825 computeMassInLoop(L);
// OuterLoop's exits/backedge mass must be reset now that some of its
// members were packaged into new pseudo-nodes.
1829 updateLoopWithIrreducible(*OuterLoop);
1834 BlockFrequencyInfoImpl<BT>::propagateMassToSuccessors(LoopData *OuterLoop,
1835 const BlockNode &Node) {
1836 DEBUG(dbgs() << " - node: " << getBlockName(Node) << "\n");
1837 // Calculate probability for successors.
// A packaged sub-loop contributes its recorded exit edges; an ordinary
// block contributes BranchProbabilityInfo edge weights from the CFG.
1839 if (auto *Loop = Working[Node.Index].getPackagedLoop()) {
1840 assert(Loop != OuterLoop && "Cannot propagate mass in a packaged loop");
1841 if (!addLoopSuccessorsToDist(OuterLoop, *Loop, Dist))
1842 // Irreducible backedge.
1845 const BlockT *BB = getBlock(Node);
1846 for (auto SI = Successor::child_begin(BB), SE = Successor::child_end(BB);
1848 // Do not dereference SI, or getEdgeWeight() is linear in the number of
1850 if (!addToDist(Dist, OuterLoop, Node, getNode(*SI),
1851 BPI->getEdgeWeight(BB, SI)))
1852 // Irreducible backedge.
1856 // Distribute mass to successors, saving exit and backedge data in the
1858 distributeMass(Node, OuterLoop, Dist);
1863 raw_ostream &BlockFrequencyInfoImpl<BT>::print(raw_ostream &OS) const {
1866 OS << "block-frequency-info: " << F->getName() << "\n";
1867 for (const BlockT &BB : *F)
1868 OS << " - " << bfi_detail::getBlockName(&BB)
1869 << ": float = " << getFloatingBlockFreq(&BB)
1870 << ", int = " << getBlockFreq(&BB).getFrequency() << "\n";
1872 // Add an extra newline for readability.