1 //==- BlockFrequencyInfoImpl.h - Block Frequency Implementation -*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // Shared implementation of BlockFrequency for IR and Machine Instructions.
12 //===----------------------------------------------------------------------===//
14 #ifndef LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H
15 #define LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/PostOrderIterator.h"
19 #include "llvm/ADT/iterator_range.h"
20 #include "llvm/IR/BasicBlock.h"
21 #include "llvm/Support/BlockFrequency.h"
22 #include "llvm/Support/BranchProbability.h"
23 #include "llvm/Support/Debug.h"
24 #include "llvm/Support/raw_ostream.h"
29 #define DEBUG_TYPE "block-freq"
31 //===----------------------------------------------------------------------===//
33 // UnsignedFloat definition.
35 // TODO: Make this private to BlockFrequencyInfoImpl or delete.
37 //===----------------------------------------------------------------------===//
40 class UnsignedFloatBase {
// NOTE(review): This excerpt has interior lines elided throughout (the
// embedded original line numbers skip values), so several declarations
// below are visibly truncated. Code is kept byte-identical.
//
// Non-template helpers shared by all UnsignedFloat<DigitsT> instantiations:
// exponent limits, printing, signed/unsigned splitting, and wide multiply /
// divide primitives.
42 static const int32_t MaxExponent = 16383;
43 static const int32_t MinExponent = -16382;
44 static const int DefaultPrecision = 10;
46 static void dump(uint64_t D, int16_t E, int Width);
// print/toString: the trailing parameter lists (original lines 48 and 50)
// are elided in this excerpt.
47 static raw_ostream &print(raw_ostream &OS, uint64_t D, int16_t E, int Width,
49 static std::string toString(uint64_t D, int16_t E, int Width,
51 static int countLeadingZeros32(uint32_t N) { return countLeadingZeros(N); }
52 static int countLeadingZeros64(uint64_t N) { return countLeadingZeros(N); }
// Round-to-nearest half of N: (N >> 1) plus the dropped low bit.
53 static uint64_t getHalf(uint64_t N) { return (N >> 1) + (N & 1); }
// Split a signed value into (magnitude, is-negative). The guard for the
// non-negative case (original line 56) is elided in this excerpt.
55 static std::pair<uint64_t, bool> splitSigned(int64_t N) {
57 return std::make_pair(N, false);
// INT64_MIN has no positive counterpart in int64_t; handle it explicitly.
58 uint64_t Unsigned = N == INT64_MIN ? UINT64_C(1) << 63 : uint64_t(-N);
59 return std::make_pair(Unsigned, true);
// Rejoin a (magnitude, is-negative) pair, saturating at the int64_t range.
61 static int64_t joinSigned(uint64_t U, bool IsNeg) {
62 if (U > uint64_t(INT64_MAX))
63 return IsNeg ? INT64_MIN : INT64_MAX;
64 return IsNeg ? -int64_t(U) : int64_t(U);
// Extract a rounded / floor / ceiling lg from lgImpl()'s pair of
// (lg, round-direction); the body of extractLg (original line 68) is elided.
67 static int32_t extractLg(const std::pair<int32_t, int> &Lg) {
70 static int32_t extractLgFloor(const std::pair<int32_t, int> &Lg) {
71 return Lg.first - (Lg.second > 0);
73 static int32_t extractLgCeiling(const std::pair<int32_t, int> &Lg) {
74 return Lg.first + (Lg.second < 0);
77 static std::pair<uint64_t, int16_t> divide64(uint64_t L, uint64_t R);
78 static std::pair<uint64_t, int16_t> multiply64(uint64_t L, uint64_t R);
// Three-way compare of L against R shifted left by Shift (several lines,
// original 81-89, are elided in this excerpt).
80 static int compare(uint64_t L, uint64_t R, int Shift) {
84 uint64_t L_adjusted = L >> Shift;
90 return L > L_adjusted << Shift ? 1 : 0;
94 /// \brief Simple representation of an unsigned floating point.
96 /// UnsignedFloat is a unsigned floating point number. It uses simple
97 /// saturation arithmetic, and every operation is well-defined for every value.
99 /// The number is split into a signed exponent and unsigned digits. The number
100 /// represented is \c getDigits()*2^getExponent(). In this way, the digits are
101 /// much like the mantissa in the x87 long double, but there is no canonical
102 /// form, so the same number can be represented by many bit representations
103 /// (it's always in "denormal" mode).
105 /// UnsignedFloat is templated on the underlying integer type for digits, which
106 /// is expected to be one of uint64_t, uint32_t, uint16_t or uint8_t.
108 /// Unlike builtin floating point types, UnsignedFloat is portable.
110 /// Unlike APFloat, UnsignedFloat does not model architecture floating point
111 /// behaviour (this should make it a little faster), and implements most
112 /// operators (this makes it usable).
114 /// UnsignedFloat is totally ordered. However, there is no canonical form, so
115 /// there are multiple representations of most scalars. E.g.:
117 /// UnsignedFloat(8u, 0) == UnsignedFloat(4u, 1)
118 /// UnsignedFloat(4u, 1) == UnsignedFloat(2u, 2)
119 /// UnsignedFloat(2u, 2) == UnsignedFloat(1u, 3)
121 /// UnsignedFloat implements most arithmetic operations. Precision is kept
122 /// where possible. Uses simple saturation arithmetic, so that operations
123 /// saturate to 0.0 or getLargest() rather than under or overflowing. It has
124 /// some extra arithmetic for unit inversion. 0.0/0.0 is defined to be 0.0.
125 /// Any other division by 0.0 is defined to be getLargest().
127 /// As a convenience for modifying the exponent, left and right shifting are
128 /// both implemented, and both interpret negative shifts as positive shifts in
129 /// the opposite direction.
131 /// Exponents are limited to the range accepted by x87 long double. This makes
132 /// it trivial to add functionality to convert to APFloat (this is already
133 /// relied on for the implementation of printing).
135 /// The current plan is to gut this and make the necessary parts of it (even
136 /// more) private to BlockFrequencyInfo.
137 template <class DigitsT> class UnsignedFloat : UnsignedFloatBase {
// NOTE(review): interior lines and the closing brace of this class are
// elided in this excerpt (original numbering skips); code kept byte-identical.
139 static_assert(!std::numeric_limits<DigitsT>::is_signed,
140 "only unsigned floats supported");
142 typedef DigitsT DigitsType;
145 typedef std::numeric_limits<DigitsType> DigitsLimits;
// Number of bits in the digits (mantissa) type.
147 static const int Width = sizeof(DigitsType) * 8;
148 static_assert(Width <= 64, "invalid integer width for digits");
155 UnsignedFloat() : Digits(0), Exponent(0) {}
157 UnsignedFloat(DigitsType Digits, int16_t Exponent)
158 : Digits(Digits), Exponent(Exponent) {}
161 UnsignedFloat(const std::pair<uint64_t, int16_t> &X)
162 : Digits(X.first), Exponent(X.second) {}
// Named constructors for common values.
165 static UnsignedFloat getZero() { return UnsignedFloat(0, 0); }
166 static UnsignedFloat getOne() { return UnsignedFloat(1, 0); }
167 static UnsignedFloat getLargest() {
168 return UnsignedFloat(DigitsLimits::max(), MaxExponent);
170 static UnsignedFloat getFloat(uint64_t N) { return adjustToWidth(N, 0); }
171 static UnsignedFloat getInverseFloat(uint64_t N) {
172 return getFloat(N).invert();
174 static UnsignedFloat getFraction(DigitsType N, DigitsType D) {
175 return getQuotient(N, D);
178 int16_t getExponent() const { return Exponent; }
179 DigitsType getDigits() const { return Digits; }
181 /// \brief Convert to the given integer type.
183 /// Convert to \c IntT using simple saturating arithmetic, truncating if
185 template <class IntT> IntT toInt() const;
187 bool isZero() const { return !Digits; }
188 bool isLargest() const { return *this == getLargest(); }
// NOTE(review): the declaration line for isOne() (original line 189)
// appears to be elided here; the next two lines are its body.
190 if (Exponent > 0 || Exponent <= -Width)
192 return Digits == DigitsType(1) << -Exponent;
195 /// \brief The log base 2, rounded.
197 /// Get the lg of the scalar. lg 0 is defined to be INT32_MIN.
198 int32_t lg() const { return extractLg(lgImpl()); }
200 /// \brief The log base 2, rounded towards INT32_MIN.
202 /// Get the lg floor. lg 0 is defined to be INT32_MIN.
203 int32_t lgFloor() const { return extractLgFloor(lgImpl()); }
205 /// \brief The log base 2, rounded towards INT32_MAX.
207 /// Get the lg ceiling. lg 0 is defined to be INT32_MIN.
208 int32_t lgCeiling() const { return extractLgCeiling(lgImpl()); }
// Total ordering: every relational operator funnels through compare().
210 bool operator==(const UnsignedFloat &X) const { return compare(X) == 0; }
211 bool operator<(const UnsignedFloat &X) const { return compare(X) < 0; }
212 bool operator!=(const UnsignedFloat &X) const { return compare(X) != 0; }
213 bool operator>(const UnsignedFloat &X) const { return compare(X) > 0; }
214 bool operator<=(const UnsignedFloat &X) const { return compare(X) <= 0; }
215 bool operator>=(const UnsignedFloat &X) const { return compare(X) >= 0; }
217 bool operator!() const { return isZero(); }
219 /// \brief Convert to a decimal representation in a string.
221 /// Convert to a string. Uses scientific notation for very large/small
222 /// numbers. Scientific notation is used roughly for numbers outside of the
223 /// range 2^-64 through 2^64.
225 /// \c Precision indicates the number of decimal digits of precision to use;
226 /// 0 requests the maximum available.
228 /// As a special case to make debugging easier, if the number is small enough
229 /// to convert without scientific notation and has more than \c Precision
230 /// digits before the decimal place, it's printed accurately to the first
231 /// digit past zero. E.g., assuming 10 digits of precision:
233 /// 98765432198.7654... => 98765432198.8
234 /// 8765432198.7654... => 8765432198.8
235 /// 765432198.7654... => 765432198.8
236 /// 65432198.7654... => 65432198.77
237 /// 5432198.7654... => 5432198.765
238 std::string toString(unsigned Precision = DefaultPrecision) {
239 return UnsignedFloatBase::toString(Digits, Exponent, Width, Precision);
242 /// \brief Print a decimal representation.
244 /// Print a string. See toString for documentation.
245 raw_ostream &print(raw_ostream &OS,
246 unsigned Precision = DefaultPrecision) const {
247 return UnsignedFloatBase::print(OS, Digits, Exponent, Width, Precision);
249 void dump() const { return UnsignedFloatBase::dump(Digits, Exponent, Width); }
// Compound-assignment arithmetic; defined out-of-line below.
251 UnsignedFloat &operator+=(const UnsignedFloat &X);
252 UnsignedFloat &operator-=(const UnsignedFloat &X);
253 UnsignedFloat &operator*=(const UnsignedFloat &X);
254 UnsignedFloat &operator/=(const UnsignedFloat &X);
255 UnsignedFloat &operator<<=(int16_t Shift) { shiftLeft(Shift); return *this; }
256 UnsignedFloat &operator>>=(int16_t Shift) { shiftRight(Shift); return *this; }
259 void shiftLeft(int32_t Shift);
260 void shiftRight(int32_t Shift);
262 /// \brief Adjust two floats to have matching exponents.
264 /// Adjust \c this and \c X to have matching exponents. Returns the new \c X
265 /// by value. Does nothing if \a isZero() for either.
267 /// The value that compares smaller will lose precision, and possibly become
269 UnsignedFloat matchExponents(UnsignedFloat X);
271 /// \brief Increase exponent to match another float.
273 /// Increases \c this to have an exponent matching \c X. May decrease the
274 /// exponent of \c X in the process, and \c this may possibly become \a
276 void increaseExponentToMatch(UnsignedFloat &X, int32_t ExponentDiff);
279 /// \brief Scale a large number accurately.
281 /// Scale N (multiply it by this). Uses full precision multiplication, even
282 /// if Width is smaller than 64, so information is not lost.
283 uint64_t scale(uint64_t N) const;
284 uint64_t scaleByInverse(uint64_t N) const {
285 // TODO: implement directly, rather than relying on inverse. Inverse is
287 return inverse().scale(N);
289 int64_t scale(int64_t N) const {
// Route signed scaling through the unsigned path, preserving the sign.
290 std::pair<uint64_t, bool> Unsigned = splitSigned(N);
291 return joinSigned(scale(Unsigned.first), Unsigned.second);
293 int64_t scaleByInverse(int64_t N) const {
294 std::pair<uint64_t, bool> Unsigned = splitSigned(N);
295 return joinSigned(scaleByInverse(Unsigned.first), Unsigned.second);
298 int compare(const UnsignedFloat &X) const;
299 int compareTo(uint64_t N) const {
300 UnsignedFloat Float = getFloat(N);
301 int Compare = compare(Float);
302 if (Width == 64 || Compare != 0)
305 // Check for precision loss. We know *this == RoundTrip.
306 uint64_t RoundTrip = Float.template toInt<uint64_t>();
307 return N == RoundTrip ? 0 : RoundTrip < N ? -1 : 1;
// A negative N always compares below this (unsigned) value.
309 int compareTo(int64_t N) const { return N < 0 ? 1 : compareTo(uint64_t(N)); }
311 UnsignedFloat &invert() { return *this = UnsignedFloat::getFloat(1) / *this; }
312 UnsignedFloat inverse() const { return UnsignedFloat(*this).invert(); }
315 static UnsignedFloat getProduct(DigitsType L, DigitsType R);
316 static UnsignedFloat getQuotient(DigitsType Dividend, DigitsType Divisor);
318 std::pair<int32_t, int> lgImpl() const;
// Count leading zeros of Digits relative to Width. The dispatch guards for
// the 64-bit / 32-bit cases (original lines 320, 322) are elided here.
319 static int countLeadingZerosWidth(DigitsType Digits) {
321 return countLeadingZeros64(Digits);
323 return countLeadingZeros32(Digits);
324 return countLeadingZeros32(Digits) + Width - 32;
// Fit a 64-bit N into DigitsType at exponent S, rounding to nearest.
327 static UnsignedFloat adjustToWidth(uint64_t N, int32_t S) {
328 assert(S >= MinExponent);
329 assert(S <= MaxExponent);
330 if (Width == 64 || N <= DigitsLimits::max())
331 return UnsignedFloat(N, S);
334 int Shift = 64 - Width - countLeadingZeros64(N);
335 DigitsType Shifted = N >> Shift;
338 assert(S + Shift <= MaxExponent);
// Round to nearest based on the highest bit shifted out.
339 return getRounded(UnsignedFloat(Shifted, S + Shift),
340 N & UINT64_C(1) << (Shift - 1));
// Round P up by one ulp when Round is set; the early return for the
// no-round case (original lines 344-345) is elided in this excerpt.
343 static UnsignedFloat getRounded(UnsignedFloat P, bool Round) {
346 if (P.Digits == DigitsLimits::max())
347 // Careful of overflow in the exponent.
348 return UnsignedFloat(1, P.Exponent) <<= Width;
349 return UnsignedFloat(P.Digits + 1, P.Exponent);
// Define the binary operators (+, -, *, /, <<, >>) in terms of the
// corresponding compound-assignment operators on a copy of the left operand.
// NOTE(review): the macro's closing line (original 358) is elided here.
353 #define UNSIGNED_FLOAT_BOP(op, base) \
354 template <class DigitsT> \
355 UnsignedFloat<DigitsT> operator op(const UnsignedFloat<DigitsT> &L, \
356 const UnsignedFloat<DigitsT> &R) { \
357 return UnsignedFloat<DigitsT>(L) base R; \
359 UNSIGNED_FLOAT_BOP(+, += )
360 UNSIGNED_FLOAT_BOP(-, -= )
361 UNSIGNED_FLOAT_BOP(*, *= )
362 UNSIGNED_FLOAT_BOP(/, /= )
363 UNSIGNED_FLOAT_BOP(<<, <<= )
364 UNSIGNED_FLOAT_BOP(>>, >>= )
365 #undef UNSIGNED_FLOAT_BOP
// Stream a float with 10 decimal digits of precision (see
// UnsignedFloat::print). NOTE(review): closing brace (original 370) elided.
367 template <class DigitsT>
368 raw_ostream &operator<<(raw_ostream &OS, const UnsignedFloat<DigitsT> &X) {
369 return X.print(OS, 10);
// Generate heterogeneous comparison operators between UnsignedFloat and raw
// integer types. 32-bit operands are widened to the corresponding 64-bit
// type (T2) before being routed through compareTo().
// NOTE(review): the closing lines of both macro bodies (original 376, 380)
// are elided in this excerpt.
372 #define UNSIGNED_FLOAT_COMPARE_TO_TYPE(op, T1, T2) \
373 template <class DigitsT> \
374 bool operator op(const UnsignedFloat<DigitsT> &L, T1 R) { \
375 return L.compareTo(T2(R)) op 0; \
377 template <class DigitsT> \
378 bool operator op(T1 L, const UnsignedFloat<DigitsT> &R) { \
379 return 0 op R.compareTo(T2(L)); \
381 #define UNSIGNED_FLOAT_COMPARE_TO(op) \
382 UNSIGNED_FLOAT_COMPARE_TO_TYPE(op, uint64_t, uint64_t) \
383 UNSIGNED_FLOAT_COMPARE_TO_TYPE(op, uint32_t, uint64_t) \
384 UNSIGNED_FLOAT_COMPARE_TO_TYPE(op, int64_t, int64_t) \
385 UNSIGNED_FLOAT_COMPARE_TO_TYPE(op, int32_t, int64_t)
386 UNSIGNED_FLOAT_COMPARE_TO(< )
387 UNSIGNED_FLOAT_COMPARE_TO(> )
388 UNSIGNED_FLOAT_COMPARE_TO(== )
389 UNSIGNED_FLOAT_COMPARE_TO(!= )
390 UNSIGNED_FLOAT_COMPARE_TO(<= )
391 UNSIGNED_FLOAT_COMPARE_TO(>= )
392 #undef UNSIGNED_FLOAT_COMPARE_TO
393 #undef UNSIGNED_FLOAT_COMPARE_TO_TYPE
// Multiply N by this float at full precision. Fast path: when N fits in
// DigitsType (or Width is already 64), convert N and use the regular
// multiply. Otherwise re-run with 64-bit digits so no information is lost.
// NOTE(review): closing brace (original 402) elided in this excerpt.
395 template <class DigitsT>
396 uint64_t UnsignedFloat<DigitsT>::scale(uint64_t N) const {
397 if (Width == 64 || N <= DigitsLimits::max())
398 return (getFloat(N) * *this).template toInt<uint64_t>();
400 // Defer to the 64-bit version.
401 return UnsignedFloat<uint64_t>(Digits, Exponent).scale(N);
// Raw product of two digit values, normalized into an UnsignedFloat.
// NOTE(review): the second parameter line and interior lines (original
// 406-410) plus the closing brace (417) are elided in this excerpt.
404 template <class DigitsT>
405 UnsignedFloat<DigitsT> UnsignedFloat<DigitsT>::getProduct(DigitsType L,
411 // Check for numbers that we can compute with 64-bit math.
412 if (Width <= 32 || (L <= UINT32_MAX && R <= UINT32_MAX))
413 return adjustToWidth(uint64_t(L) * uint64_t(R), 0);
415 // Do the full thing.
416 return UnsignedFloat(multiply64(L, R));
// Raw quotient of two digit values, normalized into an UnsignedFloat.
// NOTE(review): the early-out guards between original lines 420 and 428
// (and between 428 and 430) are elided in this excerpt. The visible path
// left-shifts the dividend to maximize precision, divides, and rounds on
// the remainder.
418 template <class DigitsT>
419 UnsignedFloat<DigitsT> UnsignedFloat<DigitsT>::getQuotient(DigitsType Dividend,
420 DigitsType Divisor) {
428 return UnsignedFloat(divide64(Dividend, Divisor));
430 // We can compute this with 64-bit math.
431 int Shift = countLeadingZeros64(Dividend);
432 uint64_t Shifted = uint64_t(Dividend) << Shift;
433 uint64_t Quotient = Shifted / Divisor;
435 // If Quotient needs to be shifted, then adjustToWidth will round.
436 if (Quotient > DigitsLimits::max())
437 return adjustToWidth(Quotient, -Shift);
439 // Round based on the value of the next bit.
440 return getRounded(UnsignedFloat(Quotient, -Shift),
441 Shifted % Divisor >= getHalf(Divisor));
// Saturating conversion to IntT: values at or above IntT's max clamp to the
// max; otherwise shift the digits by the exponent (truncating fractions).
// NOTE(review): interior lines (original 448-449, 452-454, 457-458) and the
// closing brace are elided in this excerpt — the declaration of N and the
// branch on the sign of Exponent are among the missing lines.
444 template <class DigitsT>
445 template <class IntT>
446 IntT UnsignedFloat<DigitsT>::toInt() const {
447 typedef std::numeric_limits<IntT> Limits;
450 if (*this >= Limits::max())
451 return Limits::max();
455 assert(size_t(Exponent) < sizeof(IntT) * 8);
456 return N << Exponent;
459 assert(size_t(-Exponent) < sizeof(IntT) * 8);
460 return N >> -Exponent;
// Compute (lg, round-direction): the rounded log base 2 plus a flag that is
// 0 for exact, +1 when rounded up, -1 when rounded down. Zero maps to
// INT32_MIN. NOTE(review): the isZero() guard (original line 467) is elided
// in this excerpt.
465 template <class DigitsT>
466 std::pair<int32_t, int> UnsignedFloat<DigitsT>::lgImpl() const {
468 return std::make_pair(INT32_MIN, 0);
470 // Get the floor of the lg of Digits.
471 int32_t LocalFloor = Width - countLeadingZerosWidth(Digits) - 1;
473 // Get the floor of the lg of this.
474 int32_t Floor = Exponent + LocalFloor;
475 if (Digits == UINT64_C(1) << LocalFloor)
476 return std::make_pair(Floor, 0);
478 // Round based on the next digit.
479 assert(LocalFloor >= 1);
480 bool Round = Digits & UINT64_C(1) << (LocalFloor - 1);
481 return std::make_pair(Floor + Round, Round ? 1 : -1);
// Bring this and X to a common exponent, returning the adjusted X by value.
// Whichever side has the smaller exponent is increased to match.
// NOTE(review): the `return X;` statements and the branch guard between
// original lines 489-493 are elided in this excerpt.
484 template <class DigitsT>
485 UnsignedFloat<DigitsT> UnsignedFloat<DigitsT>::matchExponents(UnsignedFloat X) {
486 if (isZero() || X.isZero() || Exponent == X.Exponent)
489 int32_t Diff = int32_t(X.Exponent) - int32_t(Exponent);
491 increaseExponentToMatch(X, Diff);
493 X.increaseExponentToMatch(*this, -Diff);
// Raise this float's exponent by ExponentDiff without changing the pair's
// combined value: first consume X's leading zeros by shifting X's digits
// left (lowering X's exponent), then shift this float's digits right for
// any remainder. NOTE(review): the bodies of the two saturation branches
// (original 501-504 and 511-515) and the lines shifting X's digits are
// elided in this excerpt.
496 template <class DigitsT>
497 void UnsignedFloat<DigitsT>::increaseExponentToMatch(UnsignedFloat &X,
498 int32_t ExponentDiff) {
499 assert(ExponentDiff > 0);
500 if (ExponentDiff >= 2 * Width) {
505 // Use up any leading zeros on X, and then shift this.
506 int32_t ShiftX = std::min(countLeadingZerosWidth(X.Digits), ExponentDiff);
507 assert(ShiftX < Width);
509 int32_t ShiftThis = ExponentDiff - ShiftX;
510 if (ShiftThis >= Width) {
516 X.Exponent -= ShiftX;
517 Digits >>= ShiftThis;
518 Exponent += ShiftThis;
// Saturating addition: align exponents, add digits, and on digit overflow
// bump the exponent (saturating to getLargest() at MaxExponent).
// NOTE(review): the early-return bodies, the post-overflow bookkeeping, and
// the final return (original 526, 528-529, 532-534, 536-539, 542-545,
// 548-549, 551-552) are elided in this excerpt.
522 template <class DigitsT>
523 UnsignedFloat<DigitsT> &UnsignedFloat<DigitsT>::
524 operator+=(const UnsignedFloat &X) {
525 if (isLargest() || X.isZero())
527 if (isZero() || X.isLargest())
530 // Normalize exponents.
531 UnsignedFloat Scaled = matchExponents(X);
533 // Check for zero again.
535 return *this = Scaled;
540 DigitsType Sum = Digits + Scaled.Digits;
541 bool DidOverflow = Sum < Digits;
546 if (Exponent == MaxExponent)
547 return *this = getLargest();
// On overflow: set the (new) top bit and shift the summed digits right one.
550 Digits = UINT64_C(1) << (Width - 1) | Digits >> 1;
// Saturating subtraction: if X >= *this the result clamps to zero;
// otherwise align exponents and subtract digits, with a correction for the
// case where X barely costs the result its top bit.
// NOTE(review): the leading comparison guard (original 557-559), several
// interior lines (561, 565, 569-571, 573), and the final return are elided
// in this excerpt.
554 template <class DigitsT>
555 UnsignedFloat<DigitsT> &UnsignedFloat<DigitsT>::
556 operator-=(const UnsignedFloat &X) {
560 return *this = getZero();
562 // Normalize exponents.
563 UnsignedFloat Scaled = matchExponents(X);
564 assert(Digits >= Scaled.Digits);
566 // Compute difference.
567 if (!Scaled.isZero()) {
568 Digits -= Scaled.Digits;
572 // Check if X just barely lost its last bit. E.g., for 32-bit:
574 // 1*2^32 - 1*2^0 == 0xffffffff != 1*2^32
575 if (*this == UnsignedFloat(1, X.lgFloor() + Width)) {
// All-ones digits: DigitsType(0) - 1 wraps to the type's max value.
576 Digits = DigitsType(0) - 1;
// Multiplication: sum the exponents in 32-bit math (so the sum cannot wrap
// int16_t), multiply the raw digits via getProduct, then fold the saved
// exponent back in with a saturating shift.
// NOTE(review): the zero/largest early-outs (original 584-588) and the
// closing brace are elided in this excerpt.
581 template <class DigitsT>
582 UnsignedFloat<DigitsT> &UnsignedFloat<DigitsT>::
583 operator*=(const UnsignedFloat &X) {
589 // Save the exponents.
590 int32_t Exponents = int32_t(Exponent) + int32_t(X.Exponent);
592 // Get the raw product.
593 *this = getProduct(Digits, X.Digits);
595 // Combine with exponents.
596 return *this <<= Exponents;
// Division: difference of exponents in 32-bit math, raw digit quotient via
// getQuotient, then fold the exponent back in with a saturating shift.
// Division by zero saturates to getLargest() (visible at original 604; the
// guard lines 601-603 are elided in this excerpt, as is the closing brace).
598 template <class DigitsT>
599 UnsignedFloat<DigitsT> &UnsignedFloat<DigitsT>::
600 operator/=(const UnsignedFloat &X) {
604 return *this = getLargest();
606 // Save the exponents.
607 int32_t Exponents = int32_t(Exponent) - int32_t(X.Exponent);
609 // Get the raw quotient.
610 *this = getQuotient(Digits, X.Digits);
612 // Combine with exponents.
613 return *this <<= Exponents;
// Left shift (i.e., increase the exponent, spilling into the digits when
// the exponent saturates). Negative shifts are redirected to shiftRight
// (that branch, original 620-623, is elided in this excerpt, along with
// several interior lines and the closing brace).
615 template <class DigitsT>
616 void UnsignedFloat<DigitsT>::shiftLeft(int32_t Shift) {
617 if (!Shift || isZero())
// INT32_MIN cannot be negated for the opposite-direction redirect.
619 assert(Shift != INT32_MIN);
625 // Shift as much as we can in the exponent.
626 int32_t ExponentShift = std::min(Shift, MaxExponent - Exponent);
627 Exponent += ExponentShift;
628 if (ExponentShift == Shift)
631 // Check this late, since it's rare.
635 // Shift the digits themselves.
636 Shift -= ExponentShift;
637 if (Shift > countLeadingZerosWidth(Digits)) {
// Overflow into the digits: saturate.
639 *this = getLargest();
// Right shift (i.e., decrease the exponent, spilling into the digits when
// the exponent saturates at MinExponent). Negative shifts redirect to
// shiftLeft (that branch, original 652-655, is elided in this excerpt,
// along with the underflow-to-zero body and closing brace).
647 template <class DigitsT>
648 void UnsignedFloat<DigitsT>::shiftRight(int32_t Shift) {
649 if (!Shift || isZero())
651 assert(Shift != INT32_MIN);
657 // Shift as much as we can in the exponent.
658 int32_t ExponentShift = std::min(Shift, Exponent - MinExponent);
659 Exponent -= ExponentShift;
660 if (ExponentShift == Shift)
663 // Shift the digits themselves.
664 Shift -= ExponentShift;
665 if (Shift >= Width) {
// Three-way comparison: handle zeros, then compare magnitudes by lgFloor
// (which bounds the exponent difference below 64), and finally compare
// digits via the base-class shift-aware compare.
// NOTE(review): the zero guard before original 679, the X-zero branch, and
// the lgL == lgR guard (original 686) are elided in this excerpt.
675 template <class DigitsT>
676 int UnsignedFloat<DigitsT>::compare(const UnsignedFloat &X) const {
679 return X.isZero() ? 0 : -1;
683 // Check for the scale. Use lgFloor to be sure that the exponent difference
684 // is always lower than 64.
685 int32_t lgL = lgFloor(), lgR = X.lgFloor();
687 return lgL < lgR ? -1 : 1;
690 if (Exponent < X.Exponent)
691 return UnsignedFloatBase::compare(Digits, X.Digits, X.Exponent - Exponent);
693 return -UnsignedFloatBase::compare(X.Digits, Digits, Exponent - X.Exponent);
// Mark UnsignedFloat as POD-like so LLVM containers may memcpy it.
// NOTE(review): the closing `};` (original 698) is elided in this excerpt.
696 template <class T> struct isPodLike<UnsignedFloat<T>> {
697 static const bool value = true;
701 //===----------------------------------------------------------------------===//
703 // BlockMass definition.
705 // TODO: Make this private to BlockFrequencyInfoImpl or delete.
707 //===----------------------------------------------------------------------===//
710 /// \brief Mass of a block.
712 /// This class implements a sort of fixed-point fraction always between 0.0 and
713 /// 1.0. getMass() == UINT64_MAX indicates a value of 1.0.
715 /// Masses can be added and subtracted. Simple saturation arithmetic is used,
716 /// so arithmetic operations never overflow or underflow.
718 /// Masses can be multiplied. Multiplication treats full mass as 1.0 and uses
719 /// an inexpensive floating-point algorithm that's off-by-one (almost, but not
720 /// quite, maximum precision).
722 /// Masses can be scaled by \a BranchProbability at maximum precision.
// NOTE(review): the `class BlockMass {` opening, the Mass member
// declaration, and the closing brace (original 724-726 and onward) are
// elided in this excerpt; code kept byte-identical.
727 BlockMass() : Mass(0) {}
728 explicit BlockMass(uint64_t Mass) : Mass(Mass) {}
730 static BlockMass getEmpty() { return BlockMass(); }
731 static BlockMass getFull() { return BlockMass(UINT64_MAX); }
733 uint64_t getMass() const { return Mass; }
735 bool isFull() const { return Mass == UINT64_MAX; }
736 bool isEmpty() const { return !Mass; }
738 bool operator!() const { return isEmpty(); }
740 /// \brief Add another mass.
742 /// Adds another mass, saturating at \a isFull() rather than overflowing.
743 BlockMass &operator+=(const BlockMass &X) {
744 uint64_t Sum = Mass + X.Mass;
// Unsigned wraparound (Sum < Mass) signals overflow; clamp to full.
745 Mass = Sum < Mass ? UINT64_MAX : Sum;
749 /// \brief Subtract another mass.
751 /// Subtracts another mass, saturating at \a isEmpty() rather than
753 BlockMass &operator-=(const BlockMass &X) {
754 uint64_t Diff = Mass - X.Mass;
// Unsigned wraparound (Diff > Mass) signals underflow; clamp to empty.
755 Mass = Diff > Mass ? 0 : Diff;
759 /// \brief Scale by another mass.
761 /// The current implementation is a little imprecise, but it's relatively
762 /// fast, never overflows, and maintains the property that 1.0*1.0==1.0
763 /// (where isFull represents the number 1.0). It's an approximation of
764 /// 128-bit multiply that gets right-shifted by 64-bits.
766 /// For a given digit size, multiplying two-digit numbers looks like:
772 /// + 0 . U1*L2 . 0 // (shift left once by a digit-size)
773 /// + 0 . U2*L1 . 0 // (shift left once by a digit-size)
774 /// + U1*L2 . 0 . 0 // (shift left twice by a digit-size)
776 /// BlockMass has 64-bit numbers. Split each into two 32-bit digits, stored
777 /// 64-bit. Add 1 to the lower digits, to model isFull as 1.0; this won't
778 /// overflow, since we have 64-bit storage for each digit.
780 /// To do this accurately, (a) multiply into two 64-bit digits, incrementing
781 /// the upper digit on overflows of the lower digit (carry), (b) subtract 1
782 /// from the lower digit, decrementing the upper digit on underflow (carry),
783 /// and (c) truncate the lower digit. For the 1.0*1.0 case, the upper digit
784 /// will be 0 at the end of step (a), and then will underflow back to isFull
785 /// (1.0) in step (b).
787 /// Instead, the implementation does something a little faster with a small
788 /// loss of accuracy: ignore the lower 64-bit digit entirely. The loss of
789 /// accuracy is small, since the sum of the unmodelled carries is 0 or 1
790 /// (i.e., step (a) will overflow at most once, and step (b) will underflow
791 /// only if step (a) overflows).
793 /// This is the formula we're calculating:
795 /// U1.L1 * U2.L2 == U1 * U2 + (U1 * (L2+1))>>32 + (U2 * (L1+1))>>32
797 /// As a demonstration of 1.0*1.0, consider two 4-bit numbers that are both
800 /// U1.L1 * U2.L2 == U1 * U2 + (U1 * (L2+1))>>2 + (U2 * (L1+1))>>2
801 /// 11.11 * 11.11 == 11 * 11 + (11 * (11+1))/4 + (11 * (11+1))/4
802 /// == 1001 + (11 * 100)/4 + (11 * 100)/4
803 /// == 1001 + 1100/4 + 1100/4
804 /// == 1001 + 0011 + 0011
806 BlockMass &operator*=(const BlockMass &X) {
807 uint64_t U1 = Mass >> 32, L1 = Mass & UINT32_MAX, U2 = X.Mass >> 32,
808 L2 = X.Mass & UINT32_MAX;
809 Mass = U1 * U2 + (U1 * (L2 + 1) >> 32) + ((L1 + 1) * U2 >> 32);
813 /// \brief Multiply by a branch probability.
815 /// Multiply by P. Guarantees full precision.
817 /// This could be naively implemented by multiplying by the numerator and
818 /// dividing by the denominator, but in what order? Multiplying first can
819 /// overflow, while dividing first will lose precision (potentially, changing
820 /// a non-zero mass to zero).
822 /// The implementation mixes the two methods. Since \a BranchProbability
823 /// uses 32-bits and \a BlockMass 64-bits, shift the mass as far to the left
824 /// as there is room, then divide by the denominator to get a quotient.
825 /// Multiplying by the numerator and right shifting gives a first
828 /// Calculate the error in this first approximation by calculating the
829 /// opposite mass (multiply by the opposite numerator and shift) and
830 /// subtracting both from the original mass.
832 /// Add to the first approximation the correct fraction of this error value.
833 /// This time, multiply first and then divide, since there is no danger of
836 /// \pre P represents a fraction between 0.0 and 1.0.
837 BlockMass &operator*=(const BranchProbability &P);
839 bool operator==(const BlockMass &X) const { return Mass == X.Mass; }
840 bool operator!=(const BlockMass &X) const { return Mass != X.Mass; }
841 bool operator<=(const BlockMass &X) const { return Mass <= X.Mass; }
842 bool operator>=(const BlockMass &X) const { return Mass >= X.Mass; }
843 bool operator<(const BlockMass &X) const { return Mass < X.Mass; }
844 bool operator>(const BlockMass &X) const { return Mass > X.Mass; }
846 /// \brief Convert to floating point.
848 /// Convert to a float. \a isFull() gives 1.0, while \a isEmpty() gives
849 /// slightly above 0.0.
850 UnsignedFloat<uint64_t> toFloat() const;
853 raw_ostream &print(raw_ostream &OS) const;
// Non-member arithmetic on BlockMass, defined in terms of the
// compound-assignment members. NOTE(review): each function's closing brace
// and the body of the final operator<< are elided in this excerpt.
856 inline BlockMass operator+(const BlockMass &L, const BlockMass &R) {
857 return BlockMass(L) += R;
859 inline BlockMass operator-(const BlockMass &L, const BlockMass &R) {
860 return BlockMass(L) -= R;
862 inline BlockMass operator*(const BlockMass &L, const BlockMass &R) {
863 return BlockMass(L) *= R;
865 inline BlockMass operator*(const BlockMass &L, const BranchProbability &R) {
866 return BlockMass(L) *= R;
868 inline BlockMass operator*(const BranchProbability &L, const BlockMass &R) {
// BranchProbability * BlockMass commutes to the member operator on R.
869 return BlockMass(R) *= L;
872 inline raw_ostream &operator<<(raw_ostream &OS, const BlockMass &X) {
// Mark BlockMass as POD-like so LLVM containers may memcpy it.
// NOTE(review): the closing `};` (original 878) is elided in this excerpt.
876 template <> struct isPodLike<BlockMass> {
877 static const bool value = true;
881 //===----------------------------------------------------------------------===//
883 // BlockFrequencyInfoImpl definition.
885 //===----------------------------------------------------------------------===//
889 class BranchProbabilityInfo;
893 class MachineBasicBlock;
894 class MachineBranchProbabilityInfo;
895 class MachineFunction;
897 class MachineLoopInfo;
899 /// \brief Base class for BlockFrequencyInfoImpl
901 /// BlockFrequencyInfoImplBase has supporting data structures and some
902 /// algorithms for BlockFrequencyInfoImplBase. Only algorithms that depend on
903 /// the block type (or that call such algorithms) are skipped here.
905 /// Nevertheless, the majority of the overall algorithm documentation lives with
906 /// BlockFrequencyInfoImpl. See there for details.
907 class BlockFrequencyInfoImplBase {
909 typedef UnsignedFloat<uint64_t> Float;
911 /// \brief Representative of a block.
913 /// This is a simple wrapper around an index into the reverse-post-order
914 /// traversal of the blocks.
916 /// Unlike a block pointer, its order has meaning (location in the
917 /// topological sort) and its class is the same regardless of block type.
919 typedef uint32_t IndexType;
922 bool operator==(const BlockNode &X) const { return Index == X.Index; }
923 bool operator!=(const BlockNode &X) const { return Index != X.Index; }
924 bool operator<=(const BlockNode &X) const { return Index <= X.Index; }
925 bool operator>=(const BlockNode &X) const { return Index >= X.Index; }
926 bool operator<(const BlockNode &X) const { return Index < X.Index; }
927 bool operator>(const BlockNode &X) const { return Index > X.Index; }
929 BlockNode() : Index(UINT32_MAX) {}
930 BlockNode(IndexType Index) : Index(Index) {}
932 bool isValid() const { return Index <= getMaxIndex(); }
933 static size_t getMaxIndex() { return UINT32_MAX - 1; }
936 /// \brief Stats about a block itself.
937 struct FrequencyData {
942 /// \brief Data about a loop.
944 /// Contains the data necessary to represent a loop as a
945 /// pseudo-node once it's packaged.
947 typedef SmallVector<std::pair<BlockNode, BlockMass>, 4> ExitMap;
948 typedef SmallVector<BlockNode, 4> NodeList;
949 LoopData *Parent; ///< The parent loop.
950 bool IsPackaged; ///< Whether this has been packaged.
951 ExitMap Exits; ///< Successor edges (and weights).
952 NodeList Nodes; ///< Header and the members of the loop.
953 BlockMass BackedgeMass; ///< Mass returned to loop header.
957 LoopData(LoopData *Parent, const BlockNode &Header)
958 : Parent(Parent), IsPackaged(false), Nodes(1, Header) {}
959 bool isHeader(const BlockNode &Node) const { return Node == Nodes[0]; }
960 BlockNode getHeader() const { return Nodes[0]; }
962 NodeList::const_iterator members_begin() const { return Nodes.begin() + 1; }
963 NodeList::const_iterator members_end() const { return Nodes.end(); }
964 iterator_range<NodeList::const_iterator> members() const {
965 return make_range(members_begin(), members_end());
969 /// \brief Index of loop information.
971 BlockNode Node; ///< This node.
972 LoopData *Loop; ///< The loop this block is inside.
973 BlockMass Mass; ///< Mass distribution from the entry block.
975 WorkingData(const BlockNode &Node) : Node(Node), Loop(nullptr) {}
977 bool isLoopHeader() const { return Loop && Loop->isHeader(Node); }
978 bool hasLoopHeader() const { return isLoopHeader() ? Loop->Parent : Loop; }
980 LoopData *getContainingLoop() const {
981 return isLoopHeader() ? Loop->Parent : Loop;
983 BlockNode getContainingHeader() const {
984 auto *ContainingLoop = getContainingLoop();
986 return ContainingLoop->getHeader();
990 /// \brief Has ContainingLoop been packaged up?
991 bool isPackaged() const {
992 auto *ContainingLoop = getContainingLoop();
993 return ContainingLoop && ContainingLoop->IsPackaged;
995 /// \brief Has Loop been packaged up?
996 bool isAPackage() const { return isLoopHeader() && Loop->IsPackaged; }
999 /// \brief Unscaled probability weight.
1001 /// Probability weight for an edge in the graph (including the
1002 /// successor/target node).
1004 /// All edges in the original function are 32-bit. However, exit edges from
1005 /// loop packages are taken from 64-bit exit masses, so we need 64-bits of
1006 /// space in general.
1008 /// In addition to the raw weight amount, Weight stores the type of the edge
1009 /// in the current context (i.e., the context of the loop being processed).
1010 /// Is this a local edge within the loop, an exit from the loop, or a
1011 /// backedge to the loop header?
1013 enum DistType { Local, Exit, Backedge };
1015 BlockNode TargetNode;
1017 Weight() : Type(Local), Amount(0) {}
1020 /// \brief Distribution of unscaled probability weight.
1022 /// Distribution of unscaled probability weight to a set of successors.
1024 /// This class collates the successor edge weights for later processing.
1026 /// \a DidOverflow indicates whether \a Total did overflow while adding to
1027 /// the distribution. It should never overflow twice.
1028 struct Distribution {
1029 typedef SmallVector<Weight, 4> WeightList;
1030 WeightList Weights; ///< Individual successor weights.
1031 uint64_t Total; ///< Sum of all weights.
1032 bool DidOverflow; ///< Whether \a Total did overflow.
1034 Distribution() : Total(0), DidOverflow(false) {}
// The three helpers below only tag the edge with its category; all of the
// bookkeeping (summing into Total, overflow tracking) is centralized in
// the private add() declared at the bottom of this struct.
1035 void addLocal(const BlockNode &Node, uint64_t Amount) {
1036 add(Node, Amount, Weight::Local);
1038 void addExit(const BlockNode &Node, uint64_t Amount) {
1039 add(Node, Amount, Weight::Exit);
1041 void addBackedge(const BlockNode &Node, uint64_t Amount) {
1042 add(Node, Amount, Weight::Backedge);
1045 /// \brief Normalize the distribution.
1047 /// Combines multiple edges to the same \a Weight::TargetNode and scales
1048 /// down so that \a Total fits into 32-bits.
1050 /// This is linear in the size of \a Weights. For the vast majority of
1051 /// cases, adjacent edge weights are combined by sorting WeightList and
1052 /// combining adjacent weights. However, for very large edge lists an
1053 /// auxiliary hash table is used.
1057 void add(const BlockNode &Node, uint64_t Amount, Weight::DistType Type);
1060 /// \brief Data about each block. This is used downstream.
1061 std::vector<FrequencyData> Freqs;
1063 /// \brief Loop data: see initializeLoops().
1064 std::vector<WorkingData> Working;
1066 /// \brief Indexed information about loops.
1067 std::list<LoopData> Loops;
1069 /// \brief Add all edges out of a packaged loop to the distribution.
1071 /// Adds all edges from LocalLoopHead to Dist. Calls addToDist() to add each
1073 void addLoopSuccessorsToDist(const LoopData *OuterLoop, LoopData &Loop,
1074 Distribution &Dist);
1076 /// \brief Add an edge to the distribution.
1078 /// Adds an edge to Succ to Dist. If \c LoopHead.isValid(), then whether the
1079 /// edge is local/exit/backedge is in the context of LoopHead. Otherwise,
1080 /// every edge should be a local edge (since all the loops are packaged up).
1081 void addToDist(Distribution &Dist, const LoopData *OuterLoop,
1082 const BlockNode &Pred, const BlockNode &Succ, uint64_t Weight);
// Look up the LoopData for a loop header node.  Asserts that Head really
// is a header; the returned reference is backed by the Loops list.
1084 LoopData &getLoopPackage(const BlockNode &Head) {
1085 assert(Head.Index < Working.size());
1086 assert(Working[Head.Index].isLoopHeader());
1087 return *Working[Head.Index].Loop;
1090 /// \brief Get a possibly packaged node.
1092 /// Get the node currently representing Node, which could be a containing
1095 /// This function should only be called when distributing mass. As long as
1096 /// there are no irreducible edges to Node, then it will have complexity O(1)
1097 /// in this context.
1099 /// In general, the complexity is O(L), where L is the number of loop headers
1100 /// Node has been packaged into. Since this method is called in the context
1101 /// of distributing mass, L will be the number of loop headers an early exit
1102 /// edge jumps out of.
// Recurses outward through containing headers until it finds the node
// that currently stands in for Node in the (partially packaged) graph.
1103 BlockNode getPackagedNode(const BlockNode &Node) {
1104 assert(Node.isValid());
1105 if (!Working[Node.Index].isPackaged())
1107 if (!Working[Node.Index].isAPackage())
1109 return getPackagedNode(Working[Node.Index].getContainingHeader());
1112 /// \brief Distribute mass according to a distribution.
1114 /// Distributes the mass in Source according to Dist. If LoopHead.isValid(),
1115 /// backedges and exits are stored in its entry in Loops.
1117 /// Mass is distributed in parallel from two copies of the source mass.
1118 void distributeMass(const BlockNode &Source, LoopData *OuterLoop,
1119 Distribution &Dist);
1121 /// \brief Compute the loop scale for a loop.
1122 void computeLoopScale(LoopData &Loop);
1124 /// \brief Package up a loop.
1125 void packageLoop(LoopData &Loop);
1127 /// \brief Unwrap loops.
1130 /// \brief Finalize frequency metrics.
1132 /// Calculates final frequencies and cleans up no-longer-needed data
1134 void finalizeMetrics();
1136 /// \brief Clear all memory.
// Base implementation returns OS unchanged; subclasses override with a
// graph-aware printer (see BlockFrequencyInfoImpl::print()).
1139 virtual std::string getBlockName(const BlockNode &Node) const;
1141 virtual raw_ostream &print(raw_ostream &OS) const { return OS; }
1142 void dump() const { print(dbgs()); }
1144 Float getFloatingBlockFreq(const BlockNode &Node) const;
1146 BlockFrequency getBlockFreq(const BlockNode &Node) const;
1148 raw_ostream &printBlockFreq(raw_ostream &OS, const BlockNode &Node) const;
1149 raw_ostream &printBlockFreq(raw_ostream &OS,
1150 const BlockFrequency &Freq) const;
// Entry block frequency is by construction Freqs[0] (node 0 is the entry
// in reverse post-order).
1152 uint64_t getEntryFreq() const {
1153 assert(!Freqs.empty());
1154 return Freqs[0].Integer;
1156 /// \brief Virtual destructor.
1158 /// Need a virtual destructor to mask the compiler warning about
1160 virtual ~BlockFrequencyInfoImplBase() {}
1163 namespace bfi_detail {
// TypeMap selects the IR or CodeGen family of types from the block type,
// letting BlockFrequencyInfoImpl<BT> be shared between the two worlds.
1164 template <class BlockT> struct TypeMap {};
1165 template <> struct TypeMap<BasicBlock> {
1166 typedef BasicBlock BlockT;
1167 typedef Function FunctionT;
1168 typedef BranchProbabilityInfo BranchProbabilityInfoT;
1170 typedef LoopInfo LoopInfoT;
1172 template <> struct TypeMap<MachineBasicBlock> {
1173 typedef MachineBasicBlock BlockT;
1174 typedef MachineFunction FunctionT;
1175 typedef MachineBranchProbabilityInfo BranchProbabilityInfoT;
1176 typedef MachineLoop LoopT;
1177 typedef MachineLoopInfo LoopInfoT;
1180 /// \brief Get the name of a MachineBasicBlock.
1182 /// Get the name of a MachineBasicBlock. It's templated so that including from
1183 /// CodeGen is unnecessary (that would be a layering issue).
1185 /// This is used mainly for debug output. The name is similar to
1186 /// MachineBasicBlock::getFullName(), but skips the name of the function.
1187 template <class BlockT> std::string getBlockName(const BlockT *BB) {
1188 assert(BB && "Unexpected nullptr");
// "BB<number>", optionally suffixed with the IR block's name in brackets
// when the machine block still tracks its originating BasicBlock.
1189 auto MachineName = "BB" + Twine(BB->getNumber());
1190 if (BB->getBasicBlock())
1191 return (MachineName + "[" + BB->getName() + "]").str();
1192 return MachineName.str();
1194 /// \brief Get the name of a BasicBlock.
1195 template <> inline std::string getBlockName(const BasicBlock *BB) {
1196 assert(BB && "Unexpected nullptr");
1197 return BB->getName().str();
1201 /// \brief Shared implementation for block frequency analysis.
1203 /// This is a shared implementation of BlockFrequencyInfo and
1204 /// MachineBlockFrequencyInfo, and calculates the relative frequencies of
1207 /// This algorithm leverages BlockMass and UnsignedFloat to maintain precision,
1208 /// separates mass distribution from loop scaling, and dithers to eliminate
1209 /// probability mass loss.
1211 /// The implementation is split between BlockFrequencyInfoImpl, which knows the
1212 /// type of graph being modelled (BasicBlock vs. MachineBasicBlock), and
1213 /// BlockFrequencyInfoImplBase, which doesn't. The base class uses \a
1214 /// BlockNode, a wrapper around a uint32_t. BlockNode is numbered from 0 in
1215 /// reverse-post order. This gives two advantages: it's easy to compare the
1216 /// relative ordering of two nodes, and maps keyed on BlockT can be represented
1219 /// This algorithm is O(V+E), unless there is irreducible control flow, in
1220 /// which case it's O(V*E) in the worst case.
1222 /// These are the main stages:
1224 /// 0. Reverse post-order traversal (\a initializeRPOT()).
1226 /// Run a single post-order traversal and save it (in reverse) in RPOT.
1227 /// All other stages make use of this ordering. Save a lookup from BlockT
1228 /// to BlockNode (the index into RPOT) in Nodes.
1230 /// 1. Loop indexing (\a initializeLoops()).
1232 /// Translate LoopInfo/MachineLoopInfo into a form suitable for the rest of
1233 /// the algorithm. In particular, store the immediate members of each loop
1234 /// in reverse post-order.
1236 /// 2. Calculate mass and scale in loops (\a computeMassInLoops()).
1238 /// For each loop (bottom-up), distribute mass through the DAG resulting
1239 /// from ignoring backedges and treating sub-loops as a single pseudo-node.
1240 /// Track the backedge mass distributed to the loop header, and use it to
1241 /// calculate the loop scale (number of loop iterations).
1243 /// Visiting loops bottom-up is a post-order traversal of loop headers.
1244 /// For each loop, immediate members that represent sub-loops will already
1245 /// have been visited and packaged into a pseudo-node.
1247 /// Distributing mass in a loop is a reverse-post-order traversal through
1248 /// the loop. Start by assigning full mass to the Loop header. For each
1249 /// node in the loop:
1251 /// - Fetch and categorize the weight distribution for its successors.
1252 /// If this is a packaged-subloop, the weight distribution is stored
1253 /// in \a LoopData::Exits. Otherwise, fetch it from
1254 /// BranchProbabilityInfo.
1256 /// - Each successor is categorized as \a Weight::Local, a local edge
1257 /// within the current loop, \a Weight::Backedge, a backedge to the
1258 /// loop header, or \a Weight::Exit, any successor outside the loop.
1259 /// The weight, the successor, and its category are stored in \a
1260 /// Distribution. There can be multiple edges to each successor.
1262 /// - Normalize the distribution: scale weights down so that their sum
1263 /// is 32-bits, and coalesce multiple edges to the same node.
1265 /// - Distribute the mass accordingly, dithering to minimize mass loss,
1266 /// as described in \a distributeMass().
1268 /// Finally, calculate the loop scale from the accumulated backedge mass.
1270 /// 3. Distribute mass in the function (\a computeMassInFunction()).
1272 /// Finally, distribute mass through the DAG resulting from packaging all
1273 /// loops in the function. This uses the same algorithm as distributing
1274 /// mass in a loop, except that there are no exit or backedge edges.
1276 /// 4. Loop unpackaging and cleanup (\a finalizeMetrics()).
1278 /// Initialize the frequency to a floating point representation of its
1281 /// Visit loops top-down (reverse post-order), scaling the loop header's
1282 /// frequency by its pseudo-node's mass and loop scale. Keep track of the
1283 /// minimum and maximum final frequencies.
1285 /// Using the min and max frequencies as a guide, translate floating point
1286 /// frequencies to an appropriate range in uint64_t.
1288 /// It has some known flaws.
1290 /// - Irreducible control flow isn't modelled correctly. In particular,
1291 /// LoopInfo and MachineLoopInfo ignore irreducible backedges. The main
1292 /// result is that irreducible SCCs will be under-scaled. No mass is lost,
1293 /// but the computed branch weights for the loop pseudo-node will be
1296 /// Modelling irreducible control flow exactly involves setting up and
1297 /// solving a group of infinite geometric series. Such precision is
1298 /// unlikely to be worthwhile, since most of our algorithms give up on
1299 /// irreducible control flow anyway.
1301 /// Nevertheless, we might find that we need to get closer. If
1302 /// LoopInfo/MachineLoopInfo flags loops with irreducible control flow
1303 /// (and/or the function as a whole), we can find the SCCs, compute an
1304 /// approximate exit frequency for the SCC as a whole, and scale up
1307 /// - Loop scale is limited to 4096 per loop (2^12) to avoid exhausting
1308 /// BlockFrequency's 64-bit integer precision.
1309 template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
// Pull the concrete IR/MachineIR types out of the TypeMap for BT.
1310 typedef typename bfi_detail::TypeMap<BT>::BlockT BlockT;
1311 typedef typename bfi_detail::TypeMap<BT>::FunctionT FunctionT;
1312 typedef typename bfi_detail::TypeMap<BT>::BranchProbabilityInfoT
1313 BranchProbabilityInfoT;
1314 typedef typename bfi_detail::TypeMap<BT>::LoopT LoopT;
1315 typedef typename bfi_detail::TypeMap<BT>::LoopInfoT LoopInfoT;
// Forward/backward CFG traversal through GraphTraits.
1317 typedef GraphTraits<const BlockT *> Successor;
1318 typedef GraphTraits<Inverse<const BlockT *>> Predecessor;
1320 const BranchProbabilityInfoT *BPI;
1321 const LoopInfoT *LI;
1324 // All blocks in reverse postorder.
1325 std::vector<const BlockT *> RPOT;
// Inverse of RPOT: block pointer -> its BlockNode (RPOT index).
1326 DenseMap<const BlockT *, BlockNode> Nodes;
1328 typedef typename std::vector<const BlockT *>::const_iterator rpot_iterator;
1330 rpot_iterator rpot_begin() const { return RPOT.begin(); }
1331 rpot_iterator rpot_end() const { return RPOT.end(); }
1333 size_t getIndex(const rpot_iterator &I) const { return I - rpot_begin(); }
// A BlockNode is just the RPOT index of the block it names.
1335 BlockNode getNode(const rpot_iterator &I) const {
1336 return BlockNode(getIndex(I));
1338 BlockNode getNode(const BlockT *BB) const { return Nodes.lookup(BB); }
1340 const BlockT *getBlock(const BlockNode &Node) const {
1341 assert(Node.Index < RPOT.size());
1342 return RPOT[Node.Index];
1345 /// \brief Run (and save) a post-order traversal.
1347 /// Saves a reverse post-order traversal of all the nodes in \a F.
1348 void initializeRPOT();
1350 /// \brief Initialize loop data.
1352 /// Build up \a Loops using \a LoopInfo. \a LoopInfo gives us a mapping from
1353 /// each block to the deepest loop it's in, but we need the inverse. For each
1354 /// loop, we store in reverse post-order its "immediate" members, defined as
1355 /// the header, the headers of immediate sub-loops, and all other blocks in
1356 /// the loop that are not in sub-loops.
1357 void initializeLoops();
1359 /// \brief Propagate to a block's successors.
1361 /// In the context of distributing mass through \c OuterLoop, divide the mass
1362 /// currently assigned to \c Node between its successors.
1363 void propagateMassToSuccessors(LoopData *OuterLoop, const BlockNode &Node);
1365 /// \brief Compute mass in a particular loop.
1367 /// Assign mass to \c Loop's header, and then for each block in \c Loop in
1368 /// reverse post-order, distribute mass to its successors. Only visits nodes
1369 /// that have not been packaged into sub-loops.
1371 /// \pre \a computeMassInLoop() has been called for each subloop of \c Loop.
1372 void computeMassInLoop(LoopData &Loop);
1374 /// \brief Compute mass in all loops.
1376 /// For each loop bottom-up, call \a computeMassInLoop().
1377 void computeMassInLoops();
1379 /// \brief Compute mass in the top-level function.
1381 /// Assign mass to the entry block, and then for each block in reverse
1382 /// post-order, distribute mass to its successors. Skips nodes that have
1383 /// been packaged into loops.
1385 /// \pre \a computeMassInLoops() has been called.
1386 void computeMassInFunction();
1388 std::string getBlockName(const BlockNode &Node) const override {
1389 return bfi_detail::getBlockName(getBlock(Node));
1393 const FunctionT *getFunction() const { return F; }
1395 void doFunction(const FunctionT *F, const BranchProbabilityInfoT *BPI,
1396 const LoopInfoT *LI);
// NOTE(review): prefer nullptr over 0 for pointer members; the rest of
// this header already uses nullptr.
1397 BlockFrequencyInfoImpl() : BPI(0), LI(0), F(0) {}
1399 using BlockFrequencyInfoImplBase::getEntryFreq;
// Block-pointer overloads: translate BB to its BlockNode and defer to the
// base-class implementations.
1400 BlockFrequency getBlockFreq(const BlockT *BB) const {
1401 return BlockFrequencyInfoImplBase::getBlockFreq(getNode(BB));
1403 Float getFloatingBlockFreq(const BlockT *BB) const {
1404 return BlockFrequencyInfoImplBase::getFloatingBlockFreq(getNode(BB));
1407 /// \brief Print the frequencies for the current function.
1409 /// Prints the frequencies for the blocks in the current function.
1411 /// Blocks are printed in the natural iteration order of the function, rather
1412 /// than reverse post-order. This provides two advantages: writing -analyze
1413 /// tests is easier (since blocks come out in source order), and even
1414 /// unreachable blocks are printed.
1416 /// \a BlockFrequencyInfoImplBase::print() only knows reverse post-order, so
1417 /// we need to override it here.
1418 raw_ostream &print(raw_ostream &OS) const override;
1419 using BlockFrequencyInfoImplBase::dump;
1421 using BlockFrequencyInfoImplBase::printBlockFreq;
1422 raw_ostream &printBlockFreq(raw_ostream &OS, const BlockT *BB) const {
1423 return BlockFrequencyInfoImplBase::printBlockFreq(OS, getNode(BB));
// Top-level driver: (re)run the whole analysis for function F using the
// given branch-probability and loop analyses.
1428 void BlockFrequencyInfoImpl<BT>::doFunction(const FunctionT *F,
1429 const BranchProbabilityInfoT *BPI,
1430 const LoopInfoT *LI) {
1431 // Save the parameters.
1436 // Clean up left-over data structures.
1437 BlockFrequencyInfoImplBase::clear();
1442 DEBUG(dbgs() << "\nblock-frequency: " << F->getName() << "\n================="
1443 << std::string(F->getName().size(), '=') << "\n");
1447 // Visit loops in post-order to find the local mass distribution, and then do
1448 // the full function.
1449 computeMassInLoops();
1450 computeMassInFunction();
// Stage 0: save a reverse post-order traversal of F and build the
// per-block Working/Freqs arrays indexed by RPOT position.
1455 template <class BT> void BlockFrequencyInfoImpl<BT>::initializeRPOT() {
1456 const BlockT *Entry = F->begin();
1457 RPOT.reserve(F->size());
// Post-order then reverse == reverse post-order.
1458 std::copy(po_begin(Entry), po_end(Entry), std::back_inserter(RPOT));
1459 std::reverse(RPOT.begin(), RPOT.end());
// NOTE(review): RPOT.size() - 1 wraps around if RPOT is empty -- verify
// an empty function cannot reach this point.
1461 assert(RPOT.size() - 1 <= BlockNode::getMaxIndex() &&
1462 "More nodes in function than Block Frequency Info supports");
1464 DEBUG(dbgs() << "reverse-post-order-traversal\n");
1465 for (rpot_iterator I = rpot_begin(), E = rpot_end(); I != E; ++I) {
1466 BlockNode Node = getNode(I);
1467 DEBUG(dbgs() << " - " << getIndex(I) << ": " << getBlockName(Node) << "\n");
// One WorkingData and one FrequencyData slot per block, keyed by index.
1471 Working.reserve(RPOT.size());
1472 for (size_t Index = 0; Index < RPOT.size(); ++Index)
1473 Working.emplace_back(Index);
1474 Freqs.resize(RPOT.size());
// Stage 1: translate LoopInfo into the Loops list and record, for every
// block, which loop it is an immediate member of.
1477 template <class BT> void BlockFrequencyInfoImpl<BT>::initializeLoops() {
1478 DEBUG(dbgs() << "loop-detection\n");
1482 // Visit loops top down and assign them an index.
// BFS over the loop tree so parents are created before their children;
// each queue entry pairs a loop with its already-created parent LoopData.
1483 std::deque<std::pair<const LoopT *, LoopData *>> Q;
1484 for (const LoopT *L : *LI)
1485 Q.emplace_back(L, nullptr);
1486 while (!Q.empty()) {
1487 const LoopT *Loop = Q.front().first;
1488 LoopData *Parent = Q.front().second;
1491 BlockNode Header = getNode(Loop->getHeader());
1492 assert(Header.isValid());
// Loops is a std::list, so &Loops.back() stays valid as more loops are
// appended below.
1494 Loops.emplace_back(Parent, Header);
1495 Working[Header.Index].Loop = &Loops.back();
1496 DEBUG(dbgs() << " - loop = " << getBlockName(Header) << "\n");
1498 for (const LoopT *L : *Loop)
1499 Q.emplace_back(L, &Loops.back());
1502 // Visit nodes in reverse post-order and add them to their deepest containing
1504 for (size_t Index = 0; Index < RPOT.size(); ++Index) {
1505 // Loop headers have already been mostly mapped.
// A header is listed as a member of its *containing* loop, not its own.
1506 if (Working[Index].isLoopHeader()) {
1507 LoopData *ContainingLoop = Working[Index].getContainingLoop();
1509 ContainingLoop->Nodes.push_back(Index);
1513 const LoopT *Loop = LI->getLoopFor(RPOT[Index]);
1517 // Add this node to its containing loop's member list.
1518 BlockNode Header = getNode(Loop->getHeader());
1519 assert(Header.isValid());
1520 const auto &HeaderData = Working[Header.Index];
1521 assert(HeaderData.isLoopHeader());
1523 Working[Index].Loop = HeaderData.Loop;
1524 HeaderData.Loop->Nodes.push_back(Index);
1525 DEBUG(dbgs() << " - loop = " << getBlockName(Header)
1526 << ": member = " << getBlockName(Index) << "\n");
// Stage 2 driver: compute mass loop-by-loop, innermost loops first.
1530 template <class BT> void BlockFrequencyInfoImpl<BT>::computeMassInLoops() {
1531 // Visit loops with the deepest first, and the top-level loops last.
// Reverse iteration gives bottom-up order because initializeLoops()
// filled Loops top-down.
1532 for (auto L = Loops.rbegin(), E = Loops.rend(); L != E; ++L)
1533 computeMassInLoop(*L);
// Distribute mass through one loop (sub-loops already packaged), then
// derive the loop's scale from the accumulated backedge mass.
1537 void BlockFrequencyInfoImpl<BT>::computeMassInLoop(LoopData &Loop) {
1538 // Compute mass in loop.
1539 DEBUG(dbgs() << "compute-mass-in-loop: " << getBlockName(Loop.getHeader())
// Seed the header with full mass, then push it through the loop body in
// reverse post-order (members() excludes nodes packaged into sub-loops).
1542 Working[Loop.getHeader().Index].Mass = BlockMass::getFull();
1543 propagateMassToSuccessors(&Loop, Loop.getHeader());
1545 for (const BlockNode &M : Loop.members())
1546 propagateMassToSuccessors(&Loop, M);
1548 computeLoopScale(Loop);
// Stage 3: distribute mass through the top-level DAG (all loops are now
// packaged, so there are no exit or backedge edges at this level).
1552 template <class BT> void BlockFrequencyInfoImpl<BT>::computeMassInFunction() {
1553 // Compute mass in function.
1554 DEBUG(dbgs() << "compute-mass-in-function\n");
1555 assert(!Working.empty() && "no blocks in function");
1556 assert(!Working[0].isLoopHeader() && "entry block is a loop header");
// Node 0 is the function entry (first in reverse post-order).
1558 Working[0].Mass = BlockMass::getFull();
1559 for (rpot_iterator I = rpot_begin(), IE = rpot_end(); I != IE; ++I) {
1560 // Check for nodes that have been packaged.
1561 BlockNode Node = getNode(I);
1562 if (Working[Node.Index].hasLoopHeader())
// OuterLoop == nullptr: every edge is treated as a local edge.
1565 propagateMassToSuccessors(nullptr, Node);
// Split Node's current mass among its successors in the context of
// OuterLoop (nullptr when distributing at the top level).
1571 BlockFrequencyInfoImpl<BT>::propagateMassToSuccessors(LoopData *OuterLoop,
1572 const BlockNode &Node) {
1573 DEBUG(dbgs() << " - node: " << getBlockName(Node) << "\n");
1574 // Calculate probability for successors.
// A header of a loop other than OuterLoop stands for a packaged
// sub-loop: take its successor weights from the saved exit masses.
1576 if (Working[Node.Index].isLoopHeader() &&
1577 Working[Node.Index].Loop != OuterLoop)
1578 addLoopSuccessorsToDist(OuterLoop, *Working[Node.Index].Loop, Dist);
// Otherwise read edge weights from BranchProbabilityInfo.
1580 const BlockT *BB = getBlock(Node);
1581 for (auto SI = Successor::child_begin(BB), SE = Successor::child_end(BB);
1583 // Do not dereference SI, or getEdgeWeight() is linear in the number of
1585 addToDist(Dist, OuterLoop, Node, getNode(*SI),
1586 BPI->getEdgeWeight(BB, SI));
1589 // Distribute mass to successors, saving exit and backedge data in the
1591 distributeMass(Node, OuterLoop, Dist);
1595 raw_ostream &BlockFrequencyInfoImpl<BT>::print(raw_ostream &OS) const {
1598 OS << "block-frequency-info: " << F->getName() << "\n";
1599 for (const BlockT &BB : *F)
1600 OS << " - " << bfi_detail::getBlockName(&BB)
1601 << ": float = " << getFloatingBlockFreq(&BB)
1602 << ", int = " << getBlockFreq(&BB).getFrequency() << "\n";
1604 // Add an extra newline for readability.