1 //===-- AArch64BranchFixupPass.cpp - AArch64 branch fixup -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains a pass that fixes AArch64 branches which have ended up out
11 // of range for their immediate operands.
13 //===----------------------------------------------------------------------===//
16 #include "AArch64InstrInfo.h"
17 #include "Utils/AArch64BaseInfo.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/CodeGen/MachineFunctionPass.h"
20 #include "llvm/CodeGen/MachineInstrBuilder.h"
21 #include "llvm/CodeGen/MachineRegisterInfo.h"
22 #include "llvm/Support/Debug.h"
23 #include "llvm/Support/Format.h"
24 #include "llvm/Support/raw_ostream.h"
27 #define DEBUG_TYPE "aarch64-branch-fixup"
29 STATISTIC(NumSplit, "Number of uncond branches inserted");
30 STATISTIC(NumCBrFixed, "Number of cond branches fixed");
32 /// Return the worst case padding that could result from unknown offset bits.
33 /// This does not include alignment padding caused by known offset bits.
35 /// @param LogAlign log2(alignment)
36 /// @param KnownBits Number of known low offset bits.
37 static inline unsigned UnknownPadding(unsigned LogAlign, unsigned KnownBits) {
// If fewer low offset bits are known than the alignment requires, the
// unknown bits could force up to (alignment - known granularity) bytes of
// padding before an aligned block.
38 if (KnownBits < LogAlign)
39 return (1u << LogAlign) - (1u << KnownBits);
// NOTE(review): the fall-through return (presumably `return 0;`) is elided
// in this excerpt.
44 /// Due to limited PC-relative displacements, conditional branches to distant
45 /// blocks may need converting into an unconditional equivalent. For example:
46 /// tbz w1, #0, far_away
51 class AArch64BranchFixup : public MachineFunctionPass {
52 /// Information about the offset and size of a single basic block.
53 struct BasicBlockInfo {
54 /// Distance from the beginning of the function to the beginning of this
57 /// Offsets are computed assuming worst case padding before an aligned
58 /// block. This means that subtracting basic block offsets always gives a
59 /// conservative estimate of the real distance which may be smaller.
61 /// Because worst case padding is used, the computed offset of an aligned
62 /// block may not actually be aligned.
65 /// Size of the basic block in bytes. If the block contains inline
66 /// assembly, this is a worst case estimate.
68 /// The size does not include any alignment padding whether from the
69 /// beginning of the block, or from an aligned jump table at the end.
72 /// The number of low bits in Offset that are known to be exact. The
73 /// remaining bits of Offset are an upper bound.
76 /// When non-zero, the block contains instructions (inline asm) of unknown
77 /// size. The real size may be smaller than Size bytes by a multiple of 1
// Default-construct with a zeroed record; real values are filled in by
// computeBlockSize() and adjustBBOffsetsAfter().
81 BasicBlockInfo() : Offset(0), Size(0), KnownBits(0), Unalign(0) {}
83 /// Compute the number of known offset bits internally to this block.
84 /// This number should be used to predict worst case padding when
85 /// splitting the block.
86 unsigned internalKnownBits() const {
// Inline asm (Unalign != 0) caps the knowable granularity.
87 unsigned Bits = Unalign ? Unalign : KnownBits;
88 // If the block size isn't a multiple of the known bits, assume the
89 // worst case padding.
90 if (Size & ((1u << Bits) - 1))
91 Bits = countTrailingZeros(Size);
95 /// Compute the offset immediately following this block. If LogAlign is
96 /// specified, return the offset the successor block will get if it has
98 unsigned postOffset(unsigned LogAlign = 0) const {
99 unsigned PO = Offset + Size;
102 // Add alignment padding from the terminator.
103 return PO + UnknownPadding(LogAlign, internalKnownBits());
106 /// Compute the number of known low bits of postOffset. If this block
107 /// contains inline asm, the number of known bits drops to the
108 /// instruction alignment. An aligned terminator may increase the number
110 /// If LogAlign is given, also consider the alignment of the next block.
111 unsigned postKnownBits(unsigned LogAlign = 0) const {
112 return std::max(LogAlign, internalKnownBits());
// Per-block layout records, indexed by MachineBasicBlock::getNumber().
116 std::vector<BasicBlockInfo> BBInfo;
118 /// One per immediate branch, keeping the machine instruction pointer,
119 /// conditional or unconditional, the max displacement, and (if IsCond is
120 /// true) the corresponding inverted branch opcode.
// OffsetBits is the number of bits of PC-relative displacement the branch
// instruction can encode (already scaled to a byte offset).
123 unsigned OffsetBits : 31;
125 ImmBranch(MachineInstr *mi, unsigned offsetbits, bool cond)
126 : MI(mi), OffsetBits(offsetbits), IsCond(cond) {}
129 /// Keep track of all the immediate branch instructions.
131 std::vector<ImmBranch> ImmBranches;
// Target instruction info, cached in runOnMachineFunction().
134 const AArch64InstrInfo *TII;
137 AArch64BranchFixup() : MachineFunctionPass(ID) {}
139 bool runOnMachineFunction(MachineFunction &MF) override;
141 const char *getPassName() const override {
142 return "AArch64 branch fixup pass";
// Internal helpers; see the out-of-line definitions below for details.
146 void initializeFunctionInfo();
147 MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
148 void adjustBBOffsetsAfter(MachineBasicBlock *BB);
149 bool isBBInRange(MachineInstr *MI, MachineBasicBlock *BB,
150 unsigned OffsetBits);
151 bool fixupImmediateBr(ImmBranch &Br);
152 bool fixupConditionalBr(ImmBranch &Br);
154 void computeBlockSize(MachineBasicBlock *MBB);
155 unsigned getOffsetOf(MachineInstr *MI) const;
// Pass identification: the address of ID uniquely identifies the pass
// (it is passed to the MachineFunctionPass base-class constructor).
159 char AArch64BranchFixup::ID = 0;
// Sanity-check the BBInfo table: every block's recorded Offset must be at
// least the conservative post-offset of its layout predecessor.
163 void AArch64BranchFixup::verify() {
165 for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
167 MachineBasicBlock *MBB = MBBI;
168 unsigned MBBId = MBB->getNumber();
// Entry block (MBBId == 0) has no layout predecessor to compare against.
169 assert(!MBBId || BBInfo[MBBId - 1].postOffset() <= BBInfo[MBBId].Offset);
174 /// print block size and offset information - debugging
175 void AArch64BranchFixup::dumpBBs() {
// Dump one line per block: offset, block number, known bits (kb),
// unalignment marker (ua), and size.
177 for (unsigned J = 0, E = BBInfo.size(); J !=E; ++J) {
178 const BasicBlockInfo &BBI = BBInfo[J];
179 dbgs() << format("%08x BB#%u\t", BBI.Offset, J)
180 << " kb=" << unsigned(BBI.KnownBits)
181 << " ua=" << unsigned(BBI.Unalign)
182 << format(" size=%#x\n", BBInfo[J].Size);
187 /// Returns an instance of the branch fixup pass.
// Factory entry point used by the target's pass pipeline; the caller
// (pass manager) takes ownership of the returned pass.
188 FunctionPass *llvm::createAArch64BranchFixupPass() {
189 return new AArch64BranchFixup();
// Main pass driver: scan the function, then iteratively fix out-of-range
// branches until a fixed point is reached.
192 bool AArch64BranchFixup::runOnMachineFunction(MachineFunction &mf) {
194 DEBUG(dbgs() << "***** AArch64BranchFixup ******");
195 TII = (const AArch64InstrInfo*)MF->getTarget().getInstrInfo();
197 // This pass invalidates liveness information when it splits basic blocks.
198 MF->getRegInfo().invalidateLiveness();
200 // Renumber all of the machine basic blocks in the function, guaranteeing that
201 // the numbers agree with the position of the block in the function.
202 MF->RenumberBlocks();
204 // Do the initial scan of the function, building up information about the
205 // sizes of each block and location of each immediate branch.
206 initializeFunctionInfo();
208 // Iteratively fix up branches until there is no change.
209 unsigned NoBRIters = 0;
210 bool MadeChange = false;
212 DEBUG(dbgs() << "Beginning iteration #" << NoBRIters << '\n');
213 bool BRChange = false;
214 for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i)
215 BRChange |= fixupImmediateBr(ImmBranches[i]);
// Fixing one branch can push others out of range, so iterate; bail out
// after 30 iterations rather than loop forever.
216 if (BRChange && ++NoBRIters > 30)
217 report_fatal_error("Branch Fix Up pass failed to converge!");
225 // After a while, this might be made debug-only, but it is not expensive.
228 DEBUG(dbgs() << '\n'; dumpBBs());
236 /// Return true if the specified basic block can fallthrough into the block
237 /// immediately after it.
238 static bool BBHasFallthrough(MachineBasicBlock *MBB) {
239 // Get the next machine basic block in the function.
240 MachineFunction::iterator MBBI = MBB;
241 // Can't fall off end of function.
242 if (std::next(MBBI) == MBB->getParent()->end())
245 MachineBasicBlock *NextBB = std::next(MBBI);
// Scan the successor list; presumably the (elided) loop body returns true
// when NextBB is found among MBB's successors — verify against full source.
246 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
247 E = MBB->succ_end(); I != E; ++I)
254 /// Do the initial scan of the function, building up information about the sizes
255 /// of each block, and each immediate branch.
256 void AArch64BranchFixup::initializeFunctionInfo() {
// One BasicBlockInfo record per block, indexed by block number.
258 BBInfo.resize(MF->getNumBlockIDs());
260 // First thing, compute the size of all basic blocks, and see if the function
261 // has any inline assembly in it. If so, we have to be conservative about
262 // alignment assumptions, as we don't know for sure the size of any
263 // instructions in the inline assembly.
264 for (MachineFunction::iterator I = MF->begin(), E = MF->end(); I != E; ++I)
267 // The known bits of the entry block offset are determined by the function
269 BBInfo.front().KnownBits = MF->getAlignment();
271 // Compute block offsets and known bits.
272 adjustBBOffsetsAfter(MF->begin());
274 // Now go back through the instructions and build up our data structures.
275 for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
277 MachineBasicBlock &MBB = *MBBI;
279 for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
281 if (I->isDebugValue())
284 int Opc = I->getOpcode();
288 // The offsets encoded in instructions here scale by the instruction
289 // size (4 bytes), effectively increasing their range by 2 bits.
293 continue; // Ignore other JT branches
// Test-bit branches (and, per the elided cases, compare-and-branch /
// Bcc) are the immediate branches this pass may need to fix.
294 case AArch64::TBZxii:
295 case AArch64::TBZwii:
296 case AArch64::TBNZxii:
297 case AArch64::TBNZwii:
314 // Record this immediate branch.
315 ImmBranches.push_back(ImmBranch(I, Bits, IsCond));
321 /// Compute the size and some alignment information for MBB. This function
322 /// updates BBInfo directly.
323 void AArch64BranchFixup::computeBlockSize(MachineBasicBlock *MBB) {
324 BasicBlockInfo &BBI = BBInfo[MBB->getNumber()];
// Sum the (worst-case) byte size of every instruction in the block.
328 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
330 BBI.Size += TII->getInstSizeInBytes(*I);
331 // For inline asm, GetInstSizeInBytes returns a conservative estimate.
332 // The actual size may be smaller, but still a multiple of the instr size.
333 if (I->isInlineAsm())
// NOTE(review): the inline-asm handling body (presumably setting
// BBI.Unalign) is elided in this excerpt — confirm against full source.
338 /// Return the current offset of the specified machine instruction from the
339 /// start of the function. This offset changes as stuff is moved around inside
341 unsigned AArch64BranchFixup::getOffsetOf(MachineInstr *MI) const {
342 MachineBasicBlock *MBB = MI->getParent();
344 // The offset is composed of two things: the sum of the sizes of all MBB's
345 // before this instruction's block, and the offset from the start of the block
347 unsigned Offset = BBInfo[MBB->getNumber()].Offset;
349 // Sum instructions before MI in MBB.
350 for (MachineBasicBlock::iterator I = MBB->begin(); &*I != MI; ++I) {
351 assert(I != MBB->end() && "Didn't find MI in its own basic block?");
352 Offset += TII->getInstSizeInBytes(*I);
357 /// Split the basic block containing MI into two blocks, which are joined by
358 /// an unconditional branch. Update data structures and renumber blocks to
359 /// account for this change and returns the newly created block.
361 AArch64BranchFixup::splitBlockBeforeInstr(MachineInstr *MI) {
362 MachineBasicBlock *OrigBB = MI->getParent();
364 // Create a new MBB for the code after the OrigBB.
365 MachineBasicBlock *NewBB =
366 MF->CreateMachineBasicBlock(OrigBB->getBasicBlock());
367 MachineFunction::iterator MBBI = OrigBB; ++MBBI;
368 MF->insert(MBBI, NewBB);
370 // Splice the instructions starting with MI over to NewBB.
371 NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end());
373 // Add an unconditional branch from OrigBB to NewBB.
374 // Note the new unconditional branch is not being recorded.
375 // There doesn't seem to be meaningful DebugInfo available; this doesn't
376 // correspond to anything in the source.
377 BuildMI(OrigBB, DebugLoc(), TII->get(AArch64::Bimm)).addMBB(NewBB);
380 // Update the CFG. All succs of OrigBB are now succs of NewBB.
381 NewBB->transferSuccessors(OrigBB);
383 // OrigBB branches to NewBB.
384 OrigBB->addSuccessor(NewBB);
386 // Update internal data structures to account for the newly inserted MBB.
387 MF->RenumberBlocks(NewBB);
389 // Insert an entry into BBInfo to align it properly with the (newly
390 // renumbered) block numbers.
391 BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());
393 // Figure out how large the OrigBB is. As the first half of the original
394 // block, it cannot contain a tablejump. The size includes
395 // the new jump we added. (It should be possible to do this without
396 // recounting everything, but it's very confusing, and this is rarely
398 computeBlockSize(OrigBB);
400 // Figure out how large the NewMBB is. As the second half of the original
401 // block, it may contain a tablejump.
402 computeBlockSize(NewBB);
404 // All BBOffsets following these blocks must be modified.
405 adjustBBOffsetsAfter(OrigBB);
// Recompute Offset/KnownBits for every block laid out after BB, stopping
// early once the records stabilize.
410 void AArch64BranchFixup::adjustBBOffsetsAfter(MachineBasicBlock *BB) {
411 unsigned BBNum = BB->getNumber();
412 for(unsigned i = BBNum + 1, e = MF->getNumBlockIDs(); i < e; ++i) {
413 // Get the offset and known bits at the end of the layout predecessor.
414 // Include the alignment of the current block.
415 unsigned LogAlign = MF->getBlockNumbered(i)->getAlignment();
416 unsigned Offset = BBInfo[i - 1].postOffset(LogAlign);
417 unsigned KnownBits = BBInfo[i - 1].postKnownBits(LogAlign);
419 // This is where block i begins. Stop if the offset is already correct,
420 // and we have updated 2 blocks. This is the maximum number of blocks
421 // changed before calling this function.
423 BBInfo[i].Offset == Offset &&
424 BBInfo[i].KnownBits == KnownBits)
427 BBInfo[i].Offset = Offset;
428 BBInfo[i].KnownBits = KnownBits;
432 /// Returns true if the distance between specific MI and specific BB can fit in
433 /// MI's displacement field.
434 bool AArch64BranchFixup::isBBInRange(MachineInstr *MI,
435 MachineBasicBlock *DestBB,
436 unsigned OffsetBits) {
// Offsets are unsigned function-relative byte counts; the displacement is
// their signed difference, checked against the branch's encodable range.
437 int64_t BrOffset = getOffsetOf(MI);
438 int64_t DestOffset = BBInfo[DestBB->getNumber()].Offset;
440 DEBUG(dbgs() << "Branch of destination BB#" << DestBB->getNumber()
441 << " from BB#" << MI->getParent()->getNumber()
442 << " bits available=" << OffsetBits
443 << " from " << getOffsetOf(MI) << " to " << DestOffset
444 << " offset " << int(DestOffset-BrOffset) << "\t" << *MI);
// isIntN: does the signed displacement fit in OffsetBits bits?
446 return isIntN(OffsetBits, DestOffset - BrOffset);
449 /// Fix up an immediate branch whose destination is too far away to fit in its
450 /// displacement field.
451 bool AArch64BranchFixup::fixupImmediateBr(ImmBranch &Br) {
452 MachineInstr *MI = Br.MI;
453 MachineBasicBlock *DestBB = nullptr;
// Find the branch's destination: the first MBB operand on the instruction.
454 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
455 if (MI->getOperand(i).isMBB()) {
456 DestBB = MI->getOperand(i).getMBB();
460 assert(DestBB && "Branch with no destination BB?");
462 // Check to see if the DestBB is already in-range.
463 if (isBBInRange(MI, DestBB, Br.OffsetBits))
// Unconditional branches have the widest range, so only conditional
// branches can be out of range here.
466 assert(Br.IsCond && "Only conditional branches should need fixup");
467 return fixupConditionalBr(Br);
470 /// Fix up a conditional branch whose destination is too far away to fit in its
471 /// displacement field. It is converted to an inverse conditional branch + an
472 /// unconditional branch to the destination.
474 AArch64BranchFixup::fixupConditionalBr(ImmBranch &Br) {
475 MachineInstr *MI = Br.MI;
476 MachineBasicBlock *MBB = MI->getParent();
// Index of the destination-MBB operand on the conditional branch; set per
// opcode below (operand 1 for Bcc, discovered by scan for TBZ/CBZ forms).
477 unsigned CondBrMBBOperand = 0;
479 // The general idea is to add an unconditional branch to the destination and
480 // invert the conditional branch to jump over it. Complications occur around
481 // fallthrough and unreachable ends to the block.
488 // First we invert the conditional branch, by creating a replacement if
489 // necessary. This if statement contains all the special handling of different
491 if (MI->getOpcode() == AArch64::Bcc) {
492 // The basic block is operand number 1 for Bcc
493 CondBrMBBOperand = 1;
// Bcc can be inverted in place by flipping its condition-code immediate.
495 A64CC::CondCodes CC = (A64CC::CondCodes)MI->getOperand(0).getImm();
496 CC = A64InvertCondCode(CC);
497 MI->getOperand(0).setImm(CC);
499 MachineInstrBuilder InvertedMI;
// TBZ/TBNZ and CBZ/CBNZ encode the sense in the opcode itself, so a
// replacement instruction with the opposite opcode must be built.
501 switch (MI->getOpcode()) {
502 default: llvm_unreachable("Unknown branch type");
503 case AArch64::TBZxii: InvertedOpcode = AArch64::TBNZxii; break;
504 case AArch64::TBZwii: InvertedOpcode = AArch64::TBNZwii; break;
505 case AArch64::TBNZxii: InvertedOpcode = AArch64::TBZxii; break;
506 case AArch64::TBNZwii: InvertedOpcode = AArch64::TBZwii; break;
507 case AArch64::CBZx: InvertedOpcode = AArch64::CBNZx; break;
508 case AArch64::CBZw: InvertedOpcode = AArch64::CBNZw; break;
509 case AArch64::CBNZx: InvertedOpcode = AArch64::CBZx; break;
510 case AArch64::CBNZw: InvertedOpcode = AArch64::CBZw; break;
513 InvertedMI = BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(InvertedOpcode));
// Copy all operands across, noting which one is the destination block.
514 for (unsigned i = 0, e= MI->getNumOperands(); i != e; ++i) {
515 InvertedMI.addOperand(MI->getOperand(i));
516 if (MI->getOperand(i).isMBB())
517 CondBrMBBOperand = i;
520 MI->eraseFromParent();
521 MI = Br.MI = InvertedMI;
524 // If the branch is at the end of its MBB and that has a fall-through block,
525 // direct the updated conditional branch to the fall-through
526 // block. Otherwise, split the MBB before the next instruction.
527 MachineInstr *BMI = &MBB->back();
528 bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);
532 if (std::next(MachineBasicBlock::iterator(MI)) == std::prev(MBB->end()) &&
533 BMI->getOpcode() == AArch64::Bimm) {
534 // Last MI in the BB is an unconditional branch. We can swap destinations:
535 // b.eq L1 (temporarily b.ne L1 after first change)
540 MachineBasicBlock *NewDest = BMI->getOperand(0).getMBB();
541 if (isBBInRange(MI, NewDest, Br.OffsetBits)) {
542 DEBUG(dbgs() << " Invert Bcc condition and swap its destination with "
// Swap the two branch destinations: the (already inverted) conditional
// branch takes the old unconditional target and vice versa.
544 MachineBasicBlock *DestBB = MI->getOperand(CondBrMBBOperand).getMBB();
545 BMI->getOperand(0).setMBB(DestBB);
546 MI->getOperand(CondBrMBBOperand).setMBB(NewDest);
553 MachineBasicBlock::iterator MBBI = MI; ++MBBI;
554 splitBlockBeforeInstr(MBBI);
555 // No need for the branch to the next block. We're adding an unconditional
556 // branch to the destination.
557 int delta = TII->getInstSizeInBytes(MBB->back());
558 BBInfo[MBB->getNumber()].Size -= delta;
559 MBB->back().eraseFromParent();
560 // BBInfo[SplitBB].Offset is wrong temporarily, fixed below
563 // After splitting and removing the unconditional branch from the original BB,
564 // the structure is now:
568 // splitbb/fallthroughbb:
569 // [old b L2/real continuation]
571 // We now have to change the conditional branch to point to splitbb and add an
572 // unconditional branch after it to L1, giving the final structure:
575 // b.invertedCC splitbb
577 // splitbb/fallthroughbb:
578 // [old b L2/real continuation]
579 MachineBasicBlock *NextBB = std::next(MachineFunction::iterator(MBB));
581 DEBUG(dbgs() << " Insert B to BB#"
582 << MI->getOperand(CondBrMBBOperand).getMBB()->getNumber()
583 << " also invert condition and change dest. to BB#"
584 << NextBB->getNumber() << "\n");
586 // Insert a new unconditional branch and fixup the destination of the
587 // conditional one. Also update the ImmBranch as well as adding a new entry
588 // for the new branch.
589 BuildMI(MBB, DebugLoc(), TII->get(AArch64::Bimm))
590 .addMBB(MI->getOperand(CondBrMBBOperand).getMBB());
591 MI->getOperand(CondBrMBBOperand).setMBB(NextBB);
593 BBInfo[MBB->getNumber()].Size += TII->getInstSizeInBytes(MBB->back());
595 // 26 bits written down in Bimm, specifying a multiple of 4.
596 unsigned OffsetBits = 26 + 2;
597 ImmBranches.push_back(ImmBranch(&MBB->back(), OffsetBits, false));
599 adjustBBOffsetsAfter(MBB);