1 //===-- ARM64BranchRelaxation.cpp - ARM64 branch relaxation ---------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 //===----------------------------------------------------------------------===//
12 #define DEBUG_TYPE "arm64-branch-relax"
14 #include "ARM64InstrInfo.h"
15 #include "ARM64MachineFunctionInfo.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/CodeGen/MachineFunctionPass.h"
18 #include "llvm/CodeGen/MachineInstrBuilder.h"
19 #include "llvm/Support/Debug.h"
20 #include "llvm/Support/ErrorHandling.h"
21 #include "llvm/Support/Format.h"
22 #include "llvm/Support/raw_ostream.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/Support/CommandLine.h"
28 BranchRelaxation("arm64-branch-relax", cl::Hidden, cl::init(true),
29 cl::desc("Relax out of range conditional branches"));
31 static cl::opt<unsigned>
32 TBZDisplacementBits("arm64-tbz-offset-bits", cl::Hidden, cl::init(14),
33 cl::desc("Restrict range of TB[N]Z instructions (DEBUG)"));
35 static cl::opt<unsigned>
36 CBZDisplacementBits("arm64-cbz-offset-bits", cl::Hidden, cl::init(19),
37 cl::desc("Restrict range of CB[N]Z instructions (DEBUG)"));
39 static cl::opt<unsigned>
40 BCCDisplacementBits("arm64-bcc-offset-bits", cl::Hidden, cl::init(19),
41 cl::desc("Restrict range of Bcc instructions (DEBUG)"));
43 STATISTIC(NumSplit, "Number of basic blocks split");
44 STATISTIC(NumRelaxed, "Number of conditional branches relaxed");
47 class ARM64BranchRelaxation : public MachineFunctionPass {
48 /// BasicBlockInfo - Information about the offset and size of a single
50 struct BasicBlockInfo {
51 /// Offset - Distance from the beginning of the function to the beginning
52 /// of this basic block.
54 /// The offset is always aligned as required by the basic block.
57 /// Size - Size of the basic block in bytes. If the block contains
58 /// inline assembly, this is a worst case estimate.
60 /// The size does not include any alignment padding whether from the
61 /// beginning of the block, or from an aligned jump table at the end.
64 BasicBlockInfo() : Offset(0), Size(0) {}
66 /// Compute the offset immediately following this block. If LogAlign is
67 /// specified, return the offset the successor block will get if it has
69 unsigned postOffset(unsigned LogAlign = 0) const {
70 unsigned PO = Offset + Size;
71 unsigned Align = 1 << LogAlign;
72 return (PO + Align - 1) / Align * Align;
76 SmallVector<BasicBlockInfo, 16> BlockInfo;
79 const ARM64InstrInfo *TII;
81 bool relaxBranchInstructions();
83 MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
84 void adjustBlockOffsets(MachineBasicBlock *BB);
85 bool isBlockInRange(MachineInstr *MI, MachineBasicBlock *BB, unsigned Disp);
86 bool fixupConditionalBranch(MachineInstr *MI);
87 void computeBlockSize(MachineBasicBlock *MBB);
88 unsigned getInstrOffset(MachineInstr *MI) const;
94 ARM64BranchRelaxation() : MachineFunctionPass(ID) {}
96 virtual bool runOnMachineFunction(MachineFunction &MF);
98 virtual const char *getPassName() const {
99 return "ARM64 branch relaxation pass";
102 char ARM64BranchRelaxation::ID = 0;
105 /// verify - check BBOffsets, BBSizes, alignment of islands
106 void ARM64BranchRelaxation::verify() {
108 unsigned PrevNum = MF->begin()->getNumber();
109 for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end(); MBBI != E;
111 MachineBasicBlock *MBB = MBBI;
112 unsigned Align = MBB->getAlignment();
113 unsigned Num = MBB->getNumber();
114 assert(BlockInfo[Num].Offset % (1u << Align) == 0);
115 assert(!Num || BlockInfo[PrevNum].postOffset() <= BlockInfo[Num].Offset);
121 /// print block size and offset information - debugging
122 void ARM64BranchRelaxation::dumpBBs() {
123 for (auto &MBB : *MF) {
124 const BasicBlockInfo &BBI = BlockInfo[MBB.getNumber()];
125 dbgs() << format("BB#%u\toffset=%08x\t", MBB.getNumber(), BBI.Offset)
126 << format("size=%#x\n", BBI.Size);
130 /// BBHasFallthrough - Return true if the specified basic block can fallthrough
131 /// into the block immediately after it.
132 static bool BBHasFallthrough(MachineBasicBlock *MBB) {
133 // Get the next machine basic block in the function.
134 MachineFunction::iterator MBBI = MBB;
135 // Can't fall off end of function.
136 if (std::next(MBBI) == MBB->getParent()->end())
139 MachineBasicBlock *NextBB = std::next(MBBI);
140 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
149 /// scanFunction - Do the initial scan of the function, building up
150 /// information about each block.
151 void ARM64BranchRelaxation::scanFunction() {
153 BlockInfo.resize(MF->getNumBlockIDs());
155 // First thing, compute the size of all basic blocks, and see if the function
156 // has any inline assembly in it. If so, we have to be conservative about
157 // alignment assumptions, as we don't know for sure the size of any
158 // instructions in the inline assembly.
159 for (MachineFunction::iterator I = MF->begin(), E = MF->end(); I != E; ++I)
162 // Compute block offsets and known bits.
163 adjustBlockOffsets(MF->begin());
166 /// computeBlockSize - Compute the size for MBB.
167 /// This function updates BlockInfo directly.
168 void ARM64BranchRelaxation::computeBlockSize(MachineBasicBlock *MBB) {
170 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
172 Size += TII->GetInstSizeInBytes(I);
173 BlockInfo[MBB->getNumber()].Size = Size;
176 /// getInstrOffset - Return the current offset of the specified machine
177 /// instruction from the start of the function. This offset changes as stuff is
178 /// moved around inside the function.
179 unsigned ARM64BranchRelaxation::getInstrOffset(MachineInstr *MI) const {
180 MachineBasicBlock *MBB = MI->getParent();
182 // The offset is composed of two things: the sum of the sizes of all MBB's
183 // before this instruction's block, and the offset from the start of the block
185 unsigned Offset = BlockInfo[MBB->getNumber()].Offset;
187 // Sum instructions before MI in MBB.
188 for (MachineBasicBlock::iterator I = MBB->begin(); &*I != MI; ++I) {
189 assert(I != MBB->end() && "Didn't find MI in its own basic block?");
190 Offset += TII->GetInstSizeInBytes(I);
195 void ARM64BranchRelaxation::adjustBlockOffsets(MachineBasicBlock *Start) {
196 unsigned PrevNum = Start->getNumber();
197 MachineFunction::iterator MBBI = Start, E = MF->end();
198 for (++MBBI; MBBI != E; ++MBBI) {
199 MachineBasicBlock *MBB = MBBI;
200 unsigned Num = MBB->getNumber();
201 if (!Num) // block zero is never changed from offset zero.
203 // Get the offset and known bits at the end of the layout predecessor.
204 // Include the alignment of the current block.
205 unsigned LogAlign = MBBI->getAlignment();
206 BlockInfo[Num].Offset = BlockInfo[PrevNum].postOffset(LogAlign);
/// Split the basic block containing MI into two blocks, which are joined by
/// an unconditional branch. Update data structures and renumber blocks to
/// account for this change and returns the newly created block.
/// NOTE: Successor list of the original BB is out of date after this function,
/// and must be updated by the caller! Other transforms follow using this
/// utility function, so no point updating now rather than waiting.
// NOTE(review): the 'MachineBasicBlock *' return-type line above this
// definition appears to have been lost in the paste — restore it.
ARM64BranchRelaxation::splitBlockBeforeInstr(MachineInstr *MI) {
  MachineBasicBlock *OrigBB = MI->getParent();

  // Create a new MBB for the code after the OrigBB.
  MachineBasicBlock *NewBB =
      MF->CreateMachineBasicBlock(OrigBB->getBasicBlock());
  MachineFunction::iterator MBBI = OrigBB;
  // NOTE(review): an '++MBBI;' advance appears to be missing here — as
  // written, NewBB would be inserted *before* OrigBB rather than after it.
  // Confirm against upstream.
  MF->insert(MBBI, NewBB);

  // Splice the instructions starting with MI over to NewBB.
  NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end());

  // Add an unconditional branch from OrigBB to NewBB.
  // Note the new unconditional branch is not being recorded.
  // There doesn't seem to be meaningful DebugInfo available; this doesn't
  // correspond to anything in the source.
  BuildMI(OrigBB, DebugLoc(), TII->get(ARM64::B)).addMBB(NewBB);

  // Insert an entry into BlockInfo to align it properly with the block numbers.
  // NOTE(review): this indexes by NewBB->getNumber(); verify whether a
  // MF->RenumberBlocks(NewBB) call belongs before this in the full source.
  BlockInfo.insert(BlockInfo.begin() + NewBB->getNumber(), BasicBlockInfo());

  // Figure out how large the OrigBB is. As the first half of the original
  // block, it cannot contain a tablejump. The size includes
  // the new jump we added. (It should be possible to do this without
  // recounting everything, but it's very confusing, and this is rarely
  // worthwhile.)
  computeBlockSize(OrigBB);

  // Figure out how large the NewMBB is. As the second half of the original
  // block, it may contain a tablejump.
  computeBlockSize(NewBB);

  // All BBOffsets following these blocks must be modified.
  adjustBlockOffsets(OrigBB);
  // NOTE(review): the tail of this function ('++NumSplit; return NewBB; }')
  // appears truncated in the paste.
259 /// isBlockInRange - Returns true if the distance between specific MI and
260 /// specific BB can fit in MI's displacement field.
261 bool ARM64BranchRelaxation::isBlockInRange(MachineInstr *MI,
262 MachineBasicBlock *DestBB,
264 unsigned MaxOffs = ((1 << (Bits - 1)) - 1) << 2;
265 unsigned BrOffset = getInstrOffset(MI);
266 unsigned DestOffset = BlockInfo[DestBB->getNumber()].Offset;
268 DEBUG(dbgs() << "Branch of destination BB#" << DestBB->getNumber()
269 << " from BB#" << MI->getParent()->getNumber()
270 << " max delta=" << MaxOffs << " from " << getInstrOffset(MI)
271 << " to " << DestOffset << " offset "
272 << int(DestOffset - BrOffset) << "\t" << *MI);
274 // Branch before the Dest.
275 if (BrOffset <= DestOffset)
276 return (DestOffset - BrOffset <= MaxOffs);
277 return (BrOffset - DestOffset <= MaxOffs);
280 static bool isConditionalBranch(unsigned Opc) {
295 static MachineBasicBlock *getDestBlock(MachineInstr *MI) {
296 switch (MI->getOpcode()) {
298 assert(0 && "unexpected opcode!");
301 return MI->getOperand(2).getMBB();
307 return MI->getOperand(1).getMBB();
311 static unsigned getOppositeConditionOpcode(unsigned Opc) {
314 assert(0 && "unexpected opcode!");
315 case ARM64::TBNZ: return ARM64::TBZ;
316 case ARM64::TBZ: return ARM64::TBNZ;
317 case ARM64::CBNZW: return ARM64::CBZW;
318 case ARM64::CBNZX: return ARM64::CBZX;
319 case ARM64::CBZW: return ARM64::CBNZW;
320 case ARM64::CBZX: return ARM64::CBNZX;
321 case ARM64::Bcc: return ARM64::Bcc; // Condition is an operand for Bcc.
325 static unsigned getBranchDisplacementBits(unsigned Opc) {
328 assert(0 && "unexpected opcode!");
331 return TBZDisplacementBits;
336 return CBZDisplacementBits;
338 return BCCDisplacementBits;
342 static inline void invertBccCondition(MachineInstr *MI) {
343 assert(MI->getOpcode() == ARM64::Bcc && "Unexpected opcode!");
344 ARM64CC::CondCode CC = (ARM64CC::CondCode)MI->getOperand(0).getImm();
345 CC = ARM64CC::getInvertedCondCode(CC);
346 MI->getOperand(0).setImm((int64_t)CC);
/// fixupConditionalBranch - Fix up a conditional branch whose destination is
/// too far away to fit in its displacement field. It is converted to an inverse
/// conditional branch + an unconditional branch to the destination.
bool ARM64BranchRelaxation::fixupConditionalBranch(MachineInstr *MI) {
  MachineBasicBlock *DestBB = getDestBlock(MI);

  // Add an unconditional branch to the destination and invert the branch
  // condition to jump over it:
  // (the ASCII example that illustrated the transformation appears to be
  // truncated in this paste.)

  // If the branch is at the end of its MBB and that has a fall-through block,
  // direct the updated conditional branch to the fall-through block. Otherwise,
  // split the MBB before the next instruction.
  MachineBasicBlock *MBB = MI->getParent();
  MachineInstr *BMI = &MBB->back();
  bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);

  // NOTE(review): an enclosing 'if (BMI != MI) {' guard appears to be
  // truncated before this condition — confirm against upstream.
  if (std::next(MachineBasicBlock::iterator(MI)) ==
          std::prev(MBB->getLastNonDebugInstr()) &&
      BMI->getOpcode() == ARM64::B) {
    // Last MI in the BB is an unconditional branch. Can we simply invert the
    // condition and swap destinations:
    MachineBasicBlock *NewDest = BMI->getOperand(0).getMBB();
    if (isBlockInRange(MI, NewDest,
                       getBranchDisplacementBits(MI->getOpcode()))) {
      // NOTE(review): the tail of this DEBUG statement ('<< *BMI);') is
      // truncated in the paste.
      DEBUG(dbgs() << "  Invert condition and swap its destination with "
      // Redirect the unconditional branch at DestBB ...
      BMI->getOperand(0).setMBB(DestBB);
      // ... and the conditional branch at NewDest. TB[N]Z's block operand is
      // index 2, the others use index 1.
      // NOTE(review): the 'unsigned OpNum =' prefix and '? 2 : 1;' suffix of
      // this ternary appear truncated in the paste.
      (MI->getOpcode() == ARM64::TBZ || MI->getOpcode() == ARM64::TBNZ)
      MI->getOperand(OpNum).setMBB(NewDest);
      MI->setDesc(TII->get(getOppositeConditionOpcode(MI->getOpcode())));
      if (MI->getOpcode() == ARM64::Bcc)
        invertBccCondition(MI);
      // NOTE(review): the early 'return true;' and block closers that end
      // the swap path appear truncated here, along with the 'if (NeedSplit)'
      // introduction of the split path below.

  // Analyze the branch so we know how to update the successor lists.
  MachineBasicBlock *TBB, *FBB;
  SmallVector<MachineOperand, 2> Cond;
  TII->AnalyzeBranch(*MBB, TBB, FBB, Cond, false);

  MachineBasicBlock *NewBB = splitBlockBeforeInstr(MI);
  // No need for the branch to the next block. We're adding an unconditional
  // branch to the destination.
  int delta = TII->GetInstSizeInBytes(&MBB->back());
  BlockInfo[MBB->getNumber()].Size -= delta;
  MBB->back().eraseFromParent();
  // BlockInfo[SplitBB].Offset is wrong temporarily, fixed below

  // Update the successor lists according to the transformation to follow.
  // Do it here since if there's no split, no update is needed.
  MBB->replaceSuccessor(FBB, NewBB);
  NewBB->addSuccessor(FBB);

  MachineBasicBlock *NextBB = std::next(MachineFunction::iterator(MBB));

  DEBUG(dbgs() << "  Insert B to BB#" << DestBB->getNumber()
               << ", invert condition and change dest. to BB#"
               << NextBB->getNumber() << "\n");

  // Insert a new conditional branch and a new unconditional branch.
  MachineInstrBuilder MIB = BuildMI(
      MBB, DebugLoc(), TII->get(getOppositeConditionOpcode(MI->getOpcode())))
                                .addOperand(MI->getOperand(0));
  // TB[N]Z also carries the bit number as a second operand.
  if (MI->getOpcode() == ARM64::TBZ || MI->getOpcode() == ARM64::TBNZ)
    MIB.addOperand(MI->getOperand(1));
  if (MI->getOpcode() == ARM64::Bcc)
    invertBccCondition(MIB);
  // NOTE(review): an 'MIB.addMBB(NextBB);' appending the inverted branch's
  // destination appears truncated here — confirm against upstream.

  // Account for the two freshly inserted branches in this block's size.
  BlockInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());
  BuildMI(MBB, DebugLoc(), TII->get(ARM64::B)).addMBB(DestBB);
  BlockInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(&MBB->back());

  // Remove the old conditional branch. It may or may not still be in MBB.
  BlockInfo[MI->getParent()->getNumber()].Size -= TII->GetInstSizeInBytes(MI);
  MI->eraseFromParent();

  // Finally, keep the block offsets up to date.
  adjustBlockOffsets(MBB);
  // NOTE(review): the final 'return true; }' appears truncated in the paste.
/// relaxBranchInstructions - One relaxation sweep: for every block whose
/// first terminator is an out-of-range conditional branch, rewrite it via
/// fixupConditionalBranch. Returns whether anything changed so the caller can
/// iterate to a fixed point.
bool ARM64BranchRelaxation::relaxBranchInstructions() {
  bool Changed = false;
  // Relaxing branches involves creating new basic blocks, so re-eval
  // end() for termination.
  for (auto &MBB : *MF) {
    MachineInstr *MI = MBB.getFirstTerminator();
    // NOTE(review): getFirstTerminator() can return MBB.end() for a
    // terminator-less block; can't tell from this paste whether a guard was
    // elided — confirm against upstream.
    if (isConditionalBranch(MI->getOpcode()) &&
        !isBlockInRange(MI, getDestBlock(MI),
                        getBranchDisplacementBits(MI->getOpcode()))) {
      fixupConditionalBranch(MI);
      // NOTE(review): the loop tail ('++NumRelaxed; Changed = true;' with its
      // closers) and the final 'return Changed; }' appear truncated in the
      // paste.
/// runOnMachineFunction - Pass entry point: compute per-block sizes/offsets,
/// then repeatedly relax out-of-range conditional branches until none remain.
bool ARM64BranchRelaxation::runOnMachineFunction(MachineFunction &mf) {
  // NOTE(review): the 'MF = &mf;' assignment appears truncated here — MF is
  // used throughout below.

  // If the pass is disabled, just bail early.
  if (!BranchRelaxation)
    // NOTE(review): the guarded 'return false;' appears truncated here.

  DEBUG(dbgs() << "***** ARM64BranchRelaxation *****\n");

  TII = (const ARM64InstrInfo *)MF->getTarget().getInstrInfo();

  // Renumber all of the machine basic blocks in the function, guaranteeing that
  // the numbers agree with the position of the block in the function.
  MF->RenumberBlocks();

  // Do the initial scan of the function, building up information about the
  // sizes of each block.
  // NOTE(review): the 'scanFunction();' call appears truncated here.

  DEBUG(dbgs() << " Basic blocks before relaxation\n");

  bool MadeChange = false;
  while (relaxBranchInstructions())
    // NOTE(review): the loop body ('MadeChange = true;') appears truncated
    // here.

  // After a while, this might be made debug-only, but it is not expensive.
  // NOTE(review): the 'verify();' call this comment refers to appears
  // truncated here.

  DEBUG(dbgs() << " Basic blocks after relaxation\n");
  DEBUG(dbgs() << '\n'; dumpBBs());
  // NOTE(review): the final 'return MadeChange; }' appears truncated in the
  // paste.
501 /// createARM64BranchRelaxation - returns an instance of the constpool
503 FunctionPass *llvm::createARM64BranchRelaxation() {
504 return new ARM64BranchRelaxation();