STATISTIC(NumT2BrShrunk, "Number of Thumb2 immediate branches shrunk");
STATISTIC(NumCBZ, "Number of CBZ / CBNZ formed");
STATISTIC(NumJTMoved, "Number of jump table destination blocks moved");
+STATISTIC(NumJTInserted, "Number of jump table intermediate blocks inserted");
static cl::opt<bool>
-AdjustJumpTableBlocks("arm-adjust-jump-tables", cl::Hidden, cl::init(false),
+AdjustJumpTableBlocks("arm-adjust-jump-tables", cl::Hidden, cl::init(true),
cl::desc("Adjust basic block layout to better use TB[BH]"));
namespace {
/// HasFarJump - True if any far jump instruction has been emitted during
/// the branch fix up pass.
bool HasFarJump;
+ /// HasInlineAsm - True if the function contains inline assembly.
+ bool HasInlineAsm;
+
const TargetInstrInfo *TII;
const ARMSubtarget *STI;
ARMFunctionInfo *AFI;
void DoInitialPlacement(MachineFunction &MF,
std::vector<MachineInstr*> &CPEMIs);
CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
+ void JumpTableFunctionScan(MachineFunction &MF);
void InitialFunctionScan(MachineFunction &MF,
const std::vector<MachineInstr*> &CPEMIs);
MachineBasicBlock *SplitBlockBeforeInstr(MachineInstr *MI);
bool UndoLRSpillRestore();
bool OptimizeThumb2Instructions(MachineFunction &MF);
bool OptimizeThumb2Branches(MachineFunction &MF);
+ bool ReorderThumb2JumpTables(MachineFunction &MF);
bool OptimizeThumb2JumpTables(MachineFunction &MF);
MachineBasicBlock *AdjustJTTargetBlockForward(MachineBasicBlock *BB,
MachineBasicBlock *JTBB);
if (!MBB->empty() &&
MBB->begin()->getOpcode() == ARM::CONSTPOOL_ENTRY) {
unsigned MBBId = MBB->getNumber();
- assert((BBOffsets[MBBId]%4 == 0 && BBSizes[MBBId]%4 == 0) ||
+ assert(HasInlineAsm ||
+ (BBOffsets[MBBId]%4 == 0 && BBSizes[MBBId]%4 == 0) ||
(BBOffsets[MBBId]%4 != 0 && BBSizes[MBBId]%4 != 0));
}
}
+ for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
+ CPUser &U = CPUsers[i];
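+ // Offsets are measured from the PC value the hardware uses, which is the
+ // instruction's own address plus 4 in Thumb mode and plus 8 in ARM mode.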
+ unsigned UserOffset = GetOffsetOf(U.MI) + (isThumb ? 4 : 8);
+ unsigned CPEOffset = GetOffsetOf(U.CPEMI);
+ unsigned Disp = UserOffset < CPEOffset ? CPEOffset - UserOffset :
+ UserOffset - CPEOffset;
+ assert(Disp <= U.MaxDisp && "Constant pool entry out of range!");
+ }
#endif
}
isThumb2 = AFI->isThumb2Function();
HasFarJump = false;
+ HasInlineAsm = false;
// Renumber all of the machine basic blocks in the function, guaranteeing that
// the numbers agree with the position of the block in the function.
MF.RenumberBlocks();
+ // Try to reorder and otherwise adjust the block layout to make good use
+ // of the TB[BH] instructions.
+ bool MadeChange = false;
+ if (isThumb2 && AdjustJumpTableBlocks) {
+ JumpTableFunctionScan(MF);
+ MadeChange |= ReorderThumb2JumpTables(MF);
+ // Data is out of date, so clear it. It'll be re-computed later.
+ T2JumpTables.clear();
+ // Blocks may have shifted around. Keep the numbering up to date.
+ MF.RenumberBlocks();
+ }
+
// Thumb1 functions containing constant pools get 4-byte alignment.
// This is so we can keep exact track of where the alignment padding goes.
- // Set default. Thumb1 function is 2-byte aligned, ARM and Thumb2 are 4-byte
- // aligned.
- AFI->setAlign(isThumb1 ? 1U : 2U);
+ // ARM and Thumb2 functions need to be 4-byte aligned.
+ if (!isThumb1)
+ MF.EnsureAlignment(2); // 2 = log2(4)
// Perform the initial placement of the constant pool entries. To start with,
// we put them all at the end of the function.
if (!MCP.isEmpty()) {
DoInitialPlacement(MF, CPEMIs);
if (isThumb1)
- AFI->setAlign(2U);
+ MF.EnsureAlignment(2); // 2 = log2(4)
}
/// The next UID to take is the first unused one.
// sizes of each block, the location of all the water, and finding all of the
// constant pool users.
InitialFunctionScan(MF, CPEMIs);
-
- bool MadeChange = false;
- if (isThumb2)
- MadeChange |= OptimizeThumb2JumpTables(MF);
-
CPEMIs.clear();
/// Remove dead constant pool entries.
// aligned.
assert((Size & 3) == 0 && "CP Entry not multiple of 4 bytes!");
MachineInstr *CPEMI =
- BuildMI(BB, DebugLoc::getUnknownLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
- .addImm(i).addConstantPoolIndex(i).addImm(Size);
+ BuildMI(BB, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
+ .addImm(i).addConstantPoolIndex(i).addImm(Size);
CPEMIs.push_back(CPEMI);
// Add a new CPEntry, but no corresponding CPUser yet.
static bool BBHasFallthrough(MachineBasicBlock *MBB) {
// Get the next machine basic block in the function.
MachineFunction::iterator MBBI = MBB;
- if (next(MBBI) == MBB->getParent()->end()) // Can't fall off end of function.
+ if (llvm::next(MBBI) == MBB->getParent()->end()) // Can't fall off end of function.
return false;
- MachineBasicBlock *NextBB = next(MBBI);
+ MachineBasicBlock *NextBB = llvm::next(MBBI);
for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
E = MBB->succ_end(); I != E; ++I)
if (*I == NextBB)
return true;
}
+/// JumpTableFunctionScan - Do a scan of the function, building up
+/// information about the sizes of each block and the locations of all
+/// the jump tables.
+void ARMConstantIslands::JumpTableFunctionScan(MachineFunction &MF) {
+ for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
+ MBBI != E; ++MBBI) {
+ MachineBasicBlock &MBB = *MBBI;
+
+ for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
+ I != E; ++I)
+ if (I->getDesc().isBranch() && I->getOpcode() == ARM::t2BR_JT)
+ T2JumpTables.push_back(I);
+ }
+}
+
/// InitialFunctionScan - Do the initial scan of the function, building up
/// information about the sizes of each block, the location of all the water,
/// and finding all of the constant pool users.
void ARMConstantIslands::InitialFunctionScan(MachineFunction &MF,
const std::vector<MachineInstr*> &CPEMIs) {
+ // First thing, see if the function has any inline assembly in it. If so,
+ // we have to be conservative about alignment assumptions, as we don't
+ // know for sure the size of any instructions in the inline assembly.
+ for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
+ MBBI != E; ++MBBI) {
+ MachineBasicBlock &MBB = *MBBI;
+ for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
+ I != E; ++I)
+ if (I->getOpcode() == ARM::INLINEASM)
+ HasInlineAsm = true;
+ }
+
+ // Now go back through the instructions and build up our data structures.
unsigned Offset = 0;
for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
MBBI != E; ++MBBI) {
case ARM::tBR_JTr:
// A Thumb1 table jump may involve padding; for the offsets to
// be right, functions containing these must be 4-byte aligned.
- AFI->setAlign(2U);
- if ((Offset+MBBSize)%4 != 0)
+ MF.EnsureAlignment(2U);
+ if ((Offset+MBBSize)%4 != 0 || HasInlineAsm)
// FIXME: Add a pseudo ALIGN instruction instead.
MBBSize += 2; // padding
continue; // Does not get an entry in ImmBranches
case ARM::LEApcrel:
// This takes a SoImm, which is 8 bit immediate rotated. We'll
// pretend the maximum offset is 255 * 4. Since each instruction
- // 4 byte wide, this is always correct. We'llc heck for other
+ // is 4 bytes wide, this is always correct. We'll check for other
// displacements that fit in a SoImm as well.
Bits = 8;
Scale = 4;
if (isThumb &&
!MBB.empty() &&
MBB.begin()->getOpcode() == ARM::CONSTPOOL_ENTRY &&
- (Offset%4) != 0)
+ ((Offset%4) != 0 || HasInlineAsm))
MBBSize += 2;
BBSizes.push_back(MBBSize);
// alignment padding, and compensate if so.
if (isThumb &&
MI->getOpcode() == ARM::CONSTPOOL_ENTRY &&
- Offset%4 != 0)
+ (Offset%4 != 0 || HasInlineAsm))
Offset += 2;
// Sum instructions before MI in MBB.
// There doesn't seem to be meaningful DebugInfo available; this doesn't
// correspond to anything in the source.
unsigned Opc = isThumb ? (isThumb2 ? ARM::t2B : ARM::tB) : ARM::B;
- BuildMI(OrigBB, DebugLoc::getUnknownLoc(), TII->get(Opc)).addMBB(NewBB);
+ BuildMI(OrigBB, DebugLoc(), TII->get(Opc)).addMBB(NewBB);
NumSplit++;
// Update the CFG. All succs of OrigBB are now succs of NewBB.
// This pass should be run after register allocation, so there should be no
// PHI nodes to update.
- assert((Succ->empty() || Succ->begin()->getOpcode() != TargetInstrInfo::PHI)
+ assert((Succ->empty() || !Succ->begin()->isPHI())
&& "PHI nodes should be eliminated by now!");
}
CompareMBBNumbers);
MachineBasicBlock* WaterBB = *IP;
if (WaterBB == OrigBB)
- WaterList.insert(next(IP), NewBB);
+ WaterList.insert(llvm::next(IP), NewBB);
else
WaterList.insert(IP, OrigBB);
NewWaterList.insert(OrigBB);
MachineInstr *CPEMI, unsigned MaxDisp,
bool NegOk, bool DoDump) {
unsigned CPEOffset = GetOffsetOf(CPEMI);
- assert(CPEOffset%4 == 0 && "Misaligned CPE");
+ assert((CPEOffset%4 == 0 || HasInlineAsm) && "Misaligned CPE");
if (DoDump) {
DEBUG(errs() << "User of CPE#" << CPEMI->getOperand(0).getImm()
void ARMConstantIslands::AdjustBBOffsetsAfter(MachineBasicBlock *BB,
int delta) {
- MachineFunction::iterator MBBI = BB; MBBI = next(MBBI);
+ MachineFunction::iterator MBBI = BB; MBBI = llvm::next(MBBI);
for(unsigned i = BB->getNumber()+1, e = BB->getParent()->getNumBlockIDs();
i < e; ++i) {
BBOffsets[i] += delta;
if (!isThumb)
continue;
MachineBasicBlock *MBB = MBBI;
- if (!MBB->empty()) {
+ if (!MBB->empty() && !HasInlineAsm) {
// Constant pool entries require padding.
if (MBB->begin()->getOpcode() == ARM::CONSTPOOL_ENTRY) {
unsigned OldOffset = BBOffsets[i] - delta;
if (delta==0)
return;
}
- MBBI = next(MBBI);
+ MBBI = llvm::next(MBBI);
}
}
DEBUG(errs() << "Split at end of block\n");
if (&UserMBB->back() == UserMI)
assert(BBHasFallthrough(UserMBB) && "Expected a fallthrough BB!");
- NewMBB = next(MachineFunction::iterator(UserMBB));
+ NewMBB = llvm::next(MachineFunction::iterator(UserMBB));
// Add an unconditional branch from UserMBB to fallthrough block.
// Record it for branch lengthening; this new branch will not get out of
// range, but if the preceding conditional branch is out of range, the
// targets will be exchanged, and the altered branch may be out of
// range, so the machinery has to know about it.
int UncondBr = isThumb ? ((isThumb2) ? ARM::t2B : ARM::tB) : ARM::B;
- BuildMI(UserMBB, DebugLoc::getUnknownLoc(),
- TII->get(UncondBr)).addMBB(NewMBB);
+ BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB);
unsigned MaxDisp = getUnconditionalBrDisp(UncondBr);
ImmBranches.push_back(ImmBranch(&UserMBB->back(),
MaxDisp, false, UncondBr));
for (unsigned Offset = UserOffset+TII->GetInstSizeInBytes(UserMI);
Offset < BaseInsertOffset;
Offset += TII->GetInstSizeInBytes(MI),
- MI = next(MI)) {
+ MI = llvm::next(MI)) {
if (CPUIndex < CPUsers.size() && CPUsers[CPUIndex].MI == MI) {
CPUser &U = CPUsers[CPUIndex];
if (!OffsetIsInRange(Offset, EndInsertOffset,
NewWaterList.insert(NewIsland);
}
// The new CPE goes before the following block (NewMBB).
- NewMBB = next(MachineFunction::iterator(WaterBB));
+ NewMBB = llvm::next(MachineFunction::iterator(WaterBB));
} else {
// No water found.
// Now that we have an island to add the CPE to, clone the original CPE and
// add it to the island.
U.HighWaterMark = NewIsland;
- U.CPEMI = BuildMI(NewIsland, DebugLoc::getUnknownLoc(),
- TII->get(ARM::CONSTPOOL_ENTRY))
+ U.CPEMI = BuildMI(NewIsland, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
.addImm(ID).addConstantPoolIndex(CPI).addImm(Size);
CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1));
NumCPEs++;
BBOffsets[NewIsland->getNumber()] = BBOffsets[NewMBB->getNumber()];
// Compensate for .align 2 in thumb mode.
- if (isThumb && BBOffsets[NewIsland->getNumber()]%4 != 0)
+ if (isThumb && (BBOffsets[NewIsland->getNumber()]%4 != 0 || HasInlineAsm))
Size += 2;
// Increase the size of the island block to account for the new entry.
BBSizes[NewIsland->getNumber()] += Size;
NumCBrFixed++;
if (BMI != MI) {
- if (next(MachineBasicBlock::iterator(MI)) == prior(MBB->end()) &&
+ if (llvm::next(MachineBasicBlock::iterator(MI)) == prior(MBB->end()) &&
BMI->getOpcode() == Br.UncondBr) {
// Last MI in the BB is an unconditional branch. Can we simply invert the
// condition and swap destinations:
// branch to the destination.
int delta = TII->GetInstSizeInBytes(&MBB->back());
BBSizes[MBB->getNumber()] -= delta;
- MachineBasicBlock* SplitBB = next(MachineFunction::iterator(MBB));
+ MachineBasicBlock* SplitBB = llvm::next(MachineFunction::iterator(MBB));
AdjustBBOffsetsAfter(SplitBB, -delta);
MBB->back().eraseFromParent();
// BBOffsets[SplitBB] is wrong temporarily, fixed below
}
- MachineBasicBlock *NextBB = next(MachineFunction::iterator(MBB));
+ MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));
DEBUG(errs() << " Insert B to BB#" << DestBB->getNumber()
<< " also invert condition and change dest. to BB#"
// Insert a new conditional branch and a new unconditional branch.
// Also update the ImmBranch as well as adding a new entry for the new branch.
- BuildMI(MBB, DebugLoc::getUnknownLoc(),
- TII->get(MI->getOpcode()))
+ BuildMI(MBB, DebugLoc(), TII->get(MI->getOpcode()))
.addMBB(NextBB).addImm(CC).addReg(CCReg);
Br.MI = &MBB->back();
BBSizes[MBB->getNumber()] += TII->GetInstSizeInBytes(&MBB->back());
- BuildMI(MBB, DebugLoc::getUnknownLoc(), TII->get(Br.UncondBr)).addMBB(DestBB);
+ BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB);
BBSizes[MBB->getNumber()] += TII->GetInstSizeInBytes(&MBB->back());
unsigned MaxDisp = getUnconditionalBrDisp(Br.UncondBr);
ImmBranches.push_back(ImmBranch(&MBB->back(), MaxDisp, false, Br.UncondBr));
bool MadeChange = false;
for (unsigned i = 0, e = PushPopMIs.size(); i != e; ++i) {
MachineInstr *MI = PushPopMIs[i];
- // First two operands are predicates, the third is a zero since there
- // is no writeback.
+ // First two operands are predicates.
if (MI->getOpcode() == ARM::tPOP_RET &&
- MI->getOperand(3).getReg() == ARM::PC &&
- MI->getNumExplicitOperands() == 4) {
+ MI->getOperand(2).getReg() == ARM::PC &&
+ MI->getNumExplicitOperands() == 3) {
BuildMI(MI->getParent(), MI->getDebugLoc(), TII->get(ARM::tBX_RET));
MI->eraseFromParent();
MadeChange = true;
}
MadeChange |= OptimizeThumb2Branches(MF);
+ MadeChange |= OptimizeThumb2JumpTables(MF);
return MadeChange;
}
return MadeChange;
}
-
/// OptimizeThumb2JumpTables - Use tbb / tbh instructions to generate smaller
/// jumptables when it's possible.
bool ARMConstantIslands::OptimizeThumb2JumpTables(MachineFunction &MF) {
// FIXME: After the tables are shrunk, can we get rid of some of the
// constant pool tables?
MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
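+ // getJumpTableInfo() returns null when the function has no jump tables,
+ // in which case there is nothing to shrink.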
+ if (MJTI == 0) return false;
+
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
MachineInstr *MI = T2JumpTables[i];
unsigned JTI = JTOP.getIndex();
assert(JTI < JT.size());
- // We prefer if target blocks for the jump table come after the jump
- // instruction so we can use TB[BH]. Loop through the target blocks
- // and try to adjust them such that that's true.
- unsigned JTOffset = GetOffsetOf(MI) + 4;
- const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
- if (AdjustJumpTableBlocks) {
- for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
- MachineBasicBlock *MBB = JTBBs[j];
- unsigned DstOffset = BBOffsets[MBB->getNumber()];
-
- if (DstOffset < JTOffset) {
- // The destination precedes the switch. Try to move the block forward
- // so we have a positive offset.
- MachineBasicBlock *NewBB =
- AdjustJTTargetBlockForward(MBB, MI->getParent());
- if (NewBB) {
- MJTI->ReplaceMBBInJumpTables(JTBBs[j], NewBB);
- JTOffset = GetOffsetOf(MI) + 4;
- DstOffset = BBOffsets[MBB->getNumber()];
- }
- }
- }
- }
-
bool ByteOk = true;
bool HalfWordOk = true;
- JTOffset = GetOffsetOf(MI) + 4;
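+ // TB[BH] adds a scaled table entry to the PC, which in Thumb reads as the
+ // address of the jump instruction plus 4; hence the base offset here.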
+ unsigned JTOffset = GetOffsetOf(MI) + 4;
+ const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
MachineBasicBlock *MBB = JTBBs[j];
unsigned DstOffset = BBOffsets[MBB->getNumber()];
return MadeChange;
}
+/// ReorderThumb2JumpTables - Adjust the function's block layout to ensure that
+/// jump tables always branch forwards, since that's what tbb and tbh need.
+bool ARMConstantIslands::ReorderThumb2JumpTables(MachineFunction &MF) {
+ bool MadeChange = false;
+
+ MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
+ if (MJTI == 0) return false;
+
+ const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
+ for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
+ MachineInstr *MI = T2JumpTables[i];
+ const TargetInstrDesc &TID = MI->getDesc();
+ unsigned NumOps = TID.getNumOperands();
+ unsigned JTOpIdx = NumOps - (TID.isPredicable() ? 3 : 2);
+ MachineOperand JTOP = MI->getOperand(JTOpIdx);
+ unsigned JTI = JTOP.getIndex();
+ assert(JTI < JT.size());
+
+ // We prefer the target blocks for the jump table to come after the jump
+ // instruction so we can use TB[BH]. Loop through the target blocks
+ // and try to adjust them so that is the case.
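+ // After RenumberBlocks, block numbers follow layout order, so comparing
+ // block numbers is equivalent to comparing positions in the function.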
+ int JTNumber = MI->getParent()->getNumber();
+ const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
+ for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
+ MachineBasicBlock *MBB = JTBBs[j];
+ int DTNumber = MBB->getNumber();
+
+ if (DTNumber < JTNumber) {
+ // The destination precedes the switch. Try to move the block forward
+ // so we have a positive offset.
+ MachineBasicBlock *NewBB =
+ AdjustJTTargetBlockForward(MBB, MI->getParent());
+ if (NewBB)
+ MJTI->ReplaceMBBInJumpTable(JTI, JTBBs[j], NewBB);
+ MadeChange = true;
+ }
+ }
+ }
+
+ return MadeChange;
+}
+
MachineBasicBlock *ARMConstantIslands::
AdjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB)
{
MachineFunction &MF = *BB->getParent();
- // FIXME: For now, instead of moving the block, we'll create a new block
- // immediate following the jump that's an unconditional branch to the
- // actual target. This is obviously not what we want for a real solution,
- // but it's useful for proof of concept, and it may be a useful fallback
- // later for cases where we otherwise can't move a block.
+ // If the destination block is terminated by an unconditional branch,
+ // try to move it; otherwise, create a new block following the jump
+ // table that branches back to the actual target. This is a very simple
+ // heuristic. FIXME: We can definitely improve it.
+ MachineBasicBlock *TBB = 0, *FBB = 0;
+ SmallVector<MachineOperand, 4> Cond;
+ SmallVector<MachineOperand, 4> CondPrior;
+ MachineFunction::iterator BBi = BB;
+ MachineFunction::iterator OldPrior = prior(BBi);
+
+ // If the block terminator isn't analyzable, don't try to move the block
+ bool B = TII->AnalyzeBranch(*BB, TBB, FBB, Cond);
+
+ // If the block ends in an unconditional branch, move it. The prior block
+ // has to have an analyzable terminator for us to move this one. Be paranoid
+ // and make sure we're not trying to move the entry block of the function.
+ if (!B && Cond.empty() && BB != MF.begin() &&
+ !TII->AnalyzeBranch(*OldPrior, TBB, FBB, CondPrior)) {
+ BB->moveAfter(JTBB);
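+ // Both blocks have new layout successors, so let each of them re-emit or
+ // remove its fall-through branch to match the new layout.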
+ OldPrior->updateTerminator();
+ BB->updateTerminator();
+ // Update numbering to account for the block being moved.
+ MF.RenumberBlocks();
+ ++NumJTMoved;
+ return NULL;
+ }
// Create a new MBB for the code after the jump BB.
MachineBasicBlock *NewBB =
// There doesn't seem to be meaningful DebugInfo available; this doesn't
// correspond directly to anything in the source.
assert (isThumb2 && "Adjusting for TB[BH] but not in Thumb2?");
- BuildMI(NewBB, DebugLoc::getUnknownLoc(), TII->get(ARM::t2B)).addMBB(BB);
+ BuildMI(NewBB, DebugLoc(), TII->get(ARM::t2B)).addMBB(BB);
+
+ // Update internal data structures to account for the newly inserted MBB.
+ MF.RenumberBlocks(NewBB);
// Update the CFG.
NewBB->addSuccessor(BB);
JTBB->removeSuccessor(BB);
JTBB->addSuccessor(NewBB);
- // Update internal data structures to account for the newly inserted MBB.
- // Don't mark the new block as having water following it, as we want the
- // blocks following the jump table to be as close together as possible.
- MF.RenumberBlocks(NewBB);
-
- // Insert a size into BBSizes to align it properly with the (newly
- // renumbered) block numbers.
- BBSizes.insert(BBSizes.begin()+NewBB->getNumber(), 0);
-
- // Likewise for BBOffsets.
- BBOffsets.insert(BBOffsets.begin()+NewBB->getNumber(), 0);
-
- // Figure out how large the first NewMBB is.
- unsigned NewBBSize = 0;
- for (MachineBasicBlock::iterator I = NewBB->begin(), E = NewBB->end();
- I != E; ++I)
- NewBBSize += TII->GetInstSizeInBytes(I);
-
- unsigned NewBBI = NewBB->getNumber();
- unsigned JTBBI = JTBB->getNumber();
- // Set the size of NewBB in BBSizes.
- BBSizes[NewBBI] = NewBBSize;
-
- // ...and adjust BBOffsets for NewBB accordingly.
- BBOffsets[NewBBI] = BBOffsets[JTBBI] + BBSizes[JTBBI];
-
- // All BBOffsets following these blocks must be modified.
- AdjustBBOffsetsAfter(NewBB, 4);
-
- ++NumJTMoved;
+ ++NumJTInserted;
return NewBB;
}