1 //===-- MachineSink.cpp - Sinking for machine instructions ----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This pass moves instructions into successor blocks when possible, so that
11 // they aren't executed on paths where their results aren't needed.
13 // This pass is not intended to be a replacement or a complete alternative
14 // for an LLVM-IR-level sinking pass. It is only designed to sink simple
15 // constructs that are not exposed before lowering and instruction selection.
17 //===----------------------------------------------------------------------===//
19 #define DEBUG_TYPE "machine-sink"
20 #include "llvm/CodeGen/Passes.h"
21 #include "llvm/CodeGen/MachineRegisterInfo.h"
22 #include "llvm/CodeGen/MachineDominators.h"
23 #include "llvm/CodeGen/MachineLoopInfo.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/Target/TargetRegisterInfo.h"
26 #include "llvm/Target/TargetInstrInfo.h"
27 #include "llvm/Target/TargetMachine.h"
28 #include "llvm/ADT/SmallSet.h"
29 #include "llvm/ADT/Statistic.h"
30 #include "llvm/Support/CommandLine.h"
31 #include "llvm/Support/Debug.h"
32 #include "llvm/Support/raw_ostream.h"
// Hidden command-line flag controlling whether machine sinking may split
// critical edges; defaults to enabled.
// NOTE(review): the declarator head (e.g. "static cl::opt<bool>") is on a
// line elided from this extract -- confirm against the full file.
36 SplitEdges("machine-sink-split",
37 cl::desc("Split critical edges during machine sinking"),
38 cl::init(true), cl::Hidden);
// Pass statistics, reported under -stats.
40 STATISTIC(NumSunk, "Number of machine instructions sunk");
41 STATISTIC(NumSplit, "Number of critical edges split");
42 STATISTIC(NumCoalesces, "Number of copies coalesced");
// MachineSinking - MachineFunctionPass that moves instructions into
// successor blocks so they are not executed on paths where their results
// are unused (see file header).
45 class MachineSinking : public MachineFunctionPass {
// Cached target hooks and analyses, filled in by runOnMachineFunction().
46 const TargetInstrInfo *TII;
47 const TargetRegisterInfo *TRI;
48 MachineRegisterInfo *MRI; // Machine register information
49 MachineDominatorTree *DT; // Machine dominator tree
52 BitVector AllocatableSet; // Which physregs are allocatable?
54 // Remember which edges have been considered for breaking.
55 SmallSet<std::pair<MachineBasicBlock*,MachineBasicBlock*>, 8>
// NOTE(review): the name of the SmallSet member above is on an elided line;
// releaseMemory() below clears a member called CEBCandidates, presumably
// this set -- confirm against the full file.
59 static char ID; // Pass identification
60 MachineSinking() : MachineFunctionPass(ID) {}
// Pass entry point: sink instructions across the whole machine function.
62 virtual bool runOnMachineFunction(MachineFunction &MF);
// Declare the analyses this pass requires and preserves.
64 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
66 MachineFunctionPass::getAnalysisUsage(AU);
67 AU.addRequired<AliasAnalysis>();
68 AU.addRequired<MachineDominatorTree>();
69 AU.addRequired<MachineLoopInfo>();
70 AU.addPreserved<MachineDominatorTree>();
71 AU.addPreserved<MachineLoopInfo>();
// Drop per-function scratch state between pass invocations.
74 virtual void releaseMemory() {
75 CEBCandidates.clear();
// Worker routines; see the out-of-line definitions below for details.
79 bool ProcessBlock(MachineBasicBlock &MBB);
80 bool isWorthBreakingCriticalEdge(MachineInstr *MI,
81 MachineBasicBlock *From,
82 MachineBasicBlock *To);
83 MachineBasicBlock *SplitCriticalEdge(MachineInstr *MI,
84 MachineBasicBlock *From,
85 MachineBasicBlock *To,
87 bool SinkInstruction(MachineInstr *MI, bool &SawStore);
88 bool AllUsesDominatedByBlock(unsigned Reg, MachineBasicBlock *MBB,
89 MachineBasicBlock *DefMBB,
90 bool &BreakPHIEdge, bool &LocalUse) const;
91 bool PerformTrivialForwardCoalescing(MachineInstr *MI,
92 MachineBasicBlock *MBB);
94 } // end anonymous namespace
// Pass identification token; its address uniquely identifies the pass.
96 char MachineSinking::ID = 0;
// Register the pass and its analysis dependencies with the PassRegistry
// under the command-line name "machine-sink".
97 INITIALIZE_PASS_BEGIN(MachineSinking, "machine-sink",
98 "Machine code sinking", false, false)
99 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
100 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
101 INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
102 INITIALIZE_PASS_END(MachineSinking, "machine-sink",
103 "Machine code sinking", false, false)
105 FunctionPass *llvm::createMachineSinkingPass() { return new MachineSinking(); }
// PerformTrivialForwardCoalescing - If MI copies a virtual register that has
// exactly one non-debug use, rewrite that use to read the source register and
// erase the copy, updating the NumCoalesces statistic.
// NOTE(review): several guard lines (the initial copy check, the register
// class comparison, the early "return false"s and final "return true") are
// on lines elided from this extract -- confirm against the full file.
107 bool MachineSinking::PerformTrivialForwardCoalescing(MachineInstr *MI,
108 MachineBasicBlock *MBB) {
// Operand 0 is the copy's destination, operand 1 its source.
112 unsigned SrcReg = MI->getOperand(1).getReg();
113 unsigned DstReg = MI->getOperand(0).getReg();
// Only vreg->vreg copies whose source has a single non-debug use qualify.
114 if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
115 !TargetRegisterInfo::isVirtualRegister(DstReg) ||
116 !MRI->hasOneNonDBGUse(SrcReg))
// Register classes are fetched for comparison (the comparison is elided).
119 const TargetRegisterClass *SRC = MRI->getRegClass(SrcReg);
120 const TargetRegisterClass *DRC = MRI->getRegClass(DstReg);
// Don't coalesce through another copy-like definition of the source.
124 MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
125 if (DefMI->isCopyLike())
127 DEBUG(dbgs() << "Coalescing: " << *DefMI);
128 DEBUG(dbgs() << "*** to: " << *MI);
// Forward SrcReg into every use of DstReg, then delete the dead copy.
129 MRI->replaceRegWith(DstReg, SrcReg);
130 MI->eraseFromParent();
135 /// AllUsesDominatedByBlock - Return true if all uses of the specified register
136 /// occur in blocks dominated by the specified block. If any use is in the
137 /// definition block, then return false since it is never legal to move def
// NOTE(review): the tail of the doc comment above and the "bool" return-type
// line of this definition are on lines elided from this extract.
140 MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
141 MachineBasicBlock *MBB,
142 MachineBasicBlock *DefMBB,
144 bool &LocalUse) const {
// Physical registers have no single def/use web; callers must pass vregs.
145 assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
146 "Only makes sense for vregs");
// A register with no non-debug uses is trivially dominated everywhere.
148 if (MRI->use_nodbg_empty(Reg))
151 // Ignoring debug uses is necessary so debug info doesn't affect the code.
152 // This may leave a referencing dbg_value in the original block, before
153 // the definition of the vreg. Dwarf generator handles this although the
154 // user might not get the right info at runtime.
156 // BreakPHIEdge is true if all the uses are in the successor MBB being sunken
157 // into and they are all PHI nodes. In this case, machine-sink must break
158 // the critical edge first. e.g.
160 // BB#1: derived from LLVM BB %bb4.preheader
161 // Predecessors according to CFG: BB#0
163 // %reg16385<def> = DEC64_32r %reg16437, %EFLAGS<imp-def,dead>
165 // JE_4 <BB#37>, %EFLAGS<imp-use>
166 // Successors according to CFG: BB#37 BB#2
168 // BB#2: derived from LLVM BB %bb.nph
169 // Predecessors according to CFG: BB#0 BB#1
170 // %reg16386<def> = PHI %reg16434, <BB#0>, %reg16385, <BB#1>
// First walk: if any non-debug use is NOT a PHI operand in MBB flowing in
// from DefMBB, clear BreakPHIEdge -- breaking the PHI edge alone would not
// make the sink legal.
172 for (MachineRegisterInfo::use_nodbg_iterator
173 I = MRI->use_nodbg_begin(Reg), E = MRI->use_nodbg_end();
175 MachineInstr *UseInst = &*I;
176 MachineBasicBlock *UseBlock = UseInst->getParent();
// A PHI's register operand at index N has its incoming block at N+1.
177 if (!(UseBlock == MBB && UseInst->isPHI() &&
178 UseInst->getOperand(I.getOperandNo()+1).getMBB() == DefMBB)) {
179 BreakPHIEdge = false;
// Second walk: every use site must be dominated by MBB for the sink to be
// safe.
186 for (MachineRegisterInfo::use_nodbg_iterator
187 I = MRI->use_nodbg_begin(Reg), E = MRI->use_nodbg_end();
189 // Determine the block of the use.
190 MachineInstr *UseInst = &*I;
191 MachineBasicBlock *UseBlock = UseInst->getParent();
192 if (UseInst->isPHI()) {
193 // PHI nodes use the operand in the predecessor block, not the block with
// (rest of that comment elided) -- the incoming block is the real use site.
195 UseBlock = UseInst->getOperand(I.getOperandNo()+1).getMBB();
196 } else if (UseBlock == DefMBB) {
// NOTE(review): the body handling a use inside the definition block
// (presumably setting LocalUse) is on elided lines -- confirm in full file.
201 // Check that it dominates.
202 if (!DT->dominates(MBB, UseBlock))
// runOnMachineFunction - Cache target hooks and analyses, then repeatedly
// sweep every basic block until one full pass makes no change.
209 bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
210 DEBUG(dbgs() << "******** Machine Sinking ********\n");
// Cache the target interfaces and the analyses declared in
// getAnalysisUsage().
212 const TargetMachine &TM = MF.getTarget();
213 TII = TM.getInstrInfo();
214 TRI = TM.getRegisterInfo();
215 MRI = &MF.getRegInfo();
216 DT = &getAnalysis<MachineDominatorTree>();
217 LI = &getAnalysis<MachineLoopInfo>();
218 AA = &getAnalysis<AliasAnalysis>();
219 AllocatableSet = TRI->getAllocatableSet(MF);
221 bool EverMadeChange = false;
// NOTE(review): the outer fixpoint-loop header is on an elided line; the
// "break" below implies the following lines iterate until stable.
224 bool MadeChange = false;
226 // Process all basic blocks.
// Critical-edge candidates are only remembered within one iteration.
227 CEBCandidates.clear();
228 for (MachineFunction::iterator I = MF.begin(), E = MF.end();
230 MadeChange |= ProcessBlock(*I);
232 // If this iteration over the code changed anything, keep iterating.
233 if (!MadeChange) break;
234 EverMadeChange = true;
236 return EverMadeChange;
// ProcessBlock - Attempt trivial coalescing and sinking for every
// instruction in MBB, walking bottom-up so moved instructions do not
// disturb the iteration.  Returns whether anything changed.
239 bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
240 // Can't sink anything out of a block that has less than two successors.
241 if (MBB.succ_size() <= 1 || MBB.empty()) return false;
243 // Don't bother sinking code out of unreachable blocks. In addition to being
244 // unprofitable, it can also lead to infinite looping, because in an
245 // unreachable loop there may be nowhere to stop.
246 if (!DT->isReachableFromEntry(&MBB)) return false;
248 bool MadeChange = false;
250 // Walk the basic block bottom-up. Remember if we saw a store.
251 MachineBasicBlock::iterator I = MBB.end();
253 bool ProcessedBegin, SawStore = false;
// NOTE(review): the do-loop header and the decrement of I are on elided
// lines; the "while (!ProcessedBegin)" below closes that loop.
255 MachineInstr *MI = I; // The instruction to sink.
257 // Predecrement I (if it's not begin) so that it isn't invalidated by
259 ProcessedBegin = I == MBB.begin();
// Leave debug values in place; moving them would perturb debug info.
263 if (MI->isDebugValue())
// Cheap copy coalescing first; a success counts as a change.
266 if (PerformTrivialForwardCoalescing(MI, &MBB))
// Then try the real sink; bump the statistic on success.
269 if (SinkInstruction(MI, SawStore))
270 ++NumSunk, MadeChange = true;
272 // If we just processed the first instruction in the block, we're done.
273 } while (!ProcessedBegin);
// isWorthBreakingCriticalEdge - Heuristic: decide whether sinking MI
// justifies splitting the critical edge From->To.
278 bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr *MI,
279 MachineBasicBlock *From,
280 MachineBasicBlock *To) {
281 // FIXME: Need much better heuristics.
283 // If the pass has already considered breaking this edge (during this pass
284 // through the function), then let's go ahead and break it. This means
285 // sinking multiple "cheap" instructions into the same block.
// SmallSet::insert returns false when the pair was already present.
286 if (!CEBCandidates.insert(std::make_pair(From, To)))
// A non-copy that is not as-cheap-as-a-move is expensive enough to be worth
// the split on its own.
289 if (!MI->isCopy() && !MI->getDesc().isAsCheapAsAMove())
292 // MI is cheap, we probably don't want to break the critical edge for it.
293 // However, if this would allow some definitions of its source operands
294 // to be sunk then it's probably worth it.
295 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
296 const MachineOperand &MO = MI->getOperand(i);
297 if (!MO.isReg()) continue;
298 unsigned Reg = MO.getReg();
// NOTE(review): the continue/return statements governed by the next two
// tests are on elided lines -- confirm the exact logic in the full file.
299 if (Reg == 0 || !TargetRegisterInfo::isPhysicalRegister(Reg))
301 if (MRI->hasOneNonDBGUse(Reg))
// SplitCriticalEdge - Split the edge FromBB->ToBB so MI can be sunk into
// the new block, provided the split is both profitable and legal.  Returns
// the freshly created block, or presumably null when no split is performed
// (the early-return bodies are elided here).
308 MachineBasicBlock *MachineSinking::SplitCriticalEdge(MachineInstr *MI,
309 MachineBasicBlock *FromBB,
310 MachineBasicBlock *ToBB,
// NOTE(review): a fourth parameter line (a BreakPHIEdge flag, judging by the
// callers in SinkInstruction) is on a line elided from this extract.
// Profitability gate first.
312 if (!isWorthBreakingCriticalEdge(MI, FromBB, ToBB))
315 // Avoid breaking back edge. From == To means backedge for single BB loop.
316 if (!SplitEdges || FromBB == ToBB)
319 // Check for backedges of more "complex" loops.
320 if (LI->getLoopFor(FromBB) == LI->getLoopFor(ToBB) &&
321 LI->isLoopHeader(ToBB))
324 // It's not always legal to break critical edges and sink the computation
// (a multi-line CFG example illustrating the hazard is elided here)
332 // ... no uses of v1024
338 // If BB#1 -> BB#3 edge is broken and computation of v1024 is inserted:
347 // ... no uses of v1024
353 // This is incorrect since v1024 is not computed along the BB#1->BB#2->BB#3
354 // flow. We need to ensure the new basic block where the computation is
355 // sunk to dominates all the uses.
356 // It's only legal to break critical edge and sink the computation to the
357 // new block if all the predecessors of "To", except for "From", are
358 // not dominated by "From". Given SSA property, this means these
359 // predecessors are dominated by "To".
361 // There is no need to do this check if all the uses are PHI nodes. PHI
362 // sources are only defined on the specific predecessor edges.
// Legality scan over ToBB's predecessors (the skip of FromBB itself and the
// failing return are on elided lines).
364 for (MachineBasicBlock::pred_iterator PI = ToBB->pred_begin(),
365 E = ToBB->pred_end(); PI != E; ++PI) {
368 if (!DT->dominates(ToBB, *PI))
// All checks passed: let MachineBasicBlock perform the actual CFG surgery.
373 return FromBB->SplitCriticalEdge(ToBB, this);
376 static bool AvoidsSinking(MachineInstr *MI, MachineRegisterInfo *MRI) {
377 return MI->isInsertSubreg() || MI->isSubregToReg() || MI->isRegSequence();
380 /// SinkInstruction - Determine whether it is safe to sink the specified machine
381 /// instruction out of its current block into a successor.
382 bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
383 // Don't sink insert_subreg, subreg_to_reg, reg_sequence. These are meant to
384 // be close to the source to make it easier to coalesce.
385 if (AvoidsSinking(MI, MRI))
388 // Check if it's safe to move the instruction.
389 if (!MI->isSafeToMove(TII, AA, SawStore))
392 // FIXME: This should include support for sinking instructions within the
393 // block they are currently in to shorten the live ranges. We often get
394 // instructions sunk into the top of a large block, but it would be better to
395 // also sink them down before their first use in the block. This xform has to
396 // be careful not to *increase* register pressure though, e.g. sinking
397 // "x = y + z" down if it kills y and z would increase the live ranges of y
398 // and z and only shrink the live range of x.
400 // Loop over all the operands of the specified instruction. If there is
401 // anything we can't handle, bail out.
402 MachineBasicBlock *ParentBlock = MI->getParent();
404 // SuccToSinkTo - This is the successor to sink this instruction to, once we
406 MachineBasicBlock *SuccToSinkTo = 0;
408 bool BreakPHIEdge = false;
409 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
410 const MachineOperand &MO = MI->getOperand(i);
411 if (!MO.isReg()) continue; // Ignore non-register operands.
413 unsigned Reg = MO.getReg();
414 if (Reg == 0) continue;
416 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
418 // If the physreg has no defs anywhere, it's just an ambient register
419 // and we can freely move its uses. Alternatively, if it's allocatable,
420 // it could get allocated to something with a def during allocation.
421 if (!MRI->def_empty(Reg))
424 if (AllocatableSet.test(Reg))
427 // Check for a def among the register's aliases too.
428 for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
429 unsigned AliasReg = *Alias;
430 if (!MRI->def_empty(AliasReg))
433 if (AllocatableSet.test(AliasReg))
436 } else if (!MO.isDead()) {
437 // A def that isn't dead. We can't move it.
441 // Virtual register uses are always safe to sink.
442 if (MO.isUse()) continue;
444 // If it's not safe to move defs of the register class, then abort.
445 if (!TII->isSafeToMoveRegClassDefs(MRI->getRegClass(Reg)))
448 // FIXME: This picks a successor to sink into based on having one
449 // successor that dominates all the uses. However, there are cases where
450 // sinking can happen but where the sink point isn't a successor. For
457 // the instruction could be sunk over the whole diamond for the
458 // if/then/else (or loop, etc), allowing it to be sunk into other blocks
461 // Virtual register defs can only be sunk if all their uses are in blocks
462 // dominated by one of the successors.
464 // If a previous operand picked a block to sink to, then this operand
465 // must be sinkable to the same block.
466 bool LocalUse = false;
467 if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, ParentBlock,
468 BreakPHIEdge, LocalUse))
474 // Otherwise, we should look at all the successors and decide which one
475 // we should sink to.
476 for (MachineBasicBlock::succ_iterator SI = ParentBlock->succ_begin(),
477 E = ParentBlock->succ_end(); SI != E; ++SI) {
478 bool LocalUse = false;
479 if (AllUsesDominatedByBlock(Reg, *SI, ParentBlock,
480 BreakPHIEdge, LocalUse)) {
485 // Def is used locally, it's never safe to move this def.
489 // If we couldn't find a block to sink to, ignore this instruction.
490 if (SuccToSinkTo == 0)
495 // If there are no outputs, it must have side-effects.
496 if (SuccToSinkTo == 0)
499 // It's not safe to sink instructions to EH landing pad. Control flow into
500 // landing pad is implicitly defined.
501 if (SuccToSinkTo->isLandingPad())
504 // It is not possible to sink an instruction into its own block. This can
505 // happen with loops.
506 if (MI->getParent() == SuccToSinkTo)
509 // If the instruction to move defines a dead physical register which is live
510 // when leaving the basic block, don't move it because it could turn into a
511 // "zombie" define of that preg. E.g., EFLAGS. (<rdar://problem/8030636>)
512 for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
513 const MachineOperand &MO = MI->getOperand(I);
514 if (!MO.isReg()) continue;
515 unsigned Reg = MO.getReg();
516 if (Reg == 0 || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
517 if (SuccToSinkTo->isLiveIn(Reg))
521 DEBUG(dbgs() << "Sink instr " << *MI << "\tinto block " << *SuccToSinkTo);
523 // If the block has multiple predecessors, this would introduce computation on
524 // a path that it doesn't already exist. We could split the critical edge,
525 // but for now we just punt.
526 if (SuccToSinkTo->pred_size() > 1) {
527 // We cannot sink a load across a critical edge - there may be stores in
529 bool TryBreak = false;
531 if (!MI->isSafeToMove(TII, AA, store)) {
532 DEBUG(dbgs() << " *** NOTE: Won't sink load along critical edge.\n");
536 // We don't want to sink across a critical edge if we don't dominate the
537 // successor. We could be introducing calculations to new code paths.
538 if (!TryBreak && !DT->dominates(ParentBlock, SuccToSinkTo)) {
539 DEBUG(dbgs() << " *** NOTE: Critical edge found\n");
543 // Don't sink instructions into a loop.
544 if (!TryBreak && LI->isLoopHeader(SuccToSinkTo)) {
545 DEBUG(dbgs() << " *** NOTE: Loop header found\n");
549 // Otherwise we are OK with sinking along a critical edge.
551 DEBUG(dbgs() << "Sinking along critical edge.\n");
553 MachineBasicBlock *NewSucc =
554 SplitCriticalEdge(MI, ParentBlock, SuccToSinkTo, BreakPHIEdge);
556 DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
557 "break critical edge\n");
560 DEBUG(dbgs() << " *** Splitting critical edge:"
561 " BB#" << ParentBlock->getNumber()
562 << " -- BB#" << NewSucc->getNumber()
563 << " -- BB#" << SuccToSinkTo->getNumber() << '\n');
564 SuccToSinkTo = NewSucc;
566 BreakPHIEdge = false;
572 // BreakPHIEdge is true if all the uses are in the successor MBB being
573 // sunken into and they are all PHI nodes. In this case, machine-sink must
574 // break the critical edge first.
575 MachineBasicBlock *NewSucc = SplitCriticalEdge(MI, ParentBlock,
576 SuccToSinkTo, BreakPHIEdge);
578 DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
579 "break critical edge\n");
583 DEBUG(dbgs() << " *** Splitting critical edge:"
584 " BB#" << ParentBlock->getNumber()
585 << " -- BB#" << NewSucc->getNumber()
586 << " -- BB#" << SuccToSinkTo->getNumber() << '\n');
587 SuccToSinkTo = NewSucc;
591 // Determine where to insert into. Skip phi nodes.
592 MachineBasicBlock::iterator InsertPos = SuccToSinkTo->begin();
593 while (InsertPos != SuccToSinkTo->end() && InsertPos->isPHI())
596 // Move the instruction.
597 SuccToSinkTo->splice(InsertPos, ParentBlock, MI,
598 ++MachineBasicBlock::iterator(MI));
600 // Conservatively, clear any kill flags, since it's possible that they are no