//===- RegisterCoalescer.cpp - Generic Register Coalescing Interface -------==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the generic RegisterCoalescer interface which
// is used as the common interface used by all clients and
// implementations of register coalescing.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regcoalescing"
#include "RegisterCoalescer.h"
#include "VirtRegMap.h"
#include "LiveDebugVariables.h"

#include "llvm/Pass.h"
#include "llvm/Value.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

STATISTIC(numJoins    , "Number of interval joins performed");
STATISTIC(numCrossRCs , "Number of cross class joins performed");
STATISTIC(numCommutes , "Number of instruction commuting performed");
STATISTIC(numExtends  , "Number of copies extended");
STATISTIC(NumReMats   , "Number of instructions re-materialized");
STATISTIC(numPeep     , "Number of identity moves eliminated after coalescing");
STATISTIC(numAborts   , "Number of times interval joining aborted");

static cl::opt<bool>
EnableJoining("join-liveintervals",
              cl::desc("Coalesce copies (default=true)"),
              cl::init(true));

static cl::opt<bool>
DisableCrossClassJoin("disable-cross-class-join",
               cl::desc("Avoid coalescing cross register class copies"),
               cl::init(false), cl::Hidden);

static cl::opt<bool>
EnablePhysicalJoin("join-physregs",
                   cl::desc("Join physical register copies"),
                   cl::init(false), cl::Hidden);

static cl::opt<bool>
VerifyCoalescing("verify-coalescing",
         cl::desc("Verify machine instrs before and after register coalescing"),
         cl::Hidden);

INITIALIZE_PASS_BEGIN(RegisterCoalescer, "simple-register-coalescing",
                      "Simple Register Coalescing", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_DEPENDENCY(LiveDebugVariables)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(StrongPHIElimination)
INITIALIZE_PASS_DEPENDENCY(PHIElimination)
INITIALIZE_PASS_DEPENDENCY(TwoAddressInstructionPass)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(RegisterCoalescer, "simple-register-coalescing",
                    "Simple Register Coalescing", false, false)

char RegisterCoalescer::ID = 0;

unsigned CoalescerPair::compose(unsigned a, unsigned b) const {
  if (!a) return b;
  if (!b) return a;
  return tri_.composeSubRegIndices(a, b);
}

bool CoalescerPair::isMoveInstr(const MachineInstr *MI,
                                unsigned &Src, unsigned &Dst,
                                unsigned &SrcSub, unsigned &DstSub) const {
  if (MI->isCopy()) {
    Dst = MI->getOperand(0).getReg();
    DstSub = MI->getOperand(0).getSubReg();
    Src = MI->getOperand(1).getReg();
    SrcSub = MI->getOperand(1).getSubReg();
  } else if (MI->isSubregToReg()) {
    Dst = MI->getOperand(0).getReg();
    DstSub = compose(MI->getOperand(0).getSubReg(), MI->getOperand(3).getImm());
    Src = MI->getOperand(2).getReg();
    SrcSub = MI->getOperand(2).getSubReg();
  } else
    return false;
  return true;
}

bool CoalescerPair::setRegisters(const MachineInstr *MI) {
  srcReg_ = dstReg_ = subIdx_ = 0;
  newRC_ = 0;
  flipped_ = crossClass_ = false;

  unsigned Src, Dst, SrcSub, DstSub;
  if (!isMoveInstr(MI, Src, Dst, SrcSub, DstSub))
    return false;
  partial_ = SrcSub || DstSub;

  // If one register is a physreg, it must be Dst.
  if (TargetRegisterInfo::isPhysicalRegister(Src)) {
    if (TargetRegisterInfo::isPhysicalRegister(Dst))
      return false;
    std::swap(Src, Dst);
    std::swap(SrcSub, DstSub);
    flipped_ = true;
  }

  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();

  if (TargetRegisterInfo::isPhysicalRegister(Dst)) {
    // Eliminate DstSub on a physreg.
    if (DstSub) {
      Dst = tri_.getSubReg(Dst, DstSub);
      if (!Dst) return false;
      DstSub = 0;
    }

    // Eliminate SrcSub by picking a corresponding Dst superregister.
    if (SrcSub) {
      Dst = tri_.getMatchingSuperReg(Dst, SrcSub, MRI.getRegClass(Src));
      if (!Dst) return false;
      SrcSub = 0;
    } else if (!MRI.getRegClass(Src)->contains(Dst)) {
      return false;
    }
  } else {
    // Both registers are virtual.

    // Both registers have subreg indices.
    if (SrcSub && DstSub) {
      // For now we only handle the case of identical indices in commensurate
      // registers: Dreg:ssub_1 + Dreg:ssub_1 -> Dreg
      // FIXME: Handle Qreg:ssub_3 + Dreg:ssub_1 as QReg:dsub_1 + Dreg.
      if (SrcSub != DstSub)
        return false;
      const TargetRegisterClass *SrcRC = MRI.getRegClass(Src);
      const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
      if (!getCommonSubClass(DstRC, SrcRC))
        return false;
      SrcSub = DstSub = 0;
    }

    // There can be no SrcSub.
    if (SrcSub) {
      std::swap(Src, Dst);
      DstSub = SrcSub;
      SrcSub = 0;
      assert(!flipped_ && "Unexpected flip");
      flipped_ = true;
    }

    // Find the new register class.
    const TargetRegisterClass *SrcRC = MRI.getRegClass(Src);
    const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
    if (DstSub)
      newRC_ = tri_.getMatchingSuperRegClass(DstRC, SrcRC, DstSub);
    else
      newRC_ = getCommonSubClass(DstRC, SrcRC);
    if (!newRC_)
      return false;
    crossClass_ = newRC_ != DstRC || newRC_ != SrcRC;
  }
  // Check our invariants
  assert(TargetRegisterInfo::isVirtualRegister(Src) && "Src must be virtual");
  assert(!(TargetRegisterInfo::isPhysicalRegister(Dst) && DstSub) &&
         "Cannot have a physical SubIdx");
  srcReg_ = Src;
  dstReg_ = Dst;
  subIdx_ = DstSub;
  return true;
}

bool CoalescerPair::flip() {
  if (subIdx_ || TargetRegisterInfo::isPhysicalRegister(dstReg_))
    return false;
  std::swap(srcReg_, dstReg_);
  flipped_ = !flipped_;
  return true;
}

bool CoalescerPair::isCoalescable(const MachineInstr *MI) const {
  if (!MI)
    return false;
  unsigned Src, Dst, SrcSub, DstSub;
  if (!isMoveInstr(MI, Src, Dst, SrcSub, DstSub))
    return false;

  // Find the virtual register that is srcReg_.
  if (Dst == srcReg_) {
    std::swap(Src, Dst);
    std::swap(SrcSub, DstSub);
  } else if (Src != srcReg_) {
    return false;
  }

  // Now check that Dst matches dstReg_.
  if (TargetRegisterInfo::isPhysicalRegister(dstReg_)) {
    if (!TargetRegisterInfo::isPhysicalRegister(Dst))
      return false;
    assert(!subIdx_ && "Inconsistent CoalescerPair state.");
    // DstSub could be set for a physreg from INSERT_SUBREG.
    if (DstSub)
      Dst = tri_.getSubReg(Dst, DstSub);
    if (!SrcSub)
      return dstReg_ == Dst;
    // This is a partial register copy. Check that the parts match.
    return tri_.getSubReg(dstReg_, SrcSub) == Dst;
  }

  // dstReg_ is virtual.
  if (dstReg_ != Dst)
    return false;
  // Registers match, do the subregisters line up?
  return compose(subIdx_, SrcSub) == DstSub;
}

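// For illustration only: a client typically drives CoalescerPair roughly like
// this (a sketch, assuming MI is a copy-like MachineInstr and TII/TRI describe
// the current target; not code used by this pass itself):
//
//   CoalescerPair Pair(*TII, *TRI);
//   if (Pair.setRegisters(MI)) {
//     // Registers are now canonicalized: a physreg, if present, is always the
//     // destination, and getSubIdx() is the composed sub-register index.
//     // Later copies can be tested against the pair with isCoalescable().
//   }
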
void RegisterCoalescer::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(StrongPHIEliminationID);
  AU.addPreservedID(PHIEliminationID);
  AU.addPreservedID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void RegisterCoalescer::markAsJoined(MachineInstr *CopyMI) {
  /// Joined copies are not deleted immediately, but kept in JoinedCopies.
  JoinedCopies.insert(CopyMI);

  /// Mark all register operands of CopyMI as <undef> so they won't affect dead
  /// code elimination.
  for (MachineInstr::mop_iterator I = CopyMI->operands_begin(),
       E = CopyMI->operands_end(); I != E; ++I)
    if (I->isReg())
      I->setIsUndef(true);
}

/// AdjustCopiesBackFrom - We found a non-trivially-coalescable copy with IntA
/// being the source and IntB being the dest, thus this defines a value number
/// in IntB. If the source value number (in IntA) is defined by a copy from B,
/// see if we can merge these two pieces of B into a single value number,
/// eliminating a copy. For example:
///
///  A3 = B0
///    ...
///  B1 = A3      <- this copy
///
/// In this case, B0 can be extended to where the B1 copy lives, allowing the B1
/// value number to be replaced with B0 (which simplifies the B liveinterval).
///
/// This returns true if an interval was modified.
///
bool RegisterCoalescer::AdjustCopiesBackFrom(const CoalescerPair &CP,
                                             MachineInstr *CopyMI) {
  // Bail if there is no dst interval - can happen when merging physical subreg
  // operations.
  if (!li_->hasInterval(CP.getDstReg()))
    return false;

  LiveInterval &IntA =
    li_->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
  LiveInterval &IntB =
    li_->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());
  SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI).getDefIndex();

  // BValNo is a value number in B that is defined by a copy from A. 'B1' in
  // the example above.
  LiveInterval::iterator BLR = IntB.FindLiveRangeContaining(CopyIdx);
  if (BLR == IntB.end()) return false;
  VNInfo *BValNo = BLR->valno;

  // Get the location that B is defined at. Two options: either this value has
  // an unknown definition point or it is defined at CopyIdx. If unknown, we
  // can't process it.
  if (!BValNo->isDefByCopy()) return false;
  assert(BValNo->def == CopyIdx && "Copy doesn't define the value?");

  // AValNo is the value number in A that defines the copy, A3 in the example.
  SlotIndex CopyUseIdx = CopyIdx.getUseIndex();
  LiveInterval::iterator ALR = IntA.FindLiveRangeContaining(CopyUseIdx);
  // The live range might not exist after fun with physreg coalescing.
  if (ALR == IntA.end()) return false;
  VNInfo *AValNo = ALR->valno;
  // If it's re-defined by an early clobber somewhere in the live range, then
  // it's not safe to eliminate the copy. FIXME: This is a temporary workaround.
  // See PR3149:
  // 172     %ECX<def> = MOV32rr %reg1039<kill>
  // 180     INLINEASM <es:subl  $5,$1
  //         sbbl $3,$0>, 10, %EAX<def>, 14, %ECX<earlyclobber,def>, 9,
  //         %EAX<kill>,
  // 36, <fi#0>, 1, %reg0, 0, 9, %ECX<kill>, 36, <fi#1>, 1, %reg0, 0
  // 188     %EAX<def> = MOV32rr %EAX<kill>
  // 196     %ECX<def> = MOV32rr %ECX<kill>
  // 204     %ECX<def> = MOV32rr %ECX<kill>
  // 212     %EAX<def> = MOV32rr %EAX<kill>
  // 220     %EAX<def> = MOV32rr %EAX
  // 228     %reg1039<def> = MOV32rr %ECX<kill>
  // The early clobber operand ties ECX input to the ECX def.
  //
  // The live interval of ECX is represented as this:
  // %reg20,inf = [46,47:1)[174,230:0)  0@174-(230)  1@46-(47)
  // The coalescer has no idea there was a def in the middle of [174,230].
  if (AValNo->hasRedefByEC())
    return false;

  // If AValNo is defined as a copy from IntB, we can potentially process this.
  // Get the instruction that defines this value number.
  if (!CP.isCoalescable(AValNo->getCopy()))
    return false;

  // Get the LiveRange in IntB that this value number starts with.
  LiveInterval::iterator ValLR =
    IntB.FindLiveRangeContaining(AValNo->def.getPrevSlot());
  if (ValLR == IntB.end())
    return false;

  // Make sure that the end of the live range is inside the same block as
  // CopyMI.
  MachineInstr *ValLREndInst =
    li_->getInstructionFromIndex(ValLR->end.getPrevSlot());
  if (!ValLREndInst || ValLREndInst->getParent() != CopyMI->getParent())
    return false;

  // Okay, we now know that ValLR ends in the same block that the CopyMI
  // live-range starts. If there are no intervening live ranges between them in
  // IntB, we can merge them.
  if (ValLR+1 != BLR) return false;

  // If a live interval is a physical register, conservatively check if any
  // of its aliases is overlapping the live interval of the virtual register.
  // If so, do not coalesce.
  if (TargetRegisterInfo::isPhysicalRegister(IntB.reg)) {
    for (const unsigned *AS = tri_->getAliasSet(IntB.reg); *AS; ++AS)
      if (li_->hasInterval(*AS) && IntA.overlaps(li_->getInterval(*AS))) {
        DEBUG({
            dbgs() << "\t\tInterfere with alias ";
            li_->getInterval(*AS).print(dbgs(), tri_);
          });
        return false;
      }
  }

  DEBUG({
      dbgs() << "Extending: ";
      IntB.print(dbgs(), tri_);
    });

  SlotIndex FillerStart = ValLR->end, FillerEnd = BLR->start;
  // We are about to delete CopyMI, so need to remove it as the 'instruction
  // that defines this value #'. Update the valnum with the new defining
  // instruction #.
  BValNo->def = FillerStart;
  BValNo->setCopy(0);

  // Okay, we can merge them. We need to insert a new liverange:
  // [ValLR.end, BLR.begin) of either value number, then we merge the
  // two value numbers.
  IntB.addRange(LiveRange(FillerStart, FillerEnd, BValNo));

  // If the IntB live range is assigned to a physical register, and if that
  // physreg has sub-registers, update their live intervals as well.
  if (TargetRegisterInfo::isPhysicalRegister(IntB.reg)) {
    for (const unsigned *SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR) {
      if (!li_->hasInterval(*SR))
        continue;
      LiveInterval &SRLI = li_->getInterval(*SR);
      SRLI.addRange(LiveRange(FillerStart, FillerEnd,
                              SRLI.getNextValue(FillerStart, 0,
                                                li_->getVNInfoAllocator())));
    }
  }

  // Okay, merge "B1" into the same value number as "B0".
  if (BValNo != ValLR->valno) {
    // If B1 is killed by a PHI, then the merged live range must also be killed
    // by the same PHI, as B0 and B1 can not overlap.
    bool HasPHIKill = BValNo->hasPHIKill();
    IntB.MergeValueNumberInto(BValNo, ValLR->valno);
    if (HasPHIKill)
      ValLR->valno->setHasPHIKill(true);
  }
  DEBUG({
      dbgs() << "   result = ";
      IntB.print(dbgs(), tri_);
      dbgs() << "\n";
    });

  // If the source instruction was killing the source register before the
  // merge, unset the isKill marker given the live range has been extended.
  int UIdx = ValLREndInst->findRegisterUseOperandIdx(IntB.reg, true);
  if (UIdx != -1)
    ValLREndInst->getOperand(UIdx).setIsKill(false);

  // If the copy instruction was killing the destination register before the
  // merge, find the last use and trim the live range. That will also add the
  // isKill marker.
  if (ALR->end == CopyIdx)
    li_->shrinkToUses(&IntA);

  ++numExtends;
  return true;
}

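// To make the example above concrete: B0's range (ValLR) ends just before the
// copy and B1's range (BLR) starts at it, so the code inserts the filler range
// [FillerStart, FillerEnd) into IntB and merges BValNo into ValLR->valno. The
// now-identity copy is marked as joined by the caller and deleted in the final
// cleanup pass of runOnMachineFunction.
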
/// HasOtherReachingDefs - Return true if there are definitions of IntB
/// other than BValNo val# that can reach uses of AValNo val# of IntA.
bool RegisterCoalescer::HasOtherReachingDefs(LiveInterval &IntA,
                                             LiveInterval &IntB,
                                             VNInfo *AValNo,
                                             VNInfo *BValNo) {
  for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end();
       AI != AE; ++AI) {
    if (AI->valno != AValNo) continue;
    LiveInterval::Ranges::iterator BI =
      std::upper_bound(IntB.ranges.begin(), IntB.ranges.end(), AI->start);
    if (BI != IntB.ranges.begin())
      --BI;
    for (; BI != IntB.ranges.end() && AI->end >= BI->start; ++BI) {
      if (BI->valno == BValNo)
        continue;
      if (BI->start <= AI->start && BI->end > AI->start)
        return true;
      if (BI->start > AI->start && BI->start < AI->end)
        return true;
    }
  }
  return false;
}

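// Example of what this check catches (hypothetical slot indexes): if AValNo
// covers [100,200) in IntA and IntB has a range [150,180) belonging to some
// value number other than BValNo, that other def of B is live across uses of
// AValNo, so the transformation in RemoveCopyByCommutingDef below must be
// rejected.
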
/// RemoveCopyByCommutingDef - We found a non-trivially-coalescable copy with
/// IntA being the source and IntB being the dest, thus this defines a value
/// number in IntB. If the source value number (in IntA) is defined by a
/// commutable instruction and its other operand is coalesced to the copy dest
/// register, see if we can transform the copy into a noop by commuting the
/// definition. For example,
///
///  A3 = op A2 B0<kill>
///    ...
///  B1 = A3      <- this copy
///    ...
///     = op A3   <- more uses
///
/// ==>
///
///  B2 = op B0 A2<kill>
///    ...
///  B1 = B2      <- now an identity copy
///    ...
///     = op B2   <- more uses
///
/// This returns true if an interval was modified.
///
bool RegisterCoalescer::RemoveCopyByCommutingDef(const CoalescerPair &CP,
                                                 MachineInstr *CopyMI) {
  // FIXME: For now, only eliminate the copy by commuting its def when the
  // source register is a virtual register. We want to guard against cases
  // where the copy is a back edge copy and commuting the def lengthens the
  // live interval of the source register to the entire loop.
  if (CP.isPhys() && CP.isFlipped())
    return false;

  // Bail if there is no dst interval.
  if (!li_->hasInterval(CP.getDstReg()))
    return false;

  SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI).getDefIndex();

  LiveInterval &IntA =
    li_->getInterval(CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg());
  LiveInterval &IntB =
    li_->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());

  // BValNo is a value number in B that is defined by a copy from A. 'B1' in
  // the example above.
  VNInfo *BValNo = IntB.getVNInfoAt(CopyIdx);
  if (!BValNo || !BValNo->isDefByCopy())
    return false;

  assert(BValNo->def == CopyIdx && "Copy doesn't define the value?");

  // AValNo is the value number in A that defines the copy, A3 in the example.
  VNInfo *AValNo = IntA.getVNInfoAt(CopyIdx.getUseIndex());
  assert(AValNo && "COPY source not live");

  // If other defs can reach uses of this def, then it's not safe to perform
  // the optimization.
  if (AValNo->isPHIDef() || AValNo->isUnused() || AValNo->hasPHIKill())
    return false;
  MachineInstr *DefMI = li_->getInstructionFromIndex(AValNo->def);
  if (!DefMI)
    return false;
  const TargetInstrDesc &TID = DefMI->getDesc();
  if (!TID.isCommutable())
    return false;
  // If DefMI is a two-address instruction then commuting it will change the
  // destination register.
  int DefIdx = DefMI->findRegisterDefOperandIdx(IntA.reg);
  assert(DefIdx != -1);
  unsigned UseOpIdx;
  if (!DefMI->isRegTiedToUseOperand(DefIdx, &UseOpIdx))
    return false;
  unsigned Op1, Op2, NewDstIdx;
  if (!tii_->findCommutedOpIndices(DefMI, Op1, Op2))
    return false;
  if (Op1 == UseOpIdx)
    NewDstIdx = Op2;
  else if (Op2 == UseOpIdx)
    NewDstIdx = Op1;
  else
    return false;

  MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
  unsigned NewReg = NewDstMO.getReg();
  if (NewReg != IntB.reg || !NewDstMO.isKill())
    return false;

  // Make sure there are no other definitions of IntB that would reach the
  // uses which the new definition can reach.
  if (HasOtherReachingDefs(IntA, IntB, AValNo, BValNo))
    return false;

  // Abort if the aliases of IntB.reg have values that are not simply the
  // clobbers from the superreg.
  if (TargetRegisterInfo::isPhysicalRegister(IntB.reg))
    for (const unsigned *AS = tri_->getAliasSet(IntB.reg); *AS; ++AS)
      if (li_->hasInterval(*AS) &&
          HasOtherReachingDefs(IntA, li_->getInterval(*AS), AValNo, 0))
        return false;

  // If some of the uses of IntA.reg is already coalesced away, return false.
  // It's not possible to determine whether it's safe to perform the coalescing.
  for (MachineRegisterInfo::use_nodbg_iterator UI =
         mri_->use_nodbg_begin(IntA.reg),
       UE = mri_->use_nodbg_end(); UI != UE; ++UI) {
    MachineInstr *UseMI = &*UI;
    SlotIndex UseIdx = li_->getInstructionIndex(UseMI);
    LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx);
    if (ULR == IntA.end())
      continue;
    if (ULR->valno == AValNo && JoinedCopies.count(UseMI))
      return false;
  }

  DEBUG(dbgs() << "\tRemoveCopyByCommutingDef: " << AValNo->def << '\t'
               << *DefMI);

  // At this point we have decided that it is legal to do this
  // transformation. Start by commuting the instruction.
  MachineBasicBlock *MBB = DefMI->getParent();
  MachineInstr *NewMI = tii_->commuteInstruction(DefMI);
  if (!NewMI)
    return false;
  if (TargetRegisterInfo::isVirtualRegister(IntA.reg) &&
      TargetRegisterInfo::isVirtualRegister(IntB.reg) &&
      !mri_->constrainRegClass(IntB.reg, mri_->getRegClass(IntA.reg)))
    return false;
  if (NewMI != DefMI) {
    li_->ReplaceMachineInstrInMaps(DefMI, NewMI);
    MBB->insert(DefMI, NewMI);
    MBB->erase(DefMI);
  }
  unsigned OpIdx = NewMI->findRegisterUseOperandIdx(IntA.reg, false);
  NewMI->getOperand(OpIdx).setIsKill();

  // If ALR and BLR overlaps and end of BLR extends beyond end of ALR, e.g.
  // A = or A, B
  // ...
  // B = A
  // ...
  // C = A<kill>
  // ...
  //   = B

  // Update uses of IntA of the specific Val# with IntB.
  for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(IntA.reg),
         UE = mri_->use_end(); UI != UE;) {
    MachineOperand &UseMO = UI.getOperand();
    MachineInstr *UseMI = &*UI;
    ++UI;
    if (JoinedCopies.count(UseMI))
      continue;
    if (UseMI->isDebugValue()) {
      // FIXME These don't have an instruction index. Not clear we have enough
      // info to decide whether to do this replacement or not. For now do it.
      UseMO.setReg(NewReg);
      continue;
    }
    SlotIndex UseIdx = li_->getInstructionIndex(UseMI).getUseIndex();
    LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx);
    if (ULR == IntA.end() || ULR->valno != AValNo)
      continue;
    if (TargetRegisterInfo::isPhysicalRegister(NewReg))
      UseMO.substPhysReg(NewReg, *tri_);
    else
      UseMO.setReg(NewReg);
    if (UseMI == CopyMI)
      continue;
    if (!UseMI->isCopy())
      continue;
    if (UseMI->getOperand(0).getReg() != IntB.reg ||
        UseMI->getOperand(0).getSubReg())
      continue;

    // This copy will become a noop. If it's defining a new val#, merge it into
    // BValNo.
    SlotIndex DefIdx = UseIdx.getDefIndex();
    VNInfo *DVNI = IntB.getVNInfoAt(DefIdx);
    if (!DVNI)
      continue;
    DEBUG(dbgs() << "\t\tnoop: " << DefIdx << '\t' << *UseMI);
    assert(DVNI->def == DefIdx);
    BValNo = IntB.MergeValueNumberInto(BValNo, DVNI);
    markAsJoined(UseMI);
  }

  // Extend BValNo by merging in IntA live ranges of AValNo. Val# definition
  // is updated.
  VNInfo *ValNo = BValNo;
  ValNo->def = AValNo->def;
  ValNo->setCopy(0);
  for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end();
       AI != AE; ++AI) {
    if (AI->valno != AValNo) continue;
    IntB.addRange(LiveRange(AI->start, AI->end, ValNo));
  }
  DEBUG(dbgs() << "\t\textended: " << IntB << '\n');

  IntA.removeValNo(AValNo);
  DEBUG(dbgs() << "\t\ttrimmed:  " << IntA << '\n');
  ++numCommutes;
  return true;
}

/// ReMaterializeTrivialDef - If the source of a copy is defined by a trivial
/// computation, replace the copy by rematerializing the definition.
bool RegisterCoalescer::ReMaterializeTrivialDef(LiveInterval &SrcInt,
                                                bool preserveSrcInt,
                                                unsigned DstReg,
                                                unsigned DstSubIdx,
                                                MachineInstr *CopyMI) {
  SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI).getUseIndex();
  LiveInterval::iterator SrcLR = SrcInt.FindLiveRangeContaining(CopyIdx);
  assert(SrcLR != SrcInt.end() && "Live range not found!");
  VNInfo *ValNo = SrcLR->valno;
  // If other defs can reach uses of this def, then it's not safe to perform
  // the optimization.
  if (ValNo->isPHIDef() || ValNo->isUnused() || ValNo->hasPHIKill())
    return false;
  MachineInstr *DefMI = li_->getInstructionFromIndex(ValNo->def);
  if (!DefMI)
    return false;
  assert(DefMI && "Defining instruction disappeared");
  const TargetInstrDesc &TID = DefMI->getDesc();
  if (!TID.isAsCheapAsAMove())
    return false;
  if (!tii_->isTriviallyReMaterializable(DefMI, AA))
    return false;
  bool SawStore = false;
  if (!DefMI->isSafeToMove(tii_, AA, SawStore))
    return false;
  if (TID.getNumDefs() != 1)
    return false;
  if (!DefMI->isImplicitDef()) {
    // Make sure the copy destination register class fits the instruction
    // definition register class. The mismatch can happen as a result of earlier
    // extract_subreg, insert_subreg, subreg_to_reg coalescing.
    const TargetRegisterClass *RC = tii_->getRegClass(TID, 0, tri_);
    if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
      if (mri_->getRegClass(DstReg) != RC)
        return false;
    } else if (!RC->contains(DstReg))
      return false;
  }

  // If destination register has a sub-register index on it, make sure it
  // matches the instruction register class.
  if (DstSubIdx) {
    const TargetInstrDesc &TID = DefMI->getDesc();
    if (TID.getNumDefs() != 1)
      return false;
    const TargetRegisterClass *DstRC = mri_->getRegClass(DstReg);
    const TargetRegisterClass *DstSubRC =
      DstRC->getSubRegisterRegClass(DstSubIdx);
    const TargetRegisterClass *DefRC = tii_->getRegClass(TID, 0, tri_);
    if (DefRC == DstRC)
      DstSubIdx = 0;
    else if (DefRC != DstSubRC)
      return false;
  }

  RemoveCopyFlag(DstReg, CopyMI);

  MachineBasicBlock *MBB = CopyMI->getParent();
  MachineBasicBlock::iterator MII =
    llvm::next(MachineBasicBlock::iterator(CopyMI));
  tii_->reMaterialize(*MBB, MII, DstReg, DstSubIdx, DefMI, *tri_);
  MachineInstr *NewMI = prior(MII);

  // CopyMI may have implicit operands, transfer them over to the newly
  // rematerialized instruction. And update implicit def interval valnos.
  for (unsigned i = CopyMI->getDesc().getNumOperands(),
         e = CopyMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = CopyMI->getOperand(i);
    if (MO.isReg() && MO.isImplicit())
      NewMI->addOperand(MO);
    if (MO.isDef())
      RemoveCopyFlag(MO.getReg(), CopyMI);
  }

  NewMI->copyImplicitOps(CopyMI);
  li_->ReplaceMachineInstrInMaps(CopyMI, NewMI);
  CopyMI->eraseFromParent();
  ReMatCopies.insert(CopyMI);
  ReMatDefs.insert(DefMI);
  DEBUG(dbgs() << "Remat: " << *NewMI);
  ++NumReMats;

  // The source interval can become smaller because we removed a use.
  if (preserveSrcInt)
    li_->shrinkToUses(&SrcInt);

  return true;
}

/// UpdateRegDefsUses - Replace all defs and uses of SrcReg to DstReg and
/// update the subregister number if it is not zero. If DstReg is a
/// physical register and the existing subregister number of the def / use
/// being updated is not zero, make sure to set it to the correct physical
/// subregister.
void
RegisterCoalescer::UpdateRegDefsUses(const CoalescerPair &CP) {
  bool DstIsPhys = CP.isPhys();
  unsigned SrcReg = CP.getSrcReg();
  unsigned DstReg = CP.getDstReg();
  unsigned SubIdx = CP.getSubIdx();

  // Update LiveDebugVariables.
  ldv_->renameRegister(SrcReg, DstReg, SubIdx);

  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(SrcReg);
       MachineInstr *UseMI = I.skipInstruction();) {
    // A PhysReg copy that won't be coalesced can perhaps be rematerialized
    // instead.
    if (DstIsPhys) {
      if (UseMI->isCopy() &&
          !UseMI->getOperand(1).getSubReg() &&
          !UseMI->getOperand(0).getSubReg() &&
          UseMI->getOperand(1).getReg() == SrcReg &&
          UseMI->getOperand(0).getReg() != SrcReg &&
          UseMI->getOperand(0).getReg() != DstReg &&
          !JoinedCopies.count(UseMI) &&
          ReMaterializeTrivialDef(li_->getInterval(SrcReg), false,
                                  UseMI->getOperand(0).getReg(), 0, UseMI))
        continue;
    }

    SmallVector<unsigned,8> Ops;
    bool Reads, Writes;
    tie(Reads, Writes) = UseMI->readsWritesVirtualRegister(SrcReg, &Ops);
    bool Kills = false, Deads = false;

    // Replace SrcReg with DstReg in all UseMI operands.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = UseMI->getOperand(Ops[i]);
      Kills |= MO.isKill();
      Deads |= MO.isDead();

      if (DstIsPhys)
        MO.substPhysReg(DstReg, *tri_);
      else
        MO.substVirtReg(DstReg, SubIdx, *tri_);
    }

    // This instruction is a copy that will be removed.
    if (JoinedCopies.count(UseMI))
      continue;

    if (SubIdx) {
      // If UseMI was a simple SrcReg def, make sure we didn't turn it into a
      // read-modify-write of DstReg.
      if (Deads)
        UseMI->addRegisterDead(DstReg, tri_);
      else if (!Reads && Writes)
        UseMI->addRegisterDefined(DstReg, tri_);

      // Kill flags apply to the whole physical register.
      if (DstIsPhys && Kills)
        UseMI->addRegisterKilled(DstReg, tri_);
    }

    DEBUG({
        dbgs() << "\t\tupdated: ";
        if (!UseMI->isDebugValue())
          dbgs() << li_->getInstructionIndex(UseMI) << "\t";
        dbgs() << *UseMI;
      });
  }
}

/// removeIntervalIfEmpty - Check if the live interval of a physical register
/// is empty, if so remove it and also remove the empty intervals of its
/// sub-registers. Return true if live interval is removed.
static bool removeIntervalIfEmpty(LiveInterval &li, LiveIntervals *li_,
                                  const TargetRegisterInfo *tri_) {
  if (li.empty()) {
    if (TargetRegisterInfo::isPhysicalRegister(li.reg))
      for (const unsigned* SR = tri_->getSubRegisters(li.reg); *SR; ++SR) {
        if (!li_->hasInterval(*SR))
          continue;
        LiveInterval &sli = li_->getInterval(*SR);
        if (sli.empty())
          li_->removeInterval(*SR);
      }
    li_->removeInterval(li.reg);
    return true;
  }
  return false;
}

/// RemoveDeadDef - If a def of a live interval is now determined dead, remove
/// the val# it defines. If the live interval becomes empty, remove it as well.
bool RegisterCoalescer::RemoveDeadDef(LiveInterval &li,
                                      MachineInstr *DefMI) {
  SlotIndex DefIdx = li_->getInstructionIndex(DefMI).getDefIndex();
  LiveInterval::iterator MLR = li.FindLiveRangeContaining(DefIdx);
  if (DefIdx != MLR->valno->def)
    return false;
  li.removeValNo(MLR->valno);
  return removeIntervalIfEmpty(li, li_, tri_);
}

void RegisterCoalescer::RemoveCopyFlag(unsigned DstReg,
                                       const MachineInstr *CopyMI) {
  SlotIndex DefIdx = li_->getInstructionIndex(CopyMI).getDefIndex();
  if (li_->hasInterval(DstReg)) {
    LiveInterval &LI = li_->getInterval(DstReg);
    if (const LiveRange *LR = LI.getLiveRangeContaining(DefIdx))
      if (LR->valno->def == DefIdx)
        LR->valno->setCopy(0);
  }
  if (!TargetRegisterInfo::isPhysicalRegister(DstReg))
    return;
  for (const unsigned* AS = tri_->getAliasSet(DstReg); *AS; ++AS) {
    if (!li_->hasInterval(*AS))
      continue;
    LiveInterval &LI = li_->getInterval(*AS);
    if (const LiveRange *LR = LI.getLiveRangeContaining(DefIdx))
      if (LR->valno->def == DefIdx)
        LR->valno->setCopy(0);
  }
}

/// shouldJoinPhys - Return true if a copy involving a physreg should be joined.
/// We need to be careful about coalescing a source physical register with a
/// virtual register. Once the coalescing is done, it cannot be broken and these
/// are not spillable! If the destination interval uses are far away, think
/// twice about coalescing them!
bool RegisterCoalescer::shouldJoinPhys(CoalescerPair &CP) {
  bool Allocatable = li_->isAllocatable(CP.getDstReg());
  LiveInterval &JoinVInt = li_->getInterval(CP.getSrcReg());

  /// Always join simple intervals that are defined by a single copy from a
  /// reserved register. This doesn't increase register pressure, so it is
  /// always beneficial.
  if (!Allocatable && CP.isFlipped() && JoinVInt.containsOneValue())
    return true;

  if (!EnablePhysicalJoin) {
    DEBUG(dbgs() << "\tPhysreg joins disabled.\n");
    return false;
  }

  // Only coalesce to allocatable physreg, we don't want to risk modifying
  // reserved registers.
  if (!Allocatable) {
    DEBUG(dbgs() << "\tRegister is an unallocatable physreg.\n");
    return false;  // Not coalescable.
  }

  // Don't join with physregs that have a ridiculous number of live
  // ranges. The data structure performance is really bad when that
  // happens.
  if (li_->hasInterval(CP.getDstReg()) &&
      li_->getInterval(CP.getDstReg()).ranges.size() > 1000) {
    ++numAborts;
    DEBUG(dbgs()
          << "\tPhysical register live interval too complicated, abort!\n");
    return false;
  }

  // FIXME: Why are we skipping this test for partial copies?
  //        CodeGen/X86/phys_subreg_coalesce-3.ll needs it.
  if (!CP.isPartial()) {
    const TargetRegisterClass *RC = mri_->getRegClass(CP.getSrcReg());
    unsigned Threshold = RegClassInfo.getNumAllocatableRegs(RC) * 2;
    unsigned Length = li_->getApproximateInstructionCount(JoinVInt);
    if (Length > Threshold) {
      ++numAborts;
      DEBUG(dbgs() << "\tMay tie down a physical register, abort!\n");
      return false;
    }
  }
  return true;
}

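// A concrete reading of the threshold above: for a register class with eight
// allocatable registers the limit is 16 instructions, so a virtual register
// whose live interval spans much more than that is not pinned to a physical
// register by this join.
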
/// isWinToJoinCrossClass - Return true if it's profitable to coalesce
/// two virtual registers from different register classes.
bool
RegisterCoalescer::isWinToJoinCrossClass(unsigned SrcReg,
                                         unsigned DstReg,
                                         const TargetRegisterClass *SrcRC,
                                         const TargetRegisterClass *DstRC,
                                         const TargetRegisterClass *NewRC) {
  unsigned NewRCCount = RegClassInfo.getNumAllocatableRegs(NewRC);
  // This heuristic is good enough in practice, but it's obviously not *right*.
  // 4 is a magic number that works well enough for x86, ARM, etc. It filters
  // out all but the most restrictive register classes.
  if (NewRCCount > 4 ||
      // Early exit if the function is fairly small, coalesce aggressively if
      // that's the case. For really special register classes with 3 or
      // fewer registers, be a bit more careful.
      (li_->getFuncInstructionCount() / NewRCCount) < 8)
    return true;
  LiveInterval &SrcInt = li_->getInterval(SrcReg);
  LiveInterval &DstInt = li_->getInterval(DstReg);
  unsigned SrcSize = li_->getApproximateInstructionCount(SrcInt);
  unsigned DstSize = li_->getApproximateInstructionCount(DstInt);

  // Coalesce aggressively if the intervals are small compared to the number of
  // registers in the new class. The number 4 is fairly arbitrary, chosen to be
  // less aggressive than the 8 used for the whole function size.
  const unsigned ThresSize = 4 * NewRCCount;
  if (SrcSize <= ThresSize && DstSize <= ThresSize)
    return true;

  // Estimate *register use density*. If it doubles or more, abort.
  unsigned SrcUses = std::distance(mri_->use_nodbg_begin(SrcReg),
                                   mri_->use_nodbg_end());
  unsigned DstUses = std::distance(mri_->use_nodbg_begin(DstReg),
                                   mri_->use_nodbg_end());
  unsigned NewUses = SrcUses + DstUses;
  unsigned NewSize = SrcSize + DstSize;
  if (SrcRC != NewRC && SrcSize > ThresSize) {
    unsigned SrcRCCount = RegClassInfo.getNumAllocatableRegs(SrcRC);
    if (NewUses*SrcSize*SrcRCCount > 2*SrcUses*NewSize*NewRCCount)
      return false;
  }
  if (DstRC != NewRC && DstSize > ThresSize) {
    unsigned DstRCCount = RegClassInfo.getNumAllocatableRegs(DstRC);
    if (NewUses*DstSize*DstRCCount > 2*DstUses*NewSize*NewRCCount)
      return false;
  }
  return true;
}

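// The two tests above can be read as density comparisons: the join is rejected
// when (NewUses / NewSize) * (SrcRCCount / NewRCCount) exceeds
// 2 * (SrcUses / SrcSize), and likewise for the Dst side, i.e. when the
// combined interval would more than double the per-instruction pressure on the
// more restrictive register class.
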
/// JoinCopy - Attempt to join intervals corresponding to SrcReg/DstReg,
/// which are the src/dst of the copy instruction CopyMI. This returns true
/// if the copy was successfully coalesced away. If it is not currently
/// possible to coalesce this interval, but it may be possible if other
/// things get coalesced, then it returns true by reference in 'Again'.
bool RegisterCoalescer::JoinCopy(MachineInstr *CopyMI, bool &Again) {

  Again = false;
  if (JoinedCopies.count(CopyMI) || ReMatCopies.count(CopyMI))
    return false; // Already done.

  DEBUG(dbgs() << li_->getInstructionIndex(CopyMI) << '\t' << *CopyMI);

  CoalescerPair CP(*tii_, *tri_);
  if (!CP.setRegisters(CopyMI)) {
    DEBUG(dbgs() << "\tNot coalescable.\n");
    return false;
  }

  // If they are already joined we continue.
  if (CP.getSrcReg() == CP.getDstReg()) {
    markAsJoined(CopyMI);
    DEBUG(dbgs() << "\tCopy already coalesced.\n");
    return false;  // Not coalescable.
  }

  DEBUG(dbgs() << "\tConsidering merging " << PrintReg(CP.getSrcReg(), tri_)
               << " with " << PrintReg(CP.getDstReg(), tri_, CP.getSubIdx())
               << "\n");

  // Enforce policies.
  if (CP.isPhys()) {
    if (!shouldJoinPhys(CP)) {
      // Before giving up coalescing, if definition of source is defined by
      // trivial computation, try rematerializing it.
      if (!CP.isFlipped() &&
          ReMaterializeTrivialDef(li_->getInterval(CP.getSrcReg()), true,
                                  CP.getDstReg(), 0, CopyMI))
        return true;
      return false;
    }
  } else {
    // Avoid constraining virtual register regclass too much.
    if (CP.isCrossClass()) {
      DEBUG(dbgs() << "\tCross-class to " << CP.getNewRC()->getName() << ".\n");
      if (DisableCrossClassJoin) {
        DEBUG(dbgs() << "\tCross-class joins disabled.\n");
        return false;
      }
      if (!isWinToJoinCrossClass(CP.getSrcReg(), CP.getDstReg(),
                                 mri_->getRegClass(CP.getSrcReg()),
                                 mri_->getRegClass(CP.getDstReg()),
                                 CP.getNewRC())) {
        DEBUG(dbgs() << "\tAvoid coalescing to constrained register class.\n");
        Again = true;  // May be possible to coalesce later.
        return false;
      }
    }

    // When possible, let DstReg be the larger interval.
    if (!CP.getSubIdx() && li_->getInterval(CP.getSrcReg()).ranges.size() >
                           li_->getInterval(CP.getDstReg()).ranges.size())
      CP.flip();
  }

  // Okay, attempt to join these two intervals. On failure, this returns false.
  // Otherwise, if one of the intervals being joined is a physreg, this method
  // always canonicalizes DstInt to be it. The output "SrcInt" will not have
  // been modified, so we can use this information below to update aliases.
  if (!JoinIntervals(CP)) {
    // Coalescing failed.

    // If definition of source is defined by trivial computation, try
    // rematerializing it.
    if (!CP.isFlipped() &&
        ReMaterializeTrivialDef(li_->getInterval(CP.getSrcReg()), true,
                                CP.getDstReg(), 0, CopyMI))
      return true;

    // If we can eliminate the copy without merging the live ranges, do so now.
    if (!CP.isPartial()) {
      if (AdjustCopiesBackFrom(CP, CopyMI) ||
          RemoveCopyByCommutingDef(CP, CopyMI)) {
        markAsJoined(CopyMI);
        DEBUG(dbgs() << "\tTrivial!\n");
        return true;
      }
    }

    // Otherwise, we are unable to join the intervals.
    DEBUG(dbgs() << "\tInterference!\n");
    Again = true;  // May be possible to coalesce later.
    return false;
  }

  // Coalescing to a virtual register that is of a sub-register class of the
  // other. Make sure the resulting register is set to the right register class.
  if (CP.isCrossClass()) {
    ++numCrossRCs;
    mri_->setRegClass(CP.getDstReg(), CP.getNewRC());
  }

  // Remember to delete the copy instruction.
  markAsJoined(CopyMI);

  UpdateRegDefsUses(CP);

  // If we have extended the live range of a physical register, make sure we
  // update live-in lists as well.
  if (CP.isPhys()) {
    SmallVector<MachineBasicBlock*, 16> BlockSeq;
    // JoinIntervals invalidates the VNInfos in SrcInt, but we only need the
    // ranges for this, and they are preserved.
    LiveInterval &SrcInt = li_->getInterval(CP.getSrcReg());
    for (LiveInterval::const_iterator I = SrcInt.begin(), E = SrcInt.end();
         I != E; ++I) {
      li_->findLiveInMBBs(I->start, I->end, BlockSeq);
      for (unsigned idx = 0, size = BlockSeq.size(); idx != size; ++idx) {
        MachineBasicBlock &block = *BlockSeq[idx];
        if (!block.isLiveIn(CP.getDstReg()))
          block.addLiveIn(CP.getDstReg());
      }
      BlockSeq.clear();
    }
  }

  // SrcReg is guaranteed to be the register whose live interval is being
  // merged.
  li_->removeInterval(CP.getSrcReg());

  // Update regalloc hint.
  tri_->UpdateRegAllocHint(CP.getSrcReg(), CP.getDstReg(), *mf_);

  DEBUG({
    LiveInterval &DstInt = li_->getInterval(CP.getDstReg());
    dbgs() << "\tJoined. Result = ";
    DstInt.print(dbgs(), tri_);
    dbgs() << "\n";
  });

  ++numJoins;
  return true;
}

/// ComputeUltimateVN - Assuming we are going to join two live intervals,
/// compute what the resultant value numbers for each value in the input two
/// ranges will be. This is complicated by copies between the two which can
/// and will commonly cause multiple value numbers to be merged into one.
///
/// VN is the value number that we're trying to resolve. InstDefiningValue
/// keeps track of the new InstDefiningValue assignment for the result
/// LiveInterval. ThisFromOther/OtherFromThis are sets that keep track of
/// whether a value in this or other is a copy from the opposite set.
/// ThisValNoAssignments/OtherValNoAssignments keep track of value #'s that have
/// already been assigned.
///
/// ThisFromOther[x] - If x is defined as a copy from the other interval, this
/// contains the value number the copy is from.
///
static unsigned ComputeUltimateVN(VNInfo *VNI,
                                  SmallVector<VNInfo*, 16> &NewVNInfo,
                                  DenseMap<VNInfo*, VNInfo*> &ThisFromOther,
                                  DenseMap<VNInfo*, VNInfo*> &OtherFromThis,
                                  SmallVector<int, 16> &ThisValNoAssignments,
                                  SmallVector<int, 16> &OtherValNoAssignments) {
  unsigned VN = VNI->id;

  // If the VN has already been computed, just return it.
  if (ThisValNoAssignments[VN] >= 0)
    return ThisValNoAssignments[VN];
  assert(ThisValNoAssignments[VN] != -2 && "Cyclic value numbers");

  // If this val is not a copy from the other val, then it must be a new value
  // number in the destination.
  DenseMap<VNInfo*, VNInfo*>::iterator I = ThisFromOther.find(VNI);
  if (I == ThisFromOther.end()) {
    NewVNInfo.push_back(VNI);
    return ThisValNoAssignments[VN] = NewVNInfo.size()-1;
  }
  VNInfo *OtherValNo = I->second;

  // Otherwise, this *is* a copy from the RHS. If the other side has already
  // been computed, return it.
  if (OtherValNoAssignments[OtherValNo->id] >= 0)
    return ThisValNoAssignments[VN] = OtherValNoAssignments[OtherValNo->id];

  // Mark this value number as currently being computed, then ask what the
  // ultimate value # of the other value is.
  ThisValNoAssignments[VN] = -2;
  unsigned UltimateVN =
    ComputeUltimateVN(OtherValNo, NewVNInfo, OtherFromThis, ThisFromOther,
                      OtherValNoAssignments, ThisValNoAssignments);
  return ThisValNoAssignments[VN] = UltimateVN;
}

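// Small illustration of the resolution above (hypothetical value numbers):
// suppose LHS value L1 is a copy of RHS value R0, and R0 is in turn a copy of
// LHS value L0. Resolving L1 marks it as in-progress (-2) and recurses into
// R0, which recurses into L0; L0 is not a copy, so it gets a fresh slot in
// NewVNInfo, and that same slot is then assigned to R0 and to L1 on the way
// back up. The -2 marker is what the "Cyclic value numbers" assertion checks.
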
/// JoinIntervals - Attempt to join these two intervals. On failure, this
/// returns false.
bool RegisterCoalescer::JoinIntervals(CoalescerPair &CP) {
  LiveInterval &RHS = li_->getInterval(CP.getSrcReg());
  DEBUG({ dbgs() << "\t\tRHS = "; RHS.print(dbgs(), tri_); dbgs() << "\n"; });

  // If a live interval is a physical register, check for interference with any
  // aliases. The interference check implemented here is a bit more conservative
  // than the full interference check below. We allow overlapping live ranges
  // only when one is a copy of the other.
  if (CP.isPhys()) {
    for (const unsigned *AS = tri_->getAliasSet(CP.getDstReg()); *AS; ++AS){
      if (!li_->hasInterval(*AS))
        continue;
      const LiveInterval &LHS = li_->getInterval(*AS);
      LiveInterval::const_iterator LI = LHS.begin();
      for (LiveInterval::const_iterator RI = RHS.begin(), RE = RHS.end();
           RI != RE; ++RI) {
        LI = std::lower_bound(LI, LHS.end(), RI->start);
        // Does LHS have an overlapping live range starting before RI?
        if ((LI != LHS.begin() && LI[-1].end > RI->start) &&
            (RI->start != RI->valno->def ||
             !CP.isCoalescable(li_->getInstructionFromIndex(RI->start)))) {
          DEBUG({
            dbgs() << "\t\tInterference from alias: ";
            LHS.print(dbgs(), tri_);
            dbgs() << "\n\t\tOverlap at " << RI->start << " and no copy.\n";
          });
          return false;
        }

        // Check that LHS ranges beginning in this range are copies.
        for (; LI != LHS.end() && LI->start < RI->end; ++LI) {
          if (LI->start != LI->valno->def ||
              !CP.isCoalescable(li_->getInstructionFromIndex(LI->start))) {
            DEBUG({
              dbgs() << "\t\tInterference from alias: ";
              LHS.print(dbgs(), tri_);
              dbgs() << "\n\t\tDef at " << LI->start << " is not a copy.\n";
            });
            return false;
          }
        }
      }
    }
  }

  // Compute the final value assignment, assuming that the live ranges can be
  // coalesced.
  SmallVector<int, 16> LHSValNoAssignments;
  SmallVector<int, 16> RHSValNoAssignments;
  DenseMap<VNInfo*, VNInfo*> LHSValsDefinedFromRHS;
  DenseMap<VNInfo*, VNInfo*> RHSValsDefinedFromLHS;
  SmallVector<VNInfo*, 16> NewVNInfo;

  LiveInterval &LHS = li_->getOrCreateInterval(CP.getDstReg());
  DEBUG({ dbgs() << "\t\tLHS = "; LHS.print(dbgs(), tri_); dbgs() << "\n"; });

  // Loop over the value numbers of the LHS, seeing if any are defined from
  // the RHS.
  for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
       i != e; ++i) {
    VNInfo *VNI = *i;
    if (VNI->isUnused() || !VNI->isDefByCopy()) // Src not defined by a copy?
      continue;

    // Never join with a register that has EarlyClobber redefs.
    if (VNI->hasRedefByEC())
      return false;

    // DstReg is known to be a register in the LHS interval. If the src is
    // from the RHS interval, we can use its value #.
    if (!CP.isCoalescable(VNI->getCopy()))
      continue;

    // Figure out the value # from the RHS.
    LiveRange *lr = RHS.getLiveRangeContaining(VNI->def.getPrevSlot());
    // The copy could be to an aliased physreg.
    if (!lr) continue;
    LHSValsDefinedFromRHS[VNI] = lr->valno;
  }

  // Loop over the value numbers of the RHS, seeing if any are defined from
  // the LHS.
  for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
       i != e; ++i) {
    VNInfo *VNI = *i;
    if (VNI->isUnused() || !VNI->isDefByCopy()) // Src not defined by a copy?
      continue;

    // Never join with a register that has EarlyClobber redefs.
    if (VNI->hasRedefByEC())
      return false;

    // DstReg is known to be a register in the RHS interval. If the src is
    // from the LHS interval, we can use its value #.
    if (!CP.isCoalescable(VNI->getCopy()))
      continue;

    // Figure out the value # from the LHS.
    LiveRange *lr = LHS.getLiveRangeContaining(VNI->def.getPrevSlot());
    // The copy could be to an aliased physreg.
    if (!lr) continue;
    RHSValsDefinedFromLHS[VNI] = lr->valno;
  }

  LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
  RHSValNoAssignments.resize(RHS.getNumValNums(), -1);
  NewVNInfo.reserve(LHS.getNumValNums() + RHS.getNumValNums());

  for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
       i != e; ++i) {
    VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (LHSValNoAssignments[VN] >= 0 || VNI->isUnused())
      continue;
    ComputeUltimateVN(VNI, NewVNInfo,
                      LHSValsDefinedFromRHS, RHSValsDefinedFromLHS,
                      LHSValNoAssignments, RHSValNoAssignments);
  }
  for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
       i != e; ++i) {
    VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (RHSValNoAssignments[VN] >= 0 || VNI->isUnused())
      continue;
    // If this value number isn't a copy from the LHS, it's a new number.
    if (RHSValsDefinedFromLHS.find(VNI) == RHSValsDefinedFromLHS.end()) {
      NewVNInfo.push_back(VNI);
      RHSValNoAssignments[VN] = NewVNInfo.size()-1;
      continue;
    }

    ComputeUltimateVN(VNI, NewVNInfo,
                      RHSValsDefinedFromLHS, LHSValsDefinedFromRHS,
                      RHSValNoAssignments, LHSValNoAssignments);
  }

  // Armed with the mappings of LHS/RHS values to ultimate values, walk the
  // interval lists to see if these intervals are coalescable.
  LiveInterval::const_iterator I = LHS.begin();
  LiveInterval::const_iterator IE = LHS.end();
  LiveInterval::const_iterator J = RHS.begin();
  LiveInterval::const_iterator JE = RHS.end();

  // Skip ahead until the first place of potential sharing.
  if (I != IE && J != JE) {
    if (I->start < J->start) {
      I = std::upper_bound(I, IE, J->start);
      if (I != LHS.begin()) --I;
    } else if (J->start < I->start) {
      J = std::upper_bound(J, JE, I->start);
      if (J != RHS.begin()) --J;
    }
  }

  while (I != IE && J != JE) {
    // Determine if these two live ranges overlap.
    bool Overlaps;
    if (I->start < J->start) {
      Overlaps = I->end > J->start;
    } else {
      Overlaps = J->end > I->start;
    }

    // If so, check value # info to determine if they are really different.
    if (Overlaps) {
      // If the live range overlap will map to the same value number in the
      // result liverange, we can still coalesce them. If not, we can't.
      if (LHSValNoAssignments[I->valno->id] !=
          RHSValNoAssignments[J->valno->id])
        return false;
      // If it's re-defined by an early clobber somewhere in the live range,
      // then conservatively abort coalescing.
      if (NewVNInfo[LHSValNoAssignments[I->valno->id]]->hasRedefByEC())
        return false;
    }

    if (I->end < J->end)
      ++I;
    else
      ++J;
  }

  // Update kill info. Some live ranges are extended due to copy coalescing.
  for (DenseMap<VNInfo*, VNInfo*>::iterator I = LHSValsDefinedFromRHS.begin(),
         E = LHSValsDefinedFromRHS.end(); I != E; ++I) {
    VNInfo *VNI = I->first;
    unsigned LHSValID = LHSValNoAssignments[VNI->id];
    if (VNI->hasPHIKill())
      NewVNInfo[LHSValID]->setHasPHIKill(true);
  }

  // Update kill info. Some live ranges are extended due to copy coalescing.
  for (DenseMap<VNInfo*, VNInfo*>::iterator I = RHSValsDefinedFromLHS.begin(),
         E = RHSValsDefinedFromLHS.end(); I != E; ++I) {
    VNInfo *VNI = I->first;
    unsigned RHSValID = RHSValNoAssignments[VNI->id];
    if (VNI->hasPHIKill())
      NewVNInfo[RHSValID]->setHasPHIKill(true);
  }

  if (LHSValNoAssignments.empty())
    LHSValNoAssignments.push_back(-1);
  if (RHSValNoAssignments.empty())
    RHSValNoAssignments.push_back(-1);

  // If we get here, we know that we can coalesce the live ranges. Ask the
  // intervals to coalesce themselves now.
  LHS.join(RHS, &LHSValNoAssignments[0], &RHSValNoAssignments[0], NewVNInfo,
           mri_);
  return true;
}

namespace {
  // DepthMBBCompare - Comparison predicate that sorts first based on the loop
  // depth of the basic block (the unsigned), and then on the MBB number.
  struct DepthMBBCompare {
    typedef std::pair<unsigned, MachineBasicBlock*> DepthMBBPair;
    bool operator()(const DepthMBBPair &LHS, const DepthMBBPair &RHS) const {
      // Deeper loops first
      if (LHS.first != RHS.first)
        return LHS.first > RHS.first;

      // Prefer blocks that are more connected in the CFG. This takes care of
      // the most difficult copies first while intervals are short.
      unsigned cl = LHS.second->pred_size() + LHS.second->succ_size();
      unsigned cr = RHS.second->pred_size() + RHS.second->succ_size();
      if (cl != cr)
        return cl > cr;

      // As a last resort, sort by block number.
      return LHS.second->getNumber() < RHS.second->getNumber();
    }
  };
}

void RegisterCoalescer::CopyCoalesceInMBB(MachineBasicBlock *MBB,
                                          std::vector<MachineInstr*> &TryAgain) {
  DEBUG(dbgs() << MBB->getName() << ":\n");

  SmallVector<MachineInstr*, 8> VirtCopies;
  SmallVector<MachineInstr*, 8> PhysCopies;
  SmallVector<MachineInstr*, 8> ImpDefCopies;
  for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
       MII != E;) {
    MachineInstr *Inst = MII++;

    // If this isn't a copy nor an extract_subreg, we can't join intervals.
    unsigned SrcReg, DstReg;
    if (Inst->isCopy()) {
      DstReg = Inst->getOperand(0).getReg();
      SrcReg = Inst->getOperand(1).getReg();
    } else if (Inst->isSubregToReg()) {
      DstReg = Inst->getOperand(0).getReg();
      SrcReg = Inst->getOperand(2).getReg();
    } else
      continue;

    bool SrcIsPhys = TargetRegisterInfo::isPhysicalRegister(SrcReg);
    bool DstIsPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
    if (li_->hasInterval(SrcReg) && li_->getInterval(SrcReg).empty())
      ImpDefCopies.push_back(Inst);
    else if (SrcIsPhys || DstIsPhys)
      PhysCopies.push_back(Inst);
    else
      VirtCopies.push_back(Inst);
  }

  // Try coalescing implicit copies and insert_subreg <undef> first,
  // followed by copies to / from physical registers, then finally copies
  // from virtual registers to virtual registers.
  for (unsigned i = 0, e = ImpDefCopies.size(); i != e; ++i) {
    MachineInstr *TheCopy = ImpDefCopies[i];
    bool Again = false;
    if (!JoinCopy(TheCopy, Again))
      if (Again)
        TryAgain.push_back(TheCopy);
  }
  for (unsigned i = 0, e = PhysCopies.size(); i != e; ++i) {
    MachineInstr *TheCopy = PhysCopies[i];
    bool Again = false;
    if (!JoinCopy(TheCopy, Again))
      if (Again)
        TryAgain.push_back(TheCopy);
  }
  for (unsigned i = 0, e = VirtCopies.size(); i != e; ++i) {
    MachineInstr *TheCopy = VirtCopies[i];
    bool Again = false;
    if (!JoinCopy(TheCopy, Again))
      if (Again)
        TryAgain.push_back(TheCopy);
  }
}

void RegisterCoalescer::joinIntervals() {
  DEBUG(dbgs() << "********** JOINING INTERVALS ***********\n");

  std::vector<MachineInstr*> TryAgainList;
  if (loopInfo->empty()) {
    // If there are no loops in the function, join intervals in function order.
    for (MachineFunction::iterator I = mf_->begin(), E = mf_->end();
         I != E; ++I)
      CopyCoalesceInMBB(I, TryAgainList);
  } else {
    // Otherwise, join intervals in inner loops before other intervals.
    // Unfortunately we can't just iterate over loop hierarchy here because
    // there may be more MBB's than BB's. Collect MBB's for sorting.

    // Join intervals in the function prolog first. We want to join physical
    // registers with virtual registers before the intervals get too long.
    std::vector<std::pair<unsigned, MachineBasicBlock*> > MBBs;
    for (MachineFunction::iterator I = mf_->begin(), E = mf_->end();I != E;++I){
      MachineBasicBlock *MBB = I;
      MBBs.push_back(std::make_pair(loopInfo->getLoopDepth(MBB), I));
    }

    // Sort by loop depth.
    std::sort(MBBs.begin(), MBBs.end(), DepthMBBCompare());

    // Finally, join intervals in loop nest order.
    for (unsigned i = 0, e = MBBs.size(); i != e; ++i)
      CopyCoalesceInMBB(MBBs[i].second, TryAgainList);
  }

  // Joining intervals can allow other intervals to be joined. Iteratively join
  // until we make no progress.
  bool ProgressMade = true;
  while (ProgressMade) {
    ProgressMade = false;

    for (unsigned i = 0, e = TryAgainList.size(); i != e; ++i) {
      MachineInstr *&TheCopy = TryAgainList[i];
      if (!TheCopy)
        continue;

      bool Again = false;
      bool Success = JoinCopy(TheCopy, Again);
      if (Success || !Again) {
        TheCopy = 0;   // Mark this one as done.
        ProgressMade = true;
      }
    }
  }
}

void RegisterCoalescer::releaseMemory() {
  JoinedCopies.clear();
  ReMatCopies.clear();
  ReMatDefs.clear();
}

bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &fn.getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  li_ = &getAnalysis<LiveIntervals>();
  ldv_ = &getAnalysis<LiveDebugVariables>();
  AA = &getAnalysis<AliasAnalysis>();
  loopInfo = &getAnalysis<MachineLoopInfo>();

  DEBUG(dbgs() << "********** SIMPLE REGISTER COALESCING **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  if (VerifyCoalescing)
    mf_->verify(this, "Before register coalescing");

  RegClassInfo.runOnMachineFunction(fn);

  // Join (coalesce) intervals if requested.
  if (EnableJoining) {
    joinIntervals();
    DEBUG({
        dbgs() << "********** INTERVALS POST JOINING **********\n";
        for (LiveIntervals::iterator I = li_->begin(), E = li_->end();
             I != E; ++I){
          I->second->print(dbgs(), tri_);
          dbgs() << "\n";
        }
      });
  }

  // Perform a final pass over the instructions and compute spill weights
  // and remove identity moves.
  SmallVector<unsigned, 4> DeadDefs;
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    MachineBasicBlock* mbb = mbbi;
    for (MachineBasicBlock::iterator mii = mbb->begin(), mie = mbb->end();
         mii != mie; ) {
      MachineInstr *MI = mii;
      if (JoinedCopies.count(MI)) {
        // Delete all coalesced copies.
        bool DoDelete = true;
        assert(MI->isCopyLike() && "Unrecognized copy instruction");
        unsigned SrcReg = MI->getOperand(MI->isSubregToReg() ? 2 : 1).getReg();
        if (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
            MI->getNumOperands() > 2)
          // Do not delete extract_subreg, insert_subreg of physical
          // registers unless the definition is dead. e.g.
          // %DO<def> = INSERT_SUBREG %D0<undef>, %S0<kill>, 1
          // or else the scavenger may complain. LowerSubregs will
          // delete them later.
          DoDelete = false;

        if (MI->allDefsAreDead()) {
          if (TargetRegisterInfo::isVirtualRegister(SrcReg) &&
              li_->hasInterval(SrcReg))
            li_->shrinkToUses(&li_->getInterval(SrcReg));
          DoDelete = true;
        }
        if (!DoDelete) {
          // We need the instruction to adjust liveness, so make it a KILL.
          if (MI->isSubregToReg()) {
            MI->RemoveOperand(3);
            MI->RemoveOperand(1);
          }
          MI->setDesc(tii_->get(TargetOpcode::KILL));
          mii = llvm::next(mii);
        } else {
          li_->RemoveMachineInstrFromMaps(MI);
          mii = mbbi->erase(mii);
          ++numPeep;
        }
        continue;
      }

      // Now check if this is a remat'ed def instruction which is now dead.
      if (ReMatDefs.count(MI)) {
        bool isDead = true;
        for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
          const MachineOperand &MO = MI->getOperand(i);
          if (!MO.isReg())
            continue;
          unsigned Reg = MO.getReg();
          if (!Reg)
            continue;
          if (TargetRegisterInfo::isVirtualRegister(Reg))
            DeadDefs.push_back(Reg);
          if (MO.isDead())
            continue;
          if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
              !mri_->use_nodbg_empty(Reg)) {
            isDead = false;
            break;
          }
        }
        if (isDead) {
          while (!DeadDefs.empty()) {
            unsigned DeadDef = DeadDefs.back();
            DeadDefs.pop_back();
            RemoveDeadDef(li_->getInterval(DeadDef), MI);
          }
          li_->RemoveMachineInstrFromMaps(mii);
          mii = mbbi->erase(mii);
          continue;
        } else
          DeadDefs.clear();
      }

      ++mii;

      // Check for now unnecessary kill flags.
      if (li_->isNotInMIMap(MI)) continue;
      SlotIndex DefIdx = li_->getInstructionIndex(MI).getDefIndex();
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.isKill()) continue;
        unsigned reg = MO.getReg();
        if (!reg || !li_->hasInterval(reg)) continue;
        if (!li_->getInterval(reg).killedAt(DefIdx)) {
          MO.setIsKill(false);
          continue;
        }
        // When leaving a kill flag on a physreg, check if any subregs should
        // remain alive.
        if (!TargetRegisterInfo::isPhysicalRegister(reg))
          continue;
        for (const unsigned *SR = tri_->getSubRegisters(reg);
             unsigned S = *SR; ++SR)
          if (li_->hasInterval(S) && li_->getInterval(S).liveAt(DefIdx))
            MI->addRegisterDefined(S, tri_);
      }
    }
  }

  DEBUG(dump());
  DEBUG(ldv_->dump());
  if (VerifyCoalescing)
    mf_->verify(this, "After register coalescing");
  return true;
}

/// print - Implement the dump method.
void RegisterCoalescer::print(raw_ostream &O, const Module* m) const {
   li_->print(O, m);
}

RegisterCoalescer *llvm::createRegisterCoalescer() {
  return new RegisterCoalescer();
}
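
// A minimal sketch of how a codegen pipeline would schedule this pass
// (hypothetical pass-manager snippet; the exact hook depends on how the target
// machine sets up its pipeline):
//
//   PM.add(createRegisterCoalescer());
//
// Coalescing is typically run after two-address conversion and PHI elimination
// and before register allocation, which matches the pass dependencies declared
// near the top of this file.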