1 //===-- InterferenceCache.cpp - Caching per-block interference ---------*--===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // InterferenceCache remembers per-block interference in LiveIntervalUnions.
12 //===----------------------------------------------------------------------===//
14 #define DEBUG_TYPE "regalloc"
15 #include "InterferenceCache.h"
16 #include "llvm/Target/TargetRegisterInfo.h"
17 #include "llvm/Support/ErrorHandling.h"
18 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
22 // Static member used for null interference cursors.
// NOTE(review): a default-constructed BlockInterference holds invalid
// First/Last SlotIndexes, so a Cursor aimed at this object reports "no
// interference" for every block — presumably this lets callers skip null
// checks on cursors that have no cache entry; confirm against Cursor's users.
23 InterferenceCache::BlockInterference InterferenceCache::Cursor::NoInterference;
// init - Prepare the cache for a new machine function: size the
// PhysReg->entry map to the target's register count (all slots 0, i.e. no
// register owns a cache entry yet) and clear every round-robin cache entry.
// NOTE(review): this excerpt elides part of the signature and body — the loop
// passes 'indexes' and 'lis', and the body reads member TRI, so the full
// parameter list (SlotIndexes*, LiveIntervals*) and the member assignments
// (MF/LIUArray/TRI/...) are presumably on lines not shown here; verify
// against the complete file.
25 void InterferenceCache::init(MachineFunction *mf,
26                              LiveIntervalUnion *liuarray,
29                              const TargetRegisterInfo *tri) {
// Map every physical register to entry slot 0; get() checks the slot's
// cached PhysReg before trusting the mapping, so a stale 0 is harmless.
33   PhysRegEntries.assign(TRI->getNumRegs(), 0);
34   for (unsigned i = 0; i != CacheEntries; ++i)
35     Entries[i].clear(mf, indexes, lis);
// get - Return the cache Entry for PhysReg.
// Fast path: if PhysRegEntries already maps PhysReg to an entry caching that
// same register, revalidate it if the underlying LiveIntervalUnions changed
// and hand it back.  Slow path: advance the round-robin pointer and scan for
// an entry with no outstanding Cursor references to recycle.
// NOTE(review): several lines are elided in this excerpt (the fast-path
// return, the round-robin wrap-around reset, the in-use-entry skip logic, and
// the closing braces/returns of the scan loop) — do not infer control flow
// solely from what is shown here.
38 InterferenceCache::Entry *InterferenceCache::get(unsigned PhysReg) {
39   unsigned E = PhysRegEntries[PhysReg];
40   if (E < CacheEntries && Entries[E].getPhysReg() == PhysReg) {
// Cached mapping is still for this register; refresh its tags if any
// LiveIntervalUnion changed underneath it.
41     if (!Entries[E].valid(LIUArray, TRI))
42       Entries[E].revalidate();
45   // No valid entry exists, pick the next round-robin entry.
47   if (++RoundRobin == CacheEntries)
49   for (unsigned i = 0; i != CacheEntries; ++i) {
50     // Skip entries that are in use.
51     if (Entries[E].hasRefs()) {
52       if (++E == CacheEntries)
// Found a free slot: rebuild it for PhysReg and remember the mapping so the
// next lookup takes the fast path.
56     Entries[E].reset(PhysReg, LIUArray, TRI, MF);
57     PhysRegEntries[PhysReg] = E;
// Reached only if every cache entry is pinned by a live Cursor.
60   llvm_unreachable("Ran out of interference cache entries.");
63 /// revalidate - LIU contents have changed, update tags.
// Cheaper than reset(): keeps the alias list but re-reads each union's
// version tag and drops cached iterator state so update() restarts its scan.
// NOTE(review): the block-entry invalidation mentioned by the comment below
// (presumably a tag bump) is on a line elided from this excerpt.
64 void InterferenceCache::Entry::revalidate() {
65   // Invalidate all block entries.
67   // Invalidate all iterators.
// An invalid PrevPos forces update() to re-seek with find() instead of
// advanceTo() — see the PrevPos check in update().
68   PrevPos = SlotIndex();
// Record the current tag of every aliased LiveIntervalUnion so valid()
// stops reporting them as changed.
69   for (unsigned i = 0, e = Aliases.size(); i != e; ++i)
70     Aliases[i].second = Aliases[i].first->getTag();
// reset - Repurpose this cache entry for a new physical register.  Rebuilds
// the per-block array, the list of (alias LIU, tag) pairs for every register
// overlapping physReg, and one map iterator per alias.
// Precondition: no Cursor may still reference this entry.
// NOTE(review): this excerpt elides some statements (e.g. storing physReg
// into the PhysReg member, clearing Aliases, and sizing Iters before the
// setMap loop) — the visible reads of PhysReg below rely on those elided
// lines.
73 void InterferenceCache::Entry::reset(unsigned physReg,
74                                      LiveIntervalUnion *LIUArray,
75                                      const TargetRegisterInfo *TRI,
76                                      const MachineFunction *MF) {
77   assert(!hasRefs() && "Cannot reset cache entry with references");
78   // LIU's changed, invalidate cache.
// One BlockInterference slot per basic block, indexed by block number.
81   Blocks.resize(MF->getNumBlockIDs());
// getOverlaps() yields a 0-terminated list of all registers aliasing
// PhysReg; snapshot each union's tag so valid() can detect later changes.
83   for (const uint16_t *AS = TRI->getOverlaps(PhysReg); *AS; ++AS) {
84     LiveIntervalUnion *LIU = LIUArray + *AS;
85     Aliases.push_back(std::make_pair(LIU, LIU->getTag()));
// Invalid PrevPos => update() must position iterators with find().
89   PrevPos = SlotIndex();
90   unsigned e = Aliases.size();
92   for (unsigned i = 0; i != e; ++i)
93     Iters[i].setMap(Aliases[i].first->getMap());
// valid - Check whether this entry is still usable for its register: the
// current overlap set of PhysReg must match the cached Aliases list
// pair-for-pair, and no aliased LiveIntervalUnion may have changed since its
// tag was recorded.
// NOTE(review): the early 'return false' bodies for both if-statements and
// the final return (presumably 'return i == e;', rejecting a too-long cached
// list) are elided from this excerpt.
96 bool InterferenceCache::Entry::valid(LiveIntervalUnion *LIUArray,
97                                      const TargetRegisterInfo *TRI) {
98   unsigned i = 0, e = Aliases.size();
99   for (const uint16_t *AS = TRI->getOverlaps(PhysReg); *AS; ++AS, ++i) {
100     LiveIntervalUnion *LIU = LIUArray + *AS;
// Overlap list longer than, or different from, the cached alias list.
101     if (i == e || Aliases[i].first != LIU)
// Union contents were modified since the tag snapshot.
103     if (LIU->changedSince(Aliases[i].second))
// update - Compute the cached interference summary (BlockInterference
// First/Last slots) for block MBBNum, merging two sources: segments from the
// aliased LiveIntervalUnions and register-mask operands (e.g. call clobbers)
// recorded by LiveIntervals.  When a block has no interference at all, the
// loop continues into the following block so the next query is precomputed.
// NOTE(review): this excerpt is heavily elided (loop braces, 'continue'/
// 'break' statements, the PrevPos bookkeeping, the enclosing for(;;) around
// the first-interference scan, and the function's tail past the last visible
// line are all missing).  The comments below annotate only what is shown.
109 void InterferenceCache::Entry::update(unsigned MBBNum) {
110   SlotIndex Start, Stop;
111   tie(Start, Stop) = Indexes->getMBBRange(MBBNum);
113   // Use advanceTo only when possible.
114   if (PrevPos != Start) {
// Moving backwards (or starting fresh after reset/revalidate): must re-seek
// every alias iterator with find(); advanceTo() only moves forward.
115     if (!PrevPos.isValid() || Start < PrevPos)
116       for (unsigned i = 0, e = Iters.size(); i != e; ++i)
117         Iters[i].find(Start);
// Moving forward: the cheaper advanceTo() suffices.
119       for (unsigned i = 0, e = Iters.size(); i != e; ++i)
120         Iters[i].advanceTo(Start);
124   MachineFunction::const_iterator MFI = MF->getBlockNumbered(MBBNum);
125   BI = &Blocks[MBBNum];
126   ArrayRef<SlotIndex> RegMaskSlots;
127   ArrayRef<const uint32_t*> RegMaskBits;
// Invalidate both bounds; they are (re)established below.
130     BI->First = BI->Last = SlotIndex();
132     // Check for first interference.
// Earliest segment start among all alias iterators becomes BI->First.
133     for (unsigned i = 0, e = Iters.size(); i != e; ++i) {
137       SlotIndex StartI = I.start();
140       if (!BI->First.isValid() || StartI < BI->First)
144     // Also check for register mask interference.
145     RegMaskSlots = LIS->getRegMaskSlotsInBlock(MBBNum);
146     RegMaskBits = LIS->getRegMaskBitsInBlock(MBBNum);
// Only regmasks before the earliest LIU interference (or block end) matter.
147     SlotIndex Limit = BI->First.isValid() ? BI->First : Stop;
148     for (unsigned i = 0, e = RegMaskSlots.size();
149          i != e && RegMaskSlots[i] < Limit; ++i)
150       if (MachineOperand::clobbersPhysReg(RegMaskBits[i], PhysReg)) {
151         // Register mask i clobbers PhysReg before the LIU interference.
152         BI->First = RegMaskSlots[i];
// Interference found in this block — stop the precompute walk.
157     if (BI->First.isValid())
160     // No interference in this block? Go ahead and precompute the next block.
161     if (++MFI == MF->end())
163     MBBNum = MFI->getNumber();
164     BI = &Blocks[MBBNum];
167     tie(Start, Stop) = Indexes->getMBBRange(MBBNum);
170   // Check for last interference in block.
171   for (unsigned i = 0, e = Iters.size(); i != e; ++i) {
// Segment lies entirely beyond this block.
173     if (!I.valid() || I.start() >= Stop)
// Backup marks iterators advanced past the block so PrevPos stays
// consistent (rewind handling is on elided lines).
176     bool Backup = !I.valid() || I.start() >= Stop;
// Latest segment stop among the aliases becomes BI->Last.
179       SlotIndex StopI = I.stop();
180       if (!BI->Last.isValid() || StopI > BI->Last)
186   // Also check for register mask interference.
// Scan regmasks from the end of the block backwards, down to the latest LIU
// interference (or block start).
187   SlotIndex Limit = BI->Last.isValid() ? BI->Last : Start;
188   for (unsigned i = RegMaskSlots.size();
189        i && RegMaskSlots[i-1].getDeadSlot() > Limit; --i)
190     if (MachineOperand::clobbersPhysReg(RegMaskBits[i-1], PhysReg)) {
191       // Register mask i-1 clobbers PhysReg after the LIU interference.
192       // Model the regmask clobber as a dead def.
193       BI->Last = RegMaskSlots[i-1].getDeadSlot();