//===-- SpillPlacement.cpp - Optimal Spill Code Placement -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the spill code placement analysis.
//
// Each edge bundle corresponds to a node in a Hopfield network. Constraints on
// basic blocks are weighted by the block frequency and added to become the node
// bias.
//
// Transparent basic blocks have the variable live through, but don't care if it
// is spilled or in a register. These blocks become connections in the Hopfield
// network, again weighted by block frequency.
//
// The Hopfield network minimizes (possibly locally) its energy function:
//
//   E = -sum_n V_n * ( B_n + sum_{n, m linked by b} V_m * F_b )
//
// The energy function represents the expected spill code execution frequency,
// or the cost of spilling. This is a Lyapunov function which never increases
// when a node is updated. It is guaranteed to converge to a local minimum.
//
//===----------------------------------------------------------------------===//
30 #define DEBUG_TYPE "spillplacement"
31 #include "SpillPlacement.h"
32 #include "llvm/ADT/BitVector.h"
33 #include "llvm/CodeGen/EdgeBundles.h"
34 #include "llvm/CodeGen/MachineBasicBlock.h"
35 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
36 #include "llvm/CodeGen/MachineFunction.h"
37 #include "llvm/CodeGen/MachineLoopInfo.h"
38 #include "llvm/CodeGen/Passes.h"
39 #include "llvm/Support/Debug.h"
40 #include "llvm/Support/Format.h"
44 char SpillPlacement::ID = 0;
45 INITIALIZE_PASS_BEGIN(SpillPlacement, "spill-code-placement",
46 "Spill Code Placement Analysis", true, true)
47 INITIALIZE_PASS_DEPENDENCY(EdgeBundles)
48 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
49 INITIALIZE_PASS_END(SpillPlacement, "spill-code-placement",
50 "Spill Code Placement Analysis", true, true)
52 char &llvm::SpillPlacementID = SpillPlacement::ID;
54 void SpillPlacement::getAnalysisUsage(AnalysisUsage &AU) const {
56 AU.addRequired<MachineBlockFrequencyInfo>();
57 AU.addRequiredTransitive<EdgeBundles>();
58 AU.addRequiredTransitive<MachineLoopInfo>();
59 MachineFunctionPass::getAnalysisUsage(AU);
62 /// Decision threshold. A node gets the output value 0 if the weighted sum of
63 /// its inputs falls in the open interval (-Threshold;Threshold).
64 static const BlockFrequency Threshold = 2;
66 /// Node - Each edge bundle corresponds to a Hopfield node.
68 /// The node contains precomputed frequency data that only depends on the CFG,
69 /// but Bias and Links are computed each time placeSpills is called.
71 /// The node Value is positive when the variable should be in a register. The
72 /// value can change when linked nodes change, but convergence is very fast
73 /// because all weights are positive.
75 struct SpillPlacement::Node {
76 /// BiasN - Sum of blocks that prefer a spill.
78 /// BiasP - Sum of blocks that prefer a register.
81 /// Value - Output value of this node computed from the Bias and links.
82 /// This is always on of the values {-1, 0, 1}. A positive number means the
83 /// variable should go in a register through this bundle.
86 typedef SmallVector<std::pair<BlockFrequency, unsigned>, 4> LinkVector;
88 /// Links - (Weight, BundleNo) for all transparent blocks connecting to other
89 /// bundles. The weights are all positive block frequencies.
92 /// SumLinkWeights - Cached sum of the weights of all links + ThresHold.
93 BlockFrequency SumLinkWeights;
95 /// preferReg - Return true when this node prefers to be in a register.
96 bool preferReg() const {
97 // Undecided nodes (Value==0) go on the stack.
101 /// mustSpill - Return True if this node is so biased that it must spill.
102 bool mustSpill() const {
103 // We must spill if Bias < -sum(weights) or the MustSpill flag was set.
104 // BiasN is saturated when MustSpill is set, make sure this still returns
105 // true when the RHS saturates. Note that SumLinkWeights includes Threshold.
106 return BiasN >= BiasP + SumLinkWeights;
109 /// clear - Reset per-query data, but preserve frequencies that only depend on
112 BiasN = BiasP = Value = 0;
113 SumLinkWeights = Threshold;
117 /// addLink - Add a link to bundle b with weight w.
118 void addLink(unsigned b, BlockFrequency w) {
119 // Update cached sum.
122 // There can be multiple links to the same bundle, add them up.
123 for (LinkVector::iterator I = Links.begin(), E = Links.end(); I != E; ++I)
124 if (I->second == b) {
128 // This must be the first link to b.
129 Links.push_back(std::make_pair(w, b));
132 /// addBias - Bias this node.
133 void addBias(BlockFrequency freq, BorderConstraint direction) {
144 BiasN = BlockFrequency::getMaxFrequency();
149 /// update - Recompute Value from Bias and Links. Return true when node
150 /// preference changes.
151 bool update(const Node nodes[]) {
152 // Compute the weighted sum of inputs.
153 BlockFrequency SumN = BiasN;
154 BlockFrequency SumP = BiasP;
155 for (LinkVector::iterator I = Links.begin(), E = Links.end(); I != E; ++I) {
156 if (nodes[I->second].Value == -1)
158 else if (nodes[I->second].Value == 1)
162 // Each weighted sum is going to be less than the total frequency of the
163 // bundle. Ideally, we should simply set Value = sign(SumP - SumN), but we
164 // will add a dead zone around 0 for two reasons:
166 // 1. It avoids arbitrary bias when all links are 0 as is possible during
167 // initial iterations.
168 // 2. It helps tame rounding errors when the links nominally sum to 0.
170 bool Before = preferReg();
171 if (SumN >= SumP + Threshold)
173 else if (SumP >= SumN + Threshold)
177 return Before != preferReg();
181 bool SpillPlacement::runOnMachineFunction(MachineFunction &mf) {
183 bundles = &getAnalysis<EdgeBundles>();
184 loops = &getAnalysis<MachineLoopInfo>();
186 assert(!nodes && "Leaking node array");
187 nodes = new Node[bundles->getNumBundles()];
189 // Compute total ingoing and outgoing block frequencies for all bundles.
190 BlockFrequencies.resize(mf.getNumBlockIDs());
191 MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
192 for (MachineFunction::iterator I = mf.begin(), E = mf.end(); I != E; ++I) {
193 unsigned Num = I->getNumber();
194 BlockFrequencies[Num] = MBFI->getBlockFreq(I);
197 // We never change the function.
201 void SpillPlacement::releaseMemory() {
206 /// activate - mark node n as active if it wasn't already.
207 void SpillPlacement::activate(unsigned n) {
208 if (ActiveNodes->test(n))
213 // Very large bundles usually come from big switches, indirect branches,
214 // landing pads, or loops with many 'continue' statements. It is difficult to
215 // allocate registers when so many different blocks are involved.
217 // Give a small negative bias to large bundles such that a substantial
218 // fraction of the connected blocks need to be interested before we consider
219 // expanding the region through the bundle. This helps compile time by
220 // limiting the number of blocks visited and the number of links in the
222 if (bundles->getBlocks(n).size() > 100) {
224 nodes[n].BiasN = (MBFI->getEntryFreq() / 16);
229 /// addConstraints - Compute node biases and weights from a set of constraints.
230 /// Set a bit in NodeMask for each active node.
231 void SpillPlacement::addConstraints(ArrayRef<BlockConstraint> LiveBlocks) {
232 for (ArrayRef<BlockConstraint>::iterator I = LiveBlocks.begin(),
233 E = LiveBlocks.end(); I != E; ++I) {
234 BlockFrequency Freq = BlockFrequencies[I->Number];
237 if (I->Entry != DontCare) {
238 unsigned ib = bundles->getBundle(I->Number, 0);
240 nodes[ib].addBias(Freq, I->Entry);
243 // Live-out from block?
244 if (I->Exit != DontCare) {
245 unsigned ob = bundles->getBundle(I->Number, 1);
247 nodes[ob].addBias(Freq, I->Exit);
252 /// addPrefSpill - Same as addConstraints(PrefSpill)
253 void SpillPlacement::addPrefSpill(ArrayRef<unsigned> Blocks, bool Strong) {
254 for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
256 BlockFrequency Freq = BlockFrequencies[*I];
259 unsigned ib = bundles->getBundle(*I, 0);
260 unsigned ob = bundles->getBundle(*I, 1);
263 nodes[ib].addBias(Freq, PrefSpill);
264 nodes[ob].addBias(Freq, PrefSpill);
268 void SpillPlacement::addLinks(ArrayRef<unsigned> Links) {
269 for (ArrayRef<unsigned>::iterator I = Links.begin(), E = Links.end(); I != E;
271 unsigned Number = *I;
272 unsigned ib = bundles->getBundle(Number, 0);
273 unsigned ob = bundles->getBundle(Number, 1);
275 // Ignore self-loops.
280 if (nodes[ib].Links.empty() && !nodes[ib].mustSpill())
281 Linked.push_back(ib);
282 if (nodes[ob].Links.empty() && !nodes[ob].mustSpill())
283 Linked.push_back(ob);
284 BlockFrequency Freq = BlockFrequencies[Number];
285 nodes[ib].addLink(ob, Freq);
286 nodes[ob].addLink(ib, Freq);
290 bool SpillPlacement::scanActiveBundles() {
292 RecentPositive.clear();
293 for (int n = ActiveNodes->find_first(); n>=0; n = ActiveNodes->find_next(n)) {
294 nodes[n].update(nodes);
295 // A node that must spill, or a node without any links is not going to
296 // change its value ever again, so exclude it from iterations.
297 if (nodes[n].mustSpill())
299 if (!nodes[n].Links.empty())
301 if (nodes[n].preferReg())
302 RecentPositive.push_back(n);
304 return !RecentPositive.empty();
307 /// iterate - Repeatedly update the Hopfield nodes until stability or the
308 /// maximum number of iterations is reached.
309 /// @param Linked - Numbers of linked nodes that need updating.
310 void SpillPlacement::iterate() {
311 // First update the recently positive nodes. They have likely received new
312 // negative bias that will turn them off.
313 while (!RecentPositive.empty())
314 nodes[RecentPositive.pop_back_val()].update(nodes);
319 // Run up to 10 iterations. The edge bundle numbering is closely related to
320 // basic block numbering, so there is a strong tendency towards chains of
321 // linked nodes with sequential numbers. By scanning the linked nodes
322 // backwards and forwards, we make it very likely that a single node can
323 // affect the entire network in a single iteration. That means very fast
324 // convergence, usually in a single iteration.
325 for (unsigned iteration = 0; iteration != 10; ++iteration) {
326 // Scan backwards, skipping the last node which was just updated.
327 bool Changed = false;
328 for (SmallVectorImpl<unsigned>::const_reverse_iterator I =
329 llvm::next(Linked.rbegin()), E = Linked.rend(); I != E; ++I) {
331 if (nodes[n].update(nodes)) {
333 if (nodes[n].preferReg())
334 RecentPositive.push_back(n);
337 if (!Changed || !RecentPositive.empty())
340 // Scan forwards, skipping the first node which was just updated.
342 for (SmallVectorImpl<unsigned>::const_iterator I =
343 llvm::next(Linked.begin()), E = Linked.end(); I != E; ++I) {
345 if (nodes[n].update(nodes)) {
347 if (nodes[n].preferReg())
348 RecentPositive.push_back(n);
351 if (!Changed || !RecentPositive.empty())
356 void SpillPlacement::prepare(BitVector &RegBundles) {
358 RecentPositive.clear();
359 // Reuse RegBundles as our ActiveNodes vector.
360 ActiveNodes = &RegBundles;
361 ActiveNodes->clear();
362 ActiveNodes->resize(bundles->getNumBundles());
366 SpillPlacement::finish() {
367 assert(ActiveNodes && "Call prepare() first");
369 // Write preferences back to ActiveNodes.
371 for (int n = ActiveNodes->find_first(); n>=0; n = ActiveNodes->find_next(n))
372 if (!nodes[n].preferReg()) {
373 ActiveNodes->reset(n);