//===-- RegAllocPBQP.h ------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PBQPBuilder interface, for classes which build PBQP
// instances to represent register allocation problems, and the RegAllocPBQP
// interface.
//
//===----------------------------------------------------------------------===//
16 #ifndef LLVM_CODEGEN_REGALLOCPBQP_H
17 #define LLVM_CODEGEN_REGALLOCPBQP_H
19 #include "llvm/CodeGen/MachineFunctionPass.h"
20 #include "llvm/CodeGen/PBQPRAConstraint.h"
21 #include "llvm/CodeGen/PBQP/CostAllocator.h"
22 #include "llvm/CodeGen/PBQP/ReductionRules.h"
23 #include "llvm/Support/ErrorHandling.h"
/// @brief Spill option index.
///
/// Index of the spill option within each node's PBQP cost vector. The
/// 1-based row/column loops in MatrixMetadata skip entry 0, consistent with
/// option 0 being reserved for spilling.
/// (Fix: stripped the "29 "/"30 " line-number prefixes left over from a
/// numbered paste, which made these lines ill-formed C++.)
inline unsigned getSpillOptionIdx() { return 0; }
// NOTE(review): this file reads as a line-numbered paste with interior lines
// dropped -- the embedded "NN " prefixes are original line numbers and the
// numbering is non-contiguous. Comments below describe only the visible
// logic; the dropped lines (closing braces, access specifiers, counter
// increments) must be restored from the original file.
32 /// \brief Metadata to speed allocatability test.
34 /// Keeps track of the number of infinities in each row and column.
35 class MatrixMetadata {
// Copy ctor/assignment declared but never defined (presumably under a
// dropped `private:` on line 36): pre-C++11 idiom to forbid copying.
37 MatrixMetadata(const MatrixMetadata&);
38 void operator=(const MatrixMetadata&);
// Single scan of the cost matrix, skipping row/column 0 (the spill option,
// see getSpillOptionIdx()): records which rows/columns contain an infinite
// cost and the worst (largest) infinity count over any row and any column.
40 MatrixMetadata(const Matrix& M)
41 : WorstRow(0), WorstCol(0),
42 UnsafeRows(new bool[M.getRows() - 1]()),
43 UnsafeCols(new bool[M.getCols() - 1]()) {
45 unsigned* ColCounts = new unsigned[M.getCols() - 1]();
47 for (unsigned i = 1; i < M.getRows(); ++i) {
48 unsigned RowCount = 0;
49 for (unsigned j = 1; j < M.getCols(); ++j) {
50 if (M[i][j] == std::numeric_limits<PBQPNum>::infinity()) {
// NOTE(review): the increments of RowCount and ColCounts[j - 1] (original
// lines 51-52) appear to be dropped from this paste; without them
// WorstRow/WorstCol would always stay 0.
53 UnsafeRows[i - 1] = true;
54 UnsafeCols[j - 1] = true;
// Per the original numbering this runs after the row loop closes (dropped
// braces at 55-56/58): take the worst column total over the whole matrix.
// NOTE(review): no `delete[] ColCounts;` is visible here (gap at 62-69) --
// confirm the original frees it, otherwise each construction leaks.
57 WorstRow = std::max(WorstRow, RowCount);
59 unsigned WorstColCountForCurRow =
60 *std::max_element(ColCounts, ColCounts + M.getCols() - 1);
61 WorstCol = std::max(WorstCol, WorstColCountForCurRow);
// Accessors for the scan results; the Unsafe* arrays are indexed by
// (row-1)/(col-1), i.e. per register option, excluding the spill option.
70 unsigned getWorstRow() const { return WorstRow; }
71 unsigned getWorstCol() const { return WorstCol; }
72 const bool* getUnsafeRows() const { return UnsafeRows; }
73 const bool* getUnsafeCols() const { return UnsafeCols; }
// NOTE(review): the UnsafeRows/UnsafeCols member declarations and any
// destructor fall in the dropped lines (62-69, 77-82); the getters return
// raw `const bool*`, suggesting raw-array members -- verify ownership and
// cleanup against the original file.
76 unsigned WorstRow, WorstCol;
// NOTE(review): interior of class NodeMetadata -- the class header and the
// public:/private: specifiers fall on lines dropped from this paste
// (original ~80-82, 130-ff).
83 typedef std::vector<unsigned> OptionToRegMap;
// Reduction-rule bookkeeping state. NOTE(review): the OptimallyReducible
// enumerator (original line 86, referenced later as
// NodeMetadata::OptimallyReducible) is missing from this paste.
85 typedef enum { Unprocessed,
87 ConservativelyAllocatable,
88 NotProvablyAllocatable } ReductionState;
90 NodeMetadata() : RS(Unprocessed), DeniedOpts(0), OptUnsafeEdges(nullptr){}
// Owns the OptUnsafeEdges array allocated in setup().
91 ~NodeMetadata() { delete[] OptUnsafeEdges; }
// Virtual register this PBQP node represents.
93 void setVReg(unsigned VReg) { this->VReg = VReg; }
94 unsigned getVReg() const { return VReg; }
// Map from cost-vector option index to a register; taken by value and
// moved, so callers can hand over ownership cheaply.
96 void setOptionRegs(OptionToRegMap OptionRegs) {
97 this->OptionRegs = std::move(OptionRegs);
99 const OptionToRegMap& getOptionRegs() const { return OptionRegs; }
// Called when the node enters the graph: option 0 of the cost vector is
// the spill option, hence length - 1 register options to track.
101 void setup(const Vector& Costs) {
102 NumOpts = Costs.getLength() - 1;
103 OptUnsafeEdges = new unsigned[NumOpts]();
106 ReductionState getReductionState() const { return RS; }
107 void setReductionState(ReductionState RS) { this->RS = RS; }
// Fold an adjacent edge's infinity metadata into this node's totals.
// Transpose selects the column-side data when this node is the edge's
// second endpoint.
109 void handleAddEdge(const MatrixMetadata& MD, bool Transpose) {
110 DeniedOpts += Transpose ? MD.getWorstCol() : MD.getWorstRow();
111 const bool* UnsafeOpts =
112 Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
113 for (unsigned i = 0; i < NumOpts; ++i)
114 OptUnsafeEdges[i] += UnsafeOpts[i];
// Exact inverse of handleAddEdge.
117 void handleRemoveEdge(const MatrixMetadata& MD, bool Transpose) {
118 DeniedOpts -= Transpose ? MD.getWorstCol() : MD.getWorstRow();
119 const bool* UnsafeOpts =
120 Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
121 for (unsigned i = 0; i < NumOpts; ++i)
122 OptUnsafeEdges[i] -= UnsafeOpts[i];
// Conservatively allocatable: the adjacent edges cannot possibly deny all
// register options -- either fewer denials than options, or at least one
// option with no unsafe adjacent edge at all.
125 bool isConservativelyAllocatable() const {
126 return (DeniedOpts < NumOpts) ||
127 (std::find(OptUnsafeEdges, OptUnsafeEdges + NumOpts, 0) !=
128 OptUnsafeEdges + NumOpts);
// NOTE(review): the remaining member declarations (RS, NumOpts, DeniedOpts,
// VReg -- original ~131-136) are partially dropped from this paste.
135 unsigned* OptUnsafeEdges;
137 OptionToRegMap OptionRegs;
// NOTE(review): this class is only partially visible -- the embedded "NNN "
// prefixes are original line numbers from a paste, and gaps in that
// numbering are dropped lines (access specifiers, closing braces, `break;`
// lines, several statements). Comments annotate only what is visible.
140 class RegAllocSolverImpl {
// Cost types: the edge-cost matrix is wrapped (MDMatrix) so each instance
// carries the MatrixMetadata used by the allocatability heuristics above.
// NOTE(review): the gaps at original lines 141/143 presumably held access
// specifiers -- confirm against the original file.
142 typedef MDMatrix<MatrixMetadata> RAMatrix;
144 typedef PBQP::Vector RawVector;
145 typedef PBQP::Matrix RawMatrix;
146 typedef PBQP::Vector Vector;
147 typedef RAMatrix Matrix;
148 typedef PBQP::PoolCostAllocator<
149 Vector, PBQP::VectorComparator,
150 Matrix, PBQP::MatrixComparator> CostAllocator;
152 typedef GraphBase::NodeId NodeId;
153 typedef GraphBase::EdgeId EdgeId;
155 typedef RegAlloc::NodeMetadata NodeMetadata;
// No per-edge metadata needed beyond what the cost matrix itself carries.
157 struct EdgeMetadata { };
// Per-function context shared by the whole graph, plus the vreg -> node-id
// map used when building and reading back the PBQP problem.
159 class GraphMetadata {
161 GraphMetadata(MachineFunction &MF,
163 MachineBlockFrequencyInfo &MBFI)
164 : MF(MF), LIS(LIS), MBFI(MBFI) {}
// NOTE(review): the LiveIntervals parameter (original 162) and the MF/LIS
// reference members (166-167) are dropped here; the init list clearly
// references LIS, so they exist in the original.
168 MachineBlockFrequencyInfo &MBFI;
170 void setNodeIdForVReg(unsigned VReg, GraphBase::NodeId NId) {
171 VRegToNodeId[VReg] = NId;
// Returns invalidNodeId() when the vreg has no PBQP node.
174 GraphBase::NodeId getNodeIdForVReg(unsigned VReg) const {
175 auto VRegItr = VRegToNodeId.find(VReg);
176 if (VRegItr == VRegToNodeId.end())
177 return GraphBase::invalidNodeId();
178 return VRegItr->second;
181 void eraseNodeIdForVReg(unsigned VReg) {
182 VRegToNodeId.erase(VReg);
186 DenseMap<unsigned, NodeId> VRegToNodeId;
189 typedef PBQP::Graph<RegAllocSolverImpl> Graph;
191 RegAllocSolverImpl(Graph &G) : G(G) {}
// Apparently the body of solve(): its signature and the Solution local S
// (original ~193-196) are dropped. reduce() picks the node elimination
// order; backpropagate (from ReductionRules.h) then assigns solutions in
// reverse order -- TODO confirm against the original file.
197 S = backpropagate(G, reduce());
// Graph event hooks: keep node metadata and worklists in sync as the
// graph is mutated by the builder/solver.
202 void handleAddNode(NodeId NId) {
203 G.getNodeMetadata(NId).setup(G.getNodeCosts(NId));
205 void handleRemoveNode(NodeId NId) {}
206 void handleSetNodeCosts(NodeId NId, const Vector& newCosts) {}
208 void handleAddEdge(EdgeId EId) {
209 handleReconnectEdge(EId, G.getEdgeNode1Id(EId));
210 handleReconnectEdge(EId, G.getEdgeNode2Id(EId));
213 void handleRemoveEdge(EdgeId EId) {
214 handleDisconnectEdge(EId, G.getEdgeNode1Id(EId));
215 handleDisconnectEdge(EId, G.getEdgeNode2Id(EId));
218 void handleDisconnectEdge(EdgeId EId, NodeId NId) {
219 NodeMetadata& NMd = G.getNodeMetadata(NId);
220 const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
// Transpose flag: use column-side metadata when NId is the edge's
// second endpoint.
221 NMd.handleRemoveEdge(MMd, NId == G.getEdgeNode2Id(EId));
// Degree 3 before this disconnect means <= 2 after it, i.e. the node
// becomes reducible by the locally-optimal rules (see reduce()).
222 if (G.getNodeDegree(NId) == 3) {
223 // This node is becoming optimally reducible.
224 moveToOptimallyReducibleNodes(NId);
225 } else if (NMd.getReductionState() ==
226 NodeMetadata::NotProvablyAllocatable &&
227 NMd.isConservativelyAllocatable()) {
228 // This node just became conservatively allocatable.
229 moveToConservativelyAllocatableNodes(NId);
233 void handleReconnectEdge(EdgeId EId, NodeId NId) {
234 NodeMetadata& NMd = G.getNodeMetadata(NId);
235 const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
236 NMd.handleAddEdge(MMd, NId == G.getEdgeNode2Id(EId));
// Replace this edge's contribution on both endpoints: remove the old
// matrix's metadata, then add the new matrix's.
239 void handleSetEdgeCosts(EdgeId EId, const Matrix& NewCosts) {
240 handleRemoveEdge(EId);
242 NodeId N1Id = G.getEdgeNode1Id(EId);
243 NodeId N2Id = G.getEdgeNode2Id(EId);
244 NodeMetadata& N1Md = G.getNodeMetadata(N1Id);
245 NodeMetadata& N2Md = G.getNodeMetadata(N2Id);
246 const MatrixMetadata& MMd = NewCosts.getMetadata();
// The comparisons spell out the Transpose flag: false for the first
// endpoint, true for the second (same convention as handleReconnectEdge).
247 N1Md.handleAddEdge(MMd, N1Id != G.getEdgeNode1Id(EId));
248 N2Md.handleAddEdge(MMd, N2Id != G.getEdgeNode1Id(EId));
// Worklist maintenance: each node lives in at most one of the three sets
// below, tracked by its ReductionState.
253 void removeFromCurrentSet(NodeId NId) {
254 switch (G.getNodeMetadata(NId).getReductionState()) {
255 case NodeMetadata::Unprocessed: break;
256 case NodeMetadata::OptimallyReducible:
257 assert(OptimallyReducibleNodes.find(NId) !=
258 OptimallyReducibleNodes.end() &&
259 "Node not in optimally reducible set.");
260 OptimallyReducibleNodes.erase(NId);
// NOTE(review): the `break;` lines between cases (original 261, 267)
// are dropped in this paste -- the original does not fall through.
262 case NodeMetadata::ConservativelyAllocatable:
263 assert(ConservativelyAllocatableNodes.find(NId) !=
264 ConservativelyAllocatableNodes.end() &&
265 "Node not in conservatively allocatable set.");
266 ConservativelyAllocatableNodes.erase(NId);
268 case NodeMetadata::NotProvablyAllocatable:
269 assert(NotProvablyAllocatableNodes.find(NId) !=
270 NotProvablyAllocatableNodes.end() &&
271 "Node not in not-provably-allocatable set.");
272 NotProvablyAllocatableNodes.erase(NId);
// The three moveTo* helpers keep set membership and ReductionState in
// lockstep: leave the old set, join the new one, record the new state.
277 void moveToOptimallyReducibleNodes(NodeId NId) {
278 removeFromCurrentSet(NId);
279 OptimallyReducibleNodes.insert(NId);
280 G.getNodeMetadata(NId).setReductionState(
281 NodeMetadata::OptimallyReducible);
284 void moveToConservativelyAllocatableNodes(NodeId NId) {
285 removeFromCurrentSet(NId);
286 ConservativelyAllocatableNodes.insert(NId);
287 G.getNodeMetadata(NId).setReductionState(
288 NodeMetadata::ConservativelyAllocatable);
291 void moveToNotProvablyAllocatableNodes(NodeId NId) {
292 removeFromCurrentSet(NId);
293 NotProvablyAllocatableNodes.insert(NId);
294 G.getNodeMetadata(NId).setReductionState(
295 NodeMetadata::NotProvablyAllocatable);
// Initial worklist population -- the enclosing function's signature
// (original ~297-299) is dropped: classify every node once by degree,
// then by conservative allocatability.
300 for (auto NId : G.nodeIds()) {
301 if (G.getNodeDegree(NId) < 3)
302 moveToOptimallyReducibleNodes(NId);
303 else if (G.getNodeMetadata(NId).isConservativelyAllocatable())
304 moveToConservativelyAllocatableNodes(NId);
// NOTE(review): the `else` line (original 305) preceding this fallback
// appears to be dropped from the paste.
306 moveToNotProvablyAllocatableNodes(NId);
310 // Compute a reduction order for the graph by iteratively applying PBQP
311 // reduction rules. Locally optimal rules are applied whenever possible (R0,
312 // R1, R2). If no locally-optimal rules apply then any conservatively
313 // allocatable node is reduced. Finally, if no conservatively allocatable
314 // node exists then the node with the lowest spill-cost:degree ratio is
316 std::vector<GraphBase::NodeId> reduce() {
317 assert(!G.empty() && "Cannot reduce empty graph.");
319 typedef GraphBase::NodeId NodeId;
320 std::vector<NodeId> NodeStack;
322 // Consume worklists.
324 if (!OptimallyReducibleNodes.empty()) {
325 NodeSet::iterator NItr = OptimallyReducibleNodes.begin();
// NOTE(review): `NodeId NId = *NItr;` (original 326) is dropped here,
// and likewise in the two branches below (originals 348, 358).
327 OptimallyReducibleNodes.erase(NItr);
328 NodeStack.push_back(NId);
// NOTE(review): the case 0/1/2 bodies applying the R0/R1/R2 reduction
// rules (original lines 330-337) are dropped from this paste.
329 switch (G.getNodeDegree(NId)) {
338 default: llvm_unreachable("Not an optimally reducible node.");
340 } else if (!ConservativelyAllocatableNodes.empty()) {
341 // Conservatively allocatable nodes will never spill. For now just
342 // take the first node in the set and push it on the stack. When we
343 // start optimizing more heavily for register preferencing, it may
344 // would be better to push nodes with lower 'expected' or worst-case
345 // register costs first (since early nodes are the most
347 NodeSet::iterator NItr = ConservativelyAllocatableNodes.begin();
349 ConservativelyAllocatableNodes.erase(NItr);
350 NodeStack.push_back(NId);
351 G.disconnectAllNeighborsFromNode(NId);
// Spill-likely branch: heuristically reduce the node with the lowest
// spill-cost-to-degree ratio when nothing better is available.
353 } else if (!NotProvablyAllocatableNodes.empty()) {
354 NodeSet::iterator NItr =
355 std::min_element(NotProvablyAllocatableNodes.begin(),
356 NotProvablyAllocatableNodes.end(),
357 SpillCostComparator(G));
359 NotProvablyAllocatableNodes.erase(NItr);
360 NodeStack.push_back(NId);
361 G.disconnectAllNeighborsFromNode(NId);
// Orders nodes by spill cost (cost-vector entry 0) divided by degree.
// NOTE(review): the `return N1SC < N2SC;`, closing braces, and the Graph
// member (original ~375-380) are dropped. Also note a zero-degree node
// would divide by zero here -- presumably callers only compare nodes
// still connected in the graph; confirm against the original.
369 class SpillCostComparator {
371 SpillCostComparator(const Graph& G) : G(G) {}
372 bool operator()(NodeId N1Id, NodeId N2Id) {
373 PBQPNum N1SC = G.getNodeCosts(N1Id)[0] / G.getNodeDegree(N1Id);
374 PBQPNum N2SC = G.getNodeCosts(N2Id)[0] / G.getNodeDegree(N2Id);
// Reduction worklists, keyed by each node's ReductionState.
382 typedef std::set<NodeId> NodeSet;
383 NodeSet OptimallyReducibleNodes;
384 NodeSet ConservativelyAllocatableNodes;
385 NodeSet NotProvablyAllocatableNodes;
// Concrete PBQP graph type for register allocation: inherits all graph
// behavior from PBQP::Graph instantiated with RegAllocSolverImpl.
// NOTE(review): this paste drops the class's access specifiers and closing
// brace (original ~389, 391, 393-394).
388 class PBQPRAGraph : public PBQP::Graph<RegAllocSolverImpl> {
390 typedef PBQP::Graph<RegAllocSolverImpl> BaseT;
// Takes GraphMetadata by value and forwards it to the base graph.
392 PBQPRAGraph(GraphMetadata Metadata) : BaseT(Metadata) {}
/// @brief Solve the register-allocation PBQP problem for graph G.
///
/// Constructs a RegAllocSolverImpl over G and runs its solve() routine.
// NOTE(review): an empty-graph early return (original lines 396-397) and
// the closing brace appear to be dropped from this paste -- confirm
// against the original file.
395 inline Solution solve(PBQPRAGraph& G) {
398 RegAllocSolverImpl RegAllocSolver(G);
399 return RegAllocSolver.solve();
402 } // namespace RegAlloc
405 /// @brief Create a PBQP register allocator instance.
407 createPBQPRegisterAllocator(char *customPassID = nullptr);
411 #endif /* LLVM_CODEGEN_REGALLOCPBQP_H */