//===-- RegAllocPBQP.h ------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PBQPBuilder interface, for classes which build PBQP
// instances to represent register allocation problems, and the RegAllocPBQP
// interface.
//
//===----------------------------------------------------------------------===//
16 #ifndef LLVM_CODEGEN_REGALLOCPBQP_H
17 #define LLVM_CODEGEN_REGALLOCPBQP_H
19 #include "llvm/CodeGen/MachineFunctionPass.h"
20 #include "llvm/CodeGen/PBQPRAConstraint.h"
21 #include "llvm/CodeGen/PBQP/CostAllocator.h"
22 #include "llvm/CodeGen/PBQP/ReductionRules.h"
23 #include "llvm/Support/ErrorHandling.h"
/// @brief Spill option index.
///
/// Index of the spill option within every node cost vector. It is always 0;
/// real register options start at index 1.
inline unsigned getSpillOptionIdx() { return 0; }
32 /// \brief Metadata to speed allocatability test.
34 /// Keeps track of the number of infinities in each row and column.
35 class MatrixMetadata {
37 MatrixMetadata(const MatrixMetadata&);
38 void operator=(const MatrixMetadata&);
40 MatrixMetadata(const Matrix& M)
41 : WorstRow(0), WorstCol(0),
42 UnsafeRows(new bool[M.getRows() - 1]()),
43 UnsafeCols(new bool[M.getCols() - 1]()) {
45 unsigned* ColCounts = new unsigned[M.getCols() - 1]();
47 for (unsigned i = 1; i < M.getRows(); ++i) {
48 unsigned RowCount = 0;
49 for (unsigned j = 1; j < M.getCols(); ++j) {
50 if (M[i][j] == std::numeric_limits<PBQPNum>::infinity()) {
53 UnsafeRows[i - 1] = true;
54 UnsafeCols[j - 1] = true;
57 WorstRow = std::max(WorstRow, RowCount);
59 unsigned WorstColCountForCurRow =
60 *std::max_element(ColCounts, ColCounts + M.getCols() - 1);
61 WorstCol = std::max(WorstCol, WorstColCountForCurRow);
65 unsigned getWorstRow() const { return WorstRow; }
66 unsigned getWorstCol() const { return WorstCol; }
67 const bool* getUnsafeRows() const { return UnsafeRows.get(); }
68 const bool* getUnsafeCols() const { return UnsafeCols.get(); }
71 unsigned WorstRow, WorstCol;
72 std::unique_ptr<bool[]> UnsafeRows;
73 std::unique_ptr<bool[]> UnsafeCols;
76 /// \brief Holds a vector of the allowed physical regs for a vreg.
77 class AllowedRegVector {
78 friend hash_code hash_value(const AllowedRegVector &);
81 AllowedRegVector() : NumOpts(0), Opts(nullptr) {}
83 AllowedRegVector(const std::vector<unsigned> &OptVec)
84 : NumOpts(OptVec.size()), Opts(new unsigned[NumOpts]) {
85 std::copy(OptVec.begin(), OptVec.end(), Opts.get());
88 AllowedRegVector(const AllowedRegVector &Other)
89 : NumOpts(Other.NumOpts), Opts(new unsigned[NumOpts]) {
90 std::copy(Other.Opts.get(), Other.Opts.get() + NumOpts, Opts.get());
93 AllowedRegVector(AllowedRegVector &&Other)
94 : NumOpts(std::move(Other.NumOpts)), Opts(std::move(Other.Opts)) {}
96 AllowedRegVector& operator=(const AllowedRegVector &Other) {
97 NumOpts = Other.NumOpts;
98 Opts.reset(new unsigned[NumOpts]);
99 std::copy(Other.Opts.get(), Other.Opts.get() + NumOpts, Opts.get());
103 AllowedRegVector& operator=(AllowedRegVector &&Other) {
104 NumOpts = std::move(Other.NumOpts);
105 Opts = std::move(Other.Opts);
109 unsigned size() const { return NumOpts; }
110 unsigned operator[](size_t I) const { return Opts[I]; }
112 bool operator==(const AllowedRegVector &Other) const {
113 if (NumOpts != Other.NumOpts)
115 return std::equal(Opts.get(), Opts.get() + NumOpts, Other.Opts.get());
118 bool operator!=(const AllowedRegVector &Other) const {
119 return !(*this == Other);
124 std::unique_ptr<unsigned[]> Opts;
127 inline hash_code hash_value(const AllowedRegVector &OptRegs) {
128 unsigned *OStart = OptRegs.Opts.get();
129 unsigned *OEnd = OptRegs.Opts.get() + OptRegs.NumOpts;
130 return hash_combine(OptRegs.NumOpts,
131 hash_combine_range(OStart, OEnd));
134 /// \brief Holds graph-level metadata relevent to PBQP RA problems.
135 class GraphMetadata {
137 typedef ValuePool<AllowedRegVector> AllowedRegVecPool;
140 typedef AllowedRegVecPool::PoolRef AllowedRegVecRef;
142 GraphMetadata(MachineFunction &MF,
144 MachineBlockFrequencyInfo &MBFI)
145 : MF(MF), LIS(LIS), MBFI(MBFI) {}
149 MachineBlockFrequencyInfo &MBFI;
151 void setNodeIdForVReg(unsigned VReg, GraphBase::NodeId NId) {
152 VRegToNodeId[VReg] = NId;
155 GraphBase::NodeId getNodeIdForVReg(unsigned VReg) const {
156 auto VRegItr = VRegToNodeId.find(VReg);
157 if (VRegItr == VRegToNodeId.end())
158 return GraphBase::invalidNodeId();
159 return VRegItr->second;
162 void eraseNodeIdForVReg(unsigned VReg) {
163 VRegToNodeId.erase(VReg);
166 AllowedRegVecRef getAllowedRegs(AllowedRegVector Allowed) {
167 return AllowedRegVecs.getValue(std::move(Allowed));
171 DenseMap<unsigned, GraphBase::NodeId> VRegToNodeId;
172 AllowedRegVecPool AllowedRegVecs;
175 /// \brief Holds solver state and other metadata relevant to each PBQP RA node.
178 typedef RegAlloc::AllowedRegVector AllowedRegVector;
180 typedef enum { Unprocessed,
182 ConservativelyAllocatable,
183 NotProvablyAllocatable } ReductionState;
186 : RS(Unprocessed), NumOpts(0), DeniedOpts(0), OptUnsafeEdges(nullptr),
189 // FIXME: Re-implementing default behavior to work around MSVC. Remove once
190 // MSVC synthesizes move constructors properly.
191 NodeMetadata(const NodeMetadata &Other)
192 : RS(Other.RS), NumOpts(Other.NumOpts), DeniedOpts(Other.DeniedOpts),
193 OptUnsafeEdges(new unsigned[NumOpts]), VReg(Other.VReg),
194 AllowedRegs(Other.AllowedRegs) {
195 std::copy(&Other.OptUnsafeEdges[0], &Other.OptUnsafeEdges[NumOpts],
199 // FIXME: Re-implementing default behavior to work around MSVC. Remove once
200 // MSVC synthesizes move constructors properly.
201 NodeMetadata(NodeMetadata &&Other)
202 : RS(Other.RS), NumOpts(Other.NumOpts), DeniedOpts(Other.DeniedOpts),
203 OptUnsafeEdges(std::move(Other.OptUnsafeEdges)), VReg(Other.VReg),
204 AllowedRegs(std::move(Other.AllowedRegs)) {}
206 // FIXME: Re-implementing default behavior to work around MSVC. Remove once
207 // MSVC synthesizes move constructors properly.
208 NodeMetadata& operator=(const NodeMetadata &Other) {
210 NumOpts = Other.NumOpts;
211 DeniedOpts = Other.DeniedOpts;
212 OptUnsafeEdges.reset(new unsigned[NumOpts]);
213 std::copy(Other.OptUnsafeEdges.get(), Other.OptUnsafeEdges.get() + NumOpts,
214 OptUnsafeEdges.get());
216 AllowedRegs = Other.AllowedRegs;
220 // FIXME: Re-implementing default behavior to work around MSVC. Remove once
221 // MSVC synthesizes move constructors properly.
222 NodeMetadata& operator=(NodeMetadata &&Other) {
224 NumOpts = Other.NumOpts;
225 DeniedOpts = Other.DeniedOpts;
226 OptUnsafeEdges = std::move(Other.OptUnsafeEdges);
228 AllowedRegs = std::move(Other.AllowedRegs);
232 void setVReg(unsigned VReg) { this->VReg = VReg; }
233 unsigned getVReg() const { return VReg; }
235 void setAllowedRegs(GraphMetadata::AllowedRegVecRef AllowedRegs) {
236 this->AllowedRegs = std::move(AllowedRegs);
238 const AllowedRegVector& getAllowedRegs() const { return *AllowedRegs; }
240 void setup(const Vector& Costs) {
241 NumOpts = Costs.getLength() - 1;
242 OptUnsafeEdges = std::unique_ptr<unsigned[]>(new unsigned[NumOpts]());
245 ReductionState getReductionState() const { return RS; }
246 void setReductionState(ReductionState RS) { this->RS = RS; }
248 void handleAddEdge(const MatrixMetadata& MD, bool Transpose) {
249 DeniedOpts += Transpose ? MD.getWorstCol() : MD.getWorstRow();
250 const bool* UnsafeOpts =
251 Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
252 for (unsigned i = 0; i < NumOpts; ++i)
253 OptUnsafeEdges[i] += UnsafeOpts[i];
256 void handleRemoveEdge(const MatrixMetadata& MD, bool Transpose) {
257 DeniedOpts -= Transpose ? MD.getWorstCol() : MD.getWorstRow();
258 const bool* UnsafeOpts =
259 Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
260 for (unsigned i = 0; i < NumOpts; ++i)
261 OptUnsafeEdges[i] -= UnsafeOpts[i];
264 bool isConservativelyAllocatable() const {
265 return (DeniedOpts < NumOpts) ||
266 (std::find(&OptUnsafeEdges[0], &OptUnsafeEdges[NumOpts], 0) !=
267 &OptUnsafeEdges[NumOpts]);
274 std::unique_ptr<unsigned[]> OptUnsafeEdges;
276 GraphMetadata::AllowedRegVecRef AllowedRegs;
279 class RegAllocSolverImpl {
281 typedef MDMatrix<MatrixMetadata> RAMatrix;
283 typedef PBQP::Vector RawVector;
284 typedef PBQP::Matrix RawMatrix;
285 typedef PBQP::Vector Vector;
286 typedef RAMatrix Matrix;
287 typedef PBQP::PoolCostAllocator<Vector, Matrix> CostAllocator;
289 typedef GraphBase::NodeId NodeId;
290 typedef GraphBase::EdgeId EdgeId;
292 typedef RegAlloc::NodeMetadata NodeMetadata;
293 struct EdgeMetadata { };
294 typedef RegAlloc::GraphMetadata GraphMetadata;
296 typedef PBQP::Graph<RegAllocSolverImpl> Graph;
298 RegAllocSolverImpl(Graph &G) : G(G) {}
304 S = backpropagate(G, reduce());
309 void handleAddNode(NodeId NId) {
310 G.getNodeMetadata(NId).setup(G.getNodeCosts(NId));
312 void handleRemoveNode(NodeId NId) {}
313 void handleSetNodeCosts(NodeId NId, const Vector& newCosts) {}
315 void handleAddEdge(EdgeId EId) {
316 handleReconnectEdge(EId, G.getEdgeNode1Id(EId));
317 handleReconnectEdge(EId, G.getEdgeNode2Id(EId));
320 void handleRemoveEdge(EdgeId EId) {
321 handleDisconnectEdge(EId, G.getEdgeNode1Id(EId));
322 handleDisconnectEdge(EId, G.getEdgeNode2Id(EId));
325 void handleDisconnectEdge(EdgeId EId, NodeId NId) {
326 NodeMetadata& NMd = G.getNodeMetadata(NId);
327 const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
328 NMd.handleRemoveEdge(MMd, NId == G.getEdgeNode2Id(EId));
329 if (G.getNodeDegree(NId) == 3) {
330 // This node is becoming optimally reducible.
331 moveToOptimallyReducibleNodes(NId);
332 } else if (NMd.getReductionState() ==
333 NodeMetadata::NotProvablyAllocatable &&
334 NMd.isConservativelyAllocatable()) {
335 // This node just became conservatively allocatable.
336 moveToConservativelyAllocatableNodes(NId);
340 void handleReconnectEdge(EdgeId EId, NodeId NId) {
341 NodeMetadata& NMd = G.getNodeMetadata(NId);
342 const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
343 NMd.handleAddEdge(MMd, NId == G.getEdgeNode2Id(EId));
346 void handleSetEdgeCosts(EdgeId EId, const Matrix& NewCosts) {
347 handleRemoveEdge(EId);
349 NodeId N1Id = G.getEdgeNode1Id(EId);
350 NodeId N2Id = G.getEdgeNode2Id(EId);
351 NodeMetadata& N1Md = G.getNodeMetadata(N1Id);
352 NodeMetadata& N2Md = G.getNodeMetadata(N2Id);
353 const MatrixMetadata& MMd = NewCosts.getMetadata();
354 N1Md.handleAddEdge(MMd, N1Id != G.getEdgeNode1Id(EId));
355 N2Md.handleAddEdge(MMd, N2Id != G.getEdgeNode1Id(EId));
360 void removeFromCurrentSet(NodeId NId) {
361 switch (G.getNodeMetadata(NId).getReductionState()) {
362 case NodeMetadata::Unprocessed: break;
363 case NodeMetadata::OptimallyReducible:
364 assert(OptimallyReducibleNodes.find(NId) !=
365 OptimallyReducibleNodes.end() &&
366 "Node not in optimally reducible set.");
367 OptimallyReducibleNodes.erase(NId);
369 case NodeMetadata::ConservativelyAllocatable:
370 assert(ConservativelyAllocatableNodes.find(NId) !=
371 ConservativelyAllocatableNodes.end() &&
372 "Node not in conservatively allocatable set.");
373 ConservativelyAllocatableNodes.erase(NId);
375 case NodeMetadata::NotProvablyAllocatable:
376 assert(NotProvablyAllocatableNodes.find(NId) !=
377 NotProvablyAllocatableNodes.end() &&
378 "Node not in not-provably-allocatable set.");
379 NotProvablyAllocatableNodes.erase(NId);
384 void moveToOptimallyReducibleNodes(NodeId NId) {
385 removeFromCurrentSet(NId);
386 OptimallyReducibleNodes.insert(NId);
387 G.getNodeMetadata(NId).setReductionState(
388 NodeMetadata::OptimallyReducible);
391 void moveToConservativelyAllocatableNodes(NodeId NId) {
392 removeFromCurrentSet(NId);
393 ConservativelyAllocatableNodes.insert(NId);
394 G.getNodeMetadata(NId).setReductionState(
395 NodeMetadata::ConservativelyAllocatable);
398 void moveToNotProvablyAllocatableNodes(NodeId NId) {
399 removeFromCurrentSet(NId);
400 NotProvablyAllocatableNodes.insert(NId);
401 G.getNodeMetadata(NId).setReductionState(
402 NodeMetadata::NotProvablyAllocatable);
407 for (auto NId : G.nodeIds()) {
408 if (G.getNodeDegree(NId) < 3)
409 moveToOptimallyReducibleNodes(NId);
410 else if (G.getNodeMetadata(NId).isConservativelyAllocatable())
411 moveToConservativelyAllocatableNodes(NId);
413 moveToNotProvablyAllocatableNodes(NId);
417 // Compute a reduction order for the graph by iteratively applying PBQP
418 // reduction rules. Locally optimal rules are applied whenever possible (R0,
419 // R1, R2). If no locally-optimal rules apply then any conservatively
420 // allocatable node is reduced. Finally, if no conservatively allocatable
421 // node exists then the node with the lowest spill-cost:degree ratio is
423 std::vector<GraphBase::NodeId> reduce() {
424 assert(!G.empty() && "Cannot reduce empty graph.");
426 typedef GraphBase::NodeId NodeId;
427 std::vector<NodeId> NodeStack;
429 // Consume worklists.
431 if (!OptimallyReducibleNodes.empty()) {
432 NodeSet::iterator NItr = OptimallyReducibleNodes.begin();
434 OptimallyReducibleNodes.erase(NItr);
435 NodeStack.push_back(NId);
436 switch (G.getNodeDegree(NId)) {
445 default: llvm_unreachable("Not an optimally reducible node.");
447 } else if (!ConservativelyAllocatableNodes.empty()) {
448 // Conservatively allocatable nodes will never spill. For now just
449 // take the first node in the set and push it on the stack. When we
450 // start optimizing more heavily for register preferencing, it may
451 // would be better to push nodes with lower 'expected' or worst-case
452 // register costs first (since early nodes are the most
454 NodeSet::iterator NItr = ConservativelyAllocatableNodes.begin();
456 ConservativelyAllocatableNodes.erase(NItr);
457 NodeStack.push_back(NId);
458 G.disconnectAllNeighborsFromNode(NId);
460 } else if (!NotProvablyAllocatableNodes.empty()) {
461 NodeSet::iterator NItr =
462 std::min_element(NotProvablyAllocatableNodes.begin(),
463 NotProvablyAllocatableNodes.end(),
464 SpillCostComparator(G));
466 NotProvablyAllocatableNodes.erase(NItr);
467 NodeStack.push_back(NId);
468 G.disconnectAllNeighborsFromNode(NId);
476 class SpillCostComparator {
478 SpillCostComparator(const Graph& G) : G(G) {}
479 bool operator()(NodeId N1Id, NodeId N2Id) {
480 PBQPNum N1SC = G.getNodeCosts(N1Id)[0] / G.getNodeDegree(N1Id);
481 PBQPNum N2SC = G.getNodeCosts(N2Id)[0] / G.getNodeDegree(N2Id);
489 typedef std::set<NodeId> NodeSet;
490 NodeSet OptimallyReducibleNodes;
491 NodeSet ConservativelyAllocatableNodes;
492 NodeSet NotProvablyAllocatableNodes;
495 class PBQPRAGraph : public PBQP::Graph<RegAllocSolverImpl> {
497 typedef PBQP::Graph<RegAllocSolverImpl> BaseT;
499 PBQPRAGraph(GraphMetadata Metadata) : BaseT(Metadata) {}
502 inline Solution solve(PBQPRAGraph& G) {
505 RegAllocSolverImpl RegAllocSolver(G);
506 return RegAllocSolver.solve();
509 } // namespace RegAlloc
512 /// @brief Create a PBQP register allocator instance.
514 createPBQPRegisterAllocator(char *customPassID = nullptr);
518 #endif /* LLVM_CODEGEN_REGALLOCPBQP_H */