//===-- RegAllocPBQP.h ------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PBQPBuilder interface, for classes which build PBQP
// instances to represent register allocation problems, and the RegAllocPBQP
// interface.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_REGALLOCPBQP_H
#define LLVM_CODEGEN_REGALLOCPBQP_H

#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/PBQP/CostAllocator.h"
#include "llvm/CodeGen/PBQP/ReductionRules.h"
#include "llvm/CodeGen/PBQPRAConstraint.h"
#include "llvm/Support/ErrorHandling.h"

namespace llvm {

class LiveIntervals;
class MachineBlockFrequencyInfo;
class raw_ostream;

namespace PBQP {
namespace RegAlloc {

/// @brief Spill option index.
inline unsigned getSpillOptionIdx() { return 0; }

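// By convention a node's cost vector holds the spill option at index 0 and one
// entry per allowed physical register starting at index 1. The metadata
// classes below rely on this layout (e.g. the matrix scans start at row and
// column 1, skipping the spill entry).
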
/// \brief Metadata to speed allocatability test.
///
/// Keeps track of the number of infinities in each row and column.
class MatrixMetadata {
private:
  MatrixMetadata(const MatrixMetadata&);
  void operator=(const MatrixMetadata&);
public:
  MatrixMetadata(const Matrix& M)
    : WorstRow(0), WorstCol(0),
      UnsafeRows(new bool[M.getRows() - 1]()),
      UnsafeCols(new bool[M.getCols() - 1]()) {

    unsigned* ColCounts = new unsigned[M.getCols() - 1]();

    for (unsigned i = 1; i < M.getRows(); ++i) {
      unsigned RowCount = 0;
      for (unsigned j = 1; j < M.getCols(); ++j) {
        if (M[i][j] == std::numeric_limits<PBQPNum>::infinity()) {
          ++RowCount;
          ++ColCounts[j - 1];
          UnsafeRows[i - 1] = true;
          UnsafeCols[j - 1] = true;
        }
      }
      WorstRow = std::max(WorstRow, RowCount);
    }
    unsigned WorstColCountForCurRow =
      *std::max_element(ColCounts, ColCounts + M.getCols() - 1);
    WorstCol = std::max(WorstCol, WorstColCountForCurRow);
    delete[] ColCounts;
  }

  unsigned getWorstRow() const { return WorstRow; }
  unsigned getWorstCol() const { return WorstCol; }
  const bool* getUnsafeRows() const { return UnsafeRows.get(); }
  const bool* getUnsafeCols() const { return UnsafeCols.get(); }

private:
  unsigned WorstRow, WorstCol;
  std::unique_ptr<bool[]> UnsafeRows;
  std::unique_ptr<bool[]> UnsafeCols;
};

/// \brief Holds a vector of the allowed physical regs for a vreg.
class AllowedRegVector {
  friend hash_code hash_value(const AllowedRegVector &);
public:

  AllowedRegVector() : NumOpts(0), Opts(nullptr) {}

  AllowedRegVector(const std::vector<unsigned> &OptVec)
    : NumOpts(OptVec.size()), Opts(new unsigned[NumOpts]) {
    std::copy(OptVec.begin(), OptVec.end(), Opts.get());
  }

  AllowedRegVector(const AllowedRegVector &Other)
    : NumOpts(Other.NumOpts), Opts(new unsigned[NumOpts]) {
    std::copy(Other.Opts.get(), Other.Opts.get() + NumOpts, Opts.get());
  }

  AllowedRegVector(AllowedRegVector &&Other)
    : NumOpts(std::move(Other.NumOpts)), Opts(std::move(Other.Opts)) {}

  AllowedRegVector& operator=(const AllowedRegVector &Other) {
    NumOpts = Other.NumOpts;
    Opts.reset(new unsigned[NumOpts]);
    std::copy(Other.Opts.get(), Other.Opts.get() + NumOpts, Opts.get());
    return *this;
  }

  AllowedRegVector& operator=(AllowedRegVector &&Other) {
    NumOpts = std::move(Other.NumOpts);
    Opts = std::move(Other.Opts);
    return *this;
  }

  unsigned size() const { return NumOpts; }
  unsigned operator[](size_t I) const { return Opts[I]; }

  bool operator==(const AllowedRegVector &Other) const {
    if (NumOpts != Other.NumOpts)
      return false;
    return std::equal(Opts.get(), Opts.get() + NumOpts, Other.Opts.get());
  }

  bool operator!=(const AllowedRegVector &Other) const {
    return !(*this == Other);
  }

private:
  unsigned NumOpts;
  std::unique_ptr<unsigned[]> Opts;
};

inline hash_code hash_value(const AllowedRegVector &OptRegs) {
  unsigned *OStart = OptRegs.Opts.get();
  unsigned *OEnd = OptRegs.Opts.get() + OptRegs.NumOpts;
  return hash_combine(OptRegs.NumOpts,
                      hash_combine_range(OStart, OEnd));
}

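// hash_value lets equal AllowedRegVectors be uniqued through the ValuePool
// held by GraphMetadata below, so nodes with identical allowed-register sets
// can share a single allocation.
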
/// \brief Holds graph-level metadata relevant to PBQP RA problems.
class GraphMetadata {
private:
  typedef ValuePool<AllowedRegVector> AllowedRegVecPool;
public:

  typedef AllowedRegVecPool::PoolRef AllowedRegVecRef;

  GraphMetadata(MachineFunction &MF,
                LiveIntervals &LIS,
                MachineBlockFrequencyInfo &MBFI)
    : MF(MF), LIS(LIS), MBFI(MBFI) {}

  MachineFunction &MF;
  LiveIntervals &LIS;
  MachineBlockFrequencyInfo &MBFI;

  void setNodeIdForVReg(unsigned VReg, GraphBase::NodeId NId) {
    VRegToNodeId[VReg] = NId;
  }

  GraphBase::NodeId getNodeIdForVReg(unsigned VReg) const {
    auto VRegItr = VRegToNodeId.find(VReg);
    if (VRegItr == VRegToNodeId.end())
      return GraphBase::invalidNodeId();
    return VRegItr->second;
  }

  void eraseNodeIdForVReg(unsigned VReg) {
    VRegToNodeId.erase(VReg);
  }

  AllowedRegVecRef getAllowedRegs(AllowedRegVector Allowed) {
    return AllowedRegVecs.getValue(std::move(Allowed));
  }

private:
  DenseMap<unsigned, GraphBase::NodeId> VRegToNodeId;
  AllowedRegVecPool AllowedRegVecs;
};

/// \brief Holds solver state and other metadata relevant to each PBQP RA node.
class NodeMetadata {
public:
  typedef RegAlloc::AllowedRegVector AllowedRegVector;

  // The node's reduction state. The order in this enum is important,
  // as it is assumed nodes can only progress up (i.e. towards being
  // optimally reducible) when reducing the graph.
  typedef enum {
    Unprocessed,
    NotProvablyAllocatable,
    ConservativelyAllocatable,
    OptimallyReducible
  } ReductionState;

  NodeMetadata()
    : RS(Unprocessed), NumOpts(0), DeniedOpts(0), OptUnsafeEdges(nullptr),
      VReg(0)
#ifndef NDEBUG
      , everConservativelyAllocatable(false)
#endif
      {}

  // FIXME: Re-implementing default behavior to work around MSVC. Remove once
  // MSVC synthesizes move constructors properly.
  NodeMetadata(const NodeMetadata &Other)
    : RS(Other.RS), NumOpts(Other.NumOpts), DeniedOpts(Other.DeniedOpts),
      OptUnsafeEdges(new unsigned[NumOpts]), VReg(Other.VReg),
      AllowedRegs(Other.AllowedRegs)
#ifndef NDEBUG
      , everConservativelyAllocatable(Other.everConservativelyAllocatable)
#endif
  {
    if (NumOpts > 0) {
      std::copy(&Other.OptUnsafeEdges[0], &Other.OptUnsafeEdges[NumOpts],
                &OptUnsafeEdges[0]);
    }
  }

  // FIXME: Re-implementing default behavior to work around MSVC. Remove once
  // MSVC synthesizes move constructors properly.
  NodeMetadata(NodeMetadata &&Other)
    : RS(Other.RS), NumOpts(Other.NumOpts), DeniedOpts(Other.DeniedOpts),
      OptUnsafeEdges(std::move(Other.OptUnsafeEdges)), VReg(Other.VReg),
      AllowedRegs(std::move(Other.AllowedRegs))
#ifndef NDEBUG
      , everConservativelyAllocatable(Other.everConservativelyAllocatable)
#endif
  {}

  // FIXME: Re-implementing default behavior to work around MSVC. Remove once
  // MSVC synthesizes move constructors properly.
  NodeMetadata& operator=(const NodeMetadata &Other) {
    RS = Other.RS;
    NumOpts = Other.NumOpts;
    DeniedOpts = Other.DeniedOpts;
    OptUnsafeEdges.reset(new unsigned[NumOpts]);
    std::copy(Other.OptUnsafeEdges.get(), Other.OptUnsafeEdges.get() + NumOpts,
              OptUnsafeEdges.get());
    VReg = Other.VReg;
    AllowedRegs = Other.AllowedRegs;
#ifndef NDEBUG
    everConservativelyAllocatable = Other.everConservativelyAllocatable;
#endif
    return *this;
  }

  // FIXME: Re-implementing default behavior to work around MSVC. Remove once
  // MSVC synthesizes move constructors properly.
  NodeMetadata& operator=(NodeMetadata &&Other) {
    RS = Other.RS;
    NumOpts = Other.NumOpts;
    DeniedOpts = Other.DeniedOpts;
    OptUnsafeEdges = std::move(Other.OptUnsafeEdges);
    VReg = Other.VReg;
    AllowedRegs = std::move(Other.AllowedRegs);
#ifndef NDEBUG
    everConservativelyAllocatable = Other.everConservativelyAllocatable;
#endif
    return *this;
  }

  void setVReg(unsigned VReg) { this->VReg = VReg; }
  unsigned getVReg() const { return VReg; }

  void setAllowedRegs(GraphMetadata::AllowedRegVecRef AllowedRegs) {
    this->AllowedRegs = std::move(AllowedRegs);
  }
  const AllowedRegVector& getAllowedRegs() const { return *AllowedRegs; }

  void setup(const Vector& Costs) {
    NumOpts = Costs.getLength() - 1;
    OptUnsafeEdges = std::unique_ptr<unsigned[]>(new unsigned[NumOpts]());
  }

  ReductionState getReductionState() const { return RS; }
  void setReductionState(ReductionState RS) {
    assert(RS >= this->RS && "A node's reduction state cannot be downgraded");
    this->RS = RS;

#ifndef NDEBUG
    // Remember this state to assert later that a non-infinite register
    // option was available.
    if (RS == ConservativelyAllocatable)
      everConservativelyAllocatable = true;
#endif
  }

  void handleAddEdge(const MatrixMetadata& MD, bool Transpose) {
    DeniedOpts += Transpose ? MD.getWorstRow() : MD.getWorstCol();
    const bool* UnsafeOpts =
      Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
    for (unsigned i = 0; i < NumOpts; ++i)
      OptUnsafeEdges[i] += UnsafeOpts[i];
  }

  void handleRemoveEdge(const MatrixMetadata& MD, bool Transpose) {
    DeniedOpts -= Transpose ? MD.getWorstRow() : MD.getWorstCol();
    const bool* UnsafeOpts =
      Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
    for (unsigned i = 0; i < NumOpts; ++i)
      OptUnsafeEdges[i] -= UnsafeOpts[i];
  }

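  // A node is conservatively allocatable if its neighbors cannot possibly deny
  // it every register option: either it has more options than the worst-case
  // number of options its edges can exclude (DeniedOpts < NumOpts), or at
  // least one option appears in no unsafe (infinite-cost) row or column of any
  // incident edge matrix.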
  bool isConservativelyAllocatable() const {
    return (DeniedOpts < NumOpts) ||
      (std::find(&OptUnsafeEdges[0], &OptUnsafeEdges[NumOpts], 0) !=
       &OptUnsafeEdges[NumOpts]);
  }

#ifndef NDEBUG
  bool wasConservativelyAllocatable() const {
    return everConservativelyAllocatable;
  }
#endif

private:
  ReductionState RS;
  unsigned NumOpts;
  unsigned DeniedOpts;
  std::unique_ptr<unsigned[]> OptUnsafeEdges;
  unsigned VReg;
  GraphMetadata::AllowedRegVecRef AllowedRegs;

#ifndef NDEBUG
  bool everConservativelyAllocatable;
#endif
};

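/// \brief Solver policy class for PBQP register-allocation graphs.
///
/// Plugged in as the solver/metadata traits of PBQP::Graph. It keeps per-node
/// metadata up to date as nodes and edges are added, removed, or re-costed,
/// and implements the heuristic reduction order used by solve().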
class RegAllocSolverImpl {
private:
  typedef MDMatrix<MatrixMetadata> RAMatrix;
public:
  typedef PBQP::Vector RawVector;
  typedef PBQP::Matrix RawMatrix;
  typedef PBQP::Vector Vector;
  typedef RAMatrix Matrix;
  typedef PBQP::PoolCostAllocator<Vector, Matrix> CostAllocator;

  typedef GraphBase::NodeId NodeId;
  typedef GraphBase::EdgeId EdgeId;

  typedef RegAlloc::NodeMetadata NodeMetadata;
  struct EdgeMetadata { };
  typedef RegAlloc::GraphMetadata GraphMetadata;

  typedef PBQP::Graph<RegAllocSolverImpl> Graph;

  RegAllocSolverImpl(Graph &G) : G(G) {}

  Solution solve() {
    G.setSolver(*this);
    Solution S;
    setup();
    S = backpropagate(G, reduce());
    G.unsetSolver();
    return S;
  }

  void handleAddNode(NodeId NId) {
    assert(G.getNodeCosts(NId).getLength() > 1 &&
           "PBQP Graph should not contain single or zero-option nodes");
    G.getNodeMetadata(NId).setup(G.getNodeCosts(NId));
  }
  void handleRemoveNode(NodeId NId) {}
  void handleSetNodeCosts(NodeId NId, const Vector& newCosts) {}

  void handleAddEdge(EdgeId EId) {
    handleReconnectEdge(EId, G.getEdgeNode1Id(EId));
    handleReconnectEdge(EId, G.getEdgeNode2Id(EId));
  }

  void handleRemoveEdge(EdgeId EId) {
    handleDisconnectEdge(EId, G.getEdgeNode1Id(EId));
    handleDisconnectEdge(EId, G.getEdgeNode2Id(EId));
  }

  void handleDisconnectEdge(EdgeId EId, NodeId NId) {
    NodeMetadata& NMd = G.getNodeMetadata(NId);
    const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
    NMd.handleRemoveEdge(MMd, NId == G.getEdgeNode2Id(EId));
    promote(NId, NMd);
  }

  void handleReconnectEdge(EdgeId EId, NodeId NId) {
    NodeMetadata& NMd = G.getNodeMetadata(NId);
    const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
    NMd.handleAddEdge(MMd, NId == G.getEdgeNode2Id(EId));
  }

  void handleUpdateCosts(EdgeId EId, const Matrix& NewCosts) {
    NodeId N1Id = G.getEdgeNode1Id(EId);
    NodeId N2Id = G.getEdgeNode2Id(EId);
    NodeMetadata& N1Md = G.getNodeMetadata(N1Id);
    NodeMetadata& N2Md = G.getNodeMetadata(N2Id);
    bool Transpose = N1Id != G.getEdgeNode1Id(EId);

    // Metadata are computed incrementally. First, update them
    // by removing the old cost.
    const MatrixMetadata& OldMMd = G.getEdgeCosts(EId).getMetadata();
    N1Md.handleRemoveEdge(OldMMd, Transpose);
    N2Md.handleRemoveEdge(OldMMd, !Transpose);

    // Now update the metadata with the new cost.
    const MatrixMetadata& MMd = NewCosts.getMetadata();
    N1Md.handleAddEdge(MMd, Transpose);
    N2Md.handleAddEdge(MMd, !Transpose);

    // As the metadata may have changed with the update, the nodes may have
    // become ConservativelyAllocatable or OptimallyReducible.
    promote(N1Id, N1Md);
    promote(N2Id, N2Md);
  }

private:

  void promote(NodeId NId, NodeMetadata& NMd) {
    if (G.getNodeDegree(NId) == 3) {
      // This node is becoming optimally reducible.
      moveToOptimallyReducibleNodes(NId);
    } else if (NMd.getReductionState() ==
               NodeMetadata::NotProvablyAllocatable &&
               NMd.isConservativelyAllocatable()) {
      // This node just became conservatively allocatable.
      moveToConservativelyAllocatableNodes(NId);
    }
  }

  void removeFromCurrentSet(NodeId NId) {
    switch (G.getNodeMetadata(NId).getReductionState()) {
    case NodeMetadata::Unprocessed: break;
    case NodeMetadata::OptimallyReducible:
      assert(OptimallyReducibleNodes.find(NId) !=
             OptimallyReducibleNodes.end() &&
             "Node not in optimally reducible set.");
      OptimallyReducibleNodes.erase(NId);
      break;
    case NodeMetadata::ConservativelyAllocatable:
      assert(ConservativelyAllocatableNodes.find(NId) !=
             ConservativelyAllocatableNodes.end() &&
             "Node not in conservatively allocatable set.");
      ConservativelyAllocatableNodes.erase(NId);
      break;
    case NodeMetadata::NotProvablyAllocatable:
      assert(NotProvablyAllocatableNodes.find(NId) !=
             NotProvablyAllocatableNodes.end() &&
             "Node not in not-provably-allocatable set.");
      NotProvablyAllocatableNodes.erase(NId);
      break;
    }
  }

  void moveToOptimallyReducibleNodes(NodeId NId) {
    removeFromCurrentSet(NId);
    OptimallyReducibleNodes.insert(NId);
    G.getNodeMetadata(NId).setReductionState(
      NodeMetadata::OptimallyReducible);
  }

  void moveToConservativelyAllocatableNodes(NodeId NId) {
    removeFromCurrentSet(NId);
    ConservativelyAllocatableNodes.insert(NId);
    G.getNodeMetadata(NId).setReductionState(
      NodeMetadata::ConservativelyAllocatable);
  }

  void moveToNotProvablyAllocatableNodes(NodeId NId) {
    removeFromCurrentSet(NId);
    NotProvablyAllocatableNodes.insert(NId);
    G.getNodeMetadata(NId).setReductionState(
      NodeMetadata::NotProvablyAllocatable);
  }

  void setup() {
    // Set up worklists.
    for (auto NId : G.nodeIds()) {
      if (G.getNodeDegree(NId) < 3)
        moveToOptimallyReducibleNodes(NId);
      else if (G.getNodeMetadata(NId).isConservativelyAllocatable())
        moveToConservativelyAllocatableNodes(NId);
      else
        moveToNotProvablyAllocatableNodes(NId);
    }
  }

  // Compute a reduction order for the graph by iteratively applying PBQP
  // reduction rules. Locally optimal rules are applied whenever possible (R0,
  // R1, R2). If no locally-optimal rules apply then any conservatively
  // allocatable node is reduced. Finally, if no conservatively allocatable
  // node exists then the node with the lowest spill-cost:degree ratio is
  // selected.
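  // (R0 applies to isolated, degree-0 nodes, whose cheapest option can be
  // chosen independently; R1 folds a degree-1 node's edge costs into its
  // neighbor's cost vector; R2 replaces a degree-2 node's two edge matrices
  // with a single edge between its neighbors. See applyR1/applyR2 in
  // llvm/CodeGen/PBQP/ReductionRules.h. Nodes reduced via the spill-cost
  // heuristic are not necessarily spilled: backpropagation may still find a
  // register option for them.)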
  std::vector<GraphBase::NodeId> reduce() {
    assert(!G.empty() && "Cannot reduce empty graph.");

    typedef GraphBase::NodeId NodeId;
    std::vector<NodeId> NodeStack;

    // Consume worklists.
    while (true) {
      if (!OptimallyReducibleNodes.empty()) {
        NodeSet::iterator NItr = OptimallyReducibleNodes.begin();
        NodeId NId = *NItr;
        OptimallyReducibleNodes.erase(NItr);
        NodeStack.push_back(NId);
        switch (G.getNodeDegree(NId)) {
        case 0:
          break;
        case 1:
          applyR1(G, NId);
          break;
        case 2:
          applyR2(G, NId);
          break;
        default: llvm_unreachable("Not an optimally reducible node.");
        }
      } else if (!ConservativelyAllocatableNodes.empty()) {
        // Conservatively allocatable nodes will never spill. For now just
        // take the first node in the set and push it on the stack. When we
        // start optimizing more heavily for register preferencing, it may
        // be better to push nodes with lower 'expected' or worst-case
        // register costs first (since early nodes are the most
        // constrained).
        NodeSet::iterator NItr = ConservativelyAllocatableNodes.begin();
        NodeId NId = *NItr;
        ConservativelyAllocatableNodes.erase(NItr);
        NodeStack.push_back(NId);
        G.disconnectAllNeighborsFromNode(NId);
      } else if (!NotProvablyAllocatableNodes.empty()) {
        NodeSet::iterator NItr =
          std::min_element(NotProvablyAllocatableNodes.begin(),
                           NotProvablyAllocatableNodes.end(),
                           SpillCostComparator(G));
        NodeId NId = *NItr;
        NotProvablyAllocatableNodes.erase(NItr);
        NodeStack.push_back(NId);
        G.disconnectAllNeighborsFromNode(NId);
      } else
        break;
    }

    return NodeStack;
  }

  class SpillCostComparator {
  public:
    SpillCostComparator(const Graph& G) : G(G) {}
    bool operator()(NodeId N1Id, NodeId N2Id) {
      PBQPNum N1SC = G.getNodeCosts(N1Id)[0];
      PBQPNum N2SC = G.getNodeCosts(N2Id)[0];
      if (N1SC == N2SC)
        return G.getNodeDegree(N1Id) < G.getNodeDegree(N2Id);
      return N1SC < N2SC;
    }
  private:
    const Graph& G;
  };

  Graph& G;
  typedef std::set<NodeId> NodeSet;
  NodeSet OptimallyReducibleNodes;
  NodeSet ConservativelyAllocatableNodes;
  NodeSet NotProvablyAllocatableNodes;
};

class PBQPRAGraph : public PBQP::Graph<RegAllocSolverImpl> {
private:
  typedef PBQP::Graph<RegAllocSolverImpl> BaseT;
public:
  PBQPRAGraph(GraphMetadata Metadata) : BaseT(Metadata) {}

  /// @brief Dump this graph to dbgs().
  void dump() const;

  /// @brief Dump this graph to an output stream.
  /// @param OS Output stream to print on.
  void dump(raw_ostream &OS) const;

  /// @brief Print a representation of this graph in DOT format.
  /// @param OS Output stream to print on.
  void printDot(raw_ostream &OS) const;
};

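// Convenience entry point. The returned Solution maps each node to the index
// of its chosen option, with index 0 denoting the spill option (see
// getSpillOptionIdx() above). A rough usage sketch (the exact graph-building
// API lives in llvm/CodeGen/PBQP/Graph.h and Solution.h):
//
//   PBQPRAGraph G(PBQPRAGraph::GraphMetadata(MF, LIS, MBFI));
//   auto NId = G.addNode(std::move(NodeCosts));    // spill + register options
//   G.addEdge(N1Id, N2Id, std::move(EdgeCosts));   // interference costs
//   Solution S = solve(G);
//   unsigned Opt = S.getSelection(NId);            // 0 => spill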
inline Solution solve(PBQPRAGraph& G) {
  if (G.empty())
    return Solution();
  RegAllocSolverImpl RegAllocSolver(G);
  return RegAllocSolver.solve();
}

} // namespace RegAlloc
} // namespace PBQP

/// @brief Create a PBQP register allocator instance.
FunctionPass *
createPBQPRegisterAllocator(char *customPassID = nullptr);

} // namespace llvm

#endif /* LLVM_CODEGEN_REGALLOCPBQP_H */