//===- Reassociate.cpp - Reassociate binary expressions ------------------===//
//
// This pass reassociates commutative expressions in an order that is designed
// to promote better constant propagation, GCSE, LICM, PRE...
//
// For example:  4 + (x + 5)  ->  x + (4 + 5)
//
// Note that this pass works best if left shifts have been promoted to explicit
// multiplies before this pass executes.
//
// In the implementation of this algorithm, constants are assigned rank = 0,
// function arguments are rank = 1, and other values are assigned ranks
// corresponding to the reverse post order traversal of current function
// (starting at 2), which effectively gives values in deep loops higher rank
// than values not in loops.
//
// This code was originally written by Chris Lattner, and was then cleaned up
// and perfected by Casey Carter.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constant.h"
#include "llvm/Function.h"
#include "llvm/iOperators.h"
#include "llvm/Pass.h"
#include "llvm/Type.h"
#include "llvm/Support/CFG.h"
#include "Support/Debug.h"
#include "Support/PostOrderIterator.h"
#include "Support/Statistic.h"
34 Statistic<> NumLinear ("reassociate","Number of insts linearized");
35 Statistic<> NumChanged("reassociate","Number of insts reassociated");
36 Statistic<> NumSwapped("reassociate","Number of insts with operands swapped");
38 class Reassociate : public FunctionPass {
39 std::map<BasicBlock*, unsigned> RankMap;
40 std::map<Value*, unsigned> ValueRankMap;
42 bool runOnFunction(Function &F);
44 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
48 void BuildRankMap(Function &F);
49 unsigned getRank(Value *V);
50 bool ReassociateExpr(BinaryOperator *I);
51 bool ReassociateBB(BasicBlock *BB);
54 RegisterOpt<Reassociate> X("reassociate", "Reassociate expressions");
57 Pass *createReassociatePass() { return new Reassociate(); }
59 void Reassociate::BuildRankMap(Function &F) {
62 // Assign distinct ranks to function arguments
63 for (Function::aiterator I = F.abegin(), E = F.aend(); I != E; ++I)
64 ValueRankMap[I] = ++i;
66 ReversePostOrderTraversal<Function*> RPOT(&F);
67 for (ReversePostOrderTraversal<Function*>::rpo_iterator I = RPOT.begin(),
68 E = RPOT.end(); I != E; ++I)
69 RankMap[*I] = ++i << 16;
72 unsigned Reassociate::getRank(Value *V) {
73 if (isa<Argument>(V)) return ValueRankMap[V]; // Function argument...
75 if (Instruction *I = dyn_cast<Instruction>(V)) {
76 // If this is an expression, return the 1+MAX(rank(LHS), rank(RHS)) so that
77 // we can reassociate expressions for code motion! Since we do not recurse
78 // for PHI nodes, we cannot have infinite recursion here, because there
79 // cannot be loops in the value graph that do not go through PHI nodes.
81 if (I->getOpcode() == Instruction::PHINode ||
82 I->getOpcode() == Instruction::Alloca ||
83 I->getOpcode() == Instruction::Malloc || isa<TerminatorInst>(I) ||
84 I->mayWriteToMemory()) // Cannot move inst if it writes to memory!
85 return RankMap[I->getParent()];
87 unsigned &CachedRank = ValueRankMap[I];
88 if (CachedRank) return CachedRank; // Rank already known?
90 // If not, compute it!
91 unsigned Rank = 0, MaxRank = RankMap[I->getParent()];
92 for (unsigned i = 0, e = I->getNumOperands();
93 i != e && Rank != MaxRank; ++i)
94 Rank = std::max(Rank, getRank(I->getOperand(i)));
96 DEBUG(std::cerr << "Calculated Rank[" << V->getName() << "] = "
99 return CachedRank = Rank+1;
102 // Otherwise it's a global or constant, rank 0.
107 bool Reassociate::ReassociateExpr(BinaryOperator *I) {
108 Value *LHS = I->getOperand(0);
109 Value *RHS = I->getOperand(1);
110 unsigned LHSRank = getRank(LHS);
111 unsigned RHSRank = getRank(RHS);
113 bool Changed = false;
115 // Make sure the LHS of the operand always has the greater rank...
116 if (LHSRank < RHSRank) {
117 bool Success = !I->swapOperands();
118 assert(Success && "swapOperands failed");
121 std::swap(LHSRank, RHSRank);
124 DEBUG(std::cerr << "Transposed: " << I
125 /* << " Result BB: " << I->getParent()*/);
128 // If the LHS is the same operator as the current one is, and if we are the
129 // only expression using it...
131 if (BinaryOperator *LHSI = dyn_cast<BinaryOperator>(LHS))
132 if (LHSI->getOpcode() == I->getOpcode() && LHSI->use_size() == 1) {
133 // If the rank of our current RHS is less than the rank of the LHS's LHS,
134 // then we reassociate the two instructions...
137 if (BinaryOperator *IOp = dyn_cast<BinaryOperator>(LHSI->getOperand(0)))
138 if (IOp->getOpcode() == LHSI->getOpcode())
139 TakeOp = 1; // Hoist out non-tree portion
141 if (RHSRank < getRank(LHSI->getOperand(TakeOp))) {
142 // Convert ((a + 12) + 10) into (a + (12 + 10))
143 I->setOperand(0, LHSI->getOperand(TakeOp));
144 LHSI->setOperand(TakeOp, RHS);
145 I->setOperand(1, LHSI);
147 // Move the LHS expression forward, to ensure that it is dominated by
149 LHSI->getParent()->getInstList().remove(LHSI);
150 I->getParent()->getInstList().insert(I, LHSI);
153 DEBUG(std::cerr << "Reassociated: " << I/* << " Result BB: "
154 << I->getParent()*/);
156 // Since we modified the RHS instruction, make sure that we recheck it.
157 ReassociateExpr(LHSI);
167 // NegateValue - Insert instructions before the instruction pointed to by BI,
168 // that computes the negative version of the value specified. The negative
169 // version of the value is returned, and BI is left pointing at the instruction
170 // that should be processed next by the reassociation pass.
172 static Value *NegateValue(Value *V, BasicBlock::iterator &BI) {
173 // We are trying to expose opportunity for reassociation. One of the things
174 // that we want to do to achieve this is to push a negation as deep into an
175 // expression chain as possible, to expose the add instructions. In practice,
176 // this means that we turn this:
177 // X = -(A+12+C+D) into X = -A + -12 + -C + -D = -12 + -A + -C + -D
178 // so that later, a: Y = 12+X could get reassociated with the -12 to eliminate
179 // the constants. We assume that instcombine will clean up the mess later if
180 // we introduce tons of unnecessary negation instructions...
182 if (Instruction *I = dyn_cast<Instruction>(V))
183 if (I->getOpcode() == Instruction::Add && I->use_size() == 1) {
184 Value *RHS = NegateValue(I->getOperand(1), BI);
185 Value *LHS = NegateValue(I->getOperand(0), BI);
187 // We must actually insert a new add instruction here, because the neg
188 // instructions do not dominate the old add instruction in general. By
189 // adding it now, we are assured that the neg instructions we just
190 // inserted dominate the instruction we are about to insert after them.
192 return BinaryOperator::create(Instruction::Add, LHS, RHS,
194 cast<Instruction>(RHS)->getNext());
197 // Insert a 'neg' instruction that subtracts the value from zero to get the
200 return BI = BinaryOperator::createNeg(V, V->getName() + ".neg", BI);
204 bool Reassociate::ReassociateBB(BasicBlock *BB) {
205 bool Changed = false;
206 for (BasicBlock::iterator BI = BB->begin(); BI != BB->end(); ++BI) {
208 DEBUG(std::cerr << "Processing: " << *BI);
209 if (BI->getOpcode() == Instruction::Sub && !BinaryOperator::isNeg(BI)) {
210 // Convert a subtract into an add and a neg instruction... so that sub
211 // instructions can be commuted with other add instructions...
213 // Calculate the negative value of Operand 1 of the sub instruction...
214 // and set it as the RHS of the add instruction we just made...
216 std::string Name = BI->getName();
219 BinaryOperator::create(Instruction::Add, BI->getOperand(0),
220 BI->getOperand(1), Name, BI);
222 // Everyone now refers to the add instruction...
223 BI->replaceAllUsesWith(New);
225 // Put the new add in the place of the subtract... deleting the subtract
226 BB->getInstList().erase(BI);
229 New->setOperand(1, NegateValue(New->getOperand(1), BI));
232 DEBUG(std::cerr << "Negated: " << New /*<< " Result BB: " << BB*/);
235 // If this instruction is a commutative binary operator, and the ranks of
236 // the two operands are sorted incorrectly, fix it now.
238 if (BI->isAssociative()) {
239 BinaryOperator *I = cast<BinaryOperator>(BI);
240 if (!I->use_empty()) {
241 // Make sure that we don't have a tree-shaped computation. If we do,
242 // linearize it. Convert (A+B)+(C+D) into ((A+B)+C)+D
244 Instruction *LHSI = dyn_cast<Instruction>(I->getOperand(0));
245 Instruction *RHSI = dyn_cast<Instruction>(I->getOperand(1));
246 if (LHSI && (int)LHSI->getOpcode() == I->getOpcode() &&
247 RHSI && (int)RHSI->getOpcode() == I->getOpcode() &&
248 RHSI->use_size() == 1) {
249 // Insert a new temporary instruction... (A+B)+C
250 BinaryOperator *Tmp = BinaryOperator::create(I->getOpcode(), LHSI,
252 RHSI->getName()+".ra",
255 I->setOperand(0, Tmp);
256 I->setOperand(1, RHSI->getOperand(1));
258 // Process the temporary instruction for reassociation now.
262 DEBUG(std::cerr << "Linearized: " << I/* << " Result BB: " << BB*/);
265 // Make sure that this expression is correctly reassociated with respect
266 // to it's used values...
268 Changed |= ReassociateExpr(I);
277 bool Reassociate::runOnFunction(Function &F) {
278 // Recalculate the rank map for F
281 bool Changed = false;
282 for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI)
283 Changed |= ReassociateBB(FI);
285 // We are done with the rank map...
287 ValueRankMap.clear();