//===- InstCombinePHI.cpp -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitPHINode function.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/DataLayout.h"
using namespace llvm;

#define DEBUG_TYPE "instcombine"

/// FoldPHIArgBinOpIntoPHI - If we have something like phi [add (a,b), add(a,c)]
/// and if a/b/c and the add's all have a single use, turn this into a phi
/// and a single binop.
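///
/// For illustration, a hand-written IR sketch (block and value names are
/// hypothetical, not taken from a real test case):
///
///   bb1:   %a1 = add i32 %x, %y
///          br label %merge
///   bb2:   %a2 = add i32 %x, %z
///          br label %merge
///   merge: %phi = phi i32 [ %a1, %bb1 ], [ %a2, %bb2 ]
///
/// becomes a single PHI of the differing operand plus one add:
///
///   merge: %yz  = phi i32 [ %y, %bb1 ], [ %z, %bb2 ]
///          %phi = add i32 %x, %yz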
Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
  assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
  unsigned Opc = FirstInst->getOpcode();
  Value *LHSVal = FirstInst->getOperand(0);
  Value *RHSVal = FirstInst->getOperand(1);

  Type *LHSType = LHSVal->getType();
  Type *RHSType = RHSVal->getType();

  bool isNUW = false, isNSW = false, isExact = false;
  if (OverflowingBinaryOperator *BO =
        dyn_cast<OverflowingBinaryOperator>(FirstInst)) {
    isNUW = BO->hasNoUnsignedWrap();
    isNSW = BO->hasNoSignedWrap();
  } else if (PossiblyExactOperator *PEO =
               dyn_cast<PossiblyExactOperator>(FirstInst))
    isExact = PEO->isExact();
  // Scan to see if all operands are the same opcode, and all have one use.
  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
    Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
    if (!I || I->getOpcode() != Opc || !I->hasOneUse() ||
        // Verify type of the LHS matches so we don't fold cmp's of different
        // types.
        I->getOperand(0)->getType() != LHSType ||
        I->getOperand(1)->getType() != RHSType)
      return nullptr;

    // If they are CmpInst instructions, check their predicates
    if (CmpInst *CI = dyn_cast<CmpInst>(I))
      if (CI->getPredicate() != cast<CmpInst>(FirstInst)->getPredicate())
        return nullptr;

    if (isNUW)
      isNUW = cast<OverflowingBinaryOperator>(I)->hasNoUnsignedWrap();
    if (isNSW)
      isNSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    if (isExact)
      isExact = cast<PossiblyExactOperator>(I)->isExact();

    // Keep track of which operand needs a phi node.
    if (I->getOperand(0) != LHSVal) LHSVal = nullptr;
    if (I->getOperand(1) != RHSVal) RHSVal = nullptr;
  }
  // If both LHS and RHS would need a PHI, don't do this transformation,
  // because it would increase the number of PHIs entering the block,
  // which leads to higher register pressure. This is especially
  // bad when the PHIs are in the header of a loop.
  if (!LHSVal && !RHSVal)
    return nullptr;

  // Otherwise, this is safe to transform!

  Value *InLHS = FirstInst->getOperand(0);
  Value *InRHS = FirstInst->getOperand(1);
  PHINode *NewLHS = nullptr, *NewRHS = nullptr;
  if (!LHSVal) {
    NewLHS = PHINode::Create(LHSType, PN.getNumIncomingValues(),
                             FirstInst->getOperand(0)->getName() + ".pn");
    NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewLHS, PN);
    LHSVal = NewLHS;
  }

  if (!RHSVal) {
    NewRHS = PHINode::Create(RHSType, PN.getNumIncomingValues(),
                             FirstInst->getOperand(1)->getName() + ".pn");
    NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewRHS, PN);
    RHSVal = NewRHS;
  }
  // Add all operands to the new PHIs.
  if (NewLHS || NewRHS) {
    for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
      Instruction *InInst = cast<Instruction>(PN.getIncomingValue(i));
      if (NewLHS) {
        Value *NewInLHS = InInst->getOperand(0);
        NewLHS->addIncoming(NewInLHS, PN.getIncomingBlock(i));
      }
      if (NewRHS) {
        Value *NewInRHS = InInst->getOperand(1);
        NewRHS->addIncoming(NewInRHS, PN.getIncomingBlock(i));
      }
    }
  }

  if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst)) {
    CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
                                     LHSVal, RHSVal);
    NewCI->setDebugLoc(FirstInst->getDebugLoc());
    return NewCI;
  }
  BinaryOperator *BinOp = cast<BinaryOperator>(FirstInst);
  BinaryOperator *NewBinOp =
    BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);
  if (isNUW) NewBinOp->setHasNoUnsignedWrap();
  if (isNSW) NewBinOp->setHasNoSignedWrap();
  if (isExact) NewBinOp->setIsExact();
  NewBinOp->setDebugLoc(FirstInst->getDebugLoc());
  return NewBinOp;
}
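
/// FoldPHIArgGEPIntoPHI - Fold a PHI whose incoming values are all GEPs of the
/// same shape into PHIs of the differing operands followed by a single GEP.
/// A hedged, illustrative sketch (names are hypothetical):
///
///   %p1 = getelementptr i32* %base, i64 %i    ; in %pred1
///   %p2 = getelementptr i32* %base, i64 %j    ; in %pred2
///   %p  = phi i32* [ %p1, %pred1 ], [ %p2, %pred2 ]
///
/// becomes
///
///   %idx = phi i64 [ %i, %pred1 ], [ %j, %pred2 ]
///   %p   = getelementptr i32* %base, i64 %idx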
Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
  GetElementPtrInst *FirstInst =
      cast<GetElementPtrInst>(PN.getIncomingValue(0));

  SmallVector<Value*, 16> FixedOperands(FirstInst->op_begin(),
                                        FirstInst->op_end());

  // This is true if all GEP bases are allocas and if all indices into them are
  // constants.
  bool AllBasePointersAreAllocas = true;

  // We don't want to replace this phi if the replacement would require
  // more than one phi, which leads to higher register pressure. This is
  // especially bad when the PHIs are in the header of a loop.
  bool NeededPhi = false;

  bool AllInBounds = true;
  // Scan to see if all operands are the same opcode, and all have one use.
  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
    GetElementPtrInst *GEP =
        dyn_cast<GetElementPtrInst>(PN.getIncomingValue(i));
    if (!GEP || !GEP->hasOneUse() || GEP->getType() != FirstInst->getType() ||
        GEP->getNumOperands() != FirstInst->getNumOperands())
      return nullptr;

    AllInBounds &= GEP->isInBounds();

    // Keep track of whether or not all GEPs are of alloca pointers.
    if (AllBasePointersAreAllocas &&
        (!isa<AllocaInst>(GEP->getOperand(0)) ||
         !GEP->hasAllConstantIndices()))
      AllBasePointersAreAllocas = false;

    // Compare the operand lists.
    for (unsigned op = 0, e = FirstInst->getNumOperands(); op != e; ++op) {
      if (FirstInst->getOperand(op) == GEP->getOperand(op))
        continue;

      // Don't merge two GEPs when two operands differ (introducing phi nodes)
      // if one of the PHIs has a constant for the index. The index may be
      // substantially cheaper to compute for the constants, so making it a
      // variable index could pessimize the path. This also handles the case
      // for struct indices, which must always be constant.
      if (isa<ConstantInt>(FirstInst->getOperand(op)) ||
          isa<ConstantInt>(GEP->getOperand(op)))
        return nullptr;

      if (FirstInst->getOperand(op)->getType() !=
          GEP->getOperand(op)->getType())
        return nullptr;

      // If we already needed a PHI for an earlier operand, and another operand
      // also requires a PHI, we'd be introducing more PHIs than we're
      // eliminating, which increases register pressure on entry to the PHI's
      // block.
      if (NeededPhi)
        return nullptr;

      FixedOperands[op] = nullptr;  // Needs a PHI.
      NeededPhi = true;
    }
  }
  // If all of the base pointers of the PHI'd GEPs are from allocas, don't
  // bother doing this transformation. At best, this will just save a bit of
  // offset calculation, but all the predecessors will have to materialize the
  // stack address into a register anyway. We'd actually rather *clone* the
  // load up into the predecessors so that we have a load of a gep of an alloca,
  // which can usually all be folded into the load.
  if (AllBasePointersAreAllocas)
    return nullptr;

  // Otherwise, this is safe to transform. Insert PHI nodes for each operand
  // that is variable.
  SmallVector<PHINode*, 16> OperandPhis(FixedOperands.size());

  bool HasAnyPHIs = false;
  for (unsigned i = 0, e = FixedOperands.size(); i != e; ++i) {
    if (FixedOperands[i]) continue;  // operand doesn't need a phi.
    Value *FirstOp = FirstInst->getOperand(i);
    PHINode *NewPN = PHINode::Create(FirstOp->getType(), e,
                                     FirstOp->getName()+".pn");
    InsertNewInstBefore(NewPN, PN);

    NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
    OperandPhis[i] = NewPN;
    FixedOperands[i] = NewPN;
    HasAnyPHIs = true;
  }
  // Add all operands to the new PHIs.
  if (HasAnyPHIs) {
    for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
      GetElementPtrInst *InGEP =
          cast<GetElementPtrInst>(PN.getIncomingValue(i));
      BasicBlock *InBB = PN.getIncomingBlock(i);

      for (unsigned op = 0, e = OperandPhis.size(); op != e; ++op)
        if (PHINode *OpPhi = OperandPhis[op])
          OpPhi->addIncoming(InGEP->getOperand(op), InBB);
    }
  }

  Value *Base = FixedOperands[0];
  GetElementPtrInst *NewGEP =
    GetElementPtrInst::Create(Base, makeArrayRef(FixedOperands).slice(1));
  if (AllInBounds) NewGEP->setIsInBounds();
  NewGEP->setDebugLoc(FirstInst->getDebugLoc());
  return NewGEP;
}

/// isSafeAndProfitableToSinkLoad - Return true if we know that it is safe to
/// sink the load out of the block that defines it. This means that it must be
/// obvious the value of the load is not changed from the point of the load to
/// the end of the block it is in.
///
/// Finally, it is safe, but not profitable, to sink a load targeting a
/// non-address-taken alloca. Doing so will cause us to not promote the alloca
/// to a register.
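///
/// For example (illustrative only): a load followed later in its block by a
/// call that mayWriteToMemory() cannot be sunk, since the call could clobber
/// the loaded memory before control reaches the PHI.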
static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
  BasicBlock::iterator BBI = L, E = L->getParent()->end();

  for (++BBI; BBI != E; ++BBI)
    if (BBI->mayWriteToMemory())
      return false;

  // Check for non-address taken alloca. If not address-taken already, it isn't
  // profitable to do this xform.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
    bool isAddressTaken = false;
    for (User *U : AI->users()) {
      if (isa<LoadInst>(U)) continue;
      if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
        // If storing TO the alloca, then the address isn't taken.
        if (SI->getOperand(1) == AI) continue;
      }
      isAddressTaken = true;
      break;
    }

    if (!isAddressTaken && AI->isStaticAlloca())
      return false;
  }

  // If this load is a load from a GEP with a constant offset from an alloca,
  // then we don't want to sink it. In its present form, it will be
  // load [constant stack offset]. Sinking it will cause us to have to
  // materialize the stack addresses in each predecessor in a register only to
  // do a shared load from register in the successor.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
    if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
      if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
        return false;

  return true;
}
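
/// FoldPHIArgLoadIntoPHI - Fold a PHI of loads into a PHI of the pointers
/// followed by one sunk load. A hedged, illustrative sketch (names are
/// hypothetical):
///
///   %v1 = load i32* %p1        ; in %pred1
///   %v2 = load i32* %p2        ; in %pred2
///   %v  = phi i32 [ %v1, %pred1 ], [ %v2, %pred2 ]
///
/// becomes
///
///   %ptr = phi i32* [ %p1, %pred1 ], [ %p2, %pred2 ]
///   %v   = load i32* %ptr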
Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
  LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));

  // FIXME: This is overconservative; this transform is allowed in some cases
  // for atomic operations.
  if (FirstLI->isAtomic())
    return nullptr;

  // When processing loads, we need to propagate two bits of information to the
  // sunk load: whether it is volatile, and what its alignment is. We currently
  // don't sink loads when some have their alignment specified and some don't.
  // visitLoadInst will propagate an alignment onto the load when TD is around,
  // and if TD isn't around, we can't handle the mixed case.
  bool isVolatile = FirstLI->isVolatile();
  unsigned LoadAlignment = FirstLI->getAlignment();
  unsigned LoadAddrSpace = FirstLI->getPointerAddressSpace();

  // We can't sink the load if the loaded value could be modified between the
  // load and the PHI.
  if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
      !isSafeAndProfitableToSinkLoad(FirstLI))
    return nullptr;

  // If the PHI is of volatile loads and the load block has multiple
  // successors, sinking it would remove a load of the volatile value from
  // the path through the other successor.
  if (isVolatile &&
      FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
    return nullptr;
  // Check to see if all arguments are the same operation.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    LoadInst *LI = dyn_cast<LoadInst>(PN.getIncomingValue(i));
    if (!LI || !LI->hasOneUse())
      return nullptr;

    // We can't sink the load if the loaded value could be modified between
    // the load and the PHI.
    if (LI->isVolatile() != isVolatile ||
        LI->getParent() != PN.getIncomingBlock(i) ||
        LI->getPointerAddressSpace() != LoadAddrSpace ||
        !isSafeAndProfitableToSinkLoad(LI))
      return nullptr;

    // If some of the loads have an alignment specified but not all of them,
    // we can't do the transformation.
    if ((LoadAlignment != 0) != (LI->getAlignment() != 0))
      return nullptr;

    LoadAlignment = std::min(LoadAlignment, LI->getAlignment());

    // If the PHI is of volatile loads and the load block has multiple
    // successors, sinking it would remove a load of the volatile value from
    // the path through the other successor.
    if (isVolatile &&
        LI->getParent()->getTerminator()->getNumSuccessors() != 1)
      return nullptr;
  }
  // Okay, they are all the same operation. Create a new PHI node of the
  // correct type, and PHI together all of the LHS's of the instructions.
  PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(),
                                   PN.getNumIncomingValues(),
                                   PN.getName()+".in");

  Value *InVal = FirstLI->getOperand(0);
  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));

  // Add all operands to the new PHI.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    Value *NewInVal = cast<LoadInst>(PN.getIncomingValue(i))->getOperand(0);
    if (NewInVal != InVal)
      InVal = nullptr;
    NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
  }

  Value *PhiVal;
  if (InVal) {
    // The new PHI unions all of the same values together. This is really
    // common, so we handle it intelligently here for compile-time speed.
    PhiVal = InVal;
    delete NewPN;
  } else {
    InsertNewInstBefore(NewPN, PN);
    PhiVal = NewPN;
  }

  // If this was a volatile load that we are merging, make sure to loop through
  // and mark all the input loads as non-volatile. If we don't do this, we will
  // insert a new volatile load and the old ones will not be deletable.
  if (isVolatile)
    for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
      cast<LoadInst>(PN.getIncomingValue(i))->setVolatile(false);

  LoadInst *NewLI = new LoadInst(PhiVal, "", isVolatile, LoadAlignment);
  NewLI->setDebugLoc(FirstLI->getDebugLoc());
  return NewLI;
}

/// FoldPHIArgOpIntoPHI - If all operands to a PHI node are the same "unary"
/// operator and they all are only used by the PHI, PHI together their
/// inputs, and do the operation once, to the result of the PHI.
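///
/// For example (an illustrative sketch; names are hypothetical):
///
///   %t1 = trunc i64 %a to i32   ; in %pred1
///   %t2 = trunc i64 %b to i32   ; in %pred2
///   %r  = phi i32 [ %t1, %pred1 ], [ %t2, %pred2 ]
///
/// becomes
///
///   %ab = phi i64 [ %a, %pred1 ], [ %b, %pred2 ]
///   %r  = trunc i64 %ab to i32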
Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));

  if (isa<GetElementPtrInst>(FirstInst))
    return FoldPHIArgGEPIntoPHI(PN);
  if (isa<LoadInst>(FirstInst))
    return FoldPHIArgLoadIntoPHI(PN);

  // Scan the instruction, looking for input operations that can be folded away.
  // If all input operands to the phi are the same instruction (e.g. a cast from
  // the same type or "+42") we can pull the operation through the PHI, reducing
  // code size and simplifying code.
  Constant *ConstantOp = nullptr;
  Type *CastSrcTy = nullptr;
  bool isNUW = false, isNSW = false, isExact = false;

  if (isa<CastInst>(FirstInst)) {
    CastSrcTy = FirstInst->getOperand(0)->getType();

    // Be careful about transforming integer PHIs. We don't want to pessimize
    // the code by turning an i32 into an i1293.
    if (PN.getType()->isIntegerTy() && CastSrcTy->isIntegerTy()) {
      if (!ShouldChangeType(PN.getType(), CastSrcTy))
        return nullptr;
    }
  } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
    // Can fold binop, compare or shift here if the RHS is a constant,
    // otherwise call FoldPHIArgBinOpIntoPHI.
    ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
    if (!ConstantOp)
      return FoldPHIArgBinOpIntoPHI(PN);

    if (OverflowingBinaryOperator *BO =
          dyn_cast<OverflowingBinaryOperator>(FirstInst)) {
      isNUW = BO->hasNoUnsignedWrap();
      isNSW = BO->hasNoSignedWrap();
    } else if (PossiblyExactOperator *PEO =
                 dyn_cast<PossiblyExactOperator>(FirstInst))
      isExact = PEO->isExact();
  } else {
    return nullptr;  // Cannot fold this operation.
  }
  // Check to see if all arguments are the same operation.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
    if (!I || !I->hasOneUse() || !I->isSameOperationAs(FirstInst))
      return nullptr;
    if (CastSrcTy) {
      if (I->getOperand(0)->getType() != CastSrcTy)
        return nullptr;  // Cast operation must match.
    } else if (I->getOperand(1) != ConstantOp) {
      return nullptr;  // Ditto for the constant operand.
    }

    if (isNUW)
      isNUW = cast<OverflowingBinaryOperator>(I)->hasNoUnsignedWrap();
    if (isNSW)
      isNSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    if (isExact)
      isExact = cast<PossiblyExactOperator>(I)->isExact();
  }
  // Okay, they are all the same operation. Create a new PHI node of the
  // correct type, and PHI together all of the LHS's of the instructions.
  PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
                                   PN.getNumIncomingValues(),
                                   PN.getName()+".in");

  Value *InVal = FirstInst->getOperand(0);
  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));

  // Add all operands to the new PHI.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    Value *NewInVal = cast<Instruction>(PN.getIncomingValue(i))->getOperand(0);
    if (NewInVal != InVal)
      InVal = nullptr;
    NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
  }

  Value *PhiVal;
  if (InVal) {
    // The new PHI unions all of the same values together. This is really
    // common, so we handle it intelligently here for compile-time speed.
    PhiVal = InVal;
    delete NewPN;
  } else {
    InsertNewInstBefore(NewPN, PN);
    PhiVal = NewPN;
  }
  // Insert and return the new operation.
  if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst)) {
    CastInst *NewCI = CastInst::Create(FirstCI->getOpcode(), PhiVal,
                                       PN.getType());
    NewCI->setDebugLoc(FirstInst->getDebugLoc());
    return NewCI;
  }

  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst)) {
    BinOp = BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
    if (isNUW) BinOp->setHasNoUnsignedWrap();
    if (isNSW) BinOp->setHasNoSignedWrap();
    if (isExact) BinOp->setIsExact();
    BinOp->setDebugLoc(FirstInst->getDebugLoc());
    return BinOp;
  }

  CmpInst *CIOp = cast<CmpInst>(FirstInst);
  CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
                                   PhiVal, ConstantOp);
  NewCI->setDebugLoc(FirstInst->getDebugLoc());
  return NewCI;
}

/// DeadPHICycle - Return true if this PHI node is only used by a PHI node cycle
/// that is dead.
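///
/// A minimal illustrative case: %x = phi(%y) and %y = phi(%x) with no other
/// users form such a dead cycle, and both PHIs can be removed.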
static bool DeadPHICycle(PHINode *PN,
                         SmallPtrSetImpl<PHINode*> &PotentiallyDeadPHIs) {
  if (PN->use_empty()) return true;
  if (!PN->hasOneUse()) return false;

  // Remember this node, and if we find the cycle, return.
  if (!PotentiallyDeadPHIs.insert(PN).second)
    return true;

  // Don't scan crazily complex things.
  if (PotentiallyDeadPHIs.size() == 16)
    return false;

  if (PHINode *PU = dyn_cast<PHINode>(PN->user_back()))
    return DeadPHICycle(PU, PotentiallyDeadPHIs);

  return false;
}

/// PHIsEqualValue - Return true if this phi node is always equal to
/// NonPhiInVal. This happens with mutually cyclic phi nodes like:
///   z = some value; x = phi (y, z); y = phi (x, z)
static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
                           SmallPtrSetImpl<PHINode*> &ValueEqualPHIs) {
  // See if we already saw this PHI node.
  if (!ValueEqualPHIs.insert(PN).second)
    return true;

  // Don't scan crazily complex things.
  if (ValueEqualPHIs.size() == 16)
    return false;

  // Scan the operands to see if they are either phi nodes or are equal to
  // the value.
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *Op = PN->getIncomingValue(i);
    if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
      if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs))
        return false;
    } else if (Op != NonPhiInVal)
      return false;
  }

  return true;
}

namespace {
struct PHIUsageRecord {
  unsigned PHIId;     // The ID # of the PHI (something deterministic to sort on)
  unsigned Shift;     // The amount shifted.
  Instruction *Inst;  // The trunc instruction.

  PHIUsageRecord(unsigned pn, unsigned Sh, Instruction *User)
    : PHIId(pn), Shift(Sh), Inst(User) {}

  bool operator<(const PHIUsageRecord &RHS) const {
    if (PHIId < RHS.PHIId) return true;
    if (PHIId > RHS.PHIId) return false;
    if (Shift < RHS.Shift) return true;
    if (Shift > RHS.Shift) return false;
    return Inst->getType()->getPrimitiveSizeInBits() <
           RHS.Inst->getType()->getPrimitiveSizeInBits();
  }
};

struct LoweredPHIRecord {
  PHINode *PN;        // The PHI that was lowered.
  unsigned Shift;     // The amount shifted.
  unsigned Width;     // The width extracted.

  LoweredPHIRecord(PHINode *pn, unsigned Sh, Type *Ty)
    : PN(pn), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}

  // Ctor form used by DenseMap.
  LoweredPHIRecord(PHINode *pn, unsigned Sh)
    : PN(pn), Shift(Sh), Width(0) {}
};
}

namespace llvm {
  template<>
  struct DenseMapInfo<LoweredPHIRecord> {
    static inline LoweredPHIRecord getEmptyKey() {
      return LoweredPHIRecord(nullptr, 0);
    }
    static inline LoweredPHIRecord getTombstoneKey() {
      return LoweredPHIRecord(nullptr, 1);
    }
    static unsigned getHashValue(const LoweredPHIRecord &Val) {
      return DenseMapInfo<PHINode*>::getHashValue(Val.PN) ^ (Val.Shift>>3) ^
             (Val.Width>>3);
    }
    static bool isEqual(const LoweredPHIRecord &LHS,
                        const LoweredPHIRecord &RHS) {
      return LHS.PN == RHS.PN && LHS.Shift == RHS.Shift &&
             LHS.Width == RHS.Width;
    }
  };
}

/// SliceUpIllegalIntegerPHI - This is an integer PHI and we know that it has an
/// illegal type: see if it is only used by trunc or trunc(lshr) operations. If
/// so, we split the PHI into the various pieces being extracted. This sort of
/// thing is introduced when SROA promotes an aggregate to large integer values.
///
/// TODO: The user of the trunc may be a bitcast to float/double/vector or an
/// inttoptr. We should produce new PHIs in the right type.
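///
/// An illustrative sketch (hypothetical names), assuming i64 is illegal for
/// the target:
///
///   %p    = phi i64 [ %a, %pred1 ], [ %b, %pred2 ]
///   %lo   = trunc i64 %p to i32
///   %hi.s = lshr i64 %p, 32
///   %hi   = trunc i64 %hi.s to i32
///
/// is sliced into two legal PHIs, with the extracts done in the predecessors:
///
///   %p.off0  = phi i32 [ %a.t, %pred1 ], [ %b.t, %pred2 ]
///   %p.off32 = phi i32 [ %a.h, %pred1 ], [ %b.h, %pred2 ]
///
/// where %a.t/%a.h and %b.t/%b.h are the trunc and lshr+trunc of %a and %b,
/// materialized in their predecessor blocks.
///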
Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
  // PHIUsers - Keep track of all of the truncated values extracted from a set
  // of PHIs, along with their offset. These are the things we want to rewrite.
  SmallVector<PHIUsageRecord, 16> PHIUsers;

  // PHIs are often mutually cyclic, so we keep track of a whole set of PHI
  // nodes which are extracted from. PHIsToSlice is an ordered list of PHIs
  // that we need to check the uses of (to ensure they are all extracts), and
  // PHIsInspected is a set we use to avoid revisiting PHIs.
  SmallVector<PHINode*, 8> PHIsToSlice;
  SmallPtrSet<PHINode*, 8> PHIsInspected;

  PHIsToSlice.push_back(&FirstPhi);
  PHIsInspected.insert(&FirstPhi);
  for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) {
    PHINode *PN = PHIsToSlice[PHIId];

    // Scan the input list of the PHI. If any input is an invoke, and if the
    // input is defined in the predecessor, then we can't split the critical
    // edge which is required to insert a truncate. Because of this, we have to
    // bail out.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      InvokeInst *II = dyn_cast<InvokeInst>(PN->getIncomingValue(i));
      if (!II) continue;
      if (II->getParent() != PN->getIncomingBlock(i))
        continue;

      // If we have a phi, and if it's directly in the predecessor, then we have
      // a critical edge where we need to put the truncate. Since we can't
      // split the edge in instcombine, we have to bail out.
      return nullptr;
    }

    for (User *U : PN->users()) {
      Instruction *UserI = cast<Instruction>(U);

      // If the user is a PHI, inspect its uses recursively.
      if (PHINode *UserPN = dyn_cast<PHINode>(UserI)) {
        if (PHIsInspected.insert(UserPN).second)
          PHIsToSlice.push_back(UserPN);
        continue;
      }

      // Truncates are always ok.
      if (isa<TruncInst>(UserI)) {
        PHIUsers.push_back(PHIUsageRecord(PHIId, 0, UserI));
        continue;
      }

      // Otherwise it must be a lshr which can only be used by one trunc.
      if (UserI->getOpcode() != Instruction::LShr ||
          !UserI->hasOneUse() || !isa<TruncInst>(UserI->user_back()) ||
          !isa<ConstantInt>(UserI->getOperand(1)))
        return nullptr;

      unsigned Shift = cast<ConstantInt>(UserI->getOperand(1))->getZExtValue();
      PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, UserI->user_back()));
    }
  }
  // If we have no users, they must be all self uses, just nuke the PHI.
  if (PHIUsers.empty())
    return ReplaceInstUsesWith(FirstPhi, UndefValue::get(FirstPhi.getType()));

  // If this phi node is transformable, create new PHIs for all the pieces
  // extracted out of it. First, sort the users by their offset and size.
  array_pod_sort(PHIUsers.begin(), PHIUsers.end());

  DEBUG(dbgs() << "SLICING UP PHI: " << FirstPhi << '\n';
        for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
          dbgs() << "AND USER PHI #" << i << ": " << *PHIsToSlice[i] << '\n';
    );

  // PredValues - This is a temporary used when rewriting PHI nodes. It is
  // hoisted out here to avoid construction/destruction thrashing.
  DenseMap<BasicBlock*, Value*> PredValues;

  // ExtractedVals - Each new PHI we introduce is saved here so we don't
  // introduce redundant PHIs.
  DenseMap<LoweredPHIRecord, PHINode*> ExtractedVals;
  for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) {
    unsigned PHIId = PHIUsers[UserI].PHIId;
    PHINode *PN = PHIsToSlice[PHIId];
    unsigned Offset = PHIUsers[UserI].Shift;
    Type *Ty = PHIUsers[UserI].Inst->getType();

    PHINode *EltPHI;

    // If we've already lowered a user like this, reuse the previously lowered
    // value.
    if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == nullptr) {

      // Otherwise, create the new PHI node for this user.
      EltPHI = PHINode::Create(Ty, PN->getNumIncomingValues(),
                               PN->getName()+".off"+Twine(Offset), PN);
      assert(EltPHI->getType() != PN->getType() &&
             "Truncate didn't shrink phi?");

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        BasicBlock *Pred = PN->getIncomingBlock(i);
        Value *&PredVal = PredValues[Pred];

        // If we already have a value for this predecessor, reuse it.
        if (PredVal) {
          EltPHI->addIncoming(PredVal, Pred);
          continue;
        }

        // Handle the PHI self-reuse case.
        Value *InVal = PN->getIncomingValue(i);
        if (InVal == PN) {
          PredVal = EltPHI;
          EltPHI->addIncoming(PredVal, Pred);
          continue;
        }
        if (PHINode *InPHI = dyn_cast<PHINode>(PN)) {
          // If the incoming value was a PHI, and if it was one of the PHIs we
          // already rewrote, just use the lowered value.
          if (Value *Res = ExtractedVals[LoweredPHIRecord(InPHI, Offset, Ty)]) {
            PredVal = Res;
            EltPHI->addIncoming(PredVal, Pred);
            continue;
          }
        }

        // Otherwise, do an extract in the predecessor.
        Builder->SetInsertPoint(Pred, Pred->getTerminator());
        Value *Res = InVal;
        if (Offset)
          Res = Builder->CreateLShr(Res, ConstantInt::get(InVal->getType(),
                                                          Offset), "extract");
        Res = Builder->CreateTrunc(Res, Ty, "extract.t");
        PredVal = Res;
        EltPHI->addIncoming(Res, Pred);

        // If the incoming value was a PHI, and if it was one of the PHIs we are
        // rewriting, we will ultimately delete the code we inserted. This
        // means we need to revisit that PHI to make sure we extract out the
        // needed piece.
        if (PHINode *OldInVal = dyn_cast<PHINode>(PN->getIncomingValue(i)))
          if (PHIsInspected.count(OldInVal)) {
            unsigned RefPHIId = std::find(PHIsToSlice.begin(),PHIsToSlice.end(),
                                          OldInVal)-PHIsToSlice.begin();
            PHIUsers.push_back(PHIUsageRecord(RefPHIId, Offset,
                                              cast<Instruction>(Res)));
            ++UserE;
          }
      }
      PredValues.clear();

      DEBUG(dbgs() << "  Made element PHI for offset " << Offset << ": "
                   << *EltPHI << '\n');
      ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
    }
    // Replace the use of this piece with the PHI node.
    ReplaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
  }

  // Replace all the remaining uses of the PHI nodes (self uses and the lshrs)
  // with undefs.
  Value *Undef = UndefValue::get(FirstPhi.getType());
  for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
    ReplaceInstUsesWith(*PHIsToSlice[i], Undef);
  return ReplaceInstUsesWith(FirstPhi, Undef);
}

// PHINode simplification
//
Instruction *InstCombiner::visitPHINode(PHINode &PN) {
  if (Value *V = SimplifyInstruction(&PN, DL, TLI, DT, AT))
    return ReplaceInstUsesWith(PN, V);

  // If all PHI operands are the same operation, pull them through the PHI,
  // reducing code size.
  if (isa<Instruction>(PN.getIncomingValue(0)) &&
      isa<Instruction>(PN.getIncomingValue(1)) &&
      cast<Instruction>(PN.getIncomingValue(0))->getOpcode() ==
      cast<Instruction>(PN.getIncomingValue(1))->getOpcode() &&
      // FIXME: The hasOneUse check will fail for PHIs that use the value more
      // than once, e.g. through multiple incoming edges.
      PN.getIncomingValue(0)->hasOneUse())
    if (Instruction *Result = FoldPHIArgOpIntoPHI(PN))
      return Result;
  // If this is a trivial cycle in the PHI node graph, remove it. Basically, if
  // this PHI only has a single use (a PHI), and if that PHI only has one use (a
  // PHI)... break the cycle.
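  // For example (illustrative): %pn = phi [ %pu, ... ] whose only user is
  // %pu = phi [ %pn, ... ] is a dead cycle; both PHIs get replaced with undef.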
  if (PN.hasOneUse()) {
    Instruction *PHIUser = cast<Instruction>(PN.user_back());
    if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
      SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
      PotentiallyDeadPHIs.insert(&PN);
      if (DeadPHICycle(PU, PotentiallyDeadPHIs))
        return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
    }

    // If this phi has a single use, and if that use just computes a value for
    // the next iteration of a loop, delete the phi. This occurs with unused
    // induction variables, e.g. "for (int j = 0; ; ++j);". Detecting this
    // common case here is good because the only other things that catch this
    // are induction variable analysis (sometimes) and ADCE, which is only run
    // late.
    if (PHIUser->hasOneUse() &&
        (isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
        PHIUser->user_back() == &PN) {
      return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
    }
  }
  // We sometimes end up with phi cycles that non-obviously end up being the
  // same value, for example:
  //   z = some value; x = phi (y, z); y = phi (x, z)
  // where the phi nodes don't necessarily need to be in the same block. Do a
  // quick check to see if the PHI node only contains a single non-phi value, if
  // so, scan to see if the phi cycle is actually equal to that value.
  {
    unsigned InValNo = 0, NumIncomingVals = PN.getNumIncomingValues();
    // Scan for the first non-phi operand.
    while (InValNo != NumIncomingVals &&
           isa<PHINode>(PN.getIncomingValue(InValNo)))
      ++InValNo;

    if (InValNo != NumIncomingVals) {
      Value *NonPhiInVal = PN.getIncomingValue(InValNo);

      // Scan the rest of the operands to see if there are any conflicts, if so
      // there is no need to recursively scan other phis.
      for (++InValNo; InValNo != NumIncomingVals; ++InValNo) {
        Value *OpVal = PN.getIncomingValue(InValNo);
        if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
          break;
      }

      // If we scanned over all operands, then we have one unique value plus
      // phi values. Scan PHI nodes to see if they all merge in each other or
      // the value.
      if (InValNo == NumIncomingVals) {
        SmallPtrSet<PHINode*, 16> ValueEqualPHIs;
        if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
          return ReplaceInstUsesWith(PN, NonPhiInVal);
      }
    }
  }
  // If there are multiple PHIs, sort their operands so that they all list
  // the blocks in the same order. This will help identical PHIs be eliminated
  // by other passes. Other passes shouldn't depend on this for correctness
  // however.
  PHINode *FirstPN = cast<PHINode>(PN.getParent()->begin());
  if (&PN != FirstPN)
    for (unsigned i = 0, e = FirstPN->getNumIncomingValues(); i != e; ++i) {
      BasicBlock *BBA = PN.getIncomingBlock(i);
      BasicBlock *BBB = FirstPN->getIncomingBlock(i);
      if (BBA != BBB) {
        Value *VA = PN.getIncomingValue(i);
        unsigned j = PN.getBasicBlockIndex(BBB);
        Value *VB = PN.getIncomingValue(j);
        PN.setIncomingBlock(i, BBB);
        PN.setIncomingValue(i, VB);
        PN.setIncomingBlock(j, BBA);
        PN.setIncomingValue(j, VA);
        // NOTE: Instcombine normally would want us to "return &PN" if we
        // modified any of the operands of an instruction. However, since we
        // aren't adding or removing uses (just rearranging them) we don't do
        // this in this case.
      }
    }
  // If this is an integer PHI and we know that it has an illegal type, see if
  // it is only used by trunc or trunc(lshr) operations. If so, we split the
  // PHI into the various pieces being extracted. This sort of thing is
  // introduced when SROA promotes an aggregate to a single large integer type.
  if (PN.getType()->isIntegerTy() && DL &&
      !DL->isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
    if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
      return Res;

  return nullptr;
}