1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains routines that help analyze properties that chains of
13 //===----------------------------------------------------------------------===//
15 #include "llvm/Analysis/ValueTracking.h"
16 #include "llvm/ADT/SmallPtrSet.h"
17 #include "llvm/Analysis/AssumptionCache.h"
18 #include "llvm/Analysis/InstructionSimplify.h"
19 #include "llvm/Analysis/MemoryBuiltins.h"
20 #include "llvm/Analysis/LoopInfo.h"
21 #include "llvm/IR/CallSite.h"
22 #include "llvm/IR/ConstantRange.h"
23 #include "llvm/IR/Constants.h"
24 #include "llvm/IR/DataLayout.h"
25 #include "llvm/IR/Dominators.h"
26 #include "llvm/IR/GetElementPtrTypeIterator.h"
27 #include "llvm/IR/GlobalAlias.h"
28 #include "llvm/IR/GlobalVariable.h"
29 #include "llvm/IR/Instructions.h"
30 #include "llvm/IR/IntrinsicInst.h"
31 #include "llvm/IR/LLVMContext.h"
32 #include "llvm/IR/Metadata.h"
33 #include "llvm/IR/Operator.h"
34 #include "llvm/IR/PatternMatch.h"
35 #include "llvm/IR/Statepoint.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/MathExtras.h"
40 using namespace llvm::PatternMatch;
42 const unsigned MaxDepth = 6;
44 /// Enable an experimental feature to leverage information about dominating
45 /// conditions to compute known bits. The individual options below control how
46 /// hard we search. The defaults are chosen to be fairly aggressive. If you
47 /// run into compile time problems when testing, scale them back and report
49 static cl::opt<bool> EnableDomConditions("value-tracking-dom-conditions",
50 cl::Hidden, cl::init(false));
52 // This is expensive, so we only do it for the top level query value.
53 // (TODO: evaluate cost vs profit, consider higher thresholds)
54 static cl::opt<unsigned> DomConditionsMaxDepth("dom-conditions-max-depth",
55 cl::Hidden, cl::init(1));
57 /// How many dominating blocks should be scanned looking for dominating
59 static cl::opt<unsigned> DomConditionsMaxDomBlocks("dom-conditions-dom-blocks",
63 // Controls the number of uses of the value searched for possible
64 // dominating comparisons.
65 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
66 cl::Hidden, cl::init(2000));
68 // If true, don't consider only compares whose only use is a branch.
69 static cl::opt<bool> DomConditionsSingleCmpUse("dom-conditions-single-cmp-use",
70 cl::Hidden, cl::init(false));
72 /// Returns the bitwidth of the given scalar or pointer type (if unknown returns
73 /// 0). For vector types, returns the element type's bitwidth.
74 static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
75 if (unsigned BitWidth = Ty->getScalarSizeInBits())
78 return DL.getPointerTypeSizeInBits(Ty);
81 // Many of these functions have internal versions that take an assumption
82 // exclusion set. This is because of the potential for mutual recursion to
83 // cause computeKnownBits to repeatedly visit the same assume intrinsic. The
84 // classic case of this is assume(x = y), which will attempt to determine
85 // bits in x from bits in y, which will attempt to determine bits in y from
86 // bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
87 // isKnownNonZero, which calls computeKnownBits and ComputeSignBit and
88 // isKnownToBeAPowerOfTwo (all of which can call computeKnownBits), and so on.
89 typedef SmallPtrSet<const Value *, 8> ExclInvsSet;
92 // Simplifying using an assume can only be done in a particular control-flow
93 // context (the context instruction provides that context). If an assume and
94 // the context instruction are not in the same block then the DT helps in
95 // figuring out if we can use it.
99 const Instruction *CxtI;
100 const DominatorTree *DT;
102 Query(AssumptionCache *AC = nullptr, const Instruction *CxtI = nullptr,
103 const DominatorTree *DT = nullptr)
104 : AC(AC), CxtI(CxtI), DT(DT) {}
106 Query(const Query &Q, const Value *NewExcl)
107 : ExclInvs(Q.ExclInvs), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT) {
108 ExclInvs.insert(NewExcl);
111 } // end anonymous namespace
113 // Given the provided Value and, potentially, a context instruction, return
114 // the preferred context instruction (if any).
115 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
116 // If we've been provided with a context instruction, then use that (provided
117 // it has been inserted).
118 if (CxtI && CxtI->getParent())
121 // If the value is really an already-inserted instruction, then use that.
122 CxtI = dyn_cast<Instruction>(V);
123 if (CxtI && CxtI->getParent())
129 static void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
130 const DataLayout &DL, unsigned Depth,
133 void llvm::computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
134 const DataLayout &DL, unsigned Depth,
135 AssumptionCache *AC, const Instruction *CxtI,
136 const DominatorTree *DT) {
137 ::computeKnownBits(V, KnownZero, KnownOne, DL, Depth,
138 Query(AC, safeCxtI(V, CxtI), DT));
141 bool llvm::haveNoCommonBitsSet(Value *LHS, Value *RHS, const DataLayout &DL,
142 AssumptionCache *AC, const Instruction *CxtI,
143 const DominatorTree *DT) {
144 assert(LHS->getType() == RHS->getType() &&
145 "LHS and RHS should have the same type");
146 assert(LHS->getType()->isIntOrIntVectorTy() &&
147 "LHS and RHS should be integers");
148 IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
149 APInt LHSKnownZero(IT->getBitWidth(), 0), LHSKnownOne(IT->getBitWidth(), 0);
150 APInt RHSKnownZero(IT->getBitWidth(), 0), RHSKnownOne(IT->getBitWidth(), 0);
151 computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, 0, AC, CxtI, DT);
152 computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, 0, AC, CxtI, DT);
153 return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
156 static void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
157 const DataLayout &DL, unsigned Depth,
160 void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
161 const DataLayout &DL, unsigned Depth,
162 AssumptionCache *AC, const Instruction *CxtI,
163 const DominatorTree *DT) {
164 ::ComputeSignBit(V, KnownZero, KnownOne, DL, Depth,
165 Query(AC, safeCxtI(V, CxtI), DT));
168 static bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
169 const Query &Q, const DataLayout &DL);
171 bool llvm::isKnownToBeAPowerOfTwo(Value *V, const DataLayout &DL, bool OrZero,
172 unsigned Depth, AssumptionCache *AC,
173 const Instruction *CxtI,
174 const DominatorTree *DT) {
175 return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
176 Query(AC, safeCxtI(V, CxtI), DT), DL);
179 static bool isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
182 bool llvm::isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
183 AssumptionCache *AC, const Instruction *CxtI,
184 const DominatorTree *DT) {
185 return ::isKnownNonZero(V, DL, Depth, Query(AC, safeCxtI(V, CxtI), DT));
188 bool llvm::isKnownNonNegative(Value *V, const DataLayout &DL, unsigned Depth,
189 AssumptionCache *AC, const Instruction *CxtI,
190 const DominatorTree *DT) {
191 bool NonNegative, Negative;
192 ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
196 static bool MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
197 unsigned Depth, const Query &Q);
199 bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
200 unsigned Depth, AssumptionCache *AC,
201 const Instruction *CxtI, const DominatorTree *DT) {
202 return ::MaskedValueIsZero(V, Mask, DL, Depth,
203 Query(AC, safeCxtI(V, CxtI), DT));
206 static unsigned ComputeNumSignBits(Value *V, const DataLayout &DL,
207 unsigned Depth, const Query &Q);
209 unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout &DL,
210 unsigned Depth, AssumptionCache *AC,
211 const Instruction *CxtI,
212 const DominatorTree *DT) {
213 return ::ComputeNumSignBits(V, DL, Depth, Query(AC, safeCxtI(V, CxtI), DT));
216 static void computeKnownBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
217 APInt &KnownZero, APInt &KnownOne,
218 APInt &KnownZero2, APInt &KnownOne2,
219 const DataLayout &DL, unsigned Depth,
222 if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
223 // We know that the top bits of C-X are clear if X contains less bits
224 // than C (i.e. no wrap-around can happen). For example, 20-X is
225 // positive if we can prove that X is >= 0 and < 16.
226 if (!CLHS->getValue().isNegative()) {
227 unsigned BitWidth = KnownZero.getBitWidth();
228 unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
229 // NLZ can't be BitWidth with no sign bit
230 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
231 computeKnownBits(Op1, KnownZero2, KnownOne2, DL, Depth + 1, Q);
233 // If all of the MaskV bits are known to be zero, then we know the
234 // output top bits are zero, because we now know that the output is
236 if ((KnownZero2 & MaskV) == MaskV) {
237 unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
238 // Top bits known zero.
239 KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
245 unsigned BitWidth = KnownZero.getBitWidth();
247 // If an initial sequence of bits in the result is not needed, the
248 // corresponding bits in the operands are not needed.
249 APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
250 computeKnownBits(Op0, LHSKnownZero, LHSKnownOne, DL, Depth + 1, Q);
251 computeKnownBits(Op1, KnownZero2, KnownOne2, DL, Depth + 1, Q);
253 // Carry in a 1 for a subtract, rather than a 0.
254 APInt CarryIn(BitWidth, 0);
256 // Sum = LHS + ~RHS + 1
257 std::swap(KnownZero2, KnownOne2);
261 APInt PossibleSumZero = ~LHSKnownZero + ~KnownZero2 + CarryIn;
262 APInt PossibleSumOne = LHSKnownOne + KnownOne2 + CarryIn;
264 // Compute known bits of the carry.
265 APInt CarryKnownZero = ~(PossibleSumZero ^ LHSKnownZero ^ KnownZero2);
266 APInt CarryKnownOne = PossibleSumOne ^ LHSKnownOne ^ KnownOne2;
268 // Compute set of known bits (where all three relevant bits are known).
269 APInt LHSKnown = LHSKnownZero | LHSKnownOne;
270 APInt RHSKnown = KnownZero2 | KnownOne2;
271 APInt CarryKnown = CarryKnownZero | CarryKnownOne;
272 APInt Known = LHSKnown & RHSKnown & CarryKnown;
274 assert((PossibleSumZero & Known) == (PossibleSumOne & Known) &&
275 "known bits of sum differ");
277 // Compute known bits of the result.
278 KnownZero = ~PossibleSumOne & Known;
279 KnownOne = PossibleSumOne & Known;
281 // Are we still trying to solve for the sign bit?
282 if (!Known.isNegative()) {
284 // Adding two non-negative numbers, or subtracting a negative number from
285 // a non-negative one, can't wrap into negative.
286 if (LHSKnownZero.isNegative() && KnownZero2.isNegative())
287 KnownZero |= APInt::getSignBit(BitWidth);
288 // Adding two negative numbers, or subtracting a non-negative number from
289 // a negative one, can't wrap into non-negative.
290 else if (LHSKnownOne.isNegative() && KnownOne2.isNegative())
291 KnownOne |= APInt::getSignBit(BitWidth);
296 static void computeKnownBitsMul(Value *Op0, Value *Op1, bool NSW,
297 APInt &KnownZero, APInt &KnownOne,
298 APInt &KnownZero2, APInt &KnownOne2,
299 const DataLayout &DL, unsigned Depth,
301 unsigned BitWidth = KnownZero.getBitWidth();
302 computeKnownBits(Op1, KnownZero, KnownOne, DL, Depth + 1, Q);
303 computeKnownBits(Op0, KnownZero2, KnownOne2, DL, Depth + 1, Q);
305 bool isKnownNegative = false;
306 bool isKnownNonNegative = false;
307 // If the multiplication is known not to overflow, compute the sign bit.
310 // The product of a number with itself is non-negative.
311 isKnownNonNegative = true;
313 bool isKnownNonNegativeOp1 = KnownZero.isNegative();
314 bool isKnownNonNegativeOp0 = KnownZero2.isNegative();
315 bool isKnownNegativeOp1 = KnownOne.isNegative();
316 bool isKnownNegativeOp0 = KnownOne2.isNegative();
317 // The product of two numbers with the same sign is non-negative.
318 isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
319 (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
320 // The product of a negative number and a non-negative number is either
322 if (!isKnownNonNegative)
323 isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
324 isKnownNonZero(Op0, DL, Depth, Q)) ||
325 (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
326 isKnownNonZero(Op1, DL, Depth, Q));
330 // If low bits are zero in either operand, output low known-0 bits.
331 // Also compute a conserative estimate for high known-0 bits.
332 // More trickiness is possible, but this is sufficient for the
333 // interesting case of alignment computation.
334 KnownOne.clearAllBits();
335 unsigned TrailZ = KnownZero.countTrailingOnes() +
336 KnownZero2.countTrailingOnes();
337 unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
338 KnownZero2.countLeadingOnes(),
339 BitWidth) - BitWidth;
341 TrailZ = std::min(TrailZ, BitWidth);
342 LeadZ = std::min(LeadZ, BitWidth);
343 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
344 APInt::getHighBitsSet(BitWidth, LeadZ);
346 // Only make use of no-wrap flags if we failed to compute the sign bit
347 // directly. This matters if the multiplication always overflows, in
348 // which case we prefer to follow the result of the direct computation,
349 // though as the program is invoking undefined behaviour we can choose
350 // whatever we like here.
351 if (isKnownNonNegative && !KnownOne.isNegative())
352 KnownZero.setBit(BitWidth - 1);
353 else if (isKnownNegative && !KnownZero.isNegative())
354 KnownOne.setBit(BitWidth - 1);
357 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
359 unsigned BitWidth = KnownZero.getBitWidth();
360 unsigned NumRanges = Ranges.getNumOperands() / 2;
361 assert(NumRanges >= 1);
363 // Use the high end of the ranges to find leading zeros.
364 unsigned MinLeadingZeros = BitWidth;
365 for (unsigned i = 0; i < NumRanges; ++i) {
367 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
369 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
370 ConstantRange Range(Lower->getValue(), Upper->getValue());
371 if (Range.isWrappedSet())
372 MinLeadingZeros = 0; // -1 has no zeros
373 unsigned LeadingZeros = (Upper->getValue() - 1).countLeadingZeros();
374 MinLeadingZeros = std::min(LeadingZeros, MinLeadingZeros);
377 KnownZero = APInt::getHighBitsSet(BitWidth, MinLeadingZeros);
380 static bool isEphemeralValueOf(Instruction *I, const Value *E) {
381 SmallVector<const Value *, 16> WorkSet(1, I);
382 SmallPtrSet<const Value *, 32> Visited;
383 SmallPtrSet<const Value *, 16> EphValues;
385 while (!WorkSet.empty()) {
386 const Value *V = WorkSet.pop_back_val();
387 if (!Visited.insert(V).second)
390 // If all uses of this value are ephemeral, then so is this value.
391 bool FoundNEUse = false;
392 for (const User *I : V->users())
393 if (!EphValues.count(I)) {
403 if (const User *U = dyn_cast<User>(V))
404 for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
406 if (isSafeToSpeculativelyExecute(*J))
407 WorkSet.push_back(*J);
415 // Is this an intrinsic that cannot be speculated but also cannot trap?
416 static bool isAssumeLikeIntrinsic(const Instruction *I) {
417 if (const CallInst *CI = dyn_cast<CallInst>(I))
418 if (Function *F = CI->getCalledFunction())
419 switch (F->getIntrinsicID()) {
421 // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
422 case Intrinsic::assume:
423 case Intrinsic::dbg_declare:
424 case Intrinsic::dbg_value:
425 case Intrinsic::invariant_start:
426 case Intrinsic::invariant_end:
427 case Intrinsic::lifetime_start:
428 case Intrinsic::lifetime_end:
429 case Intrinsic::objectsize:
430 case Intrinsic::ptr_annotation:
431 case Intrinsic::var_annotation:
438 static bool isValidAssumeForContext(Value *V, const Query &Q) {
439 Instruction *Inv = cast<Instruction>(V);
441 // There are two restrictions on the use of an assume:
442 // 1. The assume must dominate the context (or the control flow must
443 // reach the assume whenever it reaches the context).
444 // 2. The context must not be in the assume's set of ephemeral values
445 // (otherwise we will use the assume to prove that the condition
446 // feeding the assume is trivially true, thus causing the removal of
450 if (Q.DT->dominates(Inv, Q.CxtI)) {
452 } else if (Inv->getParent() == Q.CxtI->getParent()) {
453 // The context comes first, but they're both in the same block. Make sure
454 // there is nothing in between that might interrupt the control flow.
455 for (BasicBlock::const_iterator I =
456 std::next(BasicBlock::const_iterator(Q.CxtI)),
457 IE(Inv); I != IE; ++I)
458 if (!isSafeToSpeculativelyExecute(I) && !isAssumeLikeIntrinsic(I))
461 return !isEphemeralValueOf(Inv, Q.CxtI);
467 // When we don't have a DT, we do a limited search...
468 if (Inv->getParent() == Q.CxtI->getParent()->getSinglePredecessor()) {
470 } else if (Inv->getParent() == Q.CxtI->getParent()) {
471 // Search forward from the assume until we reach the context (or the end
472 // of the block); the common case is that the assume will come first.
473 for (BasicBlock::iterator I = std::next(BasicBlock::iterator(Inv)),
474 IE = Inv->getParent()->end(); I != IE; ++I)
478 // The context must come first...
479 for (BasicBlock::const_iterator I =
480 std::next(BasicBlock::const_iterator(Q.CxtI)),
481 IE(Inv); I != IE; ++I)
482 if (!isSafeToSpeculativelyExecute(I) && !isAssumeLikeIntrinsic(I))
485 return !isEphemeralValueOf(Inv, Q.CxtI);
491 bool llvm::isValidAssumeForContext(const Instruction *I,
492 const Instruction *CxtI,
493 const DominatorTree *DT) {
494 return ::isValidAssumeForContext(const_cast<Instruction *>(I),
495 Query(nullptr, CxtI, DT));
498 template<typename LHS, typename RHS>
499 inline match_combine_or<CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>,
500 CmpClass_match<RHS, LHS, ICmpInst, ICmpInst::Predicate>>
501 m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
502 return m_CombineOr(m_ICmp(Pred, L, R), m_ICmp(Pred, R, L));
505 template<typename LHS, typename RHS>
506 inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::And>,
507 BinaryOp_match<RHS, LHS, Instruction::And>>
508 m_c_And(const LHS &L, const RHS &R) {
509 return m_CombineOr(m_And(L, R), m_And(R, L));
512 template<typename LHS, typename RHS>
513 inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::Or>,
514 BinaryOp_match<RHS, LHS, Instruction::Or>>
515 m_c_Or(const LHS &L, const RHS &R) {
516 return m_CombineOr(m_Or(L, R), m_Or(R, L));
519 template<typename LHS, typename RHS>
520 inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::Xor>,
521 BinaryOp_match<RHS, LHS, Instruction::Xor>>
522 m_c_Xor(const LHS &L, const RHS &R) {
523 return m_CombineOr(m_Xor(L, R), m_Xor(R, L));
526 /// Compute known bits in 'V' under the assumption that the condition 'Cmp' is
527 /// true (at the context instruction.) This is mostly a utility function for
528 /// the prototype dominating conditions reasoning below.
529 static void computeKnownBitsFromTrueCondition(Value *V, ICmpInst *Cmp,
532 const DataLayout &DL,
533 unsigned Depth, const Query &Q) {
534 Value *LHS = Cmp->getOperand(0);
535 Value *RHS = Cmp->getOperand(1);
536 // TODO: We could potentially be more aggressive here. This would be worth
537 // evaluating. If we can, explore commoning this code with the assume
539 if (LHS != V && RHS != V)
542 const unsigned BitWidth = KnownZero.getBitWidth();
544 switch (Cmp->getPredicate()) {
546 // We know nothing from this condition
548 // TODO: implement unsigned bound from below (known one bits)
549 // TODO: common condition check implementations with assumes
550 // TODO: implement other patterns from assume (e.g. V & B == A)
551 case ICmpInst::ICMP_SGT:
553 APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0);
554 computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
555 if (KnownOneTemp.isAllOnesValue() || KnownZeroTemp.isNegative()) {
556 // We know that the sign bit is zero.
557 KnownZero |= APInt::getSignBit(BitWidth);
561 case ICmpInst::ICMP_EQ:
563 APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0);
565 computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
567 computeKnownBits(LHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
569 llvm_unreachable("missing use?");
570 KnownZero |= KnownZeroTemp;
571 KnownOne |= KnownOneTemp;
574 case ICmpInst::ICMP_ULE:
576 APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0);
577 computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
578 // The known zero bits carry over
579 unsigned SignBits = KnownZeroTemp.countLeadingOnes();
580 KnownZero |= APInt::getHighBitsSet(BitWidth, SignBits);
583 case ICmpInst::ICMP_ULT:
585 APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0);
586 computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
587 // Whatever high bits in rhs are zero are known to be zero (if rhs is a
588 // power of 2, then one more).
589 unsigned SignBits = KnownZeroTemp.countLeadingOnes();
590 if (isKnownToBeAPowerOfTwo(RHS, false, Depth + 1, Query(Q, Cmp), DL))
592 KnownZero |= APInt::getHighBitsSet(BitWidth, SignBits);
598 /// Compute known bits in 'V' from conditions which are known to be true along
599 /// all paths leading to the context instruction. In particular, look for
600 /// cases where one branch of an interesting condition dominates the context
601 /// instruction. This does not do general dataflow.
602 /// NOTE: This code is EXPERIMENTAL and currently off by default.
603 static void computeKnownBitsFromDominatingCondition(Value *V, APInt &KnownZero,
605 const DataLayout &DL,
608 // Need both the dominator tree and the query location to do anything useful
609 if (!Q.DT || !Q.CxtI)
611 Instruction *Cxt = const_cast<Instruction *>(Q.CxtI);
613 // Avoid useless work
614 if (auto VI = dyn_cast<Instruction>(V))
615 if (VI->getParent() == Cxt->getParent())
618 // Note: We currently implement two options. It's not clear which of these
619 // will survive long term, we need data for that.
620 // Option 1 - Try walking the dominator tree looking for conditions which
621 // might apply. This works well for local conditions (loop guards, etc..),
622 // but not as well for things far from the context instruction (presuming a
623 // low max blocks explored). If we can set an high enough limit, this would
625 // Option 2 - We restrict out search to those conditions which are uses of
626 // the value we're interested in. This is independent of dom structure,
627 // but is slightly less powerful without looking through lots of use chains.
628 // It does handle conditions far from the context instruction (e.g. early
629 // function exits on entry) really well though.
631 // Option 1 - Search the dom tree
632 unsigned NumBlocksExplored = 0;
633 BasicBlock *Current = Cxt->getParent();
635 // Stop searching if we've gone too far up the chain
636 if (NumBlocksExplored >= DomConditionsMaxDomBlocks)
640 if (!Q.DT->getNode(Current)->getIDom())
642 Current = Q.DT->getNode(Current)->getIDom()->getBlock();
644 // found function entry
647 BranchInst *BI = dyn_cast<BranchInst>(Current->getTerminator());
648 if (!BI || BI->isUnconditional())
650 ICmpInst *Cmp = dyn_cast<ICmpInst>(BI->getCondition());
654 // We're looking for conditions that are guaranteed to hold at the context
655 // instruction. Finding a condition where one path dominates the context
656 // isn't enough because both the true and false cases could merge before
657 // the context instruction we're actually interested in. Instead, we need
658 // to ensure that the taken *edge* dominates the context instruction.
659 BasicBlock *BB0 = BI->getSuccessor(0);
660 BasicBlockEdge Edge(BI->getParent(), BB0);
661 if (!Edge.isSingleEdge() || !Q.DT->dominates(Edge, Q.CxtI->getParent()))
664 computeKnownBitsFromTrueCondition(V, Cmp, KnownZero, KnownOne, DL, Depth,
668 // Option 2 - Search the other uses of V
669 unsigned NumUsesExplored = 0;
670 for (auto U : V->users()) {
671 // Avoid massive lists
672 if (NumUsesExplored >= DomConditionsMaxUses)
675 // Consider only compare instructions uniquely controlling a branch
676 ICmpInst *Cmp = dyn_cast<ICmpInst>(U);
680 if (DomConditionsSingleCmpUse && !Cmp->hasOneUse())
683 for (auto *CmpU : Cmp->users()) {
684 BranchInst *BI = dyn_cast<BranchInst>(CmpU);
685 if (!BI || BI->isUnconditional())
687 // We're looking for conditions that are guaranteed to hold at the
688 // context instruction. Finding a condition where one path dominates
689 // the context isn't enough because both the true and false cases could
690 // merge before the context instruction we're actually interested in.
691 // Instead, we need to ensure that the taken *edge* dominates the context
693 BasicBlock *BB0 = BI->getSuccessor(0);
694 BasicBlockEdge Edge(BI->getParent(), BB0);
695 if (!Edge.isSingleEdge() || !Q.DT->dominates(Edge, Q.CxtI->getParent()))
698 computeKnownBitsFromTrueCondition(V, Cmp, KnownZero, KnownOne, DL, Depth,
704 static void computeKnownBitsFromAssume(Value *V, APInt &KnownZero,
705 APInt &KnownOne, const DataLayout &DL,
706 unsigned Depth, const Query &Q) {
707 // Use of assumptions is context-sensitive. If we don't have a context, we
709 if (!Q.AC || !Q.CxtI)
712 unsigned BitWidth = KnownZero.getBitWidth();
714 for (auto &AssumeVH : Q.AC->assumptions()) {
717 CallInst *I = cast<CallInst>(AssumeVH);
718 assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
719 "Got assumption for the wrong function!");
720 if (Q.ExclInvs.count(I))
723 // Warning: This loop can end up being somewhat performance sensetive.
724 // We're running this loop for once for each value queried resulting in a
725 // runtime of ~O(#assumes * #values).
727 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
728 "must be an assume intrinsic");
730 Value *Arg = I->getArgOperand(0);
732 if (Arg == V && isValidAssumeForContext(I, Q)) {
733 assert(BitWidth == 1 && "assume operand is not i1?");
734 KnownZero.clearAllBits();
735 KnownOne.setAllBits();
739 // The remaining tests are all recursive, so bail out if we hit the limit.
740 if (Depth == MaxDepth)
744 auto m_V = m_CombineOr(m_Specific(V),
745 m_CombineOr(m_PtrToInt(m_Specific(V)),
746 m_BitCast(m_Specific(V))));
748 CmpInst::Predicate Pred;
751 if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
752 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
753 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
754 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
755 KnownZero |= RHSKnownZero;
756 KnownOne |= RHSKnownOne;
758 } else if (match(Arg,
759 m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
760 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
761 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
762 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
763 APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
764 computeKnownBits(B, MaskKnownZero, MaskKnownOne, DL, Depth+1, Query(Q, I));
766 // For those bits in the mask that are known to be one, we can propagate
767 // known bits from the RHS to V.
768 KnownZero |= RHSKnownZero & MaskKnownOne;
769 KnownOne |= RHSKnownOne & MaskKnownOne;
770 // assume(~(v & b) = a)
771 } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
773 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
774 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
775 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
776 APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
777 computeKnownBits(B, MaskKnownZero, MaskKnownOne, DL, Depth+1, Query(Q, I));
779 // For those bits in the mask that are known to be one, we can propagate
780 // inverted known bits from the RHS to V.
781 KnownZero |= RHSKnownOne & MaskKnownOne;
782 KnownOne |= RHSKnownZero & MaskKnownOne;
784 } else if (match(Arg,
785 m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
786 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
787 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
788 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
789 APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
790 computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I));
792 // For those bits in B that are known to be zero, we can propagate known
793 // bits from the RHS to V.
794 KnownZero |= RHSKnownZero & BKnownZero;
795 KnownOne |= RHSKnownOne & BKnownZero;
796 // assume(~(v | b) = a)
797 } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
799 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
800 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
801 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
802 APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
803 computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I));
805 // For those bits in B that are known to be zero, we can propagate
806 // inverted known bits from the RHS to V.
807 KnownZero |= RHSKnownOne & BKnownZero;
808 KnownOne |= RHSKnownZero & BKnownZero;
810 } else if (match(Arg,
811 m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
812 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
813 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
814 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
815 APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
816 computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I));
818 // For those bits in B that are known to be zero, we can propagate known
819 // bits from the RHS to V. For those bits in B that are known to be one,
820 // we can propagate inverted known bits from the RHS to V.
821 KnownZero |= RHSKnownZero & BKnownZero;
822 KnownOne |= RHSKnownOne & BKnownZero;
823 KnownZero |= RHSKnownOne & BKnownOne;
824 KnownOne |= RHSKnownZero & BKnownOne;
825 // assume(~(v ^ b) = a)
826 } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
828 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
829 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
830 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
831 APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
832 computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I));
834 // For those bits in B that are known to be zero, we can propagate
835 // inverted known bits from the RHS to V. For those bits in B that are
836 // known to be one, we can propagate known bits from the RHS to V.
837 KnownZero |= RHSKnownOne & BKnownZero;
838 KnownOne |= RHSKnownZero & BKnownZero;
839 KnownZero |= RHSKnownZero & BKnownOne;
840 KnownOne |= RHSKnownOne & BKnownOne;
841 // assume(v << c = a)
842 } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
844 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
845 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
846 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
847 // For those bits in RHS that are known, we can propagate them to known
848 // bits in V shifted to the right by C.
849 KnownZero |= RHSKnownZero.lshr(C->getZExtValue());
850 KnownOne |= RHSKnownOne.lshr(C->getZExtValue());
851 // assume(~(v << c) = a)
852 } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
854 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
855 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
856 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
857 // For those bits in RHS that are known, we can propagate them inverted
858 // to known bits in V shifted to the right by C.
859 KnownZero |= RHSKnownOne.lshr(C->getZExtValue());
860 KnownOne |= RHSKnownZero.lshr(C->getZExtValue());
861 // assume(v >> c = a)
862 } else if (match(Arg,
863 m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)),
864 m_AShr(m_V, m_ConstantInt(C))),
866 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
867 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
868 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
869 // For those bits in RHS that are known, we can propagate them to known
870 // bits in V shifted to the right by C.
871 KnownZero |= RHSKnownZero << C->getZExtValue();
872 KnownOne |= RHSKnownOne << C->getZExtValue();
873 // assume(~(v >> c) = a)
874 } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_CombineOr(
875 m_LShr(m_V, m_ConstantInt(C)),
876 m_AShr(m_V, m_ConstantInt(C)))),
878 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
879 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
880 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
881 // For those bits in RHS that are known, we can propagate them inverted
882 // to known bits in V shifted to the right by C.
883 KnownZero |= RHSKnownOne << C->getZExtValue();
884 KnownOne |= RHSKnownZero << C->getZExtValue();
885 // assume(v >=_s c) where c is non-negative
886 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
887 Pred == ICmpInst::ICMP_SGE && isValidAssumeForContext(I, Q)) {
888 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
889 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
891 if (RHSKnownZero.isNegative()) {
892 // We know that the sign bit is zero.
893 KnownZero |= APInt::getSignBit(BitWidth);
895 // assume(v >_s c) where c is at least -1.
896 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
897 Pred == ICmpInst::ICMP_SGT && isValidAssumeForContext(I, Q)) {
898 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
899 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
901 if (RHSKnownOne.isAllOnesValue() || RHSKnownZero.isNegative()) {
902 // We know that the sign bit is zero.
903 KnownZero |= APInt::getSignBit(BitWidth);
905 // assume(v <=_s c) where c is negative
906 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
907 Pred == ICmpInst::ICMP_SLE && isValidAssumeForContext(I, Q)) {
908 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
909 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
911 if (RHSKnownOne.isNegative()) {
912 // We know that the sign bit is one.
913 KnownOne |= APInt::getSignBit(BitWidth);
915 // assume(v <_s c) where c is non-positive
916 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
917 Pred == ICmpInst::ICMP_SLT && isValidAssumeForContext(I, Q)) {
918 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
919 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
921 if (RHSKnownZero.isAllOnesValue() || RHSKnownOne.isNegative()) {
922 // We know that the sign bit is one.
923 KnownOne |= APInt::getSignBit(BitWidth);
926 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
927 Pred == ICmpInst::ICMP_ULE && isValidAssumeForContext(I, Q)) {
928 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
929 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
931 // Whatever high bits in c are zero are known to be zero.
933 APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
935 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
936 Pred == ICmpInst::ICMP_ULT && isValidAssumeForContext(I, Q)) {
937 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
938 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
940 // Whatever high bits in c are zero are known to be zero (if c is a power
941 // of 2, then one more).
942 if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I), DL))
944 APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes()+1);
947 APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
952 static void computeKnownBitsFromOperator(Operator *I, APInt &KnownZero,
953 APInt &KnownOne, const DataLayout &DL,
954 unsigned Depth, const Query &Q) {
955 unsigned BitWidth = KnownZero.getBitWidth();
957 APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
958 switch (I->getOpcode()) {
960 case Instruction::Load:
961 if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
962 computeKnownBitsFromRangeMetadata(*MD, KnownZero);
964 case Instruction::And: {
965 // If either the LHS or the RHS are Zero, the result is zero.
966 computeKnownBits(I->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, Q);
967 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q);
969 // Output known-1 bits are only known if set in both the LHS & RHS.
970 KnownOne &= KnownOne2;
971 // Output known-0 are known to be clear if zero in either the LHS | RHS.
972 KnownZero |= KnownZero2;
975 case Instruction::Or: {
976 computeKnownBits(I->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, Q);
977 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q);
979 // Output known-0 bits are only known if clear in both the LHS & RHS.
980 KnownZero &= KnownZero2;
981 // Output known-1 are known to be set if set in either the LHS | RHS.
982 KnownOne |= KnownOne2;
985 case Instruction::Xor: {
986 computeKnownBits(I->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, Q);
987 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q);
989 // Output known-0 bits are known if clear or set in both the LHS & RHS.
990 APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
991 // Output known-1 are known to be set if set in only one of the LHS, RHS.
992 KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
993 KnownZero = KnownZeroOut;
996 case Instruction::Mul: {
997 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
998 computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, KnownZero,
999 KnownOne, KnownZero2, KnownOne2, DL, Depth, Q);
1002 case Instruction::UDiv: {
1003 // For the purposes of computing leading zeros we can conservatively
1004 // treat a udiv as a logical right shift by the power of 2 known to
1005 // be less than the denominator.
1006 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q);
1007 unsigned LeadZ = KnownZero2.countLeadingOnes();
1009 KnownOne2.clearAllBits();
1010 KnownZero2.clearAllBits();
1011 computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, DL, Depth + 1, Q);
1012 unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
1013 if (RHSUnknownLeadingOnes != BitWidth)
1014 LeadZ = std::min(BitWidth,
1015 LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
1017 KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
1020 case Instruction::Select:
1021 computeKnownBits(I->getOperand(2), KnownZero, KnownOne, DL, Depth + 1, Q);
1022 computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, DL, Depth + 1, Q);
1024 // Only known if known in both the LHS and RHS.
1025 KnownOne &= KnownOne2;
1026 KnownZero &= KnownZero2;
1028 case Instruction::FPTrunc:
1029 case Instruction::FPExt:
1030 case Instruction::FPToUI:
1031 case Instruction::FPToSI:
1032 case Instruction::SIToFP:
1033 case Instruction::UIToFP:
1034 break; // Can't work with floating point.
1035 case Instruction::PtrToInt:
1036 case Instruction::IntToPtr:
1037 case Instruction::AddrSpaceCast: // Pointers could be different sizes.
1038 // FALL THROUGH and handle them the same as zext/trunc.
1039 case Instruction::ZExt:
1040 case Instruction::Trunc: {
1041 Type *SrcTy = I->getOperand(0)->getType();
1043 unsigned SrcBitWidth;
1044 // Note that we handle pointer operands here because of inttoptr/ptrtoint
1045 // which fall through here.
1046 SrcBitWidth = DL.getTypeSizeInBits(SrcTy->getScalarType());
1048 assert(SrcBitWidth && "SrcBitWidth can't be zero");
1049 KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
1050 KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
1051 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
1052 KnownZero = KnownZero.zextOrTrunc(BitWidth);
1053 KnownOne = KnownOne.zextOrTrunc(BitWidth);
1054 // Any top bits are known to be zero.
1055 if (BitWidth > SrcBitWidth)
1056 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
1059 case Instruction::BitCast: {
1060 Type *SrcTy = I->getOperand(0)->getType();
1061 if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
1062 // TODO: For now, not handling conversions like:
1063 // (bitcast i64 %x to <2 x i32>)
1064 !I->getType()->isVectorTy()) {
1065 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
1070 case Instruction::SExt: {
1071 // Compute the bits in the result that are not present in the input.
1072 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1074 KnownZero = KnownZero.trunc(SrcBitWidth);
1075 KnownOne = KnownOne.trunc(SrcBitWidth);
1076 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
1077 KnownZero = KnownZero.zext(BitWidth);
1078 KnownOne = KnownOne.zext(BitWidth);
1080 // If the sign bit of the input is known set or clear, then we know the
1081 // top bits of the result.
1082 if (KnownZero[SrcBitWidth-1]) // Input sign bit known zero
1083 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
1084 else if (KnownOne[SrcBitWidth-1]) // Input sign bit known set
1085 KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
1088 case Instruction::Shl:
1089 // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
1090 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
1091 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
1092 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
1093 KnownZero <<= ShiftAmt;
1094 KnownOne <<= ShiftAmt;
1095 KnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt); // low bits known 0
1098 case Instruction::LShr:
1099 // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
1100 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
1101 // Compute the new bits that are at the top now.
1102 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
1104 // Unsigned shift right.
1105 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
1106 KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
1107 KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
1108 // high bits known zero.
1109 KnownZero |= APInt::getHighBitsSet(BitWidth, ShiftAmt);
1112 case Instruction::AShr:
1113 // (ashr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
1114 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
1115 // Compute the new bits that are at the top now.
1116 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
1118 // Signed shift right.
1119 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
1120 KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
1121 KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
1123 APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
1124 if (KnownZero[BitWidth-ShiftAmt-1]) // New bits are known zero.
1125 KnownZero |= HighBits;
1126 else if (KnownOne[BitWidth-ShiftAmt-1]) // New bits are known one.
1127 KnownOne |= HighBits;
1130 case Instruction::Sub: {
1131 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1132 computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1133 KnownZero, KnownOne, KnownZero2, KnownOne2, DL,
1137 case Instruction::Add: {
1138 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
1139 computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1140 KnownZero, KnownOne, KnownZero2, KnownOne2, DL,
1144 case Instruction::SRem:
1145 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1146 APInt RA = Rem->getValue().abs();
1147 if (RA.isPowerOf2()) {
1148 APInt LowBits = RA - 1;
1149 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1,
1152 // The low bits of the first operand are unchanged by the srem.
1153 KnownZero = KnownZero2 & LowBits;
1154 KnownOne = KnownOne2 & LowBits;
1156 // If the first operand is non-negative or has all low bits zero, then
1157 // the upper bits are all zero.
1158 if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
1159 KnownZero |= ~LowBits;
1161 // If the first operand is negative and not all low bits are zero, then
1162 // the upper bits are all one.
1163 if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
1164 KnownOne |= ~LowBits;
1166 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1170 // The sign bit is the LHS's sign bit, except when the result of the
1171 // remainder is zero.
1172 if (KnownZero.isNonNegative()) {
1173 APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
1174 computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, DL,
1176 // If it's known zero, our sign bit is also zero.
1177 if (LHSKnownZero.isNegative())
1178 KnownZero.setBit(BitWidth - 1);
1182 case Instruction::URem: {
1183 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1184 APInt RA = Rem->getValue();
1185 if (RA.isPowerOf2()) {
1186 APInt LowBits = (RA - 1);
1187 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1,
1189 KnownZero |= ~LowBits;
1190 KnownOne &= LowBits;
1195 // Since the result is less than or equal to either operand, any leading
1196 // zero bits in either operand must also exist in the result.
1197 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
1198 computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, DL, Depth + 1, Q);
1200 unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
1201 KnownZero2.countLeadingOnes());
1202 KnownOne.clearAllBits();
1203 KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
1207 case Instruction::Alloca: {
1208 AllocaInst *AI = cast<AllocaInst>(I);
1209 unsigned Align = AI->getAlignment();
1211 Align = DL.getABITypeAlignment(AI->getType()->getElementType());
1214 KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
1217 case Instruction::GetElementPtr: {
1218 // Analyze all of the subscripts of this getelementptr instruction
1219 // to determine if we can prove known low zero bits.
1220 APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
1221 computeKnownBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, DL,
1223 unsigned TrailZ = LocalKnownZero.countTrailingOnes();
1225 gep_type_iterator GTI = gep_type_begin(I);
1226 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1227 Value *Index = I->getOperand(i);
1228 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
1229 // Handle struct member offset arithmetic.
1231 // Handle case when index is vector zeroinitializer
1232 Constant *CIndex = cast<Constant>(Index);
1233 if (CIndex->isZeroValue())
1236 if (CIndex->getType()->isVectorTy())
1237 Index = CIndex->getSplatValue();
1239 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1240 const StructLayout *SL = DL.getStructLayout(STy);
1241 uint64_t Offset = SL->getElementOffset(Idx);
1242 TrailZ = std::min<unsigned>(TrailZ,
1243 countTrailingZeros(Offset));
1245 // Handle array index arithmetic.
1246 Type *IndexedTy = GTI.getIndexedType();
1247 if (!IndexedTy->isSized()) {
1251 unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
1252 uint64_t TypeSize = DL.getTypeAllocSize(IndexedTy);
1253 LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
1254 computeKnownBits(Index, LocalKnownZero, LocalKnownOne, DL, Depth + 1,
1256 TrailZ = std::min(TrailZ,
1257 unsigned(countTrailingZeros(TypeSize) +
1258 LocalKnownZero.countTrailingOnes()));
1262 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ);
1265 case Instruction::PHI: {
1266 PHINode *P = cast<PHINode>(I);
1267 // Handle the case of a simple two-predecessor recurrence PHI.
1268 // There's a lot more that could theoretically be done here, but
1269 // this is sufficient to catch some interesting cases.
1270 if (P->getNumIncomingValues() == 2) {
1271 for (unsigned i = 0; i != 2; ++i) {
1272 Value *L = P->getIncomingValue(i);
1273 Value *R = P->getIncomingValue(!i);
1274 Operator *LU = dyn_cast<Operator>(L);
1277 unsigned Opcode = LU->getOpcode();
1278 // Check for operations that have the property that if
1279 // both their operands have low zero bits, the result
1280 // will have low zero bits.
1281 if (Opcode == Instruction::Add ||
1282 Opcode == Instruction::Sub ||
1283 Opcode == Instruction::And ||
1284 Opcode == Instruction::Or ||
1285 Opcode == Instruction::Mul) {
1286 Value *LL = LU->getOperand(0);
1287 Value *LR = LU->getOperand(1);
1288 // Find a recurrence.
1295 // Ok, we have a PHI of the form L op= R. Check for low
1297 computeKnownBits(R, KnownZero2, KnownOne2, DL, Depth + 1, Q);
1299 // We need to take the minimum number of known bits
1300 APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
1301 computeKnownBits(L, KnownZero3, KnownOne3, DL, Depth + 1, Q);
1303 KnownZero = APInt::getLowBitsSet(BitWidth,
1304 std::min(KnownZero2.countTrailingOnes(),
1305 KnownZero3.countTrailingOnes()));
1311 // Unreachable blocks may have zero-operand PHI nodes.
1312 if (P->getNumIncomingValues() == 0)
1315 // Otherwise take the unions of the known bit sets of the operands,
1316 // taking conservative care to avoid excessive recursion.
1317 if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
1318 // Skip if every incoming value references to ourself.
1319 if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1322 KnownZero = APInt::getAllOnesValue(BitWidth);
1323 KnownOne = APInt::getAllOnesValue(BitWidth);
1324 for (Value *IncValue : P->incoming_values()) {
1325 // Skip direct self references.
1326 if (IncValue == P) continue;
1328 KnownZero2 = APInt(BitWidth, 0);
1329 KnownOne2 = APInt(BitWidth, 0);
1330 // Recurse, but cap the recursion to one level, because we don't
1331 // want to waste time spinning around in loops.
1332 computeKnownBits(IncValue, KnownZero2, KnownOne2, DL,
1334 KnownZero &= KnownZero2;
1335 KnownOne &= KnownOne2;
1336 // If all bits have been ruled out, there's no need to check
1338 if (!KnownZero && !KnownOne)
1344 case Instruction::Call:
1345 case Instruction::Invoke:
1346 if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
1347 computeKnownBitsFromRangeMetadata(*MD, KnownZero);
1348 // If a range metadata is attached to this IntrinsicInst, intersect the
1349 // explicit range specified by the metadata and the implicit range of
1351 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1352 switch (II->getIntrinsicID()) {
1354 case Intrinsic::ctlz:
1355 case Intrinsic::cttz: {
1356 unsigned LowBits = Log2_32(BitWidth)+1;
1357 // If this call is undefined for 0, the result will be less than 2^n.
1358 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1360 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
1363 case Intrinsic::ctpop: {
1364 unsigned LowBits = Log2_32(BitWidth)+1;
1365 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
1368 case Intrinsic::x86_sse42_crc32_64_64:
1369 KnownZero |= APInt::getHighBitsSet(64, 32);
1374 case Instruction::ExtractValue:
1375 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1376 ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1377 if (EVI->getNumIndices() != 1) break;
1378 if (EVI->getIndices()[0] == 0) {
1379 switch (II->getIntrinsicID()) {
1381 case Intrinsic::uadd_with_overflow:
1382 case Intrinsic::sadd_with_overflow:
1383 computeKnownBitsAddSub(true, II->getArgOperand(0),
1384 II->getArgOperand(1), false, KnownZero,
1385 KnownOne, KnownZero2, KnownOne2, DL, Depth, Q);
1387 case Intrinsic::usub_with_overflow:
1388 case Intrinsic::ssub_with_overflow:
1389 computeKnownBitsAddSub(false, II->getArgOperand(0),
1390 II->getArgOperand(1), false, KnownZero,
1391 KnownOne, KnownZero2, KnownOne2, DL, Depth, Q);
1393 case Intrinsic::umul_with_overflow:
1394 case Intrinsic::smul_with_overflow:
1395 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1396 KnownZero, KnownOne, KnownZero2, KnownOne2, DL,
1405 /// Determine which bits of V are known to be either zero or one and return
1406 /// them in the KnownZero/KnownOne bit sets.
1408 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1409 /// we cannot optimize based on the assumption that it is zero without changing
1410 /// it to be an explicit zero. If we don't change it to zero, other code could
1411 /// optimized based on the contradictory assumption that it is non-zero.
1412 /// Because instcombine aggressively folds operations with undef args anyway,
1413 /// this won't lose us code quality.
1415 /// This function is defined on values with integer type, values with pointer
1416 /// type, and vectors of integers. In the case
1417 /// where V is a vector, known zero, and known one values are the
1418 /// same width as the vector element, and the bit is set only if it is true
1419 /// for all of the elements in the vector.
1420 void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
1421 const DataLayout &DL, unsigned Depth, const Query &Q) {
1422 assert(V && "No Value?");
1423 assert(Depth <= MaxDepth && "Limit Search Depth");
1424 unsigned BitWidth = KnownZero.getBitWidth();
1426 assert((V->getType()->isIntOrIntVectorTy() ||
1427 V->getType()->getScalarType()->isPointerTy()) &&
1428 "Not integer or pointer type!");
1429 assert((DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
1430 (!V->getType()->isIntOrIntVectorTy() ||
1431 V->getType()->getScalarSizeInBits() == BitWidth) &&
1432 KnownZero.getBitWidth() == BitWidth &&
1433 KnownOne.getBitWidth() == BitWidth &&
1434 "V, KnownOne and KnownZero should have same BitWidth");
1436 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
1437 // We know all of the bits for a constant!
1438 KnownOne = CI->getValue();
1439 KnownZero = ~KnownOne;
1442 // Null and aggregate-zero are all-zeros.
1443 if (isa<ConstantPointerNull>(V) ||
1444 isa<ConstantAggregateZero>(V)) {
1445 KnownOne.clearAllBits();
1446 KnownZero = APInt::getAllOnesValue(BitWidth);
1449 // Handle a constant vector by taking the intersection of the known bits of
1450 // each element. There is no real need to handle ConstantVector here, because
1451 // we don't handle undef in any particularly useful way.
1452 if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1453 // We know that CDS must be a vector of integers. Take the intersection of
1455 KnownZero.setAllBits(); KnownOne.setAllBits();
1456 APInt Elt(KnownZero.getBitWidth(), 0);
1457 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1458 Elt = CDS->getElementAsInteger(i);
1465 // The address of an aligned GlobalValue has trailing zeros.
1466 if (auto *GO = dyn_cast<GlobalObject>(V)) {
1467 unsigned Align = GO->getAlignment();
1469 if (auto *GVar = dyn_cast<GlobalVariable>(GO)) {
1470 Type *ObjectType = GVar->getType()->getElementType();
1471 if (ObjectType->isSized()) {
1472 // If the object is defined in the current Module, we'll be giving
1473 // it the preferred alignment. Otherwise, we have to assume that it
1474 // may only have the minimum ABI alignment.
1475 if (GVar->isStrongDefinitionForLinker())
1476 Align = DL.getPreferredAlignment(GVar);
1478 Align = DL.getABITypeAlignment(ObjectType);
1483 KnownZero = APInt::getLowBitsSet(BitWidth,
1484 countTrailingZeros(Align));
1486 KnownZero.clearAllBits();
1487 KnownOne.clearAllBits();
1491 if (Argument *A = dyn_cast<Argument>(V)) {
1492 unsigned Align = A->getType()->isPointerTy() ? A->getParamAlignment() : 0;
1494 if (!Align && A->hasStructRetAttr()) {
1495 // An sret parameter has at least the ABI alignment of the return type.
1496 Type *EltTy = cast<PointerType>(A->getType())->getElementType();
1497 if (EltTy->isSized())
1498 Align = DL.getABITypeAlignment(EltTy);
1502 KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
1504 KnownZero.clearAllBits();
1505 KnownOne.clearAllBits();
1507 // Don't give up yet... there might be an assumption that provides more
1509 computeKnownBitsFromAssume(V, KnownZero, KnownOne, DL, Depth, Q);
1511 // Or a dominating condition for that matter
1512 if (EnableDomConditions && Depth <= DomConditionsMaxDepth)
1513 computeKnownBitsFromDominatingCondition(V, KnownZero, KnownOne, DL,
1518 // Start out not knowing anything.
1519 KnownZero.clearAllBits(); KnownOne.clearAllBits();
1521 // Limit search depth.
1522 // All recursive calls that increase depth must come after this.
1523 if (Depth == MaxDepth)
1526 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1527 // the bits of its aliasee.
1528 if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1529 if (!GA->mayBeOverridden())
1530 computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, DL, Depth + 1, Q);
1534 if (Operator *I = dyn_cast<Operator>(V))
1535 computeKnownBitsFromOperator(I, KnownZero, KnownOne, DL, Depth, Q);
1536 // computeKnownBitsFromAssume and computeKnownBitsFromDominatingCondition
1537 // strictly refines KnownZero and KnownOne. Therefore, we run them after
1538 // computeKnownBitsFromOperator.
1540 // Check whether a nearby assume intrinsic can determine some known bits.
1541 computeKnownBitsFromAssume(V, KnownZero, KnownOne, DL, Depth, Q);
1543 // Check whether there's a dominating condition which implies something about
1544 // this value at the given context.
1545 if (EnableDomConditions && Depth <= DomConditionsMaxDepth)
1546 computeKnownBitsFromDominatingCondition(V, KnownZero, KnownOne, DL, Depth,
1549 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1552 /// Determine whether the sign bit is known to be zero or one.
1553 /// Convenience wrapper around computeKnownBits.
1554 void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
1555 const DataLayout &DL, unsigned Depth, const Query &Q) {
1556 unsigned BitWidth = getBitWidth(V->getType(), DL);
1562 APInt ZeroBits(BitWidth, 0);
1563 APInt OneBits(BitWidth, 0);
1564 computeKnownBits(V, ZeroBits, OneBits, DL, Depth, Q);
1565 KnownOne = OneBits[BitWidth - 1];
1566 KnownZero = ZeroBits[BitWidth - 1];
1569 /// Return true if the given value is known to have exactly one
1570 /// bit set when defined. For vectors return true if every element is known to
1571 /// be a power of two when defined. Supports values with integer or pointer
1572 /// types and vectors of integers.
1573 bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
1574 const Query &Q, const DataLayout &DL) {
1575 if (Constant *C = dyn_cast<Constant>(V)) {
1576 if (C->isNullValue())
1578 if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
1579 return CI->getValue().isPowerOf2();
1580 // TODO: Handle vector constants.
1583 // 1 << X is clearly a power of two if the one is not shifted off the end. If
1584 // it is shifted off the end then the result is undefined.
1585 if (match(V, m_Shl(m_One(), m_Value())))
1588 // (signbit) >>l X is clearly a power of two if the one is not shifted off the
1589 // bottom. If it is shifted off the bottom then the result is undefined.
1590 if (match(V, m_LShr(m_SignBit(), m_Value())))
1593 // The remaining tests are all recursive, so bail out if we hit the limit.
1594 if (Depth++ == MaxDepth)
1597 Value *X = nullptr, *Y = nullptr;
1598 // A shift of a power of two is a power of two or zero.
1599 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1600 match(V, m_Shr(m_Value(X), m_Value()))))
1601 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q, DL);
1603 if (ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1604 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q, DL);
1606 if (SelectInst *SI = dyn_cast<SelectInst>(V))
1607 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q, DL) &&
1608 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q, DL);
1610 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1611 // A power of two and'd with anything is a power of two or zero.
1612 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q, DL) ||
1613 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q, DL))
1615 // X & (-X) is always a power of two or zero.
1616 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1621 // Adding a power-of-two or zero to the same power-of-two or zero yields
1622 // either the original power-of-two, a larger power-of-two or zero.
1623 if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1624 OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1625 if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
1626 if (match(X, m_And(m_Specific(Y), m_Value())) ||
1627 match(X, m_And(m_Value(), m_Specific(Y))))
1628 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q, DL))
1630 if (match(Y, m_And(m_Specific(X), m_Value())) ||
1631 match(Y, m_And(m_Value(), m_Specific(X))))
1632 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q, DL))
1635 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1636 APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0);
1637 computeKnownBits(X, LHSZeroBits, LHSOneBits, DL, Depth, Q);
1639 APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0);
1640 computeKnownBits(Y, RHSZeroBits, RHSOneBits, DL, Depth, Q);
1641 // If i8 V is a power of two or zero:
1642 // ZeroBits: 1 1 1 0 1 1 1 1
1643 // ~ZeroBits: 0 0 0 1 0 0 0 0
1644 if ((~(LHSZeroBits & RHSZeroBits)).isPowerOf2())
1645 // If OrZero isn't set, we cannot give back a zero result.
1646 // Make sure either the LHS or RHS has a bit set.
1647 if (OrZero || RHSOneBits.getBoolValue() || LHSOneBits.getBoolValue())
1652 // An exact divide or right shift can only shift off zero bits, so the result
1653 // is a power of two only if the first operand is a power of two and not
1654 // copying a sign bit (sdiv int_min, 2).
1655 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1656 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1657 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1664 /// \brief Test whether a GEP's result is known to be non-null.
1666 /// Uses properties inherent in a GEP to try to determine whether it is known
1669 /// Currently this routine does not support vector GEPs.
1670 static bool isGEPKnownNonNull(GEPOperator *GEP, const DataLayout &DL,
1671 unsigned Depth, const Query &Q) {
1672 if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
1675 // FIXME: Support vector-GEPs.
1676 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1678 // If the base pointer is non-null, we cannot walk to a null address with an
1679 // inbounds GEP in address space zero.
1680 if (isKnownNonZero(GEP->getPointerOperand(), DL, Depth, Q))
1683 // Walk the GEP operands and see if any operand introduces a non-zero offset.
1684 // If so, then the GEP cannot produce a null pointer, as doing so would
1685 // inherently violate the inbounds contract within address space zero.
1686 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1687 GTI != GTE; ++GTI) {
1688 // Struct types are easy -- they must always be indexed by a constant.
1689 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
1690 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1691 unsigned ElementIdx = OpC->getZExtValue();
1692 const StructLayout *SL = DL.getStructLayout(STy);
1693 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1694 if (ElementOffset > 0)
1699 // If we have a zero-sized type, the index doesn't matter. Keep looping.
1700 if (DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1703 // Fast path the constant operand case both for efficiency and so we don't
1704 // increment Depth when just zipping down an all-constant GEP.
1705 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1711 // We post-increment Depth here because while isKnownNonZero increments it
1712 // as well, when we pop back up that increment won't persist. We don't want
1713 // to recurse 10k times just because we have 10k GEP operands. We don't
1714 // bail completely out because we want to handle constant GEPs regardless
1716 if (Depth++ >= MaxDepth)
1719 if (isKnownNonZero(GTI.getOperand(), DL, Depth, Q))
1726 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
1727 /// ensure that the value it's attached to is never Value? 'RangeType' is
1728 /// is the type of the value described by the range.
1729 static bool rangeMetadataExcludesValue(MDNode* Ranges,
1730 const APInt& Value) {
1731 const unsigned NumRanges = Ranges->getNumOperands() / 2;
1732 assert(NumRanges >= 1);
1733 for (unsigned i = 0; i < NumRanges; ++i) {
1734 ConstantInt *Lower =
1735 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1736 ConstantInt *Upper =
1737 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1738 ConstantRange Range(Lower->getValue(), Upper->getValue());
1739 if (Range.contains(Value))
1745 /// Return true if the given value is known to be non-zero when defined.
1746 /// For vectors return true if every element is known to be non-zero when
1747 /// defined. Supports values with integer or pointer type and vectors of
1749 bool isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
1751 if (Constant *C = dyn_cast<Constant>(V)) {
1752 if (C->isNullValue())
1754 if (isa<ConstantInt>(C))
1755 // Must be non-zero due to null test above.
1757 // TODO: Handle vectors
1761 if (Instruction* I = dyn_cast<Instruction>(V)) {
1762 if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
1763 // If the possible ranges don't contain zero, then the value is
1764 // definitely non-zero.
1765 if (IntegerType* Ty = dyn_cast<IntegerType>(V->getType())) {
1766 const APInt ZeroValue(Ty->getBitWidth(), 0);
1767 if (rangeMetadataExcludesValue(Ranges, ZeroValue))
1773 // The remaining tests are all recursive, so bail out if we hit the limit.
1774 if (Depth++ >= MaxDepth)
1777 // Check for pointer simplifications.
1778 if (V->getType()->isPointerTy()) {
1779 if (isKnownNonNull(V))
1781 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
1782 if (isGEPKnownNonNull(GEP, DL, Depth, Q))
1786 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), DL);
1788 // X | Y != 0 if X != 0 or Y != 0.
1789 Value *X = nullptr, *Y = nullptr;
1790 if (match(V, m_Or(m_Value(X), m_Value(Y))))
1791 return isKnownNonZero(X, DL, Depth, Q) || isKnownNonZero(Y, DL, Depth, Q);
1793 // ext X != 0 if X != 0.
1794 if (isa<SExtInst>(V) || isa<ZExtInst>(V))
1795 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), DL, Depth, Q);
1797 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
1798 // if the lowest bit is shifted off the end.
1799 if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
1800 // shl nuw can't remove any non-zero bits.
1801 OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1802 if (BO->hasNoUnsignedWrap())
1803 return isKnownNonZero(X, DL, Depth, Q);
1805 APInt KnownZero(BitWidth, 0);
1806 APInt KnownOne(BitWidth, 0);
1807 computeKnownBits(X, KnownZero, KnownOne, DL, Depth, Q);
1811 // shr X, Y != 0 if X is negative. Note that the value of the shift is not
1812 // defined if the sign bit is shifted off the end.
1813 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
1814 // shr exact can only shift out zero bits.
1815 PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
1817 return isKnownNonZero(X, DL, Depth, Q);
1819 bool XKnownNonNegative, XKnownNegative;
1820 ComputeSignBit(X, XKnownNonNegative, XKnownNegative, DL, Depth, Q);
1824 // div exact can only produce a zero if the dividend is zero.
1825 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
1826 return isKnownNonZero(X, DL, Depth, Q);
1829 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1830 bool XKnownNonNegative, XKnownNegative;
1831 bool YKnownNonNegative, YKnownNegative;
1832 ComputeSignBit(X, XKnownNonNegative, XKnownNegative, DL, Depth, Q);
1833 ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, DL, Depth, Q);
1835 // If X and Y are both non-negative (as signed values) then their sum is not
1836 // zero unless both X and Y are zero.
1837 if (XKnownNonNegative && YKnownNonNegative)
1838 if (isKnownNonZero(X, DL, Depth, Q) || isKnownNonZero(Y, DL, Depth, Q))
1841 // If X and Y are both negative (as signed values) then their sum is not
1842 // zero unless both X and Y equal INT_MIN.
1843 if (BitWidth && XKnownNegative && YKnownNegative) {
1844 APInt KnownZero(BitWidth, 0);
1845 APInt KnownOne(BitWidth, 0);
1846 APInt Mask = APInt::getSignedMaxValue(BitWidth);
1847 // The sign bit of X is set. If some other bit is set then X is not equal
1849 computeKnownBits(X, KnownZero, KnownOne, DL, Depth, Q);
1850 if ((KnownOne & Mask) != 0)
1852 // The sign bit of Y is set. If some other bit is set then Y is not equal
1854 computeKnownBits(Y, KnownZero, KnownOne, DL, Depth, Q);
1855 if ((KnownOne & Mask) != 0)
1859 // The sum of a non-negative number and a power of two is not zero.
1860 if (XKnownNonNegative &&
1861 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q, DL))
1863 if (YKnownNonNegative &&
1864 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q, DL))
1868 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
1869 OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1870 // If X and Y are non-zero then so is X * Y as long as the multiplication
1871 // does not overflow.
1872 if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
1873 isKnownNonZero(X, DL, Depth, Q) && isKnownNonZero(Y, DL, Depth, Q))
1876 // (C ? X : Y) != 0 if X != 0 and Y != 0.
1877 else if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
1878 if (isKnownNonZero(SI->getTrueValue(), DL, Depth, Q) &&
1879 isKnownNonZero(SI->getFalseValue(), DL, Depth, Q))
1883 if (!BitWidth) return false;
1884 APInt KnownZero(BitWidth, 0);
1885 APInt KnownOne(BitWidth, 0);
1886 computeKnownBits(V, KnownZero, KnownOne, DL, Depth, Q);
1887 return KnownOne != 0;
1890 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
1891 /// simplify operations downstream. Mask is known to be zero for bits that V
1894 /// This function is defined on values with integer type, values with pointer
1895 /// type, and vectors of integers. In the case
1896 /// where V is a vector, the mask, known zero, and known one values are the
1897 /// same width as the vector element, and the bit is set only if it is true
1898 /// for all of the elements in the vector.
1899 bool MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
1900 unsigned Depth, const Query &Q) {
1901 APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
1902 computeKnownBits(V, KnownZero, KnownOne, DL, Depth, Q);
1903 return (KnownZero & Mask) == Mask;
1908 /// Return the number of times the sign bit of the register is replicated into
1909 /// the other bits. We know that at least 1 bit is always equal to the sign bit
1910 /// (itself), but other cases can give us information. For example, immediately
1911 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
1912 /// other, so we return 3.
1914 /// 'Op' must have a scalar integer type.
1916 unsigned ComputeNumSignBits(Value *V, const DataLayout &DL, unsigned Depth,
1918 unsigned TyBits = DL.getTypeSizeInBits(V->getType()->getScalarType());
1920 unsigned FirstAnswer = 1;
1922 // Note that ConstantInt is handled by the general computeKnownBits case
1926 return 1; // Limit search depth.
1928 Operator *U = dyn_cast<Operator>(V);
1929 switch (Operator::getOpcode(V)) {
1931 case Instruction::SExt:
1932 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
1933 return ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q) + Tmp;
1935 case Instruction::SDiv: {
1936 const APInt *Denominator;
1937 // sdiv X, C -> adds log(C) sign bits.
1938 if (match(U->getOperand(1), m_APInt(Denominator))) {
1940 // Ignore non-positive denominator.
1941 if (!Denominator->isStrictlyPositive())
1944 // Calculate the incoming numerator bits.
1945 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
1947 // Add floor(log(C)) bits to the numerator bits.
1948 return std::min(TyBits, NumBits + Denominator->logBase2());
1953 case Instruction::SRem: {
1954 const APInt *Denominator;
1955 // srem X, C -> we know that the result is within [-C+1,C) when C is a
1956 // positive constant. This let us put a lower bound on the number of sign
1958 if (match(U->getOperand(1), m_APInt(Denominator))) {
1960 // Ignore non-positive denominator.
1961 if (!Denominator->isStrictlyPositive())
1964 // Calculate the incoming numerator bits. SRem by a positive constant
1965 // can't lower the number of sign bits.
1967 ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
1969 // Calculate the leading sign bit constraints by examining the
1970 // denominator. Given that the denominator is positive, there are two
1973 // 1. the numerator is positive. The result range is [0,C) and [0,C) u<
1974 // (1 << ceilLogBase2(C)).
1976 // 2. the numerator is negative. Then the result range is (-C,0] and
1977 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
1979 // Thus a lower bound on the number of sign bits is `TyBits -
1980 // ceilLogBase2(C)`.
1982 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
1983 return std::max(NumrBits, ResBits);
1988 case Instruction::AShr: {
1989 Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
1990 // ashr X, C -> adds C sign bits. Vectors too.
1992 if (match(U->getOperand(1), m_APInt(ShAmt))) {
1993 Tmp += ShAmt->getZExtValue();
1994 if (Tmp > TyBits) Tmp = TyBits;
1998 case Instruction::Shl: {
2000 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2001 // shl destroys sign bits.
2002 Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
2003 Tmp2 = ShAmt->getZExtValue();
2004 if (Tmp2 >= TyBits || // Bad shift.
2005 Tmp2 >= Tmp) break; // Shifted all sign bits out.
2010 case Instruction::And:
2011 case Instruction::Or:
2012 case Instruction::Xor: // NOT is handled here.
2013 // Logical binary ops preserve the number of sign bits at the worst.
2014 Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
2016 Tmp2 = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q);
2017 FirstAnswer = std::min(Tmp, Tmp2);
2018 // We computed what we know about the sign bits as our first
2019 // answer. Now proceed to the generic code that uses
2020 // computeKnownBits, and pick whichever answer is better.
2024 case Instruction::Select:
2025 Tmp = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q);
2026 if (Tmp == 1) return 1; // Early out.
2027 Tmp2 = ComputeNumSignBits(U->getOperand(2), DL, Depth + 1, Q);
2028 return std::min(Tmp, Tmp2);
2030 case Instruction::Add:
2031 // Add can have at most one carry bit. Thus we know that the output
2032 // is, at worst, one more bit than the inputs.
2033 Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
2034 if (Tmp == 1) return 1; // Early out.
2036 // Special case decrementing a value (ADD X, -1):
2037 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2038 if (CRHS->isAllOnesValue()) {
2039 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2040 computeKnownBits(U->getOperand(0), KnownZero, KnownOne, DL, Depth + 1,
2043 // If the input is known to be 0 or 1, the output is 0/-1, which is all
2045 if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
2048 // If we are subtracting one from a positive number, there is no carry
2049 // out of the result.
2050 if (KnownZero.isNegative())
2054 Tmp2 = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q);
2055 if (Tmp2 == 1) return 1;
2056 return std::min(Tmp, Tmp2)-1;
2058 case Instruction::Sub:
2059 Tmp2 = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q);
2060 if (Tmp2 == 1) return 1;
2063 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2064 if (CLHS->isNullValue()) {
2065 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2066 computeKnownBits(U->getOperand(1), KnownZero, KnownOne, DL, Depth + 1,
2068 // If the input is known to be 0 or 1, the output is 0/-1, which is all
2070 if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
2073 // If the input is known to be positive (the sign bit is known clear),
2074 // the output of the NEG has the same number of sign bits as the input.
2075 if (KnownZero.isNegative())
2078 // Otherwise, we treat this like a SUB.
2081 // Sub can have at most one carry bit. Thus we know that the output
2082 // is, at worst, one more bit than the inputs.
2083 Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
2084 if (Tmp == 1) return 1; // Early out.
2085 return std::min(Tmp, Tmp2)-1;
2087 case Instruction::PHI: {
2088 PHINode *PN = cast<PHINode>(U);
2089 unsigned NumIncomingValues = PN->getNumIncomingValues();
2090 // Don't analyze large in-degree PHIs.
2091 if (NumIncomingValues > 4) break;
2092 // Unreachable blocks may have zero-operand PHI nodes.
2093 if (NumIncomingValues == 0) break;
2095 // Take the minimum of all incoming values. This can't infinitely loop
2096 // because of our depth threshold.
2097 Tmp = ComputeNumSignBits(PN->getIncomingValue(0), DL, Depth + 1, Q);
2098 for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2099 if (Tmp == 1) return Tmp;
2101 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), DL, Depth + 1, Q));
2106 case Instruction::Trunc:
2107 // FIXME: it's tricky to do anything useful for this, but it is an important
2108 // case for targets like X86.
2112 // Finally, if we can prove that the top bits of the result are 0's or 1's,
2113 // use this information.
2114 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2116 computeKnownBits(V, KnownZero, KnownOne, DL, Depth, Q);
2118 if (KnownZero.isNegative()) { // sign bit is 0
2120 } else if (KnownOne.isNegative()) { // sign bit is 1;
2127 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
2128 // the number of identical bits in the top of the input value.
2130 Mask <<= Mask.getBitWidth()-TyBits;
2131 // Return # leading zeros. We use 'min' here in case Val was zero before
2132 // shifting. We don't want to return '64' as for an i32 "0".
2133 return std::max(FirstAnswer, std::min(TyBits, Mask.countLeadingZeros()));
2136 /// This function computes the integer multiple of Base that equals V.
2137 /// If successful, it returns true and returns the multiple in
2138 /// Multiple. If unsuccessful, it returns false. It looks
2139 /// through SExt instructions only if LookThroughSExt is true.
2140 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2141 bool LookThroughSExt, unsigned Depth) {
2142 const unsigned MaxDepth = 6;
2144 assert(V && "No Value?");
2145 assert(Depth <= MaxDepth && "Limit Search Depth");
2146 assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
2148 Type *T = V->getType();
2150 ConstantInt *CI = dyn_cast<ConstantInt>(V);
2160 ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2161 Constant *BaseVal = ConstantInt::get(T, Base);
2162 if (CO && CO == BaseVal) {
2164 Multiple = ConstantInt::get(T, 1);
2168 if (CI && CI->getZExtValue() % Base == 0) {
2169 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2173 if (Depth == MaxDepth) return false; // Limit search depth.
2175 Operator *I = dyn_cast<Operator>(V);
2176 if (!I) return false;
2178 switch (I->getOpcode()) {
2180 case Instruction::SExt:
2181 if (!LookThroughSExt) return false;
2182 // otherwise fall through to ZExt
2183 case Instruction::ZExt:
2184 return ComputeMultiple(I->getOperand(0), Base, Multiple,
2185 LookThroughSExt, Depth+1);
2186 case Instruction::Shl:
2187 case Instruction::Mul: {
2188 Value *Op0 = I->getOperand(0);
2189 Value *Op1 = I->getOperand(1);
2191 if (I->getOpcode() == Instruction::Shl) {
2192 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2193 if (!Op1CI) return false;
2194 // Turn Op0 << Op1 into Op0 * 2^Op1
2195 APInt Op1Int = Op1CI->getValue();
2196 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2197 APInt API(Op1Int.getBitWidth(), 0);
2198 API.setBit(BitToSet);
2199 Op1 = ConstantInt::get(V->getContext(), API);
2202 Value *Mul0 = nullptr;
2203 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2204 if (Constant *Op1C = dyn_cast<Constant>(Op1))
2205 if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2206 if (Op1C->getType()->getPrimitiveSizeInBits() <
2207 MulC->getType()->getPrimitiveSizeInBits())
2208 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2209 if (Op1C->getType()->getPrimitiveSizeInBits() >
2210 MulC->getType()->getPrimitiveSizeInBits())
2211 MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2213 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2214 Multiple = ConstantExpr::getMul(MulC, Op1C);
2218 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2219 if (Mul0CI->getValue() == 1) {
2220 // V == Base * Op1, so return Op1
2226 Value *Mul1 = nullptr;
2227 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2228 if (Constant *Op0C = dyn_cast<Constant>(Op0))
2229 if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2230 if (Op0C->getType()->getPrimitiveSizeInBits() <
2231 MulC->getType()->getPrimitiveSizeInBits())
2232 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2233 if (Op0C->getType()->getPrimitiveSizeInBits() >
2234 MulC->getType()->getPrimitiveSizeInBits())
2235 MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2237 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2238 Multiple = ConstantExpr::getMul(MulC, Op0C);
2242 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2243 if (Mul1CI->getValue() == 1) {
2244 // V == Base * Op0, so return Op0
2252 // We could not determine if V is a multiple of Base.
2256 /// Return true if we can prove that the specified FP value is never equal to
2259 /// NOTE: this function will need to be revisited when we support non-default
2262 bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
2263 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
2264 return !CFP->getValueAPF().isNegZero();
2266 // FIXME: Magic number! At the least, this should be given a name because it's
2267 // used similarly in CannotBeOrderedLessThanZero(). A better fix may be to
2268 // expose it as a parameter, so it can be used for testing / experimenting.
2270 return false; // Limit search depth.
2272 const Operator *I = dyn_cast<Operator>(V);
2273 if (!I) return false;
2275 // Check if the nsz fast-math flag is set
2276 if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
2277 if (FPO->hasNoSignedZeros())
2280 // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
2281 if (I->getOpcode() == Instruction::FAdd)
2282 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1)))
2283 if (CFP->isNullValue())
2286 // sitofp and uitofp turn into +0.0 for zero.
2287 if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
2290 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
2291 // sqrt(-0.0) = -0.0, no other negative results are possible.
2292 if (II->getIntrinsicID() == Intrinsic::sqrt)
2293 return CannotBeNegativeZero(II->getArgOperand(0), Depth+1);
2295 if (const CallInst *CI = dyn_cast<CallInst>(I))
2296 if (const Function *F = CI->getCalledFunction()) {
2297 if (F->isDeclaration()) {
2299 if (F->getName() == "abs") return true;
2300 // fabs[lf](x) != -0.0
2301 if (F->getName() == "fabs") return true;
2302 if (F->getName() == "fabsf") return true;
2303 if (F->getName() == "fabsl") return true;
2304 if (F->getName() == "sqrt" || F->getName() == "sqrtf" ||
2305 F->getName() == "sqrtl")
2306 return CannotBeNegativeZero(CI->getArgOperand(0), Depth+1);
2313 bool llvm::CannotBeOrderedLessThanZero(const Value *V, unsigned Depth) {
2314 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
2315 return !CFP->getValueAPF().isNegative() || CFP->getValueAPF().isZero();
2317 // FIXME: Magic number! At the least, this should be given a name because it's
2318 // used similarly in CannotBeNegativeZero(). A better fix may be to
2319 // expose it as a parameter, so it can be used for testing / experimenting.
2321 return false; // Limit search depth.
2323 const Operator *I = dyn_cast<Operator>(V);
2324 if (!I) return false;
2326 switch (I->getOpcode()) {
2328 case Instruction::FMul:
2329 // x*x is always non-negative or a NaN.
2330 if (I->getOperand(0) == I->getOperand(1))
2333 case Instruction::FAdd:
2334 case Instruction::FDiv:
2335 case Instruction::FRem:
2336 return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1) &&
2337 CannotBeOrderedLessThanZero(I->getOperand(1), Depth+1);
2338 case Instruction::FPExt:
2339 case Instruction::FPTrunc:
2340 // Widening/narrowing never change sign.
2341 return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1);
2342 case Instruction::Call:
2343 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
2344 switch (II->getIntrinsicID()) {
2346 case Intrinsic::exp:
2347 case Intrinsic::exp2:
2348 case Intrinsic::fabs:
2349 case Intrinsic::sqrt:
2351 case Intrinsic::powi:
2352 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
2353 // powi(x,n) is non-negative if n is even.
2354 if (CI->getBitWidth() <= 64 && CI->getSExtValue() % 2u == 0)
2357 return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1);
2358 case Intrinsic::fma:
2359 case Intrinsic::fmuladd:
2360 // x*x+y is non-negative if y is non-negative.
2361 return I->getOperand(0) == I->getOperand(1) &&
2362 CannotBeOrderedLessThanZero(I->getOperand(2), Depth+1);
2369 /// If the specified value can be set by repeating the same byte in memory,
2370 /// return the i8 value that it is represented with. This is
2371 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
2372 /// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
2373 /// byte store (e.g. i16 0x1234), return null.
2374 Value *llvm::isBytewiseValue(Value *V) {
2375 // All byte-wide stores are splatable, even of arbitrary variables.
2376 if (V->getType()->isIntegerTy(8)) return V;
2378 // Handle 'null' ConstantArrayZero etc.
2379 if (Constant *C = dyn_cast<Constant>(V))
2380 if (C->isNullValue())
2381 return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
2383 // Constant float and double values can be handled as integer values if the
2384 // corresponding integer value is "byteable". An important case is 0.0.
2385 if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2386 if (CFP->getType()->isFloatTy())
2387 V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
2388 if (CFP->getType()->isDoubleTy())
2389 V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
2390 // Don't handle long double formats, which have strange constraints.
2393 // We can handle constant integers that are multiple of 8 bits.
2394 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
2395 if (CI->getBitWidth() % 8 == 0) {
2396 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
2398 if (!CI->getValue().isSplat(8))
2400 return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
2404 // A ConstantDataArray/Vector is splatable if all its members are equal and
2406 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
2407 Value *Elt = CA->getElementAsConstant(0);
2408 Value *Val = isBytewiseValue(Elt);
2412 for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
2413 if (CA->getElementAsConstant(I) != Elt)
2419 // Conceptually, we could handle things like:
2420 // %a = zext i8 %X to i16
2421 // %b = shl i16 %a, 8
2422 // %c = or i16 %a, %b
2423 // but until there is an example that actually needs this, it doesn't seem
2424 // worth worrying about.
2429 // This is the recursive version of BuildSubAggregate. It takes a few different
2430 // arguments. Idxs is the index within the nested struct From that we are
2431 // looking at now (which is of type IndexedType). IdxSkip is the number of
2432 // indices from Idxs that should be left out when inserting into the resulting
2433 // struct. To is the result struct built so far, new insertvalue instructions
2435 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
2436 SmallVectorImpl<unsigned> &Idxs,
2438 Instruction *InsertBefore) {
2439 llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType);
2441 // Save the original To argument so we can modify it
2443 // General case, the type indexed by Idxs is a struct
2444 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2445 // Process each struct element recursively
2448 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
2452 // Couldn't find any inserted value for this index? Cleanup
2453 while (PrevTo != OrigTo) {
2454 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
2455 PrevTo = Del->getAggregateOperand();
2456 Del->eraseFromParent();
2458 // Stop processing elements
2462 // If we successfully found a value for each of our subaggregates
2466 // Base case, the type indexed by SourceIdxs is not a struct, or not all of
2467 // the struct's elements had a value that was inserted directly. In the latter
2468 // case, perhaps we can't determine each of the subelements individually, but
2469 // we might be able to find the complete struct somewhere.
2471 // Find the value that is at that particular spot
2472 Value *V = FindInsertedValue(From, Idxs);
2477 // Insert the value in the new (sub) aggregrate
2478 return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
2479 "tmp", InsertBefore);
2482 // This helper takes a nested struct and extracts a part of it (which is again a
2483 // struct) into a new value. For example, given the struct:
2484 // { a, { b, { c, d }, e } }
2485 // and the indices "1, 1" this returns
2488 // It does this by inserting an insertvalue for each element in the resulting
2489 // struct, as opposed to just inserting a single struct. This will only work if
2490 // each of the elements of the substruct are known (ie, inserted into From by an
2491 // insertvalue instruction somewhere).
2493 // All inserted insertvalue instructions are inserted before InsertBefore
2494 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
2495 Instruction *InsertBefore) {
2496 assert(InsertBefore && "Must have someplace to insert!");
2497 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
2499 Value *To = UndefValue::get(IndexedType);
2500 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
2501 unsigned IdxSkip = Idxs.size();
2503 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
2506 /// Given an aggregrate and an sequence of indices, see if
2507 /// the scalar value indexed is already around as a register, for example if it
2508 /// were inserted directly into the aggregrate.
2510 /// If InsertBefore is not null, this function will duplicate (modified)
2511 /// insertvalues when a part of a nested struct is extracted.
2512 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
2513 Instruction *InsertBefore) {
2514 // Nothing to index? Just return V then (this is useful at the end of our
2516 if (idx_range.empty())
2518 // We have indices, so V should have an indexable type.
2519 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
2520 "Not looking at a struct or array?");
2521 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
2522 "Invalid indices for type?");
2524 if (Constant *C = dyn_cast<Constant>(V)) {
2525 C = C->getAggregateElement(idx_range[0]);
2526 if (!C) return nullptr;
2527 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
2530 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
2531 // Loop the indices for the insertvalue instruction in parallel with the
2532 // requested indices
2533 const unsigned *req_idx = idx_range.begin();
2534 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
2535 i != e; ++i, ++req_idx) {
2536 if (req_idx == idx_range.end()) {
2537 // We can't handle this without inserting insertvalues
2541 // The requested index identifies a part of a nested aggregate. Handle
2542 // this specially. For example,
2543 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
2544 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
2545 // %C = extractvalue {i32, { i32, i32 } } %B, 1
2546 // This can be changed into
2547 // %A = insertvalue {i32, i32 } undef, i32 10, 0
2548 // %C = insertvalue {i32, i32 } %A, i32 11, 1
2549 // which allows the unused 0,0 element from the nested struct to be
2551 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
2555 // This insert value inserts something else than what we are looking for.
2556 // See if the (aggregate) value inserted into has the value we are
2557 // looking for, then.
2559 return FindInsertedValue(I->getAggregateOperand(), idx_range,
2562 // If we end up here, the indices of the insertvalue match with those
2563 // requested (though possibly only partially). Now we recursively look at
2564 // the inserted value, passing any remaining indices.
2565 return FindInsertedValue(I->getInsertedValueOperand(),
2566 makeArrayRef(req_idx, idx_range.end()),
2570 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
2571 // If we're extracting a value from an aggregate that was extracted from
2572 // something else, we can extract from that something else directly instead.
2573 // However, we will need to chain I's indices with the requested indices.
2575 // Calculate the number of indices required
2576 unsigned size = I->getNumIndices() + idx_range.size();
2577 // Allocate some space to put the new indices in
2578 SmallVector<unsigned, 5> Idxs;
2580 // Add indices from the extract value instruction
2581 Idxs.append(I->idx_begin(), I->idx_end());
2583 // Add requested indices
2584 Idxs.append(idx_range.begin(), idx_range.end());
2586 assert(Idxs.size() == size
2587 && "Number of indices added not correct?");
2589 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
2591 // Otherwise, we don't know (such as, extracting from a function return value
2592 // or load instruction)
2596 /// Analyze the specified pointer to see if it can be expressed as a base
2597 /// pointer plus a constant offset. Return the base and offset to the caller.
2598 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
2599 const DataLayout &DL) {
2600 unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
2601 APInt ByteOffset(BitWidth, 0);
2603 if (Ptr->getType()->isVectorTy())
2606 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
2607 APInt GEPOffset(BitWidth, 0);
2608 if (!GEP->accumulateConstantOffset(DL, GEPOffset))
2611 ByteOffset += GEPOffset;
2613 Ptr = GEP->getPointerOperand();
2614 } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
2615 Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
2616 Ptr = cast<Operator>(Ptr)->getOperand(0);
2617 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
2618 if (GA->mayBeOverridden())
2620 Ptr = GA->getAliasee();
2625 Offset = ByteOffset.getSExtValue();
2630 /// This function computes the length of a null-terminated C string pointed to
2631 /// by V. If successful, it returns true and returns the string in Str.
2632 /// If unsuccessful, it returns false.
2633 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
2634 uint64_t Offset, bool TrimAtNul) {
2637 // Look through bitcast instructions and geps.
2638 V = V->stripPointerCasts();
2640 // If the value is a GEP instruction or constant expression, treat it as an
2642 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
2643 // Make sure the GEP has exactly three arguments.
2644 if (GEP->getNumOperands() != 3)
2647 // Make sure the index-ee is a pointer to array of i8.
2648 PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
2649 ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
2650 if (!AT || !AT->getElementType()->isIntegerTy(8))
2653 // Check to make sure that the first operand of the GEP is an integer and
2654 // has value 0 so that we are sure we're indexing into the initializer.
2655 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
2656 if (!FirstIdx || !FirstIdx->isZero())
2659 // If the second index isn't a ConstantInt, then this is a variable index
2660 // into the array. If this occurs, we can't say anything meaningful about
2662 uint64_t StartIdx = 0;
2663 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
2664 StartIdx = CI->getZExtValue();
2667 return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx + Offset,
2671 // The GEP instruction, constant or instruction, must reference a global
2672 // variable that is a constant and is initialized. The referenced constant
2673 // initializer is the array that we'll use for optimization.
2674 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
2675 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
2678 // Handle the all-zeros case
2679 if (GV->getInitializer()->isNullValue()) {
2680 // This is a degenerate case. The initializer is constant zero so the
2681 // length of the string must be zero.
2686 // Must be a Constant Array
2687 const ConstantDataArray *Array =
2688 dyn_cast<ConstantDataArray>(GV->getInitializer());
2689 if (!Array || !Array->isString())
2692 // Get the number of elements in the array
2693 uint64_t NumElts = Array->getType()->getArrayNumElements();
2695 // Start out with the entire array in the StringRef.
2696 Str = Array->getAsString();
2698 if (Offset > NumElts)
2701 // Skip over 'offset' bytes.
2702 Str = Str.substr(Offset);
2705 // Trim off the \0 and anything after it. If the array is not nul
2706 // terminated, we just return the whole end of string. The client may know
2707 // some other way that the string is length-bound.
2708 Str = Str.substr(0, Str.find('\0'));
2713 // These next two are very similar to the above, but also look through PHI
2715 // TODO: See if we can integrate these two together.
2717 /// If we can compute the length of the string pointed to by
2718 /// the specified pointer, return 'len+1'. If we can't, return 0.
2719 static uint64_t GetStringLengthH(Value *V, SmallPtrSetImpl<PHINode*> &PHIs) {
2720 // Look through noop bitcast instructions.
2721 V = V->stripPointerCasts();
2723 // If this is a PHI node, there are two cases: either we have already seen it
2725 if (PHINode *PN = dyn_cast<PHINode>(V)) {
2726 if (!PHIs.insert(PN).second)
2727 return ~0ULL; // already in the set.
2729 // If it was new, see if all the input strings are the same length.
2730 uint64_t LenSoFar = ~0ULL;
2731 for (Value *IncValue : PN->incoming_values()) {
2732 uint64_t Len = GetStringLengthH(IncValue, PHIs);
2733 if (Len == 0) return 0; // Unknown length -> unknown.
2735 if (Len == ~0ULL) continue;
2737 if (Len != LenSoFar && LenSoFar != ~0ULL)
2738 return 0; // Disagree -> unknown.
2742 // Success, all agree.
2746 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
2747 if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
2748 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs);
2749 if (Len1 == 0) return 0;
2750 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs);
2751 if (Len2 == 0) return 0;
2752 if (Len1 == ~0ULL) return Len2;
2753 if (Len2 == ~0ULL) return Len1;
2754 if (Len1 != Len2) return 0;
2758 // Otherwise, see if we can read the string.
2760 if (!getConstantStringInfo(V, StrData))
2763 return StrData.size()+1;
2766 /// If we can compute the length of the string pointed to by
2767 /// the specified pointer, return 'len+1'. If we can't, return 0.
2768 uint64_t llvm::GetStringLength(Value *V) {
2769 if (!V->getType()->isPointerTy()) return 0;
2771 SmallPtrSet<PHINode*, 32> PHIs;
2772 uint64_t Len = GetStringLengthH(V, PHIs);
2773 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
2774 // an empty string as a length.
2775 return Len == ~0ULL ? 1 : Len;
2778 /// \brief \p PN defines a loop-variant pointer to an object. Check if the
2779 /// previous iteration of the loop was referring to the same object as \p PN.
2780 static bool isSameUnderlyingObjectInLoop(PHINode *PN, LoopInfo *LI) {
2781 // Find the loop-defined value.
2782 Loop *L = LI->getLoopFor(PN->getParent());
2783 if (PN->getNumIncomingValues() != 2)
2786 // Find the value from previous iteration.
2787 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
2788 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
2789 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
2790 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
2793 // If a new pointer is loaded in the loop, the pointer references a different
2794 // object in every iteration. E.g.:
2798 if (auto *Load = dyn_cast<LoadInst>(PrevValue))
2799 if (!L->isLoopInvariant(Load->getPointerOperand()))
2804 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
2805 unsigned MaxLookup) {
2806 if (!V->getType()->isPointerTy())
2808 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
2809 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
2810 V = GEP->getPointerOperand();
2811 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
2812 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
2813 V = cast<Operator>(V)->getOperand(0);
2814 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
2815 if (GA->mayBeOverridden())
2817 V = GA->getAliasee();
2819 // See if InstructionSimplify knows any relevant tricks.
2820 if (Instruction *I = dyn_cast<Instruction>(V))
2821 // TODO: Acquire a DominatorTree and AssumptionCache and use them.
2822 if (Value *Simplified = SimplifyInstruction(I, DL, nullptr)) {
2829 assert(V->getType()->isPointerTy() && "Unexpected operand type!");
2834 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
2835 const DataLayout &DL, LoopInfo *LI,
2836 unsigned MaxLookup) {
2837 SmallPtrSet<Value *, 4> Visited;
2838 SmallVector<Value *, 4> Worklist;
2839 Worklist.push_back(V);
2841 Value *P = Worklist.pop_back_val();
2842 P = GetUnderlyingObject(P, DL, MaxLookup);
2844 if (!Visited.insert(P).second)
2847 if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
2848 Worklist.push_back(SI->getTrueValue());
2849 Worklist.push_back(SI->getFalseValue());
2853 if (PHINode *PN = dyn_cast<PHINode>(P)) {
2854 // If this PHI changes the underlying object in every iteration of the
2855 // loop, don't look through it. Consider:
2858 // Prev = Curr; // Prev = PHI (Prev_0, Curr)
2862 // Prev is tracking Curr one iteration behind so they refer to different
2863 // underlying objects.
2864 if (!LI || !LI->isLoopHeader(PN->getParent()) ||
2865 isSameUnderlyingObjectInLoop(PN, LI))
2866 for (Value *IncValue : PN->incoming_values())
2867 Worklist.push_back(IncValue);
2871 Objects.push_back(P);
2872 } while (!Worklist.empty());
2875 /// Return true if the only users of this pointer are lifetime markers.
2876 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
2877 for (const User *U : V->users()) {
2878 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
2879 if (!II) return false;
2881 if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
2882 II->getIntrinsicID() != Intrinsic::lifetime_end)
2888 static bool isDereferenceableFromAttribute(const Value *BV, APInt Offset,
2889 Type *Ty, const DataLayout &DL,
2890 const Instruction *CtxI,
2891 const DominatorTree *DT,
2892 const TargetLibraryInfo *TLI) {
2893 assert(Offset.isNonNegative() && "offset can't be negative");
2894 assert(Ty->isSized() && "must be sized");
2896 APInt DerefBytes(Offset.getBitWidth(), 0);
2897 bool CheckForNonNull = false;
2898 if (const Argument *A = dyn_cast<Argument>(BV)) {
2899 DerefBytes = A->getDereferenceableBytes();
2900 if (!DerefBytes.getBoolValue()) {
2901 DerefBytes = A->getDereferenceableOrNullBytes();
2902 CheckForNonNull = true;
2904 } else if (auto CS = ImmutableCallSite(BV)) {
2905 DerefBytes = CS.getDereferenceableBytes(0);
2906 if (!DerefBytes.getBoolValue()) {
2907 DerefBytes = CS.getDereferenceableOrNullBytes(0);
2908 CheckForNonNull = true;
2910 } else if (const LoadInst *LI = dyn_cast<LoadInst>(BV)) {
2911 if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) {
2912 ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
2913 DerefBytes = CI->getLimitedValue();
2915 if (!DerefBytes.getBoolValue()) {
2917 LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
2918 ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
2919 DerefBytes = CI->getLimitedValue();
2921 CheckForNonNull = true;
2925 if (DerefBytes.getBoolValue())
2926 if (DerefBytes.uge(Offset + DL.getTypeStoreSize(Ty)))
2927 if (!CheckForNonNull || isKnownNonNullAt(BV, CtxI, DT, TLI))
2933 static bool isDereferenceableFromAttribute(const Value *V, const DataLayout &DL,
2934 const Instruction *CtxI,
2935 const DominatorTree *DT,
2936 const TargetLibraryInfo *TLI) {
2937 Type *VTy = V->getType();
2938 Type *Ty = VTy->getPointerElementType();
2942 APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0);
2943 return isDereferenceableFromAttribute(V, Offset, Ty, DL, CtxI, DT, TLI);
2946 static bool isAligned(const Value *Base, APInt Offset, unsigned Align,
2947 const DataLayout &DL) {
2948 APInt BaseAlign(Offset.getBitWidth(), 0);
2949 if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base))
2950 BaseAlign = AI->getAlignment();
2951 else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Base))
2952 BaseAlign = GV->getAlignment();
2953 else if (const Argument *A = dyn_cast<Argument>(Base))
2954 BaseAlign = A->getParamAlignment();
2957 Type *Ty = Base->getType()->getPointerElementType();
2958 BaseAlign = DL.getABITypeAlignment(Ty);
2961 APInt Alignment(Offset.getBitWidth(), Align);
2963 assert(Alignment.isPowerOf2() && "must be a power of 2!");
2964 return BaseAlign.uge(Alignment) && !(Offset & (Alignment-1));
2967 static bool isAligned(const Value *Base, unsigned Align, const DataLayout &DL) {
2968 APInt Offset(DL.getTypeStoreSizeInBits(Base->getType()), 0);
2969 return isAligned(Base, Offset, Align, DL);
2972 /// Test if V is always a pointer to allocated and suitably aligned memory for
2973 /// a simple load or store.
2974 static bool isDereferenceableAndAlignedPointer(
2975 const Value *V, unsigned Align, const DataLayout &DL,
2976 const Instruction *CtxI, const DominatorTree *DT,
2977 const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited) {
2978 // Note that it is not safe to speculate into a malloc'd region because
2979 // malloc may return null.
2981 // These are obviously ok if aligned.
2982 if (isa<AllocaInst>(V))
2983 return isAligned(V, Align, DL);
2985 // It's not always safe to follow a bitcast, for example:
2986 // bitcast i8* (alloca i8) to i32*
2987 // would result in a 4-byte load from a 1-byte alloca. However,
2988 // if we're casting from a pointer from a type of larger size
2989 // to a type of smaller size (or the same size), and the alignment
2990 // is at least as large as for the resulting pointer type, then
2991 // we can look through the bitcast.
2992 if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
2993 Type *STy = BC->getSrcTy()->getPointerElementType(),
2994 *DTy = BC->getDestTy()->getPointerElementType();
2995 if (STy->isSized() && DTy->isSized() &&
2996 (DL.getTypeStoreSize(STy) >= DL.getTypeStoreSize(DTy)) &&
2997 (DL.getABITypeAlignment(STy) >= DL.getABITypeAlignment(DTy)))
2998 return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, DL,
2999 CtxI, DT, TLI, Visited);
3002 // Global variables which can't collapse to null are ok.
3003 if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
3004 if (!GV->hasExternalWeakLinkage())
3005 return isAligned(V, Align, DL);
3007 // byval arguments are okay.
3008 if (const Argument *A = dyn_cast<Argument>(V))
3009 if (A->hasByValAttr())
3010 return isAligned(V, Align, DL);
3012 if (isDereferenceableFromAttribute(V, DL, CtxI, DT, TLI))
3013 return isAligned(V, Align, DL);
3015 // For GEPs, determine if the indexing lands within the allocated object.
3016 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3017 Type *VTy = GEP->getType();
3018 Type *Ty = VTy->getPointerElementType();
3019 const Value *Base = GEP->getPointerOperand();
3021 // Conservatively require that the base pointer be fully dereferenceable
3023 if (!Visited.insert(Base).second)
3025 if (!isDereferenceableAndAlignedPointer(Base, Align, DL, CtxI, DT, TLI,
3029 APInt Offset(DL.getPointerTypeSizeInBits(VTy), 0);
3030 if (!GEP->accumulateConstantOffset(DL, Offset))
3033 // Check if the load is within the bounds of the underlying object
3034 // and offset is aligned.
3035 uint64_t LoadSize = DL.getTypeStoreSize(Ty);
3036 Type *BaseType = Base->getType()->getPointerElementType();
3037 assert(isPowerOf2_32(Align) && "must be a power of 2!");
3038 return (Offset + LoadSize).ule(DL.getTypeAllocSize(BaseType)) &&
3039 !(Offset & APInt(Offset.getBitWidth(), Align-1));
3042 // For gc.relocate, look through relocations
3043 if (const IntrinsicInst *I = dyn_cast<IntrinsicInst>(V))
3044 if (I->getIntrinsicID() == Intrinsic::experimental_gc_relocate) {
3045 GCRelocateOperands RelocateInst(I);
3046 return isDereferenceableAndAlignedPointer(
3047 RelocateInst.getDerivedPtr(), Align, DL, CtxI, DT, TLI, Visited);
3050 if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
3051 return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, DL,
3052 CtxI, DT, TLI, Visited);
3054 // If we don't know, assume the worst.
3058 bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
3059 const DataLayout &DL,
3060 const Instruction *CtxI,
3061 const DominatorTree *DT,
3062 const TargetLibraryInfo *TLI) {
3063 // When dereferenceability information is provided by a dereferenceable
3064 // attribute, we know exactly how many bytes are dereferenceable. If we can
3065 // determine the exact offset to the attributed variable, we can use that
3066 // information here.
3067 Type *VTy = V->getType();
3068 Type *Ty = VTy->getPointerElementType();
3070 // Require ABI alignment for loads without alignment specification
3072 Align = DL.getABITypeAlignment(Ty);
3074 if (Ty->isSized()) {
3075 APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0);
3076 const Value *BV = V->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
3078 if (Offset.isNonNegative())
3079 if (isDereferenceableFromAttribute(BV, Offset, Ty, DL, CtxI, DT, TLI) &&
3080 isAligned(BV, Offset, Align, DL))
3084 SmallPtrSet<const Value *, 32> Visited;
3085 return ::isDereferenceableAndAlignedPointer(V, Align, DL, CtxI, DT, TLI,
3089 bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL,
3090 const Instruction *CtxI,
3091 const DominatorTree *DT,
3092 const TargetLibraryInfo *TLI) {
3093 return isDereferenceableAndAlignedPointer(V, 1, DL, CtxI, DT, TLI);
3096 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3097 const Instruction *CtxI,
3098 const DominatorTree *DT,
3099 const TargetLibraryInfo *TLI) {
3100 const Operator *Inst = dyn_cast<Operator>(V);
3104 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3105 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3109 switch (Inst->getOpcode()) {
3112 case Instruction::UDiv:
3113 case Instruction::URem: {
3114 // x / y is undefined if y == 0.
3116 if (match(Inst->getOperand(1), m_APInt(V)))
3120 case Instruction::SDiv:
3121 case Instruction::SRem: {
3122 // x / y is undefined if y == 0 or x == INT_MIN and y == -1
3123 const APInt *Numerator, *Denominator;
3124 if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3126 // We cannot hoist this division if the denominator is 0.
3127 if (*Denominator == 0)
3129 // It's safe to hoist if the denominator is not 0 or -1.
3130 if (*Denominator != -1)
3132 // At this point we know that the denominator is -1. It is safe to hoist as
3133 // long we know that the numerator is not INT_MIN.
3134 if (match(Inst->getOperand(0), m_APInt(Numerator)))
3135 return !Numerator->isMinSignedValue();
3136 // The numerator *might* be MinSignedValue.
3139 case Instruction::Load: {
3140 const LoadInst *LI = cast<LoadInst>(Inst);
3141 if (!LI->isUnordered() ||
3142 // Speculative load may create a race that did not exist in the source.
3143 LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
3145 const DataLayout &DL = LI->getModule()->getDataLayout();
3146 return isDereferenceableAndAlignedPointer(
3147 LI->getPointerOperand(), LI->getAlignment(), DL, CtxI, DT, TLI);
3149 case Instruction::Call: {
3150 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
3151 switch (II->getIntrinsicID()) {
3152 // These synthetic intrinsics have no side-effects and just mark
3153 // information about their operands.
3154 // FIXME: There are other no-op synthetic instructions that potentially
3155 // should be considered at least *safe* to speculate...
3156 case Intrinsic::dbg_declare:
3157 case Intrinsic::dbg_value:
3160 case Intrinsic::bswap:
3161 case Intrinsic::ctlz:
3162 case Intrinsic::ctpop:
3163 case Intrinsic::cttz:
3164 case Intrinsic::objectsize:
3165 case Intrinsic::sadd_with_overflow:
3166 case Intrinsic::smul_with_overflow:
3167 case Intrinsic::ssub_with_overflow:
3168 case Intrinsic::uadd_with_overflow:
3169 case Intrinsic::umul_with_overflow:
3170 case Intrinsic::usub_with_overflow:
3172 // Sqrt should be OK, since the llvm sqrt intrinsic isn't defined to set
3173 // errno like libm sqrt would.
3174 case Intrinsic::sqrt:
3175 case Intrinsic::fma:
3176 case Intrinsic::fmuladd:
3177 case Intrinsic::fabs:
3178 case Intrinsic::minnum:
3179 case Intrinsic::maxnum:
3181 // TODO: some fp intrinsics are marked as having the same error handling
3182 // as libm. They're safe to speculate when they won't error.
3183 // TODO: are convert_{from,to}_fp16 safe?
3184 // TODO: can we list target-specific intrinsics here?
3188 return false; // The called function could have undefined behavior or
3189 // side-effects, even if marked readnone nounwind.
3191 case Instruction::VAArg:
3192 case Instruction::Alloca:
3193 case Instruction::Invoke:
3194 case Instruction::PHI:
3195 case Instruction::Store:
3196 case Instruction::Ret:
3197 case Instruction::Br:
3198 case Instruction::IndirectBr:
3199 case Instruction::Switch:
3200 case Instruction::Unreachable:
3201 case Instruction::Fence:
3202 case Instruction::AtomicRMW:
3203 case Instruction::AtomicCmpXchg:
3204 case Instruction::LandingPad:
3205 case Instruction::Resume:
3206 case Instruction::CatchPad:
3207 case Instruction::CatchEndPad:
3208 case Instruction::CatchRet:
3209 case Instruction::CleanupPad:
3210 case Instruction::CleanupEndPad:
3211 case Instruction::CleanupRet:
3212 case Instruction::TerminatePad:
3213 return false; // Misc instructions which have effects
3217 bool llvm::mayBeMemoryDependent(const Instruction &I) {
3218 return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3221 /// Return true if we know that the specified value is never null.
3222 bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) {
3223 // Alloca never returns null, malloc might.
3224 if (isa<AllocaInst>(V)) return true;
3226 // A byval, inalloca, or nonnull argument is never null.
3227 if (const Argument *A = dyn_cast<Argument>(V))
3228 return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr();
3230 // A global variable in address space 0 is non null unless extern weak.
3231 // Other address spaces may have null as a valid address for a global,
3232 // so we can't assume anything.
3233 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
3234 return !GV->hasExternalWeakLinkage() &&
3235 GV->getType()->getAddressSpace() == 0;
3237 // A Load tagged w/nonnull metadata is never null.
3238 if (const LoadInst *LI = dyn_cast<LoadInst>(V))
3239 return LI->getMetadata(LLVMContext::MD_nonnull);
3241 if (auto CS = ImmutableCallSite(V))
3242 if (CS.isReturnNonNull())
3245 // operator new never returns null.
3246 if (isOperatorNewLikeFn(V, TLI, /*LookThroughBitCast=*/true))
3252 static bool isKnownNonNullFromDominatingCondition(const Value *V,
3253 const Instruction *CtxI,
3254 const DominatorTree *DT) {
3255 unsigned NumUsesExplored = 0;
3256 for (auto U : V->users()) {
3257 // Avoid massive lists
3258 if (NumUsesExplored >= DomConditionsMaxUses)
3261 // Consider only compare instructions uniquely controlling a branch
3262 const ICmpInst *Cmp = dyn_cast<ICmpInst>(U);
3266 if (DomConditionsSingleCmpUse && !Cmp->hasOneUse())
3269 for (auto *CmpU : Cmp->users()) {
3270 const BranchInst *BI = dyn_cast<BranchInst>(CmpU);
3274 assert(BI->isConditional() && "uses a comparison!");
3276 BasicBlock *NonNullSuccessor = nullptr;
3277 CmpInst::Predicate Pred;
3279 if (match(const_cast<ICmpInst*>(Cmp),
3280 m_c_ICmp(Pred, m_Specific(V), m_Zero()))) {
3281 if (Pred == ICmpInst::ICMP_EQ)
3282 NonNullSuccessor = BI->getSuccessor(1);
3283 else if (Pred == ICmpInst::ICMP_NE)
3284 NonNullSuccessor = BI->getSuccessor(0);
3287 if (NonNullSuccessor) {
3288 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
3289 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
3298 bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI,
3299 const DominatorTree *DT, const TargetLibraryInfo *TLI) {
3300 if (isKnownNonNull(V, TLI))
3303 return CtxI ? ::isKnownNonNullFromDominatingCondition(V, CtxI, DT) : false;
3306 OverflowResult llvm::computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
3307 const DataLayout &DL,
3308 AssumptionCache *AC,
3309 const Instruction *CxtI,
3310 const DominatorTree *DT) {
3311 // Multiplying n * m significant bits yields a result of n + m significant
3312 // bits. If the total number of significant bits does not exceed the
3313 // result bit width (minus 1), there is no overflow.
3314 // This means if we have enough leading zero bits in the operands
3315 // we can guarantee that the result does not overflow.
3316 // Ref: "Hacker's Delight" by Henry Warren
3317 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3318 APInt LHSKnownZero(BitWidth, 0);
3319 APInt LHSKnownOne(BitWidth, 0);
3320 APInt RHSKnownZero(BitWidth, 0);
3321 APInt RHSKnownOne(BitWidth, 0);
3322 computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
3324 computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
3326 // Note that underestimating the number of zero bits gives a more
3327 // conservative answer.
3328 unsigned ZeroBits = LHSKnownZero.countLeadingOnes() +
3329 RHSKnownZero.countLeadingOnes();
3330 // First handle the easy case: if we have enough zero bits there's
3331 // definitely no overflow.
3332 if (ZeroBits >= BitWidth)
3333 return OverflowResult::NeverOverflows;
3335 // Get the largest possible values for each operand.
3336 APInt LHSMax = ~LHSKnownZero;
3337 APInt RHSMax = ~RHSKnownZero;
3339 // We know the multiply operation doesn't overflow if the maximum values for
3340 // each operand will not overflow after we multiply them together.
3342 LHSMax.umul_ov(RHSMax, MaxOverflow);
3344 return OverflowResult::NeverOverflows;
3346 // We know it always overflows if multiplying the smallest possible values for
3347 // the operands also results in overflow.
3349 LHSKnownOne.umul_ov(RHSKnownOne, MinOverflow);
3351 return OverflowResult::AlwaysOverflows;
3353 return OverflowResult::MayOverflow;
3356 OverflowResult llvm::computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
3357 const DataLayout &DL,
3358 AssumptionCache *AC,
3359 const Instruction *CxtI,
3360 const DominatorTree *DT) {
3361 bool LHSKnownNonNegative, LHSKnownNegative;
3362 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
3364 if (LHSKnownNonNegative || LHSKnownNegative) {
3365 bool RHSKnownNonNegative, RHSKnownNegative;
3366 ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
3369 if (LHSKnownNegative && RHSKnownNegative) {
3370 // The sign bit is set in both cases: this MUST overflow.
3371 // Create a simple add instruction, and insert it into the struct.
3372 return OverflowResult::AlwaysOverflows;
3375 if (LHSKnownNonNegative && RHSKnownNonNegative) {
3376 // The sign bit is clear in both cases: this CANNOT overflow.
3377 // Create a simple add instruction, and insert it into the struct.
3378 return OverflowResult::NeverOverflows;
3382 return OverflowResult::MayOverflow;
3385 static OverflowResult computeOverflowForSignedAdd(
3386 Value *LHS, Value *RHS, AddOperator *Add, const DataLayout &DL,
3387 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT) {
3388 if (Add && Add->hasNoSignedWrap()) {
3389 return OverflowResult::NeverOverflows;
3392 bool LHSKnownNonNegative, LHSKnownNegative;
3393 bool RHSKnownNonNegative, RHSKnownNegative;
3394 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
3396 ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
3399 if ((LHSKnownNonNegative && RHSKnownNegative) ||
3400 (LHSKnownNegative && RHSKnownNonNegative)) {
3401 // The sign bits are opposite: this CANNOT overflow.
3402 return OverflowResult::NeverOverflows;
3405 // The remaining code needs Add to be available. Early returns if not so.
3407 return OverflowResult::MayOverflow;
3409 // If the sign of Add is the same as at least one of the operands, this add
3410 // CANNOT overflow. This is particularly useful when the sum is
3411 // @llvm.assume'ed non-negative rather than proved so from analyzing its
3413 bool LHSOrRHSKnownNonNegative =
3414 (LHSKnownNonNegative || RHSKnownNonNegative);
3415 bool LHSOrRHSKnownNegative = (LHSKnownNegative || RHSKnownNegative);
3416 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
3417 bool AddKnownNonNegative, AddKnownNegative;
3418 ComputeSignBit(Add, AddKnownNonNegative, AddKnownNegative, DL,
3419 /*Depth=*/0, AC, CxtI, DT);
3420 if ((AddKnownNonNegative && LHSOrRHSKnownNonNegative) ||
3421 (AddKnownNegative && LHSOrRHSKnownNegative)) {
3422 return OverflowResult::NeverOverflows;
3426 return OverflowResult::MayOverflow;
3429 OverflowResult llvm::computeOverflowForSignedAdd(AddOperator *Add,
3430 const DataLayout &DL,
3431 AssumptionCache *AC,
3432 const Instruction *CxtI,
3433 const DominatorTree *DT) {
3434 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
3435 Add, DL, AC, CxtI, DT);
3438 OverflowResult llvm::computeOverflowForSignedAdd(Value *LHS, Value *RHS,
3439 const DataLayout &DL,
3440 AssumptionCache *AC,
3441 const Instruction *CxtI,
3442 const DominatorTree *DT) {
3443 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
3446 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
3447 // FIXME: This conservative implementation can be relaxed. E.g. most
3448 // atomic operations are guaranteed to terminate on most platforms
3449 // and most functions terminate.
3451 return !I->isAtomic() && // atomics may never succeed on some platforms
3452 !isa<CallInst>(I) && // could throw and might not terminate
3453 !isa<InvokeInst>(I) && // might not terminate and could throw to
3454 // non-successor (see bug 24185 for details).
3455 !isa<ResumeInst>(I) && // has no successors
3456 !isa<ReturnInst>(I); // has no successors
3459 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
3461 // The loop header is guaranteed to be executed for every iteration.
3463 // FIXME: Relax this constraint to cover all basic blocks that are
3464 // guaranteed to be executed at every iteration.
3465 if (I->getParent() != L->getHeader()) return false;
3467 for (const Instruction &LI : *L->getHeader()) {
3468 if (&LI == I) return true;
3469 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
3471 llvm_unreachable("Instruction not contained in its own parent basic block.");
3474 bool llvm::propagatesFullPoison(const Instruction *I) {
3475 switch (I->getOpcode()) {
3476 case Instruction::Add:
3477 case Instruction::Sub:
3478 case Instruction::Xor:
3479 case Instruction::Trunc:
3480 case Instruction::BitCast:
3481 case Instruction::AddrSpaceCast:
3482 // These operations all propagate poison unconditionally. Note that poison
3483 // is not any particular value, so xor or subtraction of poison with
3484 // itself still yields poison, not zero.
3487 case Instruction::AShr:
3488 case Instruction::SExt:
3489 // For these operations, one bit of the input is replicated across
3490 // multiple output bits. A replicated poison bit is still poison.
3493 case Instruction::Shl: {
3494 // Left shift *by* a poison value is poison. The number of
3495 // positions to shift is unsigned, so no negative values are
3496 // possible there. Left shift by zero places preserves poison. So
3497 // it only remains to consider left shift of poison by a positive
3498 // number of places.
3500 // A left shift by a positive number of places leaves the lowest order bit
3501 // non-poisoned. However, if such a shift has a no-wrap flag, then we can
3502 // make the poison operand violate that flag, yielding a fresh full-poison
3504 auto *OBO = cast<OverflowingBinaryOperator>(I);
3505 return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
3508 case Instruction::Mul: {
3509 // A multiplication by zero yields a non-poison zero result, so we need to
3510 // rule out zero as an operand. Conservatively, multiplication by a
3511 // non-zero constant is not multiplication by zero.
3513 // Multiplication by a non-zero constant can leave some bits
3514 // non-poisoned. For example, a multiplication by 2 leaves the lowest
3515 // order bit unpoisoned. So we need to consider that.
3517 // Multiplication by 1 preserves poison. If the multiplication has a
3518 // no-wrap flag, then we can make the poison operand violate that flag
3519 // when multiplied by any integer other than 0 and 1.
3520 auto *OBO = cast<OverflowingBinaryOperator>(I);
3521 if (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) {
3522 for (Value *V : OBO->operands()) {
3523 if (auto *CI = dyn_cast<ConstantInt>(V)) {
3524 // A ConstantInt cannot yield poison, so we can assume that it is
3525 // the other operand that is poison.
3526 return !CI->isZero();
3533 case Instruction::GetElementPtr:
3534 // A GEP implicitly represents a sequence of additions, subtractions,
3535 // truncations, sign extensions and multiplications. The multiplications
3536 // are by the non-zero sizes of some set of types, so we do not have to be
3537 // concerned with multiplication by zero. If the GEP is in-bounds, then
3538 // these operations are implicitly no-signed-wrap so poison is propagated
3539 // by the arguments above for Add, Sub, Trunc, SExt and Mul.
3540 return cast<GEPOperator>(I)->isInBounds();
3547 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
3548 switch (I->getOpcode()) {
3549 case Instruction::Store:
3550 return cast<StoreInst>(I)->getPointerOperand();
3552 case Instruction::Load:
3553 return cast<LoadInst>(I)->getPointerOperand();
3555 case Instruction::AtomicCmpXchg:
3556 return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
3558 case Instruction::AtomicRMW:
3559 return cast<AtomicRMWInst>(I)->getPointerOperand();
3561 case Instruction::UDiv:
3562 case Instruction::SDiv:
3563 case Instruction::URem:
3564 case Instruction::SRem:
3565 return I->getOperand(1);
3572 bool llvm::isKnownNotFullPoison(const Instruction *PoisonI) {
3573 // We currently only look for uses of poison values within the same basic
3574 // block, as that makes it easier to guarantee that the uses will be
3575 // executed given that PoisonI is executed.
3577 // FIXME: Expand this to consider uses beyond the same basic block. To do
3578 // this, look out for the distinction between post-dominance and strong
3580 const BasicBlock *BB = PoisonI->getParent();
3582 // Set of instructions that we have proved will yield poison if PoisonI
3584 SmallSet<const Value *, 16> YieldsPoison;
3585 YieldsPoison.insert(PoisonI);
3587 for (const Instruction *I = PoisonI, *E = BB->end(); I != E;
3588 I = I->getNextNode()) {
3590 const Value *NotPoison = getGuaranteedNonFullPoisonOp(I);
3591 if (NotPoison != nullptr && YieldsPoison.count(NotPoison)) return true;
3592 if (!isGuaranteedToTransferExecutionToSuccessor(I)) return false;
3595 // Mark poison that propagates from I through uses of I.
3596 if (YieldsPoison.count(I)) {
3597 for (const User *User : I->users()) {
3598 const Instruction *UserI = cast<Instruction>(User);
3599 if (UserI->getParent() == BB && propagatesFullPoison(UserI))
3600 YieldsPoison.insert(User);
3607 static bool isKnownNonNaN(Value *V, FastMathFlags FMF) {
3611 if (auto *C = dyn_cast<ConstantFP>(V))
3616 static bool isKnownNonZero(Value *V) {
3617 if (auto *C = dyn_cast<ConstantFP>(V))
3618 return !C->isZero();
3622 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
3624 Value *CmpLHS, Value *CmpRHS,
3625 Value *TrueVal, Value *FalseVal,
3626 Value *&LHS, Value *&RHS) {
3630 // If the predicate is an "or-equal" (FP) predicate, then signed zeroes may
3631 // return inconsistent results between implementations.
3632 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
3633 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
3634 // Therefore we behave conservatively and only proceed if at least one of the
3635 // operands is known to not be zero, or if we don't care about signed zeroes.
3638 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
3639 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
3640 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
3641 !isKnownNonZero(CmpRHS))
3642 return {SPF_UNKNOWN, SPNB_NA, false};
3645 SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
3646 bool Ordered = false;
3648 // When given one NaN and one non-NaN input:
3649 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
3650 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
3651 // ordered comparison fails), which could be NaN or non-NaN.
3652 // so here we discover exactly what NaN behavior is required/accepted.
3653 if (CmpInst::isFPPredicate(Pred)) {
3654 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
3655 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
3657 if (LHSSafe && RHSSafe) {
3658 // Both operands are known non-NaN.
3659 NaNBehavior = SPNB_RETURNS_ANY;
3660 } else if (CmpInst::isOrdered(Pred)) {
3661 // An ordered comparison will return false when given a NaN, so it
3665 // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
3666 NaNBehavior = SPNB_RETURNS_NAN;
3668 NaNBehavior = SPNB_RETURNS_OTHER;
3670 // Completely unsafe.
3671 return {SPF_UNKNOWN, SPNB_NA, false};
3674 // An unordered comparison will return true when given a NaN, so it
3677 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
3678 NaNBehavior = SPNB_RETURNS_OTHER;
3680 NaNBehavior = SPNB_RETURNS_NAN;
3682 // Completely unsafe.
3683 return {SPF_UNKNOWN, SPNB_NA, false};
3687 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
3688 std::swap(CmpLHS, CmpRHS);
3689 Pred = CmpInst::getSwappedPredicate(Pred);
3690 if (NaNBehavior == SPNB_RETURNS_NAN)
3691 NaNBehavior = SPNB_RETURNS_OTHER;
3692 else if (NaNBehavior == SPNB_RETURNS_OTHER)
3693 NaNBehavior = SPNB_RETURNS_NAN;
3697 // ([if]cmp X, Y) ? X : Y
3698 if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
3700 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
3701 case ICmpInst::ICMP_UGT:
3702 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
3703 case ICmpInst::ICMP_SGT:
3704 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
3705 case ICmpInst::ICMP_ULT:
3706 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
3707 case ICmpInst::ICMP_SLT:
3708 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
3709 case FCmpInst::FCMP_UGT:
3710 case FCmpInst::FCMP_UGE:
3711 case FCmpInst::FCMP_OGT:
3712 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
3713 case FCmpInst::FCMP_ULT:
3714 case FCmpInst::FCMP_ULE:
3715 case FCmpInst::FCMP_OLT:
3716 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
3720 if (ConstantInt *C1 = dyn_cast<ConstantInt>(CmpRHS)) {
3721 if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
3722 (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {
3724 // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
3725 // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
3726 if (Pred == ICmpInst::ICMP_SGT && (C1->isZero() || C1->isMinusOne())) {
3727 return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
3730 // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
3731 // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
3732 if (Pred == ICmpInst::ICMP_SLT && (C1->isZero() || C1->isOne())) {
3733 return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
3737 // Y >s C ? ~Y : ~C == ~Y <s ~C ? ~Y : ~C = SMIN(~Y, ~C)
3738 if (const auto *C2 = dyn_cast<ConstantInt>(FalseVal)) {
3739 if (C1->getType() == C2->getType() && ~C1->getValue() == C2->getValue() &&
3740 (match(TrueVal, m_Not(m_Specific(CmpLHS))) ||
3741 match(CmpLHS, m_Not(m_Specific(TrueVal))))) {
3744 return {SPF_SMIN, SPNB_NA, false};
3749 // TODO: (X > 4) ? X : 5 --> (X >= 5) ? X : 5 --> MAX(X, 5)
3751 return {SPF_UNKNOWN, SPNB_NA, false};
3754 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
3755 Instruction::CastOps *CastOp) {
3756 CastInst *CI = dyn_cast<CastInst>(V1);
3757 Constant *C = dyn_cast<Constant>(V2);
3758 CastInst *CI2 = dyn_cast<CastInst>(V2);
3761 *CastOp = CI->getOpcode();
3764 // If V1 and V2 are both the same cast from the same type, we can look
3766 if (CI2->getOpcode() == CI->getOpcode() &&
3767 CI2->getSrcTy() == CI->getSrcTy())
3768 return CI2->getOperand(0);
3774 if (isa<SExtInst>(CI) && CmpI->isSigned()) {
3775 Constant *T = ConstantExpr::getTrunc(C, CI->getSrcTy());
3776 // This is only valid if the truncated value can be sign-extended
3777 // back to the original value.
3778 if (ConstantExpr::getSExt(T, C->getType()) == C)
3782 if (isa<ZExtInst>(CI) && CmpI->isUnsigned())
3783 return ConstantExpr::getTrunc(C, CI->getSrcTy());
3785 if (isa<TruncInst>(CI))
3786 return ConstantExpr::getIntegerCast(C, CI->getSrcTy(), CmpI->isSigned());
3788 if (isa<FPToUIInst>(CI))
3789 return ConstantExpr::getUIToFP(C, CI->getSrcTy(), true);
3791 if (isa<FPToSIInst>(CI))
3792 return ConstantExpr::getSIToFP(C, CI->getSrcTy(), true);
3794 if (isa<UIToFPInst>(CI))
3795 return ConstantExpr::getFPToUI(C, CI->getSrcTy(), true);
3797 if (isa<SIToFPInst>(CI))
3798 return ConstantExpr::getFPToSI(C, CI->getSrcTy(), true);
3800 if (isa<FPTruncInst>(CI))
3801 return ConstantExpr::getFPExtend(C, CI->getSrcTy(), true);
3803 if (isa<FPExtInst>(CI))
3804 return ConstantExpr::getFPTrunc(C, CI->getSrcTy(), true);
3809 SelectPatternResult llvm::matchSelectPattern(Value *V,
3810 Value *&LHS, Value *&RHS,
3811 Instruction::CastOps *CastOp) {
3812 SelectInst *SI = dyn_cast<SelectInst>(V);
3813 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
3815 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
3816 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
3818 CmpInst::Predicate Pred = CmpI->getPredicate();
3819 Value *CmpLHS = CmpI->getOperand(0);
3820 Value *CmpRHS = CmpI->getOperand(1);
3821 Value *TrueVal = SI->getTrueValue();
3822 Value *FalseVal = SI->getFalseValue();
3824 if (isa<FPMathOperator>(CmpI))
3825 FMF = CmpI->getFastMathFlags();
3828 if (CmpI->isEquality())
3829 return {SPF_UNKNOWN, SPNB_NA, false};
3831 // Deal with type mismatches.
3832 if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
3833 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp))
3834 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
3835 cast<CastInst>(TrueVal)->getOperand(0), C,
3837 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp))
3838 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
3839 C, cast<CastInst>(FalseVal)->getOperand(0),
3842 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,