//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

/// Enable an experimental feature to leverage information about dominating
/// conditions to compute known bits. The individual options below control how
/// hard we search. The defaults are chosen to be fairly aggressive. If you
/// run into compile time problems when testing, scale them back and report
/// your results.
static cl::opt<bool> EnableDomConditions("value-tracking-dom-conditions",
                                         cl::Hidden, cl::init(false));

// This is expensive, so we only do it for the top level query value.
// (TODO: evaluate cost vs profit, consider higher thresholds)
static cl::opt<unsigned> DomConditionsMaxDepth("dom-conditions-max-depth",
                                               cl::Hidden, cl::init(1));

/// How many dominating blocks should be scanned looking for dominating
/// conditions?
static cl::opt<unsigned> DomConditionsMaxDomBlocks("dom-conditions-dom-blocks",
                                                   cl::Hidden,
                                                   cl::init(20000));

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

// If true, only consider compares whose sole use is a branch.
static cl::opt<bool> DomConditionsSingleCmpUse("dom-conditions-single-cmp-use",
                                               cl::Hidden, cl::init(false));

/// Returns the bitwidth of the given scalar or pointer type (if unknown
/// returns 0). For vector types, returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}

// Many of these functions have internal versions that take an assumption
// exclusion set. This is because of the potential for mutual recursion to
// cause computeKnownBits to repeatedly visit the same assume intrinsic. The
// classic case of this is assume(x = y), which will attempt to determine
// bits in x from bits in y, which will attempt to determine bits in y from
// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
// isKnownNonZero, which calls computeKnownBits and ComputeSignBit and
// isKnownToBeAPowerOfTwo (all of which can call computeKnownBits), and so on.
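// For example, for the IR below, a query on %x recurses into %y via the
// assume and, from there, back into %x; the exclusion set stops that cycle:
//   %c = icmp eq i32 %x, %y
//   call void @llvm.assume(i1 %c)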
typedef SmallPtrSet<const Value *, 8> ExclInvsSet;

namespace {
// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  ExclInvsSet ExclInvs;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  Query(AssumptionCache *AC = nullptr, const Instruction *CxtI = nullptr,
        const DominatorTree *DT = nullptr)
      : AC(AC), CxtI(CxtI), DT(DT) {}

  Query(const Query &Q, const Value *NewExcl)
      : ExclInvs(Q.ExclInvs), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT) {
    ExclInvs.insert(NewExcl);
  }
};
} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
                             const DataLayout &DL, unsigned Depth,
                             const Query &Q);

void llvm::computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT) {
  ::computeKnownBits(V, KnownZero, KnownOne, DL, Depth,
                     Query(AC, safeCxtI(V, CxtI), DT));
}

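// Example (illustrative): if LHS is "X << 4" (low four bits known zero) and
// RHS is "Y & 15" (high bits known zero), every bit position is known zero
// in at least one operand, so (LHSKnownZero | RHSKnownZero) is all ones and
// the two values can never have a common set bit.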
bool llvm::haveNoCommonBitsSet(Value *LHS, Value *RHS, const DataLayout &DL,
                               AssumptionCache *AC, const Instruction *CxtI,
                               const DominatorTree *DT) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  APInt LHSKnownZero(IT->getBitWidth(), 0), LHSKnownOne(IT->getBitWidth(), 0);
  APInt RHSKnownZero(IT->getBitWidth(), 0), RHSKnownOne(IT->getBitWidth(), 0);
  computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, 0, AC, CxtI, DT);
  computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, 0, AC, CxtI, DT);
  return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
}

static void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
                           const DataLayout &DL, unsigned Depth,
                           const Query &Q);

void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
                          const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  ::ComputeSignBit(V, KnownZero, KnownOne, DL, Depth,
                   Query(AC, safeCxtI(V, CxtI), DT));
}

static bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q, const DataLayout &DL);

bool llvm::isKnownToBeAPowerOfTwo(Value *V, const DataLayout &DL, bool OrZero,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
                                  Query(AC, safeCxtI(V, CxtI), DT), DL);
}

static bool isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
                           const Query &Q);

bool llvm::isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  return ::isKnownNonZero(V, DL, Depth, Query(AC, safeCxtI(V, CxtI), DT));
}

bool llvm::isKnownNonNegative(Value *V, const DataLayout &DL, unsigned Depth,
                              AssumptionCache *AC, const Instruction *CxtI,
                              const DominatorTree *DT) {
  bool NonNegative, Negative;
  ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
  return NonNegative;
}

static bool isKnownNonEqual(Value *V1, Value *V2, const DataLayout &DL,
                            const Query &Q);

bool llvm::isKnownNonEqual(Value *V1, Value *V2, const DataLayout &DL,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  return ::isKnownNonEqual(V1, V2, DL, Query(AC,
                                             safeCxtI(V1, safeCxtI(V2, CxtI)),
                                             DT));
}

static bool MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
                              unsigned Depth, const Query &Q);

bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
                             unsigned Depth, AssumptionCache *AC,
                             const Instruction *CxtI, const DominatorTree *DT) {
  return ::MaskedValueIsZero(V, Mask, DL, Depth,
                             Query(AC, safeCxtI(V, CxtI), DT));
}

static unsigned ComputeNumSignBits(Value *V, const DataLayout &DL,
                                   unsigned Depth, const Query &Q);

unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::ComputeNumSignBits(V, DL, Depth, Query(AC, safeCxtI(V, CxtI), DT));
}

static void computeKnownBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
                                   APInt &KnownZero, APInt &KnownOne,
                                   APInt &KnownZero2, APInt &KnownOne2,
                                   const DataLayout &DL, unsigned Depth,
                                   const Query &Q) {
  if (!Add) {
    if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
      // We know that the top bits of C-X are clear if X contains less bits
      // than C (i.e. no wrap-around can happen). For example, 20-X is
      // positive if we can prove that X is >= 0 and < 16.
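      // E.g. for 20-X with X in [0,16): X's bits above bit 3 are known zero,
      // so the subtraction cannot wrap, the result lies in [5,20], and all
      // result bits above bit 4 are known zero.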
      if (!CLHS->getValue().isNegative()) {
        unsigned BitWidth = KnownZero.getBitWidth();
        unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
        // NLZ can't be BitWidth with no sign bit
        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
        computeKnownBits(Op1, KnownZero2, KnownOne2, DL, Depth + 1, Q);

        // If all of the MaskV bits are known to be zero, then we know the
        // output top bits are zero, because we now know that the output is
        // from [0-C].
        if ((KnownZero2 & MaskV) == MaskV) {
          unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
          // Top bits known zero.
          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
        }
      }
    }
  }

  unsigned BitWidth = KnownZero.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
  APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
  computeKnownBits(Op0, LHSKnownZero, LHSKnownOne, DL, Depth + 1, Q);
  computeKnownBits(Op1, KnownZero2, KnownOne2, DL, Depth + 1, Q);

  // Carry in a 1 for a subtract, rather than a 0.
  APInt CarryIn(BitWidth, 0);
  if (!Add) {
    // Sum = LHS + ~RHS + 1
    std::swap(KnownZero2, KnownOne2);
    CarryIn.setBit(0);
  }

  APInt PossibleSumZero = ~LHSKnownZero + ~KnownZero2 + CarryIn;
  APInt PossibleSumOne = LHSKnownOne + KnownOne2 + CarryIn;

  // Compute known bits of the carry.
  APInt CarryKnownZero = ~(PossibleSumZero ^ LHSKnownZero ^ KnownZero2);
  APInt CarryKnownOne = PossibleSumOne ^ LHSKnownOne ^ KnownOne2;

  // Compute set of known bits (where all three relevant bits are known).
  APInt LHSKnown = LHSKnownZero | LHSKnownOne;
  APInt RHSKnown = KnownZero2 | KnownOne2;
  APInt CarryKnown = CarryKnownZero | CarryKnownOne;
  APInt Known = LHSKnown & RHSKnown & CarryKnown;

  assert((PossibleSumZero & Known) == (PossibleSumOne & Known) &&
         "known bits of sum differ");

  // Compute known bits of the result.
  KnownZero = ~PossibleSumOne & Known;
  KnownOne = PossibleSumOne & Known;
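  // Worked example (illustrative, 4 bits): adding ?100 and 0?10 gives
  // PossibleSumOne = 0100 + 0010 = 0110; bit 0 is fully known in both
  // operands and in the carry, so the result's bit 0 is known zero.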

  // Are we still trying to solve for the sign bit?
  if (!Known.isNegative()) {
    if (NSW) {
      // Adding two non-negative numbers, or subtracting a negative number from
      // a non-negative one, can't wrap into negative.
      if (LHSKnownZero.isNegative() && KnownZero2.isNegative())
        KnownZero |= APInt::getSignBit(BitWidth);
      // Adding two negative numbers, or subtracting a non-negative number from
      // a negative one, can't wrap into non-negative.
      else if (LHSKnownOne.isNegative() && KnownOne2.isNegative())
        KnownOne |= APInt::getSignBit(BitWidth);
    }
  }
}

static void computeKnownBitsMul(Value *Op0, Value *Op1, bool NSW,
                                APInt &KnownZero, APInt &KnownOne,
                                APInt &KnownZero2, APInt &KnownOne2,
                                const DataLayout &DL, unsigned Depth,
                                const Query &Q) {
  unsigned BitWidth = KnownZero.getBitWidth();
  computeKnownBits(Op1, KnownZero, KnownOne, DL, Depth + 1, Q);
  computeKnownBits(Op0, KnownZero2, KnownOne2, DL, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = KnownZero.isNegative();
      bool isKnownNonNegativeOp0 = KnownZero2.isNegative();
      bool isKnownNegativeOp1 = KnownOne.isNegative();
      bool isKnownNegativeOp0 = KnownOne2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, DL, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, DL, Depth, Q));
    }
  }

  // If low bits are zero in either operand, output low known-0 bits.
  // Also compute a conservative estimate for high known-0 bits.
  // More trickiness is possible, but this is sufficient for the
  // interesting case of alignment computation.
  KnownOne.clearAllBits();
  unsigned TrailZ = KnownZero.countTrailingOnes() +
                    KnownZero2.countTrailingOnes();
  unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
                            KnownZero2.countLeadingOnes(),
                            BitWidth) - BitWidth;

  TrailZ = std::min(TrailZ, BitWidth);
  LeadZ = std::min(LeadZ, BitWidth);
  KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
              APInt::getHighBitsSet(BitWidth, LeadZ);
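  // E.g. if Op0 is a multiple of 4 (two trailing zero bits) and Op1 is even
  // (one trailing zero bit), the product is a multiple of 8, so three low
  // bits of the result are known zero.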

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !KnownOne.isNegative())
    KnownZero.setBit(BitWidth - 1);
  else if (isKnownNegative && !KnownZero.isNegative())
    KnownOne.setBit(BitWidth - 1);
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             APInt &KnownZero,
                                             APInt &KnownOne) {
  unsigned BitWidth = KnownZero.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  KnownZero.setAllBits();
  KnownOne.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
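    // E.g. for an i8 range [64, 66) the values are 64 (0b01000000) and 65
    // (0b01000001); 64 ^ 65 = 1 leaves seven common leading bits, so bit 6
    // becomes known one, bits 7 and 5..1 become known zero, and bit 0 stays
    // unknown.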
    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    KnownOne &= Range.getUnsignedMax() & Mask;
    KnownZero &= ~Range.getUnsignedMax() & Mask;
  }
}

static bool isEphemeralValueOf(Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (std::find(I->op_begin(), I->op_end(), E) != I->op_end())
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (std::all_of(V->user_begin(), V->user_end(),
                    [&](const User *U) { return EphValues.count(U); })) {
      if (V == E)
        return true;

      EphValues.insert(V);
      if (const User *U = dyn_cast<User>(V))
        for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
             J != JE; ++J)
          if (isSafeToSpeculativelyExecute(*J))
            WorkSet.push_back(*J);
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
static bool isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}

static bool isValidAssumeForContext(Value *V, const Query &Q) {
  Instruction *Inv = cast<Instruction>(V);

  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (Q.DT) {
    if (Q.DT->dominates(Inv, Q.CxtI)) {
      return true;
    } else if (Inv->getParent() == Q.CxtI->getParent()) {
      // The context comes first, but they're both in the same block. Make sure
      // there is nothing in between that might interrupt the control flow.
      for (BasicBlock::const_iterator I =
             std::next(BasicBlock::const_iterator(Q.CxtI)),
                                      IE(Inv); I != IE; ++I)
        if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
          return false;

      return !isEphemeralValueOf(Inv, Q.CxtI);
    }

    return false;
  }

  // When we don't have a DT, we do a limited search...
  if (Inv->getParent() == Q.CxtI->getParent()->getSinglePredecessor()) {
    return true;
  } else if (Inv->getParent() == Q.CxtI->getParent()) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (BasicBlock::iterator I = std::next(BasicBlock::iterator(Inv)),
         IE = Inv->getParent()->end(); I != IE; ++I)
      if (&*I == Q.CxtI)
        return true;

    // The context must come first...
    for (BasicBlock::const_iterator I =
           std::next(BasicBlock::const_iterator(Q.CxtI)),
                                    IE(Inv); I != IE; ++I)
      if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
        return false;

    return !isEphemeralValueOf(Inv, Q.CxtI);
  }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *I,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  return ::isValidAssumeForContext(const_cast<Instruction *>(I),
                                   Query(nullptr, CxtI, DT));
}

template<typename LHS, typename RHS>
inline match_combine_or<CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>,
                        CmpClass_match<RHS, LHS, ICmpInst, ICmpInst::Predicate>>
m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
  return m_CombineOr(m_ICmp(Pred, L, R), m_ICmp(Pred, R, L));
}

template<typename LHS, typename RHS>
inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::And>,
                        BinaryOp_match<RHS, LHS, Instruction::And>>
m_c_And(const LHS &L, const RHS &R) {
  return m_CombineOr(m_And(L, R), m_And(R, L));
}

template<typename LHS, typename RHS>
inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::Or>,
                        BinaryOp_match<RHS, LHS, Instruction::Or>>
m_c_Or(const LHS &L, const RHS &R) {
  return m_CombineOr(m_Or(L, R), m_Or(R, L));
}

template<typename LHS, typename RHS>
inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::Xor>,
                        BinaryOp_match<RHS, LHS, Instruction::Xor>>
m_c_Xor(const LHS &L, const RHS &R) {
  return m_CombineOr(m_Xor(L, R), m_Xor(R, L));
}

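// For example, match(Arg, m_c_And(m_Specific(V), m_Value(B))) matches both
// "and %v, %b" and "and %b, %v", binding B to the other operand in either
// case; the assume handling below relies on this to canonicalize patterns.
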
/// Compute known bits in 'V' under the assumption that the condition 'Cmp' is
/// true (at the context instruction). This is mostly a utility function for
/// the prototype dominating conditions reasoning below.
static void computeKnownBitsFromTrueCondition(Value *V, ICmpInst *Cmp,
                                              APInt &KnownZero,
                                              APInt &KnownOne,
                                              const DataLayout &DL,
                                              unsigned Depth, const Query &Q) {
  Value *LHS = Cmp->getOperand(0);
  Value *RHS = Cmp->getOperand(1);
  // TODO: We could potentially be more aggressive here. This would be worth
  // evaluating. If we can, explore commoning this code with the assume
  // handling logic.
  if (LHS != V && RHS != V)
    return;

  const unsigned BitWidth = KnownZero.getBitWidth();

  switch (Cmp->getPredicate()) {
  default:
    // We know nothing from this condition
    break;
  // TODO: implement unsigned bound from below (known one bits)
  // TODO: common condition check implementations with assumes
  // TODO: implement other patterns from assume (e.g. V & B == A)
  case ICmpInst::ICMP_SGT:
    if (LHS == V) {
      APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0);
      computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
      if (KnownOneTemp.isAllOnesValue() || KnownZeroTemp.isNegative()) {
        // We know that the sign bit is zero.
        KnownZero |= APInt::getSignBit(BitWidth);
      }
    }
    break;
  case ICmpInst::ICMP_EQ: {
    APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0);
    if (LHS == V)
      computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
    else if (RHS == V)
      computeKnownBits(LHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
    else
      llvm_unreachable("missing use?");
    KnownZero |= KnownZeroTemp;
    KnownOne |= KnownOneTemp;
    break;
  }
  case ICmpInst::ICMP_ULE:
    if (LHS == V) {
      APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0);
      computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
      // The known zero bits carry over
      unsigned SignBits = KnownZeroTemp.countLeadingOnes();
      KnownZero |= APInt::getHighBitsSet(BitWidth, SignBits);
    }
    break;
  case ICmpInst::ICMP_ULT:
    if (LHS == V) {
      APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0);
      computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
      // Whatever high bits in rhs are zero are known to be zero (if rhs is a
      // power of 2, then one more).
      unsigned SignBits = KnownZeroTemp.countLeadingOnes();
      if (isKnownToBeAPowerOfTwo(RHS, false, Depth + 1, Query(Q, Cmp), DL))
        SignBits++;
      KnownZero |= APInt::getHighBitsSet(BitWidth, SignBits);
    }
    break;
  }
}

/// Compute known bits in 'V' from conditions which are known to be true along
/// all paths leading to the context instruction. In particular, look for
/// cases where one branch of an interesting condition dominates the context
/// instruction. This does not do general dataflow.
/// NOTE: This code is EXPERIMENTAL and currently off by default.
static void computeKnownBitsFromDominatingCondition(Value *V, APInt &KnownZero,
                                                    APInt &KnownOne,
                                                    const DataLayout &DL,
                                                    unsigned Depth,
                                                    const Query &Q) {
  // Need both the dominator tree and the query location to do anything useful
  if (!Q.DT || !Q.CxtI)
    return;
  Instruction *Cxt = const_cast<Instruction *>(Q.CxtI);
  // The context instruction might be in a statically unreachable block. If
  // so, asking dominator queries may yield surprising results. (e.g. the block
  // may not have a dom tree node)
  if (!Q.DT->isReachableFromEntry(Cxt->getParent()))
    return;

  // Avoid useless work
  if (auto VI = dyn_cast<Instruction>(V))
    if (VI->getParent() == Cxt->getParent())
      return;

  // Note: We currently implement two options. It's not clear which of these
  // will survive long term, we need data for that.
  // Option 1 - Try walking the dominator tree looking for conditions which
  // might apply. This works well for local conditions (loop guards, etc..),
  // but not as well for things far from the context instruction (presuming a
  // low max blocks explored). If we can set a high enough limit, this would
  // be all we need.
  // Option 2 - We restrict our search to those conditions which are uses of
  // the value we're interested in. This is independent of dom structure,
  // but is slightly less powerful without looking through lots of use chains.
  // It does handle conditions far from the context instruction (e.g. early
  // function exits on entry) really well though.

  // Option 1 - Search the dom tree
  unsigned NumBlocksExplored = 0;
  BasicBlock *Current = Cxt->getParent();
  while (true) {
    // Stop searching if we've gone too far up the chain
    if (NumBlocksExplored >= DomConditionsMaxDomBlocks)
      break;
    NumBlocksExplored++;

    if (!Q.DT->getNode(Current)->getIDom())
      break;
    Current = Q.DT->getNode(Current)->getIDom()->getBlock();
    if (!Current)
      // found function entry
      break;

    BranchInst *BI = dyn_cast<BranchInst>(Current->getTerminator());
    if (!BI || BI->isUnconditional())
      continue;
    ICmpInst *Cmp = dyn_cast<ICmpInst>(BI->getCondition());
    if (!Cmp)
      continue;

    // We're looking for conditions that are guaranteed to hold at the context
    // instruction. Finding a condition where one path dominates the context
    // isn't enough because both the true and false cases could merge before
    // the context instruction we're actually interested in. Instead, we need
    // to ensure that the taken *edge* dominates the context instruction. We
    // know that the edge must be reachable since we started from a reachable
    // block.
    BasicBlock *BB0 = BI->getSuccessor(0);
    BasicBlockEdge Edge(BI->getParent(), BB0);
    if (!Edge.isSingleEdge() || !Q.DT->dominates(Edge, Q.CxtI->getParent()))
      continue;

    computeKnownBitsFromTrueCondition(V, Cmp, KnownZero, KnownOne, DL, Depth,
                                      Q);
  }

  // Option 2 - Search the other uses of V
  unsigned NumUsesExplored = 0;
  for (auto U : V->users()) {
    // Avoid massive lists
    if (NumUsesExplored >= DomConditionsMaxUses)
      break;
    NumUsesExplored++;

    // Consider only compare instructions uniquely controlling a branch
    ICmpInst *Cmp = dyn_cast<ICmpInst>(U);
    if (!Cmp)
      continue;

    if (DomConditionsSingleCmpUse && !Cmp->hasOneUse())
      continue;

    for (auto *CmpU : Cmp->users()) {
      BranchInst *BI = dyn_cast<BranchInst>(CmpU);
      if (!BI || BI->isUnconditional())
        continue;
      // We're looking for conditions that are guaranteed to hold at the
      // context instruction. Finding a condition where one path dominates
      // the context isn't enough because both the true and false cases could
      // merge before the context instruction we're actually interested in.
      // Instead, we need to ensure that the taken *edge* dominates the context
      // instruction.
      BasicBlock *BB0 = BI->getSuccessor(0);
      BasicBlockEdge Edge(BI->getParent(), BB0);
      if (!Edge.isSingleEdge() || !Q.DT->dominates(Edge, Q.CxtI->getParent()))
        continue;

      computeKnownBitsFromTrueCondition(V, Cmp, KnownZero, KnownOne, DL, Depth,
                                        Q);
    }
  }
}

static void computeKnownBitsFromAssume(Value *V, APInt &KnownZero,
                                       APInt &KnownOne, const DataLayout &DL,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = KnownZero.getBitWidth();

  for (auto &AssumeVH : Q.AC->assumptions()) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.ExclInvs.count(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      KnownZero.clearAllBits();
      KnownOne.setAllBits();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V),
                           m_CombineOr(m_PtrToInt(m_Specific(V)),
                                       m_BitCast(m_Specific(V))));

    CmpInst::Predicate Pred;
    ConstantInt *C;
    // assume(v = a)
    if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
        Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      KnownZero |= RHSKnownZero;
      KnownOne |= RHSKnownOne;
    // assume(v & b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
      computeKnownBits(B, MaskKnownZero, MaskKnownOne, DL, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
      KnownZero |= RHSKnownZero & MaskKnownOne;
      KnownOne |= RHSKnownOne & MaskKnownOne;
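      // E.g. assume((v & 0xF0) == 0x30) makes bits 5..4 of v known one and
      // bits 7..6 known zero; the low four bits remain unknown.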
    // assume(~(v & b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
      computeKnownBits(B, MaskKnownZero, MaskKnownOne, DL, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      KnownZero |= RHSKnownOne & MaskKnownOne;
      KnownOne |= RHSKnownZero & MaskKnownOne;
    // assume(v | b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      KnownZero |= RHSKnownZero & BKnownZero;
      KnownOne |= RHSKnownOne & BKnownZero;
    // assume(~(v | b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      KnownZero |= RHSKnownOne & BKnownZero;
      KnownOne |= RHSKnownZero & BKnownZero;
    // assume(v ^ b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
      KnownZero |= RHSKnownZero & BKnownZero;
      KnownOne |= RHSKnownOne & BKnownZero;
      KnownZero |= RHSKnownOne & BKnownOne;
      KnownOne |= RHSKnownZero & BKnownOne;
    // assume(~(v ^ b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      KnownZero |= RHSKnownOne & BKnownZero;
      KnownOne |= RHSKnownZero & BKnownZero;
      KnownZero |= RHSKnownZero & BKnownOne;
      KnownOne |= RHSKnownOne & BKnownOne;
    // assume(v << c = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      KnownZero |= RHSKnownZero.lshr(C->getZExtValue());
      KnownOne |= RHSKnownOne.lshr(C->getZExtValue());
    // assume(~(v << c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      KnownZero |= RHSKnownOne.lshr(C->getZExtValue());
      KnownOne |= RHSKnownZero.lshr(C->getZExtValue());
    // assume(v >> c = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)),
                                                m_AShr(m_V, m_ConstantInt(C))),
                              m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the left by C.
      KnownZero |= RHSKnownZero << C->getZExtValue();
      KnownOne |= RHSKnownOne << C->getZExtValue();
    // assume(~(v >> c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_CombineOr(
                                       m_LShr(m_V, m_ConstantInt(C)),
                                       m_AShr(m_V, m_ConstantInt(C)))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the left by C.
      KnownZero |= RHSKnownOne << C->getZExtValue();
      KnownOne |= RHSKnownZero << C->getZExtValue();
    // assume(v >=_s c) where c is non-negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGE && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));

      if (RHSKnownZero.isNegative()) {
        // We know that the sign bit is zero.
        KnownZero |= APInt::getSignBit(BitWidth);
      }
    // assume(v >_s c) where c is at least -1.
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGT && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));

      if (RHSKnownOne.isAllOnesValue() || RHSKnownZero.isNegative()) {
        // We know that the sign bit is zero.
        KnownZero |= APInt::getSignBit(BitWidth);
      }
    // assume(v <=_s c) where c is negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLE && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));

      if (RHSKnownOne.isNegative()) {
        // We know that the sign bit is one.
        KnownOne |= APInt::getSignBit(BitWidth);
      }
    // assume(v <_s c) where c is non-positive
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLT && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));

      if (RHSKnownZero.isAllOnesValue() || RHSKnownOne.isNegative()) {
        // We know that the sign bit is one.
        KnownOne |= APInt::getSignBit(BitWidth);
      }
    // assume(v <=_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULE && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero.
      KnownZero |=
        APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
    // assume(v <_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULT && isValidAssumeForContext(I, Q)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero (if c is a power
      // of 2, then one more).
      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I), DL))
        KnownZero |=
          APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes()+1);
      else
        KnownZero |=
          APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
    }
  }
}

// Compute known bits from a shift operator, including those with a
// non-constant shift amount. KnownZero and KnownOne are the outputs of this
// function. KnownZero2 and KnownOne2 are pre-allocated temporaries with the
// same bit width as KnownZero and KnownOne. KZF and KOF are operator-specific
// functors that, given the known-zero or known-one bits respectively, and a
// shift amount, compute the implied known-zero or known-one bits of the
// shift operator's result respectively for that shift amount. The results
// from calling KZF and KOF are conservatively combined for all permitted
// shift amounts.
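// For shl, for instance, KZF shifts the incoming known-zero bits left and
// additionally marks the vacated low bits as known zero, while KOF simply
// shifts the known-one bits left (see the lambdas in the Shl case below).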
template <typename KZFunctor, typename KOFunctor>
static void computeKnownBitsFromShiftOperator(Operator *I,
    APInt &KnownZero, APInt &KnownOne,
    APInt &KnownZero2, APInt &KnownOne2,
    const DataLayout &DL, unsigned Depth, const Query &Q,
    KZFunctor KZF, KOFunctor KOF) {
  unsigned BitWidth = KnownZero.getBitWidth();

  if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);

    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
    KnownZero = KZF(KnownZero, ShiftAmt);
    KnownOne = KOF(KnownOne, ShiftAmt);
    return;
  }

  computeKnownBits(I->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, Q);

  // Note: We cannot use KnownZero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = KnownZero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = KnownOne.zextOrTrunc(64).getZExtValue();

  // It would be more clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  KnownZero.clearAllBits(), KnownOne.clearAllBits();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (BitWidth - 1)) && !(ShiftAmtKO & (BitWidth - 1))) {
    ShifterOperandIsNonZero =
        isKnownNonZero(I->getOperand(1), DL, Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q);

  KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), DL, Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    KnownZero &= KZF(KnownZero2, ShiftAmt);
    KnownOne &= KOF(KnownOne2, ShiftAmt);
  }

  // If there are no compatible shift amounts, then we've proven that the shift
  // amount must be >= the BitWidth, and the result is undefined. We could
  // return anything we'd like, but we need to make sure the sets of known bits
  // stay disjoint (it should be better for some other code to actually
  // propagate the undef than to pick a value here using known bits).
  if ((KnownZero & KnownOne) != 0)
    KnownZero.clearAllBits(), KnownOne.clearAllBits();
}

static void computeKnownBitsFromOperator(Operator *I, APInt &KnownZero,
                                         APInt &KnownOne, const DataLayout &DL,
                                         unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownZero.getBitWidth();

  APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    break;
  }
  case Instruction::Or: {
    computeKnownBits(I->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    break;
  }
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q);
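
    // E.g. a bit known one in both operands xors to a known zero (1 ^ 1 == 0),
    // while a bit known one in exactly one operand and known zero in the other
    // xors to a known one; bits unknown in either operand stay unknown.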
    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    break;
  }
  case Instruction::Mul: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, KnownZero,
                        KnownOne, KnownZero2, KnownOne2, DL, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q);
    unsigned LeadZ = KnownZero2.countLeadingOnes();

    KnownOne2.clearAllBits();
    KnownZero2.clearAllBits();
    computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, DL, Depth + 1, Q);
    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
    if (RHSUnknownLeadingOnes != BitWidth)
      LeadZ = std::min(BitWidth,
                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);

    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
    break;
  }
  case Instruction::Select:
    computeKnownBits(I->getOperand(2), KnownZero, KnownOne, DL, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, DL, Depth + 1, Q);

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::AddrSpaceCast: // Pointers could be different sizes.
    // FALL THROUGH and handle them the same as zext/trunc.
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    SrcBitWidth = DL.getTypeSizeInBits(SrcTy->getScalarType());

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
    KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
    KnownZero = KnownZero.zextOrTrunc(BitWidth);
    KnownOne = KnownOne.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy() ||
         SrcTy->isFloatingPointTy()) &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    KnownZero = KnownZero.trunc(SrcBitWidth);
    KnownOne = KnownOne.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    if (KnownZero[SrcBitWidth-1]) // Input sign bit known zero
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    else if (KnownOne[SrcBitWidth-1]) // Input sign bit known set
      KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    break;
  }
  case Instruction::Shl: {
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
      return (KnownZero << ShiftAmt) |
             APInt::getLowBitsSet(BitWidth, ShiftAmt); // Low bits known 0.
    };

    auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne << ShiftAmt;
    };

    computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
                                      KnownZero2, KnownOne2, DL, Depth, Q,
                                      KZF, KOF);
    break;
  }
  case Instruction::LShr: {
    // (ushr X, C1) & C2 == 0   iff   (-1 >> C1) & C2 == 0
    auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
      return APIntOps::lshr(KnownZero, ShiftAmt) |
             // High bits known zero.
             APInt::getHighBitsSet(BitWidth, ShiftAmt);
    };

    auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
      return APIntOps::lshr(KnownOne, ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
                                      KnownZero2, KnownOne2, DL, Depth, Q,
                                      KZF, KOF);
    break;
  }
  case Instruction::AShr: {
    // (ashr X, C1) & C2 == 0   iff   (-1 >> C1) & C2 == 0
    auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
      return APIntOps::ashr(KnownZero, ShiftAmt);
    };

    auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
      return APIntOps::ashr(KnownOne, ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
                                      KnownZero2, KnownOne2, DL, Depth, Q,
                                      KZF, KOF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           KnownZero, KnownOne, KnownZero2, KnownOne2, DL,
                           Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           KnownZero, KnownOne, KnownZero2, KnownOne2, DL,
                           Depth, Q);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1,
                         Q);

        // The low bits of the first operand are unchanged by the srem.
        KnownZero = KnownZero2 & LowBits;
        KnownOne = KnownOne2 & LowBits;
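        // E.g. for "X srem 8" the low three bits of the result equal the low
        // three bits of X; the high bits depend on the sign of X and are
        // handled below.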
        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
          KnownZero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
          KnownOne |= ~LowBits;

        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    if (KnownZero.isNonNegative()) {
      APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
      computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, DL,
                       Depth + 1, Q);
      // If it's known zero, our sign bit is also zero.
      if (LHSKnownZero.isNegative())
        KnownZero.setBit(BitWidth - 1);
    }

    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1,
                         Q);
        KnownZero |= ~LowBits;
        KnownOne &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, DL, Depth + 1, Q);

    unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
    KnownOne.clearAllBits();
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
    break;
  }
  case Instruction::Alloca: {
    AllocaInst *AI = cast<AllocaInst>(I);
    unsigned Align = AI->getAlignment();
    if (Align == 0)
      Align = DL.getABITypeAlignment(AI->getType()->getElementType());

    if (Align > 0)
      KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
    computeKnownBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, DL,
                     Depth + 1, Q);
    unsigned TrailZ = LocalKnownZero.countTrailingOnes();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        // Handle struct member offset arithmetic.

        // Handle case when index is vector zeroinitializer
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) {
          TrailZ = 0;
          break;
        }
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = DL.getTypeAllocSize(IndexedTy);
        LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
        computeKnownBits(Index, LocalKnownZero, LocalKnownOne, DL, Depth + 1,
                         Q);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnownZero.countTrailingOnes()));
      }
    }

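    // E.g. indexing an array of i64 (eight-byte elements) from a base with
    // four trailing zero bits: each index contributes at least three trailing
    // zeros, so the address keeps min(4, 3 + index's trailing zeros) of them.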
    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ);
    break;
  }
  case Instruction::PHI: {
    PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
          computeKnownBits(R, KnownZero2, KnownOne2, DL, Depth + 1, Q);

          // We need to take the minimum number of known bits
          APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
          computeKnownBits(L, KnownZero3, KnownOne3, DL, Depth + 1, Q);

          KnownZero = APInt::getLowBitsSet(BitWidth,
                                           std::min(KnownZero2.countTrailingOnes(),
                                                    KnownZero3.countTrailingOnes()));
          break;
        }
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
      // Skip if every incoming value references ourself.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      KnownZero = APInt::getAllOnesValue(BitWidth);
      KnownOne = APInt::getAllOnesValue(BitWidth);
      for (Value *IncValue : P->incoming_values()) {
        // Skip direct self references.
        if (IncValue == P) continue;

        KnownZero2 = APInt(BitWidth, 0);
        KnownOne2 = APInt(BitWidth, 0);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, KnownZero2, KnownOne2, DL,
                         MaxDepth - 1, Q);
        KnownZero &= KnownZero2;
        KnownOne &= KnownOne2;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!KnownZero && !KnownOne)
          break;
      }
    }
    break;
  }
  case Instruction::Call:
  case Instruction::Invoke:
    if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
    // If a range metadata is attached to this IntrinsicInst, intersect the
    // explicit range specified by the metadata and the implicit range of
    // the intrinsic.
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bswap:
        computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL,
                         Depth + 1, Q);
        KnownZero |= KnownZero2.byteSwap();
        KnownOne |= KnownOne2.byteSwap();
        break;
      case Intrinsic::ctlz:
      case Intrinsic::cttz: {
        unsigned LowBits = Log2_32(BitWidth)+1;
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          LowBits -= 1;
        KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
        break;
      }
      case Intrinsic::ctpop: {
        computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL,
                         Depth + 1, Q);
        // We can bound the space the count needs. Also, bits known to be zero
        // can't contribute to the population.
        unsigned BitsPossiblySet = BitWidth - KnownZero2.countPopulation();
        unsigned LeadingZeros =
            APInt(BitWidth, BitsPossiblySet).countLeadingZeros();
        assert(LeadingZeros <= BitWidth);
        KnownZero |= APInt::getHighBitsSet(BitWidth, LeadingZeros);
        KnownOne &= ~KnownZero;
        // TODO: we could bound KnownOne using the lower bound on the number
        // of bits which might be set provided by popcnt KnownOne2.
        break;
      }
      case Intrinsic::fabs: {
        Type *Ty = II->getType();
        APInt SignBit = APInt::getSignBit(Ty->getScalarSizeInBits());
        KnownZero |= APInt::getSplat(Ty->getPrimitiveSizeInBits(), SignBit);
        break;
      }
      case Intrinsic::x86_sse42_crc32_64_64:
        KnownZero |= APInt::getHighBitsSet(64, 32);
        break;
      }
    }
    break;
  case Instruction::ExtractValue:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
      ExtractValueInst *EVI = cast<ExtractValueInst>(I);
      if (EVI->getNumIndices() != 1) break;
      if (EVI->getIndices()[0] == 0) {
        switch (II->getIntrinsicID()) {
        default: break;
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::sadd_with_overflow:
          computeKnownBitsAddSub(true, II->getArgOperand(0),
                                 II->getArgOperand(1), false, KnownZero,
                                 KnownOne, KnownZero2, KnownOne2, DL, Depth, Q);
          break;
        case Intrinsic::usub_with_overflow:
        case Intrinsic::ssub_with_overflow:
          computeKnownBitsAddSub(false, II->getArgOperand(0),
                                 II->getArgOperand(1), false, KnownZero,
                                 KnownOne, KnownZero2, KnownOne2, DL, Depth, Q);
          break;
        case Intrinsic::umul_with_overflow:
        case Intrinsic::smul_with_overflow:
          computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
                              KnownZero, KnownOne, KnownZero2, KnownOne2, DL,
                              Depth, Q);
          break;
        }
      }
    }
  }
}

1541 static unsigned getAlignment(const Value *V, const DataLayout &DL) {
1543 if (auto *GO = dyn_cast<GlobalObject>(V)) {
1544 Align = GO->getAlignment();
1546 if (auto *GVar = dyn_cast<GlobalVariable>(GO)) {
1547 Type *ObjectType = GVar->getType()->getElementType();
1548 if (ObjectType->isSized()) {
1549 // If the object is defined in the current Module, we'll be giving
1550 // it the preferred alignment. Otherwise, we have to assume that it
1551 // may only have the minimum ABI alignment.
1552 if (GVar->isStrongDefinitionForLinker())
1553 Align = DL.getPreferredAlignment(GVar);
1555 Align = DL.getABITypeAlignment(ObjectType);
1559 } else if (const Argument *A = dyn_cast<Argument>(V)) {
1560 Align = A->getType()->isPointerTy() ? A->getParamAlignment() : 0;
1562 if (!Align && A->hasStructRetAttr()) {
1563 // An sret parameter has at least the ABI alignment of the return type.
1564 Type *EltTy = cast<PointerType>(A->getType())->getElementType();
1565 if (EltTy->isSized())
1566 Align = DL.getABITypeAlignment(EltTy);
1568 } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
1569 Align = AI->getAlignment();
1570 else if (auto CS = ImmutableCallSite(V))
1571 Align = CS.getAttributes().getParamAlignment(AttributeSet::ReturnIndex);
1572 else if (const LoadInst *LI = dyn_cast<LoadInst>(V))
1573 if (MDNode *MD = LI->getMetadata(LLVMContext::MD_align)) {
1574 ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
1575 Align = CI->getLimitedValue();
1581 /// Determine which bits of V are known to be either zero or one and return
1582 /// them in the KnownZero/KnownOne bit sets.
1584 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1585 /// we cannot optimize based on the assumption that it is zero without changing
1586 /// it to be an explicit zero. If we don't change it to zero, other code could
1587 /// be optimized based on the contradictory assumption that it is non-zero.
1588 /// Because instcombine aggressively folds operations with undef args anyway,
1589 /// this won't lose us code quality.
1591 /// This function is defined on values with integer type, values with pointer
1592 /// type, and vectors of integers. In the case
1593 /// where V is a vector, the known zero and known one values are the
1594 /// same width as the vector element, and the bit is set only if it is true
1595 /// for all of the elements in the vector.
1596 void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
1597 const DataLayout &DL, unsigned Depth, const Query &Q) {
1598 assert(V && "No Value?");
1599 assert(Depth <= MaxDepth && "Limit Search Depth");
1600 unsigned BitWidth = KnownZero.getBitWidth();
1602 assert((V->getType()->isIntOrIntVectorTy() ||
1603 V->getType()->isFPOrFPVectorTy() ||
1604 V->getType()->getScalarType()->isPointerTy()) &&
1605 "Not integer, floating point, or pointer type!");
1606 assert((DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
1607 (!V->getType()->isIntOrIntVectorTy() ||
1608 V->getType()->getScalarSizeInBits() == BitWidth) &&
1609 KnownZero.getBitWidth() == BitWidth &&
1610 KnownOne.getBitWidth() == BitWidth &&
1611 "V, KnownOne and KnownZero should have same BitWidth");
1613 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
1614 // We know all of the bits for a constant!
1615 KnownOne = CI->getValue();
1616 KnownZero = ~KnownOne;
1619 // Null and aggregate-zero are all-zeros.
1620 if (isa<ConstantPointerNull>(V) ||
1621 isa<ConstantAggregateZero>(V)) {
1622 KnownOne.clearAllBits();
1623 KnownZero = APInt::getAllOnesValue(BitWidth);
1626 // Handle a constant vector by taking the intersection of the known bits of
1627 // each element. There is no real need to handle ConstantVector here, because
1628 // we don't handle undef in any particularly useful way.
1629 if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1630 // We know that CDS must be a vector of integers. Take the intersection of the known bits of each element.
1632 KnownZero.setAllBits(); KnownOne.setAllBits();
1633 APInt Elt(KnownZero.getBitWidth(), 0);
1634 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1635 Elt = CDS->getElementAsInteger(i);
1642 // Start out not knowing anything.
1643 KnownZero.clearAllBits(); KnownOne.clearAllBits();
1645 // Limit search depth.
1646 // All recursive calls that increase depth must come after this.
1647 if (Depth == MaxDepth)
1650 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1651 // the bits of its aliasee.
1652 if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1653 if (!GA->mayBeOverridden())
1654 computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, DL, Depth + 1, Q);
1658 if (Operator *I = dyn_cast<Operator>(V))
1659 computeKnownBitsFromOperator(I, KnownZero, KnownOne, DL, Depth, Q);
1661 // Aligned pointers have trailing zero bits - refine the KnownZero set.
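// For example (hypothetical): a pointer known to be 16-byte aligned has
// countTrailingZeros(16) == 4, so its low four bits are known zero.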
1662 if (V->getType()->isPointerTy()) {
1663 unsigned Align = getAlignment(V, DL);
1665 KnownZero |= APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
1668 // computeKnownBitsFromAssume and computeKnownBitsFromDominatingCondition
1669 // strictly refine KnownZero and KnownOne. Therefore, we run them after
1670 // computeKnownBitsFromOperator.
1672 // Check whether a nearby assume intrinsic can determine some known bits.
1673 computeKnownBitsFromAssume(V, KnownZero, KnownOne, DL, Depth, Q);
1675 // Check whether there's a dominating condition which implies something about
1676 // this value at the given context.
1677 if (EnableDomConditions && Depth <= DomConditionsMaxDepth)
1678 computeKnownBitsFromDominatingCondition(V, KnownZero, KnownOne, DL, Depth,
1681 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1684 /// Determine whether the sign bit is known to be zero or one.
1685 /// Convenience wrapper around computeKnownBits.
1686 void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
1687 const DataLayout &DL, unsigned Depth, const Query &Q) {
1688 unsigned BitWidth = getBitWidth(V->getType(), DL);
1694 APInt ZeroBits(BitWidth, 0);
1695 APInt OneBits(BitWidth, 0);
1696 computeKnownBits(V, ZeroBits, OneBits, DL, Depth, Q);
1697 KnownOne = OneBits[BitWidth - 1];
1698 KnownZero = ZeroBits[BitWidth - 1];
1701 /// Return true if the given value is known to have exactly one
1702 /// bit set when defined. For vectors return true if every element is known to
1703 /// be a power of two when defined. Supports values with integer or pointer
1704 /// types and vectors of integers.
1705 bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
1706 const Query &Q, const DataLayout &DL) {
1707 if (Constant *C = dyn_cast<Constant>(V)) {
1708 if (C->isNullValue())
1710 if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
1711 return CI->getValue().isPowerOf2();
1712 // TODO: Handle vector constants.
1715 // 1 << X is clearly a power of two if the one is not shifted off the end. If
1716 // it is shifted off the end then the result is undefined.
1717 if (match(V, m_Shl(m_One(), m_Value())))
1720 // (signbit) >>l X is clearly a power of two if the one is not shifted off the
1721 // bottom. If it is shifted off the bottom then the result is undefined.
1722 if (match(V, m_LShr(m_SignBit(), m_Value())))
1725 // The remaining tests are all recursive, so bail out if we hit the limit.
1726 if (Depth++ == MaxDepth)
1729 Value *X = nullptr, *Y = nullptr;
1730 // A shift of a power of two is a power of two or zero.
1731 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1732 match(V, m_Shr(m_Value(X), m_Value()))))
1733 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q, DL);
1735 if (ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1736 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q, DL);
1738 if (SelectInst *SI = dyn_cast<SelectInst>(V))
1739 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q, DL) &&
1740 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q, DL);
1742 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1743 // A power of two and'd with anything is a power of two or zero.
1744 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q, DL) ||
1745 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q, DL))
1747 // X & (-X) is always a power of two or zero.
1748 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1753 // Adding a power-of-two or zero to the same power-of-two or zero yields
1754 // either the original power-of-two, a larger power-of-two or zero.
1755 if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1756 OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1757 if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
1758 if (match(X, m_And(m_Specific(Y), m_Value())) ||
1759 match(X, m_And(m_Value(), m_Specific(Y))))
1760 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q, DL))
1762 if (match(Y, m_And(m_Specific(X), m_Value())) ||
1763 match(Y, m_And(m_Value(), m_Specific(X))))
1764 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q, DL))
1767 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1768 APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0);
1769 computeKnownBits(X, LHSZeroBits, LHSOneBits, DL, Depth, Q);
1771 APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0);
1772 computeKnownBits(Y, RHSZeroBits, RHSOneBits, DL, Depth, Q);
1773 // If i8 V is a power of two or zero:
1774 // ZeroBits: 1 1 1 0 1 1 1 1
1775 // ~ZeroBits: 0 0 0 1 0 0 0 0
1776 if ((~(LHSZeroBits & RHSZeroBits)).isPowerOf2())
1777 // If OrZero isn't set, we cannot give back a zero result.
1778 // Make sure either the LHS or RHS has a bit set.
1779 if (OrZero || RHSOneBits.getBoolValue() || LHSOneBits.getBoolValue())
1784 // An exact divide or right shift can only shift off zero bits, so the result
1785 // is a power of two only if the first operand is a power of two and not
1786 // copying a sign bit (sdiv int_min, 2).
1787 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1788 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1789 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1796 /// \brief Test whether a GEP's result is known to be non-null.
1798 /// Uses properties inherent in a GEP to try to determine whether it is known to be non-null.
1801 /// Currently this routine does not support vector GEPs.
1802 static bool isGEPKnownNonNull(GEPOperator *GEP, const DataLayout &DL,
1803 unsigned Depth, const Query &Q) {
1804 if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
1807 // FIXME: Support vector-GEPs.
1808 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1810 // If the base pointer is non-null, we cannot walk to a null address with an
1811 // inbounds GEP in address space zero.
1812 if (isKnownNonZero(GEP->getPointerOperand(), DL, Depth, Q))
1815 // Walk the GEP operands and see if any operand introduces a non-zero offset.
1816 // If so, then the GEP cannot produce a null pointer, as doing so would
1817 // inherently violate the inbounds contract within address space zero.
1818 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1819 GTI != GTE; ++GTI) {
1820 // Struct types are easy -- they must always be indexed by a constant.
1821 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
1822 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1823 unsigned ElementIdx = OpC->getZExtValue();
1824 const StructLayout *SL = DL.getStructLayout(STy);
1825 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1826 if (ElementOffset > 0)
1831 // If we have a zero-sized type, the index doesn't matter. Keep looping.
1832 if (DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1835 // Fast path the constant operand case both for efficiency and so we don't
1836 // increment Depth when just zipping down an all-constant GEP.
1837 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1843 // We post-increment Depth here because while isKnownNonZero increments it
1844 // as well, when we pop back up that increment won't persist. We don't want
1845 // to recurse 10k times just because we have 10k GEP operands. We don't
1846 // bail completely out because we want to handle constant GEPs regardless of depth.
1848 if (Depth++ >= MaxDepth)
1851 if (isKnownNonZero(GTI.getOperand(), DL, Depth, Q))
1858 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
1859 /// ensure that the value it's attached to is never equal to 'Value'?
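/// For example (hypothetical metadata): the single range pair
/// !{i32 1, i32 256} describes the half-open interval [1, 256), which does
/// not contain 0, so a query for the value 0 would return true.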
1861 static bool rangeMetadataExcludesValue(MDNode* Ranges,
1862 const APInt& Value) {
1863 const unsigned NumRanges = Ranges->getNumOperands() / 2;
1864 assert(NumRanges >= 1);
1865 for (unsigned i = 0; i < NumRanges; ++i) {
1866 ConstantInt *Lower =
1867 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1868 ConstantInt *Upper =
1869 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1870 ConstantRange Range(Lower->getValue(), Upper->getValue());
1871 if (Range.contains(Value))
1877 /// Return true if the given value is known to be non-zero when defined.
1878 /// For vectors return true if every element is known to be non-zero when
1879 /// defined. Supports values with integer or pointer type and vectors of integers.
1881 bool isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
1883 if (Constant *C = dyn_cast<Constant>(V)) {
1884 if (C->isNullValue())
1886 if (isa<ConstantInt>(C))
1887 // Must be non-zero due to null test above.
1889 // TODO: Handle vectors
1893 if (Instruction* I = dyn_cast<Instruction>(V)) {
1894 if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
1895 // If the possible ranges don't contain zero, then the value is
1896 // definitely non-zero.
1897 if (IntegerType* Ty = dyn_cast<IntegerType>(V->getType())) {
1898 const APInt ZeroValue(Ty->getBitWidth(), 0);
1899 if (rangeMetadataExcludesValue(Ranges, ZeroValue))
1905 // The remaining tests are all recursive, so bail out if we hit the limit.
1906 if (Depth++ >= MaxDepth)
1909 // Check for pointer simplifications.
1910 if (V->getType()->isPointerTy()) {
1911 if (isKnownNonNull(V))
1913 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
1914 if (isGEPKnownNonNull(GEP, DL, Depth, Q))
1918 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), DL);
1920 // X | Y != 0 if X != 0 or Y != 0.
1921 Value *X = nullptr, *Y = nullptr;
1922 if (match(V, m_Or(m_Value(X), m_Value(Y))))
1923 return isKnownNonZero(X, DL, Depth, Q) || isKnownNonZero(Y, DL, Depth, Q);
1925 // ext X != 0 if X != 0.
1926 if (isa<SExtInst>(V) || isa<ZExtInst>(V))
1927 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), DL, Depth, Q);
1929 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
1930 // if the lowest bit is shifted off the end.
1931 if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
1932 // shl nuw can't remove any non-zero bits.
1933 OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1934 if (BO->hasNoUnsignedWrap())
1935 return isKnownNonZero(X, DL, Depth, Q);
1937 APInt KnownZero(BitWidth, 0);
1938 APInt KnownOne(BitWidth, 0);
1939 computeKnownBits(X, KnownZero, KnownOne, DL, Depth, Q);
1943 // shr X, Y != 0 if X is negative. Note that the value of the shift is not
1944 // defined if the sign bit is shifted off the end.
1945 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
1946 // shr exact can only shift out zero bits.
1947 PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
1949 return isKnownNonZero(X, DL, Depth, Q);
1951 bool XKnownNonNegative, XKnownNegative;
1952 ComputeSignBit(X, XKnownNonNegative, XKnownNegative, DL, Depth, Q);
1956 // If the shifter operand is a constant, and all of the bits shifted
1957 // out are known to be zero, and X is known non-zero then at least one
1958 // non-zero bit must remain.
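// For example (hypothetical known bits): if X is known non-zero and its
// low three bits are known zero, then "lshr X, 3" discards only known-zero
// bits, so at least one non-zero bit survives the shift.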
1959 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
1960 APInt KnownZero(BitWidth, 0);
1961 APInt KnownOne(BitWidth, 0);
1962 computeKnownBits(X, KnownZero, KnownOne, DL, Depth, Q);
1964 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
1965 // Is there a known one in the portion not shifted out?
1966 if (KnownOne.countLeadingZeros() < BitWidth - ShiftVal)
1968 // Are all the bits to be shifted out known zero?
1969 if (KnownZero.countTrailingOnes() >= ShiftVal)
1970 return isKnownNonZero(X, DL, Depth, Q);
1973 // div exact can only produce a zero if the dividend is zero.
1974 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
1975 return isKnownNonZero(X, DL, Depth, Q);
1978 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1979 bool XKnownNonNegative, XKnownNegative;
1980 bool YKnownNonNegative, YKnownNegative;
1981 ComputeSignBit(X, XKnownNonNegative, XKnownNegative, DL, Depth, Q);
1982 ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, DL, Depth, Q);
1984 // If X and Y are both non-negative (as signed values) then their sum is not
1985 // zero unless both X and Y are zero.
1986 if (XKnownNonNegative && YKnownNonNegative)
1987 if (isKnownNonZero(X, DL, Depth, Q) || isKnownNonZero(Y, DL, Depth, Q))
1990 // If X and Y are both negative (as signed values) then their sum is not
1991 // zero unless both X and Y equal INT_MIN.
1992 if (BitWidth && XKnownNegative && YKnownNegative) {
1993 APInt KnownZero(BitWidth, 0);
1994 APInt KnownOne(BitWidth, 0);
1995 APInt Mask = APInt::getSignedMaxValue(BitWidth);
1996 // The sign bit of X is set. If some other bit is set then X is not equal to INT_MIN.
1998 computeKnownBits(X, KnownZero, KnownOne, DL, Depth, Q);
1999 if ((KnownOne & Mask) != 0)
2001 // The sign bit of Y is set. If some other bit is set then Y is not equal to INT_MIN.
2003 computeKnownBits(Y, KnownZero, KnownOne, DL, Depth, Q);
2004 if ((KnownOne & Mask) != 0)
2008 // The sum of a non-negative number and a power of two is not zero.
2009 if (XKnownNonNegative &&
2010 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q, DL))
2012 if (YKnownNonNegative &&
2013 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q, DL))
2017 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2018 OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2019 // If X and Y are non-zero then so is X * Y as long as the multiplication
2020 // does not overflow.
2021 if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
2022 isKnownNonZero(X, DL, Depth, Q) && isKnownNonZero(Y, DL, Depth, Q))
2025 // (C ? X : Y) != 0 if X != 0 and Y != 0.
2026 else if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
2027 if (isKnownNonZero(SI->getTrueValue(), DL, Depth, Q) &&
2028 isKnownNonZero(SI->getFalseValue(), DL, Depth, Q))
2032 else if (PHINode *PN = dyn_cast<PHINode>(V)) {
2033 // Try to detect a recurrence that monotonically increases from a
2034 // starting value, as these are common as induction variables.
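// For example, a hypothetical counter
//   %i      = phi i32 [ 1, %entry ], [ %i.next, %loop ]
//   %i.next = add nuw i32 %i, 2
// starts at a positive constant and increases by a non-negative step
// without wrapping, so it can never become zero.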
2035 if (PN->getNumIncomingValues() == 2) {
2036 Value *Start = PN->getIncomingValue(0);
2037 Value *Induction = PN->getIncomingValue(1);
2038 if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
2039 std::swap(Start, Induction);
2040 if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
2041 if (!C->isZero() && !C->isNegative()) {
2043 if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
2044 match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
2052 if (!BitWidth) return false;
2053 APInt KnownZero(BitWidth, 0);
2054 APInt KnownOne(BitWidth, 0);
2055 computeKnownBits(V, KnownZero, KnownOne, DL, Depth, Q);
2056 return KnownOne != 0;
2059 /// Return true if V2 == V1 + X, where X is known non-zero.
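/// For example (hypothetical): V1 = "add i32 %x, 1" and V2 = "%x" satisfy
/// this, since the added operand 1 is trivially non-zero.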
2060 static bool isAddOfNonZero(Value *V1, Value *V2, const DataLayout &DL,
2062 BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2063 if (!BO || BO->getOpcode() != Instruction::Add)
2065 Value *Op = nullptr;
2066 if (V2 == BO->getOperand(0))
2067 Op = BO->getOperand(1);
2068 else if (V2 == BO->getOperand(1))
2069 Op = BO->getOperand(0);
2072 return isKnownNonZero(Op, DL, 0, Q);
2075 /// Return true if it is known that V1 != V2.
2076 static bool isKnownNonEqual(Value *V1, Value *V2, const DataLayout &DL,
2078 if (V1->getType()->isVectorTy() || V1 == V2)
2080 if (V1->getType() != V2->getType())
2081 // We can't look through casts yet.
2083 if (isAddOfNonZero(V1, V2, DL, Q) || isAddOfNonZero(V2, V1, DL, Q))
2086 if (IntegerType *Ty = dyn_cast<IntegerType>(V1->getType())) {
2087 // Are any known bits in V1 contradictory to known bits in V2? If V1
2088 // has a known zero where V2 has a known one, they must not be equal.
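// For example (hypothetical known bits): V1 = "or i32 %a, 1" has bit 0
// known one, while V2 = "shl i32 %b, 1" has bit 0 known zero, so the two
// values cannot be equal.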
2089 auto BitWidth = Ty->getBitWidth();
2090 APInt KnownZero1(BitWidth, 0);
2091 APInt KnownOne1(BitWidth, 0);
2092 computeKnownBits(V1, KnownZero1, KnownOne1, DL, 0, Q);
2093 APInt KnownZero2(BitWidth, 0);
2094 APInt KnownOne2(BitWidth, 0);
2095 computeKnownBits(V2, KnownZero2, KnownOne2, DL, 0, Q);
2097 auto OppositeBits = (KnownZero1 & KnownOne2) | (KnownZero2 & KnownOne1);
2098 if (OppositeBits.getBoolValue())
2104 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
2105 /// simplify operations downstream. Mask is known to be zero for bits that V cannot have.
2108 /// This function is defined on values with integer type, values with pointer
2109 /// type, and vectors of integers. In the case
2110 /// where V is a vector, the mask, known zero, and known one values are the
2111 /// same width as the vector element, and the bit is set only if it is true
2112 /// for all of the elements in the vector.
2113 bool MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
2114 unsigned Depth, const Query &Q) {
2115 APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
2116 computeKnownBits(V, KnownZero, KnownOne, DL, Depth, Q);
2117 return (KnownZero & Mask) == Mask;
2122 /// Return the number of times the sign bit of the register is replicated into
2123 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2124 /// (itself), but other cases can give us information. For example, immediately
2125 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2126 /// other, so we return 3.
2128 /// 'V' must have a scalar integer type.
2130 unsigned ComputeNumSignBits(Value *V, const DataLayout &DL, unsigned Depth,
2132 unsigned TyBits = DL.getTypeSizeInBits(V->getType()->getScalarType());
2134 unsigned FirstAnswer = 1;
2136 // Note that ConstantInt is handled by the general computeKnownBits case below.
2140 return 1; // Limit search depth.
2142 Operator *U = dyn_cast<Operator>(V);
2143 switch (Operator::getOpcode(V)) {
2145 case Instruction::SExt:
2146 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2147 return ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q) + Tmp;
2149 case Instruction::SDiv: {
2150 const APInt *Denominator;
2151 // sdiv X, C -> adds log(C) sign bits.
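// For example (hypothetical): an i32 numerator with 8 sign bits divided
// by the constant 16 gains floor(log2(16)) == 4 bits, for 12 sign bits,
// capped at the type width.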
2152 if (match(U->getOperand(1), m_APInt(Denominator))) {
2154 // Ignore non-positive denominator.
2155 if (!Denominator->isStrictlyPositive())
2158 // Calculate the incoming numerator bits.
2159 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
2161 // Add floor(log(C)) bits to the numerator bits.
2162 return std::min(TyBits, NumBits + Denominator->logBase2());
2167 case Instruction::SRem: {
2168 const APInt *Denominator;
2169 // srem X, C -> we know that the result is within [-C+1,C) when C is a
2170 // positive constant. This lets us put a lower bound on the number of sign bits.
2172 if (match(U->getOperand(1), m_APInt(Denominator))) {
2174 // Ignore non-positive denominator.
2175 if (!Denominator->isStrictlyPositive())
2178 // Calculate the incoming numerator bits. SRem by a positive constant
2179 // can't lower the number of sign bits.
2181 ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
2183 // Calculate the leading sign bit constraints by examining the
2184 // denominator. Given that the denominator is positive, there are two
2187 // 1. the numerator is positive. The result range is [0,C) and [0,C) u<
2188 // (1 << ceilLogBase2(C)).
2190 // 2. the numerator is negative. Then the result range is (-C,0] and
2191 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2193 // Thus a lower bound on the number of sign bits is `TyBits -
2194 // ceilLogBase2(C)`.
2196 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2197 return std::max(NumrBits, ResBits);
2202 case Instruction::AShr: {
2203 Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
2204 // ashr X, C -> adds C sign bits. Vectors too.
2206 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2207 Tmp += ShAmt->getZExtValue();
2208 if (Tmp > TyBits) Tmp = TyBits;
2212 case Instruction::Shl: {
2214 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2215 // shl destroys sign bits.
2216 Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
2217 Tmp2 = ShAmt->getZExtValue();
2218 if (Tmp2 >= TyBits || // Bad shift.
2219 Tmp2 >= Tmp) break; // Shifted all sign bits out.
2224 case Instruction::And:
2225 case Instruction::Or:
2226 case Instruction::Xor: // NOT is handled here.
2227 // Logical binary ops preserve the number of sign bits at the worst.
2228 Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
2230 Tmp2 = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q);
2231 FirstAnswer = std::min(Tmp, Tmp2);
2232 // We computed what we know about the sign bits as our first
2233 // answer. Now proceed to the generic code that uses
2234 // computeKnownBits, and pick whichever answer is better.
2238 case Instruction::Select:
2239 Tmp = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q);
2240 if (Tmp == 1) return 1; // Early out.
2241 Tmp2 = ComputeNumSignBits(U->getOperand(2), DL, Depth + 1, Q);
2242 return std::min(Tmp, Tmp2);
2244 case Instruction::Add:
2245 // Add can have at most one carry bit. Thus we know that the output
2246 // is, at worst, one more bit than the inputs.
2247 Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
2248 if (Tmp == 1) return 1; // Early out.
2250 // Special case decrementing a value (ADD X, -1):
2251 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2252 if (CRHS->isAllOnesValue()) {
2253 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2254 computeKnownBits(U->getOperand(0), KnownZero, KnownOne, DL, Depth + 1,
2257 // If the input is known to be 0 or 1, the output is 0/-1, which is all sign bits set.
2259 if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
2262 // If we are subtracting one from a positive number, there is no carry
2263 // out of the result.
2264 if (KnownZero.isNegative())
2268 Tmp2 = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q);
2269 if (Tmp2 == 1) return 1;
2270 return std::min(Tmp, Tmp2)-1;
2272 case Instruction::Sub:
2273 Tmp2 = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q);
2274 if (Tmp2 == 1) return 1;
2277 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2278 if (CLHS->isNullValue()) {
2279 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2280 computeKnownBits(U->getOperand(1), KnownZero, KnownOne, DL, Depth + 1,
2282 // If the input is known to be 0 or 1, the output is 0/-1, which is all sign bits set.
2284 if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
2287 // If the input is known to be positive (the sign bit is known clear),
2288 // the output of the NEG has the same number of sign bits as the input.
2289 if (KnownZero.isNegative())
2292 // Otherwise, we treat this like a SUB.
2295 // Sub can have at most one carry bit. Thus we know that the output
2296 // is, at worst, one more bit than the inputs.
2297 Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q);
2298 if (Tmp == 1) return 1; // Early out.
2299 return std::min(Tmp, Tmp2)-1;
2301 case Instruction::PHI: {
2302 PHINode *PN = cast<PHINode>(U);
2303 unsigned NumIncomingValues = PN->getNumIncomingValues();
2304 // Don't analyze large in-degree PHIs.
2305 if (NumIncomingValues > 4) break;
2306 // Unreachable blocks may have zero-operand PHI nodes.
2307 if (NumIncomingValues == 0) break;
2309 // Take the minimum of all incoming values. This can't infinitely loop
2310 // because of our depth threshold.
2311 Tmp = ComputeNumSignBits(PN->getIncomingValue(0), DL, Depth + 1, Q);
2312 for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2313 if (Tmp == 1) return Tmp;
2315 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), DL, Depth + 1, Q));
2320 case Instruction::Trunc:
2321 // FIXME: it's tricky to do anything useful for this, but it is an important
2322 // case for targets like X86.
2326 // Finally, if we can prove that the top bits of the result are 0's or 1's,
2327 // use this information.
2328 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
2330 computeKnownBits(V, KnownZero, KnownOne, DL, Depth, Q);
2332 if (KnownZero.isNegative()) { // sign bit is 0
2334 } else if (KnownOne.isNegative()) { // sign bit is 1;
2341 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
2342 // the number of identical bits in the top of the input value.
2344 Mask <<= Mask.getBitWidth()-TyBits;
2345 // Return # leading zeros. We use 'min' here in case Val was zero before
2346 // shifting. We don't want to return '64' as for an i32 "0".
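// For example (hypothetical known bits): for an i32 value with
// KnownZero = 0xFF000000 the sign bit is known zero, and the complement
// of that mask has 8 leading zeros, so at least the top 8 bits are copies
// of the sign bit.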
2347 return std::max(FirstAnswer, std::min(TyBits, Mask.countLeadingZeros()));
2350 /// This function computes the integer multiple of Base that equals V.
2351 /// If successful, it returns true and returns the multiple in
2352 /// Multiple. If unsuccessful, it returns false. It looks
2353 /// through SExt instructions only if LookThroughSExt is true.
2354 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2355 bool LookThroughSExt, unsigned Depth) {
2356 const unsigned MaxDepth = 6;
2358 assert(V && "No Value?");
2359 assert(Depth <= MaxDepth && "Limit Search Depth");
2360 assert(V->getType()->isIntegerTy() && "Not an integer type!");
2362 Type *T = V->getType();
2364 ConstantInt *CI = dyn_cast<ConstantInt>(V);
2374 ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2375 Constant *BaseVal = ConstantInt::get(T, Base);
2376 if (CO && CO == BaseVal) {
2378 Multiple = ConstantInt::get(T, 1);
2382 if (CI && CI->getZExtValue() % Base == 0) {
2383 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2387 if (Depth == MaxDepth) return false; // Limit search depth.
2389 Operator *I = dyn_cast<Operator>(V);
2390 if (!I) return false;
2392 switch (I->getOpcode()) {
2394 case Instruction::SExt:
2395 if (!LookThroughSExt) return false;
2396 // otherwise fall through to ZExt
2397 case Instruction::ZExt:
2398 return ComputeMultiple(I->getOperand(0), Base, Multiple,
2399 LookThroughSExt, Depth+1);
2400 case Instruction::Shl:
2401 case Instruction::Mul: {
2402 Value *Op0 = I->getOperand(0);
2403 Value *Op1 = I->getOperand(1);
2405 if (I->getOpcode() == Instruction::Shl) {
2406 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2407 if (!Op1CI) return false;
2408 // Turn Op0 << Op1 into Op0 * 2^Op1
2409 APInt Op1Int = Op1CI->getValue();
2410 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2411 APInt API(Op1Int.getBitWidth(), 0);
2412 API.setBit(BitToSet);
2413 Op1 = ConstantInt::get(V->getContext(), API);
2416 Value *Mul0 = nullptr;
2417 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2418 if (Constant *Op1C = dyn_cast<Constant>(Op1))
2419 if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2420 if (Op1C->getType()->getPrimitiveSizeInBits() <
2421 MulC->getType()->getPrimitiveSizeInBits())
2422 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2423 if (Op1C->getType()->getPrimitiveSizeInBits() >
2424 MulC->getType()->getPrimitiveSizeInBits())
2425 MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2427 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2428 Multiple = ConstantExpr::getMul(MulC, Op1C);
2432 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2433 if (Mul0CI->getValue() == 1) {
2434 // V == Base * Op1, so return Op1
2440 Value *Mul1 = nullptr;
2441 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2442 if (Constant *Op0C = dyn_cast<Constant>(Op0))
2443 if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2444 if (Op0C->getType()->getPrimitiveSizeInBits() <
2445 MulC->getType()->getPrimitiveSizeInBits())
2446 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2447 if (Op0C->getType()->getPrimitiveSizeInBits() >
2448 MulC->getType()->getPrimitiveSizeInBits())
2449 MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2451 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2452 Multiple = ConstantExpr::getMul(MulC, Op0C);
2456 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2457 if (Mul1CI->getValue() == 1) {
2458 // V == Base * Op0, so return Op0
2466 // We could not determine if V is a multiple of Base.
2470 /// Return true if we can prove that the specified FP value is never equal to -0.0.
2473 /// NOTE: this function will need to be revisited when we support non-default rounding modes.
2476 bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
2477 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
2478 return !CFP->getValueAPF().isNegZero();
2480 // FIXME: Magic number! At the least, this should be given a name because it's
2481 // used similarly in CannotBeOrderedLessThanZero(). A better fix may be to
2482 // expose it as a parameter, so it can be used for testing / experimenting.
2484 return false; // Limit search depth.
2486 const Operator *I = dyn_cast<Operator>(V);
2487 if (!I) return false;
2489 // Check if the nsz fast-math flag is set
2490 if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
2491 if (FPO->hasNoSignedZeros())
2494 // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
2495 if (I->getOpcode() == Instruction::FAdd)
2496 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1)))
2497 if (CFP->isNullValue())
2500 // sitofp and uitofp turn into +0.0 for zero.
2501 if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
2504 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
2505 // sqrt(-0.0) = -0.0, no other negative results are possible.
2506 if (II->getIntrinsicID() == Intrinsic::sqrt)
2507 return CannotBeNegativeZero(II->getArgOperand(0), Depth+1);
2509 if (const CallInst *CI = dyn_cast<CallInst>(I))
2510 if (const Function *F = CI->getCalledFunction()) {
2511 if (F->isDeclaration()) {
2513 if (F->getName() == "abs") return true;
2514 // fabs[lf](x) != -0.0
2515 if (F->getName() == "fabs") return true;
2516 if (F->getName() == "fabsf") return true;
2517 if (F->getName() == "fabsl") return true;
2518 if (F->getName() == "sqrt" || F->getName() == "sqrtf" ||
2519 F->getName() == "sqrtl")
2520 return CannotBeNegativeZero(CI->getArgOperand(0), Depth+1);
2527 bool llvm::CannotBeOrderedLessThanZero(const Value *V, unsigned Depth) {
2528 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
2529 return !CFP->getValueAPF().isNegative() || CFP->getValueAPF().isZero();
2531 // FIXME: Magic number! At the least, this should be given a name because it's
2532 // used similarly in CannotBeNegativeZero(). A better fix may be to
2533 // expose it as a parameter, so it can be used for testing / experimenting.
2535 return false; // Limit search depth.
2537 const Operator *I = dyn_cast<Operator>(V);
2538 if (!I) return false;
2540 switch (I->getOpcode()) {
2542 case Instruction::FMul:
2543 // x*x is always non-negative or a NaN.
2544 if (I->getOperand(0) == I->getOperand(1))
2547 case Instruction::FAdd:
2548 case Instruction::FDiv:
2549 case Instruction::FRem:
2550 return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1) &&
2551 CannotBeOrderedLessThanZero(I->getOperand(1), Depth+1);
2552 case Instruction::FPExt:
2553 case Instruction::FPTrunc:
2554 // Widening/narrowing never change sign.
2555 return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1);
2556 case Instruction::Call:
2557 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
2558 switch (II->getIntrinsicID()) {
2560 case Intrinsic::exp:
2561 case Intrinsic::exp2:
2562 case Intrinsic::fabs:
2563 case Intrinsic::sqrt:
2565 case Intrinsic::powi:
2566 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
2567 // powi(x,n) is non-negative if n is even.
2568 if (CI->getBitWidth() <= 64 && CI->getSExtValue() % 2u == 0)
2571 return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1);
2572 case Intrinsic::fma:
2573 case Intrinsic::fmuladd:
2574 // x*x+y is non-negative if y is non-negative.
2575 return I->getOperand(0) == I->getOperand(1) &&
2576 CannotBeOrderedLessThanZero(I->getOperand(2), Depth+1);
2583 /// If the specified value can be set by repeating the same byte in memory,
2584 /// return the i8 value that it is represented with. This is
2585 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
2586 /// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
2587 /// byte store (e.g. i16 0x1234), return null.
2588 Value *llvm::isBytewiseValue(Value *V) {
2589 // All byte-wide stores are splatable, even of arbitrary variables.
2590 if (V->getType()->isIntegerTy(8)) return V;
2592 // Handle 'null' ConstantAggregateZero etc.
2593 if (Constant *C = dyn_cast<Constant>(V))
2594 if (C->isNullValue())
2595 return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
2597 // Constant float and double values can be handled as integer values if the
2598 // corresponding integer value is "byteable". An important case is 0.0.
2599 if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2600 if (CFP->getType()->isFloatTy())
2601 V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
2602 if (CFP->getType()->isDoubleTy())
2603 V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
2604 // Don't handle long double formats, which have strange constraints.
2607 // We can handle constant integers whose width is a multiple of 8 bits.
2608 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
2609 if (CI->getBitWidth() % 8 == 0) {
2610 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
2612 if (!CI->getValue().isSplat(8))
2614 return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
2618 // A ConstantDataArray/Vector is splatable if all its members are equal and also splatable.
2620 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
2621 Value *Elt = CA->getElementAsConstant(0);
2622 Value *Val = isBytewiseValue(Elt);
2626 for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
2627 if (CA->getElementAsConstant(I) != Elt)
2633 // Conceptually, we could handle things like:
2634 // %a = zext i8 %X to i16
2635 // %b = shl i16 %a, 8
2636 // %c = or i16 %a, %b
2637 // but until there is an example that actually needs this, it doesn't seem
2638 // worth worrying about.
2643 // This is the recursive version of BuildSubAggregate. It takes a few different
2644 // arguments. Idxs is the index within the nested struct From that we are
2645 // looking at now (which is of type IndexedType). IdxSkip is the number of
2646 // indices from Idxs that should be left out when inserting into the resulting
2647 // struct. To is the result struct built so far; new insertvalue instructions build on it.
2649 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
2650 SmallVectorImpl<unsigned> &Idxs,
2652 Instruction *InsertBefore) {
2653 llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType);
2655 // Save the original To argument so we can modify it
2657 // General case, the type indexed by Idxs is a struct
2658 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2659 // Process each struct element recursively
2662 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
2666 // Couldn't find any inserted value for this index? Cleanup
2667 while (PrevTo != OrigTo) {
2668 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
2669 PrevTo = Del->getAggregateOperand();
2670 Del->eraseFromParent();
2672 // Stop processing elements
2676 // If we successfully found a value for each of our subaggregates
2680 // Base case, the type indexed by SourceIdxs is not a struct, or not all of
2681 // the struct's elements had a value that was inserted directly. In the latter
2682 // case, perhaps we can't determine each of the subelements individually, but
2683 // we might be able to find the complete struct somewhere.
2685 // Find the value that is at that particular spot
2686 Value *V = FindInsertedValue(From, Idxs);
2691 // Insert the value into the new (sub) aggregate.
2692 return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
2693 "tmp", InsertBefore);
2696 // This helper takes a nested struct and extracts a part of it (which is again a
2697 // struct) into a new value. For example, given the struct:
2698 // { a, { b, { c, d }, e } }
2699 // and the indices "1, 1" this returns the substruct { c, d }.
2702 // It does this by inserting an insertvalue for each element in the resulting
2703 // struct, as opposed to just inserting a single struct. This will only work if
2704 // each of the elements of the substruct is known (i.e., inserted into From by an
2705 // insertvalue instruction somewhere).
2707 // All inserted insertvalue instructions are inserted before InsertBefore
2708 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
2709 Instruction *InsertBefore) {
2710 assert(InsertBefore && "Must have someplace to insert!");
2711 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
2713 Value *To = UndefValue::get(IndexedType);
2714 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
2715 unsigned IdxSkip = Idxs.size();
2717 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
2720 /// Given an aggregate and a sequence of indices, see if
2721 /// the scalar value indexed is already around as a register, for example if it
2722 /// were inserted directly into the aggregate.
2724 /// If InsertBefore is not null, this function will duplicate (modified)
2725 /// insertvalues when a part of a nested struct is extracted.
2726 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
2727 Instruction *InsertBefore) {
2728 // Nothing to index? Just return V then (this is useful at the end of our recursion).
2730 if (idx_range.empty())
2732 // We have indices, so V should have an indexable type.
2733 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
2734 "Not looking at a struct or array?");
2735 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
2736 "Invalid indices for type?");
2738 if (Constant *C = dyn_cast<Constant>(V)) {
2739 C = C->getAggregateElement(idx_range[0]);
2740 if (!C) return nullptr;
2741 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
2744 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
2745 // Loop over the indices of the insertvalue instruction in parallel with
2746 // the requested indices.
2747 const unsigned *req_idx = idx_range.begin();
2748 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
2749 i != e; ++i, ++req_idx) {
2750 if (req_idx == idx_range.end()) {
2751 // We can't handle this without inserting insertvalues
2755 // The requested index identifies a part of a nested aggregate. Handle
2756 // this specially. For example,
2757 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
2758 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
2759 // %C = extractvalue {i32, { i32, i32 } } %B, 1
2760 // This can be changed into
2761 // %A = insertvalue {i32, i32 } undef, i32 10, 0
2762 // %C = insertvalue {i32, i32 } %A, i32 11, 1
2763 // which allows the unused 0,0 element from the nested struct to be removed.
2765 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
2769 // This insertvalue inserts something other than what we are looking for.
2770 // See if the (aggregate) value inserted into has the value we are
2771 // looking for, then.
2773 return FindInsertedValue(I->getAggregateOperand(), idx_range,
2776 // If we end up here, the indices of the insertvalue match with those
2777 // requested (though possibly only partially). Now we recursively look at
2778 // the inserted value, passing any remaining indices.
2779 return FindInsertedValue(I->getInsertedValueOperand(),
2780 makeArrayRef(req_idx, idx_range.end()),
2784 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
2785 // If we're extracting a value from an aggregate that was extracted from
2786 // something else, we can extract from that something else directly instead.
2787 // However, we will need to chain I's indices with the requested indices.
2789 // Calculate the number of indices required
2790 unsigned size = I->getNumIndices() + idx_range.size();
2791 // Allocate some space to put the new indices in
2792 SmallVector<unsigned, 5> Idxs;
2794 // Add indices from the extract value instruction
2795 Idxs.append(I->idx_begin(), I->idx_end());
2797 // Add requested indices
2798 Idxs.append(idx_range.begin(), idx_range.end());
2800 assert(Idxs.size() == size
2801 && "Number of indices added not correct?");
2803 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
2805 // Otherwise, we don't know (such as extracting from a function return value
2806 // or load instruction)
2810 /// Analyze the specified pointer to see if it can be expressed as a base
2811 /// pointer plus a constant offset. Return the base and offset to the caller.
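/// For example (hypothetical IR, with 4-byte i32):
///   %q = getelementptr inbounds i32, i32* %p, i64 3
/// yields base %p and Offset = 12.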
2812 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
2813 const DataLayout &DL) {
2814 unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
2815 APInt ByteOffset(BitWidth, 0);
2817 if (Ptr->getType()->isVectorTy())
2820 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
2821 APInt GEPOffset(BitWidth, 0);
2822 if (!GEP->accumulateConstantOffset(DL, GEPOffset))
2825 ByteOffset += GEPOffset;
2827 Ptr = GEP->getPointerOperand();
2828 } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
2829 Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
2830 Ptr = cast<Operator>(Ptr)->getOperand(0);
2831 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
2832 if (GA->mayBeOverridden())
2834 Ptr = GA->getAliasee();
2839 Offset = ByteOffset.getSExtValue();
2844 /// This function extracts the constant null-terminated C string pointed to
2845 /// by V. If successful, it returns true and returns the string in Str.
2846 /// If unsuccessful, it returns false.
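/// For example (hypothetical IR): given
///   @s = constant [6 x i8] c"hello\00"
/// a call on a GEP of @s with indices 0 and 1 yields Str == "ello" once
/// the terminating nul is trimmed.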
2847 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
2848 uint64_t Offset, bool TrimAtNul) {
2851 // Look through bitcast instructions and geps.
2852 V = V->stripPointerCasts();
2854 // If the value is a GEP instruction or constant expression, treat it as an offset.
2856 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
2857 // Make sure the GEP has exactly three arguments.
2858 if (GEP->getNumOperands() != 3)
2861 // Make sure the GEP indexes into a pointer to an array of i8.
2862 PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
2863 ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
2864 if (!AT || !AT->getElementType()->isIntegerTy(8))
2867 // Check to make sure that the first operand of the GEP is an integer and
2868 // has value 0 so that we are sure we're indexing into the initializer.
2869 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
2870 if (!FirstIdx || !FirstIdx->isZero())
2873 // If the second index isn't a ConstantInt, then this is a variable index
2874 // into the array. If this occurs, we can't say anything meaningful about the string.
2876 uint64_t StartIdx = 0;
2877 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
2878 StartIdx = CI->getZExtValue();
2881 return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx + Offset,
2885 // The GEP instruction, constant or instruction, must reference a global
2886 // variable that is a constant and is initialized. The referenced constant
2887 // initializer is the array that we'll use for optimization.
2888 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
2889 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
2892 // Handle the all-zeros case
2893 if (GV->getInitializer()->isNullValue()) {
2894 // This is a degenerate case. The initializer is constant zero so the
2895 // length of the string must be zero.
2900 // Must be a ConstantDataArray.
2901 const ConstantDataArray *Array =
2902 dyn_cast<ConstantDataArray>(GV->getInitializer());
2903 if (!Array || !Array->isString())
2906 // Get the number of elements in the array
2907 uint64_t NumElts = Array->getType()->getArrayNumElements();
2909 // Start out with the entire array in the StringRef.
2910 Str = Array->getAsString();
2912 if (Offset > NumElts)
2915 // Skip over 'offset' bytes.
2916 Str = Str.substr(Offset);
2919 // Trim off the \0 and anything after it. If the array is not nul
2920 // terminated, we just return the rest of the string. The client may know
2921 // some other way that the string is length-bound.
2922 Str = Str.substr(0, Str.find('\0'));
2927 // These next two are very similar to the above, but also look through PHI nodes.
2929 // TODO: See if we can integrate these two together.
2931 /// If we can compute the length of the string pointed to by
2932 /// the specified pointer, return 'len+1'. If we can't, return 0.
2933 static uint64_t GetStringLengthH(Value *V, SmallPtrSetImpl<PHINode*> &PHIs) {
2934 // Look through noop bitcast instructions.
2935 V = V->stripPointerCasts();
2937 // If this is a PHI node, there are two cases: either we have already seen it or we haven't.
2939 if (PHINode *PN = dyn_cast<PHINode>(V)) {
2940 if (!PHIs.insert(PN).second)
2941 return ~0ULL; // already in the set.
2943 // If it was new, see if all the input strings are the same length.
2944 uint64_t LenSoFar = ~0ULL;
2945 for (Value *IncValue : PN->incoming_values()) {
2946 uint64_t Len = GetStringLengthH(IncValue, PHIs);
2947 if (Len == 0) return 0; // Unknown length -> unknown.
2949 if (Len == ~0ULL) continue;
2951 if (Len != LenSoFar && LenSoFar != ~0ULL)
2952 return 0; // Disagree -> unknown.
2956 // Success, all agree.
2960 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
2961 if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
2962 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs);
2963 if (Len1 == 0) return 0;
2964 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs);
2965 if (Len2 == 0) return 0;
2966 if (Len1 == ~0ULL) return Len2;
2967 if (Len2 == ~0ULL) return Len1;
2968 if (Len1 != Len2) return 0;
2972 // Otherwise, see if we can read the string.
2974 if (!getConstantStringInfo(V, StrData))
2977 return StrData.size()+1;
2980 /// If we can compute the length of the string pointed to by
2981 /// the specified pointer, return 'len+1'. If we can't, return 0.
2982 uint64_t llvm::GetStringLength(Value *V) {
2983 if (!V->getType()->isPointerTy()) return 0;
2985 SmallPtrSet<PHINode*, 32> PHIs;
2986 uint64_t Len = GetStringLengthH(V, PHIs);
2987 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
2988 // an empty string as a length.
2989 return Len == ~0ULL ? 1 : Len;
2992 /// \brief \p PN defines a loop-variant pointer to an object. Check if the
2993 /// previous iteration of the loop was referring to the same object as \p PN.
2994 static bool isSameUnderlyingObjectInLoop(PHINode *PN, LoopInfo *LI) {
2995 // Find the loop-defined value.
2996 Loop *L = LI->getLoopFor(PN->getParent());
2997 if (PN->getNumIncomingValues() != 2)
3000 // Find the value from previous iteration.
3001 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3002 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3003 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3004 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3007 // If a new pointer is loaded in the loop, the pointer references a different
3008 // object in every iteration, e.g. a loop that reloads p = a[i] on each trip.
3012 if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3013 if (!L->isLoopInvariant(Load->getPointerOperand()))
3018 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3019 unsigned MaxLookup) {
3020 if (!V->getType()->isPointerTy())
3022 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3023 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3024 V = GEP->getPointerOperand();
3025 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3026 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3027 V = cast<Operator>(V)->getOperand(0);
3028 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3029 if (GA->mayBeOverridden())
3031 V = GA->getAliasee();
3033 // See if InstructionSimplify knows any relevant tricks.
3034 if (Instruction *I = dyn_cast<Instruction>(V))
3035 // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3036 if (Value *Simplified = SimplifyInstruction(I, DL, nullptr)) {
3043 assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3048 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
3049 const DataLayout &DL, LoopInfo *LI,
3050 unsigned MaxLookup) {
3051 SmallPtrSet<Value *, 4> Visited;
3052 SmallVector<Value *, 4> Worklist;
3053 Worklist.push_back(V);
3055 Value *P = Worklist.pop_back_val();
3056 P = GetUnderlyingObject(P, DL, MaxLookup);
3058 if (!Visited.insert(P).second)
3061 if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
3062 Worklist.push_back(SI->getTrueValue());
3063 Worklist.push_back(SI->getFalseValue());
3067 if (PHINode *PN = dyn_cast<PHINode>(P)) {
3068 // If this PHI changes the underlying object in every iteration of the
3069 // loop, don't look through it. Consider:
3072 // Prev = Curr; // Prev = PHI (Prev_0, Curr)
3076 // Prev is tracking Curr one iteration behind so they refer to different
3077 // underlying objects.
3078 if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3079 isSameUnderlyingObjectInLoop(PN, LI))
3080 for (Value *IncValue : PN->incoming_values())
3081 Worklist.push_back(IncValue);
3085 Objects.push_back(P);
3086 } while (!Worklist.empty());
3089 /// Return true if the only users of this pointer are lifetime markers.
3090 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3091 for (const User *U : V->users()) {
3092 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3093 if (!II) return false;
3095 if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
3096 II->getIntrinsicID() != Intrinsic::lifetime_end)
3102 static bool isDereferenceableFromAttribute(const Value *BV, APInt Offset,
3103 Type *Ty, const DataLayout &DL,
3104 const Instruction *CtxI,
3105 const DominatorTree *DT,
3106 const TargetLibraryInfo *TLI) {
3107 assert(Offset.isNonNegative() && "offset can't be negative");
3108 assert(Ty->isSized() && "must be sized");
3110 APInt DerefBytes(Offset.getBitWidth(), 0);
3111 bool CheckForNonNull = false;
3112 if (const Argument *A = dyn_cast<Argument>(BV)) {
3113 DerefBytes = A->getDereferenceableBytes();
3114 if (!DerefBytes.getBoolValue()) {
3115 DerefBytes = A->getDereferenceableOrNullBytes();
3116 CheckForNonNull = true;
3118 } else if (auto CS = ImmutableCallSite(BV)) {
3119 DerefBytes = CS.getDereferenceableBytes(0);
3120 if (!DerefBytes.getBoolValue()) {
3121 DerefBytes = CS.getDereferenceableOrNullBytes(0);
3122 CheckForNonNull = true;
3124 } else if (const LoadInst *LI = dyn_cast<LoadInst>(BV)) {
3125 if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) {
3126 ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
3127 DerefBytes = CI->getLimitedValue();
3129 if (!DerefBytes.getBoolValue()) {
3131 LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
3132 ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
3133 DerefBytes = CI->getLimitedValue();
3135 CheckForNonNull = true;
3139 if (DerefBytes.getBoolValue())
3140 if (DerefBytes.uge(Offset + DL.getTypeStoreSize(Ty)))
3141 if (!CheckForNonNull || isKnownNonNullAt(BV, CtxI, DT, TLI))
3147 static bool isDereferenceableFromAttribute(const Value *V, const DataLayout &DL,
3148 const Instruction *CtxI,
3149 const DominatorTree *DT,
3150 const TargetLibraryInfo *TLI) {
  Type *VTy = V->getType();
  Type *Ty = VTy->getPointerElementType();
  if (!Ty->isSized())
    return false;

  APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0);
  return isDereferenceableFromAttribute(V, Offset, Ty, DL, CtxI, DT, TLI);
}
static bool isAligned(const Value *Base, APInt Offset, unsigned Align,
                      const DataLayout &DL) {
  APInt BaseAlign(Offset.getBitWidth(), getAlignment(Base, DL));

  if (!BaseAlign) {
    Type *Ty = Base->getType()->getPointerElementType();
    BaseAlign = DL.getABITypeAlignment(Ty);
  }

  APInt Alignment(Offset.getBitWidth(), Align);

  assert(Alignment.isPowerOf2() && "must be a power of 2!");
  return BaseAlign.uge(Alignment) && !(Offset & (Alignment - 1));
}
3175 static bool isAligned(const Value *Base, unsigned Align, const DataLayout &DL) {
3176 APInt Offset(DL.getTypeStoreSizeInBits(Base->getType()), 0);
  return isAligned(Base, Offset, Align, DL);
}
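// Worked example (illustrative): with a base known to be 16-byte aligned, an
// access at offset 4 satisfies Align == 4 (16 >= 4 and 4 & 3 == 0), but fails
// Align == 8, since 4 & 7 != 0.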
3180 /// Test if V is always a pointer to allocated and suitably aligned memory for
3181 /// a simple load or store.
3182 static bool isDereferenceableAndAlignedPointer(
3183 const Value *V, unsigned Align, const DataLayout &DL,
3184 const Instruction *CtxI, const DominatorTree *DT,
3185 const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited) {
3186 // Note that it is not safe to speculate into a malloc'd region because
3187 // malloc may return null.
3189 // These are obviously ok if aligned.
3190 if (isa<AllocaInst>(V))
3191 return isAligned(V, Align, DL);
3193 // It's not always safe to follow a bitcast, for example:
3194 // bitcast i8* (alloca i8) to i32*
3195 // would result in a 4-byte load from a 1-byte alloca. However,
  // if we're casting from a pointer to a larger type to a pointer to a
  // smaller (or equal-size) type, and the source alignment is at least as
  // large as the alignment of the resulting pointer type, then we can look
  // through the bitcast.
3200 if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
3201 Type *STy = BC->getSrcTy()->getPointerElementType(),
3202 *DTy = BC->getDestTy()->getPointerElementType();
3203 if (STy->isSized() && DTy->isSized() &&
3204 (DL.getTypeStoreSize(STy) >= DL.getTypeStoreSize(DTy)) &&
3205 (DL.getABITypeAlignment(STy) >= DL.getABITypeAlignment(DTy)))
3206 return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, DL,
                                                CtxI, DT, TLI, Visited);
  }
3210 // Global variables which can't collapse to null are ok.
3211 if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
3212 if (!GV->hasExternalWeakLinkage())
3213 return isAligned(V, Align, DL);
3215 // byval arguments are okay.
3216 if (const Argument *A = dyn_cast<Argument>(V))
3217 if (A->hasByValAttr())
3218 return isAligned(V, Align, DL);
3220 if (isDereferenceableFromAttribute(V, DL, CtxI, DT, TLI))
3221 return isAligned(V, Align, DL);
3223 // For GEPs, determine if the indexing lands within the allocated object.
3224 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3225 Type *VTy = GEP->getType();
3226 Type *Ty = VTy->getPointerElementType();
3227 const Value *Base = GEP->getPointerOperand();
    // Conservatively require that the base pointer be fully dereferenceable
    // and aligned.
    if (!Visited.insert(Base).second)
      return false;
    if (!isDereferenceableAndAlignedPointer(Base, Align, DL, CtxI, DT, TLI,
                                            Visited))
      return false;

    APInt Offset(DL.getPointerTypeSizeInBits(VTy), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset))
      return false;
3241 // Check if the load is within the bounds of the underlying object
3242 // and offset is aligned.
3243 uint64_t LoadSize = DL.getTypeStoreSize(Ty);
3244 Type *BaseType = Base->getType()->getPointerElementType();
3245 assert(isPowerOf2_32(Align) && "must be a power of 2!");
    return (Offset + LoadSize).ule(DL.getTypeAllocSize(BaseType)) &&
           !(Offset & APInt(Offset.getBitWidth(), Align - 1));
  }
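  // Worked example (illustrative, hypothetical IR): for
  //   %a = alloca [4 x i32]                                  ; 16 bytes
  //   %p = getelementptr inbounds [4 x i32], [4 x i32]* %a, i64 0, i64 2
  // the constant offset is 8, so a 4-byte load from %p with Align == 4 passes
  // the check above: 8 + 4 <= 16 and 8 & (4 - 1) == 0.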
3250 // For gc.relocate, look through relocations
3251 if (const IntrinsicInst *I = dyn_cast<IntrinsicInst>(V))
3252 if (I->getIntrinsicID() == Intrinsic::experimental_gc_relocate) {
3253 GCRelocateOperands RelocateInst(I);
      return isDereferenceableAndAlignedPointer(
          RelocateInst.getDerivedPtr(), Align, DL, CtxI, DT, TLI, Visited);
    }
3258 if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
3259 return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, DL,
3260 CtxI, DT, TLI, Visited);
  // If we don't know, assume the worst.
  return false;
}
3266 bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
3267 const DataLayout &DL,
3268 const Instruction *CtxI,
3269 const DominatorTree *DT,
3270 const TargetLibraryInfo *TLI) {
3271 // When dereferenceability information is provided by a dereferenceable
3272 // attribute, we know exactly how many bytes are dereferenceable. If we can
3273 // determine the exact offset to the attributed variable, we can use that
3274 // information here.
3275 Type *VTy = V->getType();
3276 Type *Ty = VTy->getPointerElementType();
  // Require ABI alignment for loads without an alignment specification.
  if (Align == 0)
    Align = DL.getABITypeAlignment(Ty);
3282 if (Ty->isSized()) {
3283 APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0);
3284 const Value *BV = V->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
    if (Offset.isNonNegative())
      if (isDereferenceableFromAttribute(BV, Offset, Ty, DL, CtxI, DT, TLI) &&
          isAligned(BV, Offset, Align, DL))
        return true;
  }

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Align, DL, CtxI, DT, TLI,
                                              Visited);
}
3297 bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL,
3298 const Instruction *CtxI,
3299 const DominatorTree *DT,
3300 const TargetLibraryInfo *TLI) {
  return isDereferenceableAndAlignedPointer(V, 1, DL, CtxI, DT, TLI);
}
3304 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3305 const Instruction *CtxI,
3306 const DominatorTree *DT,
3307 const TargetLibraryInfo *TLI) {
  const Operator *Inst = dyn_cast<Operator>(V);
  if (!Inst)
    return false;

  for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
    if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
      if (C->canTrap())
        return false;

  switch (Inst->getOpcode()) {
  default:
    return true;
3320 case Instruction::UDiv:
3321 case Instruction::URem: {
    // x / y is undefined if y == 0.
    const APInt *V;
    if (match(Inst->getOperand(1), m_APInt(V)))
      return *V != 0;
    return false;
  }
3328 case Instruction::SDiv:
3329 case Instruction::SRem: {
    // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
    const APInt *Numerator, *Denominator;
    if (!match(Inst->getOperand(1), m_APInt(Denominator)))
      return false;
    // We cannot hoist this division if the denominator is 0.
    if (*Denominator == 0)
      return false;
    // It's safe to hoist if the denominator is not 0 or -1.
    if (*Denominator != -1)
      return true;
    // At this point we know that the denominator is -1. It is safe to hoist
    // as long as we know that the numerator is not INT_MIN.
    if (match(Inst->getOperand(0), m_APInt(Numerator)))
      return !Numerator->isMinSignedValue();
    // The numerator *might* be MinSignedValue.
    return false;
  }
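  // For example (illustrative): sdiv i32 %x, 7 is safe to speculate, but
  // sdiv i32 %x, -1 is only safe when %x is known not to be INT_MIN, since
  // INT_MIN / -1 overflows.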
3347 case Instruction::Load: {
3348 const LoadInst *LI = cast<LoadInst>(Inst);
3349 if (!LI->isUnordered() ||
3350 // Speculative load may create a race that did not exist in the source.
3351 LI->getParent()->getParent()->hasFnAttribute(
3352 Attribute::SanitizeThread) ||
3353 // Speculative load may load data from dirty regions.
3354 LI->getParent()->getParent()->hasFnAttribute(
            Attribute::SanitizeAddress))
      return false;
    const DataLayout &DL = LI->getModule()->getDataLayout();
    return isDereferenceableAndAlignedPointer(
        LI->getPointerOperand(), LI->getAlignment(), DL, CtxI, DT, TLI);
  }
3361 case Instruction::Call: {
3362 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
3363 switch (II->getIntrinsicID()) {
3364 // These synthetic intrinsics have no side-effects and just mark
3365 // information about their operands.
3366 // FIXME: There are other no-op synthetic instructions that potentially
3367 // should be considered at least *safe* to speculate...
3368 case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
        return true;

3372 case Intrinsic::bswap:
3373 case Intrinsic::ctlz:
3374 case Intrinsic::ctpop:
3375 case Intrinsic::cttz:
3376 case Intrinsic::objectsize:
3377 case Intrinsic::sadd_with_overflow:
3378 case Intrinsic::smul_with_overflow:
3379 case Intrinsic::ssub_with_overflow:
3380 case Intrinsic::uadd_with_overflow:
3381 case Intrinsic::umul_with_overflow:
      case Intrinsic::usub_with_overflow:
        return true;
3384 // Sqrt should be OK, since the llvm sqrt intrinsic isn't defined to set
3385 // errno like libm sqrt would.
3386 case Intrinsic::sqrt:
3387 case Intrinsic::fma:
3388 case Intrinsic::fmuladd:
3389 case Intrinsic::fabs:
3390 case Intrinsic::minnum:
      case Intrinsic::maxnum:
        return true;
3393 // TODO: some fp intrinsics are marked as having the same error handling
3394 // as libm. They're safe to speculate when they won't error.
3395 // TODO: are convert_{from,to}_fp16 safe?
      // TODO: can we list target-specific intrinsics here?
      default:
        break;
      }
    }
    return false; // The called function could have undefined behavior or
                  // side-effects, even if marked readnone nounwind.
  }
3403 case Instruction::VAArg:
3404 case Instruction::Alloca:
3405 case Instruction::Invoke:
3406 case Instruction::PHI:
3407 case Instruction::Store:
3408 case Instruction::Ret:
3409 case Instruction::Br:
3410 case Instruction::IndirectBr:
3411 case Instruction::Switch:
3412 case Instruction::Unreachable:
3413 case Instruction::Fence:
3414 case Instruction::AtomicRMW:
3415 case Instruction::AtomicCmpXchg:
3416 case Instruction::LandingPad:
3417 case Instruction::Resume:
3418 case Instruction::CatchPad:
3419 case Instruction::CatchEndPad:
3420 case Instruction::CatchRet:
3421 case Instruction::CleanupPad:
3422 case Instruction::CleanupEndPad:
3423 case Instruction::CleanupRet:
3424 case Instruction::TerminatePad:
    return false; // Misc instructions which have effects.
  }
}
3429 bool llvm::mayBeMemoryDependent(const Instruction &I) {
  return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
}
3433 /// Return true if we know that the specified value is never null.
3434 bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) {
3435 assert(V->getType()->isPointerTy() && "V must be pointer type");
3437 // Alloca never returns null, malloc might.
3438 if (isa<AllocaInst>(V)) return true;
3440 // A byval, inalloca, or nonnull argument is never null.
3441 if (const Argument *A = dyn_cast<Argument>(V))
3442 return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr();
3444 // A global variable in address space 0 is non null unless extern weak.
3445 // Other address spaces may have null as a valid address for a global,
3446 // so we can't assume anything.
3447 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
3448 return !GV->hasExternalWeakLinkage() &&
3449 GV->getType()->getAddressSpace() == 0;
  // A load tagged with nonnull metadata is never null.
  if (const LoadInst *LI = dyn_cast<LoadInst>(V))
    return LI->getMetadata(LLVMContext::MD_nonnull);

  if (auto CS = ImmutableCallSite(V))
    if (CS.isReturnNonNull())
      return true;

  // operator new never returns null.
  if (isOperatorNewLikeFn(V, TLI, /*LookThroughBitCast=*/true))
    return true;

  return false;
}
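// Illustrative example (hypothetical IR): a load such as
//   %p = load i8*, i8** %pp, !nonnull !0
// is treated as never null by the routine above.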
3466 static bool isKnownNonNullFromDominatingCondition(const Value *V,
3467 const Instruction *CtxI,
3468 const DominatorTree *DT) {
3469 assert(V->getType()->isPointerTy() && "V must be pointer type");
3471 unsigned NumUsesExplored = 0;
3472 for (auto U : V->users()) {
3473 // Avoid massive lists
    if (NumUsesExplored >= DomConditionsMaxUses)
      break;
    NumUsesExplored++;
    // Consider only compare instructions uniquely controlling a branch.
    const ICmpInst *Cmp = dyn_cast<ICmpInst>(U);
    if (!Cmp)
      continue;

    if (DomConditionsSingleCmpUse && !Cmp->hasOneUse())
      continue;

    for (auto *CmpU : Cmp->users()) {
      const BranchInst *BI = dyn_cast<BranchInst>(CmpU);
      if (!BI)
        continue;

      assert(BI->isConditional() && "uses a comparison!");

3492 BasicBlock *NonNullSuccessor = nullptr;
3493 CmpInst::Predicate Pred;
3495 if (match(const_cast<ICmpInst*>(Cmp),
3496 m_c_ICmp(Pred, m_Specific(V), m_Zero()))) {
3497 if (Pred == ICmpInst::ICMP_EQ)
3498 NonNullSuccessor = BI->getSuccessor(1);
        else if (Pred == ICmpInst::ICMP_NE)
          NonNullSuccessor = BI->getSuccessor(0);
      }

      if (NonNullSuccessor) {
        BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
        if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
          return true;
      }
    }
  }

  return false;
}
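// Illustrative example (hypothetical IR): given
//   %cond = icmp eq i8* %p, null
//   br i1 %cond, label %is_null, label %not_null
// any context instruction dominated by the edge into %not_null sees %p as
// non-null.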
3514 bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI,
3515 const DominatorTree *DT, const TargetLibraryInfo *TLI) {
  if (isKnownNonNull(V, TLI))
    return true;

  return CtxI ? ::isKnownNonNullFromDominatingCondition(V, CtxI, DT) : false;
}
3522 OverflowResult llvm::computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
3523 const DataLayout &DL,
3524 AssumptionCache *AC,
3525 const Instruction *CxtI,
3526 const DominatorTree *DT) {
3527 // Multiplying n * m significant bits yields a result of n + m significant
3528 // bits. If the total number of significant bits does not exceed the
3529 // result bit width (minus 1), there is no overflow.
3530 // This means if we have enough leading zero bits in the operands
3531 // we can guarantee that the result does not overflow.
3532 // Ref: "Hacker's Delight" by Henry Warren
3533 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3534 APInt LHSKnownZero(BitWidth, 0);
3535 APInt LHSKnownOne(BitWidth, 0);
3536 APInt RHSKnownZero(BitWidth, 0);
3537 APInt RHSKnownOne(BitWidth, 0);
  computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
                   DT);
  computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
                   DT);
3542 // Note that underestimating the number of zero bits gives a more
3543 // conservative answer.
3544 unsigned ZeroBits = LHSKnownZero.countLeadingOnes() +
3545 RHSKnownZero.countLeadingOnes();
3546 // First handle the easy case: if we have enough zero bits there's
3547 // definitely no overflow.
3548 if (ZeroBits >= BitWidth)
3549 return OverflowResult::NeverOverflows;
3551 // Get the largest possible values for each operand.
3552 APInt LHSMax = ~LHSKnownZero;
3553 APInt RHSMax = ~RHSKnownZero;
  // We know the multiply operation doesn't overflow if the maximum values for
  // each operand will not overflow after we multiply them together.
  bool MaxOverflow;
  LHSMax.umul_ov(RHSMax, MaxOverflow);
  if (!MaxOverflow)
    return OverflowResult::NeverOverflows;

  // We know it always overflows if multiplying the smallest possible values
  // for the operands also results in overflow.
  bool MinOverflow;
  LHSKnownOne.umul_ov(RHSKnownOne, MinOverflow);
  if (MinOverflow)
    return OverflowResult::AlwaysOverflows;

  return OverflowResult::MayOverflow;
}
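// Worked example (illustrative): for i8 operands with LHS known <= 7 (at
// least 5 leading zero bits) and RHS known <= 15 (at least 4 leading zero
// bits), ZeroBits is 5 + 4 = 9 >= 8, and indeed 7 * 15 = 105 fits in 8 bits.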
3572 OverflowResult llvm::computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
3573 const DataLayout &DL,
3574 AssumptionCache *AC,
3575 const Instruction *CxtI,
3576 const DominatorTree *DT) {
3577 bool LHSKnownNonNegative, LHSKnownNegative;
  ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
                 AC, CxtI, DT);
  if (LHSKnownNonNegative || LHSKnownNegative) {
    bool RHSKnownNonNegative, RHSKnownNegative;
    ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
                   AC, CxtI, DT);

    if (LHSKnownNegative && RHSKnownNegative) {
      // The sign bit is set in both cases: this MUST overflow.
      return OverflowResult::AlwaysOverflows;
    }

    if (LHSKnownNonNegative && RHSKnownNonNegative) {
      // The sign bit is clear in both cases: this CANNOT overflow.
      return OverflowResult::NeverOverflows;
    }
  }

  return OverflowResult::MayOverflow;
}
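// Worked example (illustrative): in i8 arithmetic, two operands with set sign
// bits are each >= 128, so their sum is >= 256 and must wrap; two operands
// with clear sign bits are each <= 127, so the sum is <= 254 and cannot wrap.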
3601 static OverflowResult computeOverflowForSignedAdd(
3602 Value *LHS, Value *RHS, AddOperator *Add, const DataLayout &DL,
3603 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT) {
  if (Add && Add->hasNoSignedWrap()) {
    return OverflowResult::NeverOverflows;
  }
3608 bool LHSKnownNonNegative, LHSKnownNegative;
3609 bool RHSKnownNonNegative, RHSKnownNegative;
  ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
                 AC, CxtI, DT);
  ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
                 AC, CxtI, DT);

  if ((LHSKnownNonNegative && RHSKnownNegative) ||
      (LHSKnownNegative && RHSKnownNonNegative)) {
    // The sign bits are opposite: this CANNOT overflow.
    return OverflowResult::NeverOverflows;
  }

  // The remaining code needs Add to be available. Early return if it is not.
  if (!Add)
    return OverflowResult::MayOverflow;
  // If the sign of Add is the same as at least one of the operands, this add
  // CANNOT overflow. This is particularly useful when the sum is
  // @llvm.assume'ed non-negative rather than proved so from analyzing its
  // operands.
  bool LHSOrRHSKnownNonNegative =
      (LHSKnownNonNegative || RHSKnownNonNegative);
  bool LHSOrRHSKnownNegative = (LHSKnownNegative || RHSKnownNegative);
  if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
    bool AddKnownNonNegative, AddKnownNegative;
    ComputeSignBit(Add, AddKnownNonNegative, AddKnownNegative, DL,
                   /*Depth=*/0, AC, CxtI, DT);
    if ((AddKnownNonNegative && LHSOrRHSKnownNonNegative) ||
        (AddKnownNegative && LHSOrRHSKnownNegative)) {
      return OverflowResult::NeverOverflows;
    }
  }

  return OverflowResult::MayOverflow;
}
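// For example (illustrative): an i8 addition of a known non-negative value
// (in [0, 127]) and a known negative value (in [-128, -1]) lands in
// [-128, 126], so the opposite-sign case above can never overflow.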
3645 OverflowResult llvm::computeOverflowForSignedAdd(AddOperator *Add,
3646 const DataLayout &DL,
3647 AssumptionCache *AC,
3648 const Instruction *CxtI,
3649 const DominatorTree *DT) {
3650 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
                                       Add, DL, AC, CxtI, DT);
}
3654 OverflowResult llvm::computeOverflowForSignedAdd(Value *LHS, Value *RHS,
3655 const DataLayout &DL,
3656 AssumptionCache *AC,
3657 const Instruction *CxtI,
3658 const DominatorTree *DT) {
  return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
}
3662 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
3663 // FIXME: This conservative implementation can be relaxed. E.g. most
3664 // atomic operations are guaranteed to terminate on most platforms
3665 // and most functions terminate.
3667 return !I->isAtomic() && // atomics may never succeed on some platforms
3668 !isa<CallInst>(I) && // could throw and might not terminate
3669 !isa<InvokeInst>(I) && // might not terminate and could throw to
3670 // non-successor (see bug 24185 for details).
3671 !isa<ResumeInst>(I) && // has no successors
         !isa<ReturnInst>(I);   // has no successors
}
bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
                                                  const Loop *L) {
3677 // The loop header is guaranteed to be executed for every iteration.
3679 // FIXME: Relax this constraint to cover all basic blocks that are
3680 // guaranteed to be executed at every iteration.
3681 if (I->getParent() != L->getHeader()) return false;
  for (const Instruction &LI : *L->getHeader()) {
    if (&LI == I) return true;
    if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
  }

  llvm_unreachable("Instruction not contained in its own parent basic block.");
}
3690 bool llvm::propagatesFullPoison(const Instruction *I) {
3691 switch (I->getOpcode()) {
3692 case Instruction::Add:
3693 case Instruction::Sub:
3694 case Instruction::Xor:
3695 case Instruction::Trunc:
3696 case Instruction::BitCast:
3697 case Instruction::AddrSpaceCast:
3698 // These operations all propagate poison unconditionally. Note that poison
3699 // is not any particular value, so xor or subtraction of poison with
    // itself still yields poison, not zero.
    return true;

3703 case Instruction::AShr:
3704 case Instruction::SExt:
3705 // For these operations, one bit of the input is replicated across
    // multiple output bits. A replicated poison bit is still poison.
    return true;

3709 case Instruction::Shl: {
3710 // Left shift *by* a poison value is poison. The number of
3711 // positions to shift is unsigned, so no negative values are
3712 // possible there. Left shift by zero places preserves poison. So
3713 // it only remains to consider left shift of poison by a positive
3714 // number of places.
3716 // A left shift by a positive number of places leaves the lowest order bit
3717 // non-poisoned. However, if such a shift has a no-wrap flag, then we can
    // make the poison operand violate that flag, yielding a fresh full-poison
    // value.
    auto *OBO = cast<OverflowingBinaryOperator>(I);
    return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap();
  }

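  // For example (illustrative): shl nuw i32 %poison, 1 yields full poison,
  // because some choice of the poison operand violates the nuw flag; without
  // the flag, the shift would leave a well-defined zero in the low bit.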
3724 case Instruction::Mul: {
3725 // A multiplication by zero yields a non-poison zero result, so we need to
3726 // rule out zero as an operand. Conservatively, multiplication by a
3727 // non-zero constant is not multiplication by zero.
3729 // Multiplication by a non-zero constant can leave some bits
3730 // non-poisoned. For example, a multiplication by 2 leaves the lowest
3731 // order bit unpoisoned. So we need to consider that.
3733 // Multiplication by 1 preserves poison. If the multiplication has a
3734 // no-wrap flag, then we can make the poison operand violate that flag
3735 // when multiplied by any integer other than 0 and 1.
3736 auto *OBO = cast<OverflowingBinaryOperator>(I);
    if (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) {
      for (Value *V : OBO->operands()) {
        if (auto *CI = dyn_cast<ConstantInt>(V)) {
          // A ConstantInt cannot yield poison, so we can assume that it is
          // the other operand that is poison.
          return !CI->isZero();
        }
      }
    }
    return false;
  }

  case Instruction::GetElementPtr:
    // A GEP implicitly represents a sequence of additions, subtractions,
    // truncations, sign extensions and multiplications. The multiplications
    // are by the non-zero sizes of some set of types, so we do not have to be
    // concerned with multiplication by zero. If the GEP is in-bounds, then
    // these operations are implicitly no-signed-wrap, so poison is propagated
    // by the arguments above for Add, Sub, Trunc, SExt and Mul.
    return cast<GEPOperator>(I)->isInBounds();

  default:
    return false;
  }
}
3763 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
3764 switch (I->getOpcode()) {
3765 case Instruction::Store:
3766 return cast<StoreInst>(I)->getPointerOperand();
3768 case Instruction::Load:
3769 return cast<LoadInst>(I)->getPointerOperand();
3771 case Instruction::AtomicCmpXchg:
3772 return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
3774 case Instruction::AtomicRMW:
3775 return cast<AtomicRMWInst>(I)->getPointerOperand();
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    return I->getOperand(1);

  default:
    return nullptr;
  }
}
3788 bool llvm::isKnownNotFullPoison(const Instruction *PoisonI) {
3789 // We currently only look for uses of poison values within the same basic
3790 // block, as that makes it easier to guarantee that the uses will be
3791 // executed given that PoisonI is executed.
3793 // FIXME: Expand this to consider uses beyond the same basic block. To do
  // this, look out for the distinction between post-dominance and strong
  // post-dominance.
  const BasicBlock *BB = PoisonI->getParent();

  // Set of instructions that we have proved will yield poison if PoisonI
  // is executed.
3800 SmallSet<const Value *, 16> YieldsPoison;
3801 YieldsPoison.insert(PoisonI);
  for (BasicBlock::const_iterator I = PoisonI->getIterator(), E = BB->end();
       I != E; ++I) {
3805 if (&*I != PoisonI) {
3806 const Value *NotPoison = getGuaranteedNonFullPoisonOp(&*I);
      if (NotPoison != nullptr && YieldsPoison.count(NotPoison)) return true;
      if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
        return false;
    }

3812 // Mark poison that propagates from I through uses of I.
    if (YieldsPoison.count(&*I)) {
      for (const User *User : I->users()) {
        const Instruction *UserI = cast<Instruction>(User);
        if (UserI->getParent() == BB && propagatesFullPoison(UserI))
          YieldsPoison.insert(User);
      }
    }
  }
  return false;
}
static bool isKnownNonNaN(Value *V, FastMathFlags FMF) {
  if (FMF.noNaNs())
    return true;

  if (auto *C = dyn_cast<ConstantFP>(V))
    return !C->isNaN();
  return false;
}

static bool isKnownNonZero(Value *V) {
  if (auto *C = dyn_cast<ConstantFP>(V))
    return !C->isZero();
  return false;
}
static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
                                              FastMathFlags FMF,
                                              Value *CmpLHS, Value *CmpRHS,
                                              Value *TrueVal, Value *FalseVal,
                                              Value *&LHS, Value *&RHS) {
  LHS = CmpLHS;
  RHS = CmpRHS;

  // If the predicate is an "or-equal" (FP) predicate, then signed zeroes may
  // give inconsistent results between implementations.
  //   (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
  //   minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
  // Therefore, we behave conservatively and only proceed if at least one of
  // the operands is known to not be zero, or if we don't care about signed
  // zeroes.
  switch (Pred) {
  default: break;
3855 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
3856 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
3857 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
3858 !isKnownNonZero(CmpRHS))
      return {SPF_UNKNOWN, SPNB_NA, false};
  }

3862 SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
3863 bool Ordered = false;
3865 // When given one NaN and one non-NaN input:
3866 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
3867 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
3868 // ordered comparison fails), which could be NaN or non-NaN.
3869 // so here we discover exactly what NaN behavior is required/accepted.
3870 if (CmpInst::isFPPredicate(Pred)) {
3871 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
3872 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
3874 if (LHSSafe && RHSSafe) {
3875 // Both operands are known non-NaN.
3876 NaNBehavior = SPNB_RETURNS_ANY;
    } else if (CmpInst::isOrdered(Pred)) {
      // An ordered comparison will return false when given a NaN, so it
      // returns the RHS.
      Ordered = true;
      if (LHSSafe)
        // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
        NaNBehavior = SPNB_RETURNS_NAN;
      else if (RHSSafe)
        NaNBehavior = SPNB_RETURNS_OTHER;
      else
        // Completely unsafe.
        return {SPF_UNKNOWN, SPNB_NA, false};
    } else {
      Ordered = false;
      // An unordered comparison will return true when given a NaN, so it
      // returns the LHS.
      if (LHSSafe)
        // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
        NaNBehavior = SPNB_RETURNS_OTHER;
      else if (RHSSafe)
        NaNBehavior = SPNB_RETURNS_NAN;
      else
        // Completely unsafe.
        return {SPF_UNKNOWN, SPNB_NA, false};
    }
  }
3904 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
3905 std::swap(CmpLHS, CmpRHS);
3906 Pred = CmpInst::getSwappedPredicate(Pred);
3907 if (NaNBehavior == SPNB_RETURNS_NAN)
3908 NaNBehavior = SPNB_RETURNS_OTHER;
3909 else if (NaNBehavior == SPNB_RETURNS_OTHER)
      NaNBehavior = SPNB_RETURNS_NAN;
    Ordered = !Ordered;
  }

3914 // ([if]cmp X, Y) ? X : Y
  if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
    switch (Pred) {
    default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
3918 case ICmpInst::ICMP_UGT:
3919 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
3920 case ICmpInst::ICMP_SGT:
3921 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
3922 case ICmpInst::ICMP_ULT:
3923 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
3924 case ICmpInst::ICMP_SLT:
3925 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
3926 case FCmpInst::FCMP_UGT:
3927 case FCmpInst::FCMP_UGE:
3928 case FCmpInst::FCMP_OGT:
3929 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
3930 case FCmpInst::FCMP_ULT:
3931 case FCmpInst::FCMP_ULE:
3932 case FCmpInst::FCMP_OLT:
    case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
    }
  }
3937 if (ConstantInt *C1 = dyn_cast<ConstantInt>(CmpRHS)) {
3938 if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
3939 (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {
3941 // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
3942 // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
3943 if (Pred == ICmpInst::ICMP_SGT && (C1->isZero() || C1->isMinusOne())) {
        return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
      }

3947 // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
3948 // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
3949 if (Pred == ICmpInst::ICMP_SLT && (C1->isZero() || C1->isOne())) {
        return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
      }
    }

3954 // Y >s C ? ~Y : ~C == ~Y <s ~C ? ~Y : ~C = SMIN(~Y, ~C)
    if (const auto *C2 = dyn_cast<ConstantInt>(FalseVal)) {
      if (C1->getType() == C2->getType() && ~C1->getValue() == C2->getValue() &&
          (match(TrueVal, m_Not(m_Specific(CmpLHS))) ||
           match(CmpLHS, m_Not(m_Specific(TrueVal))))) {
        LHS = TrueVal;
        RHS = FalseVal;
        return {SPF_SMIN, SPNB_NA, false};
      }
    }
  }

3966 // TODO: (X > 4) ? X : 5 --> (X >= 5) ? X : 5 --> MAX(X, 5)
  return {SPF_UNKNOWN, SPNB_NA, false};
}
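// Illustrative example (hypothetical IR): the matcher above recognizes
//   %c = icmp sgt i32 %a, %b
//   %m = select i1 %c, i32 %a, i32 %b
// as SPF_SMAX with LHS = %a and RHS = %b.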
3971 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
3972 Instruction::CastOps *CastOp) {
  CastInst *CI = dyn_cast<CastInst>(V1);
  Constant *C = dyn_cast<Constant>(V2);
  CastInst *CI2 = dyn_cast<CastInst>(V2);
  if (!CI)
    return nullptr;
  *CastOp = CI->getOpcode();

  if (CI2) {
    // If V1 and V2 are both the same cast from the same type, we can look
    // through them.
    if (CI2->getOpcode() == CI->getOpcode() &&
        CI2->getSrcTy() == CI->getSrcTy())
      return CI2->getOperand(0);
    return nullptr;
  }

  if (!C)
    return nullptr;
3991 if (isa<SExtInst>(CI) && CmpI->isSigned()) {
3992 Constant *T = ConstantExpr::getTrunc(C, CI->getSrcTy());
3993 // This is only valid if the truncated value can be sign-extended
    // back to the original value.
    if (ConstantExpr::getSExt(T, C->getType()) == C)
      return T;
    return nullptr;
  }
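  // For example (illustrative): comparing (sext i8 %x to i32) against i32 100
  // round-trips (trunc then sext yields 100 again), so the compare can be
  // rewritten on i8; a constant such as 300 would fail this round-trip check.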
3999 if (isa<ZExtInst>(CI) && CmpI->isUnsigned())
4000 return ConstantExpr::getTrunc(C, CI->getSrcTy());
4002 if (isa<TruncInst>(CI))
4003 return ConstantExpr::getIntegerCast(C, CI->getSrcTy(), CmpI->isSigned());
4005 if (isa<FPToUIInst>(CI))
4006 return ConstantExpr::getUIToFP(C, CI->getSrcTy(), true);
4008 if (isa<FPToSIInst>(CI))
4009 return ConstantExpr::getSIToFP(C, CI->getSrcTy(), true);
4011 if (isa<UIToFPInst>(CI))
4012 return ConstantExpr::getFPToUI(C, CI->getSrcTy(), true);
4014 if (isa<SIToFPInst>(CI))
4015 return ConstantExpr::getFPToSI(C, CI->getSrcTy(), true);
4017 if (isa<FPTruncInst>(CI))
4018 return ConstantExpr::getFPExtend(C, CI->getSrcTy(), true);
  if (isa<FPExtInst>(CI))
    return ConstantExpr::getFPTrunc(C, CI->getSrcTy(), true);

  return nullptr;
}
4026 SelectPatternResult llvm::matchSelectPattern(Value *V,
4027 Value *&LHS, Value *&RHS,
4028 Instruction::CastOps *CastOp) {
4029 SelectInst *SI = dyn_cast<SelectInst>(V);
4030 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
4032 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
4033 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
4035 CmpInst::Predicate Pred = CmpI->getPredicate();
4036 Value *CmpLHS = CmpI->getOperand(0);
4037 Value *CmpRHS = CmpI->getOperand(1);
4038 Value *TrueVal = SI->getTrueValue();
  Value *FalseVal = SI->getFalseValue();
  FastMathFlags FMF;
  if (isa<FPMathOperator>(CmpI))
    FMF = CmpI->getFastMathFlags();
4042 FMF = CmpI->getFastMathFlags();
  // Bail out early.
  if (CmpI->isEquality())
4046 return {SPF_UNKNOWN, SPNB_NA, false};
4048 // Deal with type mismatches.
4049 if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
4050 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp))
4051 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  cast<CastInst>(TrueVal)->getOperand(0), C,
                                  LHS, RHS);
4054 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp))
4055 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  C, cast<CastInst>(FalseVal)->getOperand(0),
                                  LHS, RHS);
  }
  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
                              LHS, RHS);
}
4063 ConstantRange llvm::getConstantRangeFromMetadata(MDNode &Ranges) {
4064 const unsigned NumRanges = Ranges.getNumOperands() / 2;
4065 assert(NumRanges >= 1 && "Must have at least one range!");
4066 assert(Ranges.getNumOperands() % 2 == 0 && "Must be a sequence of pairs");
4068 auto *FirstLow = mdconst::extract<ConstantInt>(Ranges.getOperand(0));
4069 auto *FirstHigh = mdconst::extract<ConstantInt>(Ranges.getOperand(1));
4071 ConstantRange CR(FirstLow->getValue(), FirstHigh->getValue());
  for (unsigned i = 1; i < NumRanges; ++i) {
    auto *Low = mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    auto *High = mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));

    // Note: unionWith will potentially create a range that contains values not
    // contained in any of the original N ranges.
    CR = CR.unionWith(ConstantRange(Low->getValue(), High->getValue()));
  }

  return CR;
}
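// For example (illustrative): !range metadata of { i8 0, i8 2, i8 64, i8 128 }
// describes [0, 2) and [64, 128); unionWith can widen the result to the single
// range [0, 128), which also admits values (such as 5) in neither input range.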
4085 /// Return true if "icmp Pred LHS RHS" is always true.
static bool isTruePredicate(CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
  if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
    return true;

  switch (Pred) {
  default:
    return false;

  case CmpInst::ICMP_SLT:
  case CmpInst::ICMP_SLE: {
    ConstantInt *CI;

4098 // LHS s< LHS +_{nsw} C if C > 0
4099 // LHS s<= LHS +_{nsw} C if C >= 0
4100 if (match(RHS, m_NSWAdd(m_Specific(LHS), m_ConstantInt(CI)))) {
      if (Pred == CmpInst::ICMP_SLT)
        return CI->getValue().isStrictlyPositive();
      return !CI->isNegative();
    }
    return false;
  }

  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_ULE: {
    ConstantInt *CI;

4112 // LHS u< LHS +_{nuw} C if C > 0
4113 // LHS u<= LHS +_{nuw} C if C >= 0
4114 if (match(RHS, m_NUWAdd(m_Specific(LHS), m_ConstantInt(CI)))) {
      if (Pred == CmpInst::ICMP_ULT)
        return CI->getValue().isStrictlyPositive();
      return !CI->isNegative();
    }
    return false;
  }
  }
}
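// For example (illustrative): with %y = add nsw i32 %x, 1, the predicate
// "%x s< %y" is always true because C == 1 is strictly positive; with C == 0
// only the non-strict "%x s<= %y" is guaranteed.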
/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
/// ALHS ARHS" is true.
static bool isImpliedCondOperands(CmpInst::Predicate Pred, Value *ALHS,
                                  Value *ARHS, Value *BLHS, Value *BRHS) {
  switch (Pred) {
  default:
    return false;

4132 case CmpInst::ICMP_SLT:
4133 case CmpInst::ICMP_SLE:
4134 return isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS) &&
4135 isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS);
4137 case CmpInst::ICMP_ULT:
4138 case CmpInst::ICMP_ULE:
    return isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS) &&
           isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS);
  }
}
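// For example (illustrative): "%x u< %y" implies "%x u< (%y +nuw 1)", since
// %x u<= %x trivially and %y u<= %y + 1 by the nuw rule in isTruePredicate.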
bool llvm::isImpliedCondition(Value *LHS, Value *RHS) {
  assert(LHS->getType() == RHS->getType() && "mismatched type");
  Type *OpTy = LHS->getType();
  assert(OpTy->getScalarType()->isIntegerTy(1));

  // LHS ==> RHS by definition
  if (LHS == RHS) return true;

  if (OpTy->isVectorTy())
    // TODO: extend the code below to handle vectors.
    return false;
  assert(OpTy->isIntegerTy(1) && "implied by above");

  ICmpInst::Predicate APred, BPred;
  Value *ALHS, *ARHS;
  Value *BLHS, *BRHS;

  if (!match(LHS, m_ICmp(APred, m_Value(ALHS), m_Value(ARHS))) ||
      !match(RHS, m_ICmp(BPred, m_Value(BLHS), m_Value(BRHS))))
    return false;

  if (APred == BPred)
    return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS);

  return false;
}