1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains routines that help analyze properties that chains of
11 // computations have.
13 //===----------------------------------------------------------------------===//
15 #include "llvm/Analysis/ValueTracking.h"
16 #include "llvm/ADT/SmallPtrSet.h"
17 #include "llvm/Analysis/InstructionSimplify.h"
18 #include "llvm/Analysis/MemoryBuiltins.h"
19 #include "llvm/IR/CallSite.h"
20 #include "llvm/IR/ConstantRange.h"
21 #include "llvm/IR/Constants.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/GetElementPtrTypeIterator.h"
24 #include "llvm/IR/GlobalAlias.h"
25 #include "llvm/IR/GlobalVariable.h"
26 #include "llvm/IR/Instructions.h"
27 #include "llvm/IR/IntrinsicInst.h"
28 #include "llvm/IR/LLVMContext.h"
29 #include "llvm/IR/Metadata.h"
30 #include "llvm/IR/Operator.h"
31 #include "llvm/IR/PatternMatch.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/MathExtras.h"
36 using namespace llvm::PatternMatch;
38 const unsigned MaxDepth = 6;
40 /// getBitWidth - Returns the bitwidth of the given scalar or pointer type (if
41 /// unknown returns 0). For vector types, returns the element type's bitwidth.
42 static unsigned getBitWidth(Type *Ty, const DataLayout *TD) {
43 if (unsigned BitWidth = Ty->getScalarSizeInBits())
46 return TD ? TD->getPointerTypeSizeInBits(Ty) : 0;
49 static void computeKnownBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
50 APInt &KnownZero, APInt &KnownOne,
51 APInt &KnownZero2, APInt &KnownOne2,
52 const DataLayout *TD, unsigned Depth) {
53 unsigned BitWidth = KnownZero.getBitWidth();
55 // If an initial sequence of bits in the result is not needed, the
56 // corresponding bits in the operands are not needed.
57 APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
58 llvm::computeKnownBits(Op0, LHSKnownZero, LHSKnownOne, TD, Depth+1);
59 llvm::computeKnownBits(Op1, KnownZero2, KnownOne2, TD, Depth+1);
61 // Carry in a 1 for a subtract, rather than a 0.
62 APInt CarryIn(BitWidth, 0);
64 // Sum = LHS + ~RHS + 1
65 std::swap(KnownZero2, KnownOne2);
69 APInt PossibleSumZero = ~LHSKnownZero + ~KnownZero2 + CarryIn;
70 APInt PossibleSumOne = LHSKnownOne + KnownOne2 + CarryIn;
72 // Compute known bits of the carry.
73 APInt CarryKnownZero = ~(PossibleSumZero ^ LHSKnownZero ^ KnownZero2);
74 APInt CarryKnownOne = PossibleSumOne ^ LHSKnownOne ^ KnownOne2;
76 // Compute set of known bits (where all three relevant bits are known).
77 APInt LHSKnown = LHSKnownZero | LHSKnownOne;
78 APInt RHSKnown = KnownZero2 | KnownOne2;
79 APInt CarryKnown = CarryKnownZero | CarryKnownOne;
80 APInt Known = LHSKnown & RHSKnown & CarryKnown;
82 assert((PossibleSumZero & Known) == (PossibleSumOne & Known) &&
83 "known bits of sum differ");
85 // Compute known bits of the result.
86 KnownZero = ~PossibleSumOne & Known;
87 KnownOne = PossibleSumOne & Known;
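// For example, with 4-bit operands where Op0 is known to be ?100 (either
// 0100 or 1100) and Op1 is exactly 0010, the minimum possible sum is 0110
// and the maximum is 1110, so the low three bits of the result are known
// to be 110 while the top bit remains unknown.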
89 // Are we still trying to solve for the sign bit?
90 if (!Known.isNegative()) {
92 // Adding two non-negative numbers, or subtracting a negative number from
93 // a non-negative one, can't wrap into negative.
94 if (LHSKnownZero.isNegative() && KnownZero2.isNegative())
95 KnownZero |= APInt::getSignBit(BitWidth);
96 // Adding two negative numbers, or subtracting a non-negative number from
97 // a negative one, can't wrap into non-negative.
98 else if (LHSKnownOne.isNegative() && KnownOne2.isNegative())
99 KnownOne |= APInt::getSignBit(BitWidth);
104 static void computeKnownBitsMul(Value *Op0, Value *Op1, bool NSW,
105 APInt &KnownZero, APInt &KnownOne,
106 APInt &KnownZero2, APInt &KnownOne2,
107 const DataLayout *TD, unsigned Depth) {
108 unsigned BitWidth = KnownZero.getBitWidth();
109 computeKnownBits(Op1, KnownZero, KnownOne, TD, Depth+1);
110 computeKnownBits(Op0, KnownZero2, KnownOne2, TD, Depth+1);
112 bool isKnownNegative = false;
113 bool isKnownNonNegative = false;
114 // If the multiplication is known not to overflow, compute the sign bit.
117 // The product of a number with itself is non-negative.
118 isKnownNonNegative = true;
120 bool isKnownNonNegativeOp1 = KnownZero.isNegative();
121 bool isKnownNonNegativeOp0 = KnownZero2.isNegative();
122 bool isKnownNegativeOp1 = KnownOne.isNegative();
123 bool isKnownNegativeOp0 = KnownOne2.isNegative();
124 // The product of two numbers with the same sign is non-negative.
125 isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
126 (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
127 // The product of a negative number and a non-negative number is either
128 // negative or zero.
129 if (!isKnownNonNegative)
130 isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
131 isKnownNonZero(Op0, TD, Depth)) ||
132 (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
133 isKnownNonZero(Op1, TD, Depth));
137 // If low bits are zero in either operand, output low known-0 bits.
138 // Also compute a conservative estimate for high known-0 bits.
139 // More trickiness is possible, but this is sufficient for the
140 // interesting case of alignment computation.
141 KnownOne.clearAllBits();
142 unsigned TrailZ = KnownZero.countTrailingOnes() +
143 KnownZero2.countTrailingOnes();
144 unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
145 KnownZero2.countLeadingOnes(),
146 BitWidth) - BitWidth;
148 TrailZ = std::min(TrailZ, BitWidth);
149 LeadZ = std::min(LeadZ, BitWidth);
150 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
151 APInt::getHighBitsSet(BitWidth, LeadZ);
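// For example, on i8: if Op0 is known to be a multiple of 4 below 16 and
// Op1 is known to be even and below 8, the product is a multiple of 8 below
// 128, so at least three low bits and one high bit are known zero.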
153 // Only make use of no-wrap flags if we failed to compute the sign bit
154 // directly. This matters if the multiplication always overflows, in
155 // which case we prefer to follow the result of the direct computation,
156 // though as the program is invoking undefined behaviour we can choose
157 // whatever we like here.
158 if (isKnownNonNegative && !KnownOne.isNegative())
159 KnownZero.setBit(BitWidth - 1);
160 else if (isKnownNegative && !KnownZero.isNegative())
161 KnownOne.setBit(BitWidth - 1);
164 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
166 unsigned BitWidth = KnownZero.getBitWidth();
167 unsigned NumRanges = Ranges.getNumOperands() / 2;
168 assert(NumRanges >= 1);
170 // Use the high end of the ranges to find leading zeros.
171 unsigned MinLeadingZeros = BitWidth;
172 for (unsigned i = 0; i < NumRanges; ++i) {
173 ConstantInt *Lower = cast<ConstantInt>(Ranges.getOperand(2*i + 0));
174 ConstantInt *Upper = cast<ConstantInt>(Ranges.getOperand(2*i + 1));
175 ConstantRange Range(Lower->getValue(), Upper->getValue());
176 if (Range.isWrappedSet())
177 MinLeadingZeros = 0; // -1 has no zeros
178 unsigned LeadingZeros = (Upper->getValue() - 1).countLeadingZeros();
179 MinLeadingZeros = std::min(LeadingZeros, MinLeadingZeros);
182 KnownZero = APInt::getHighBitsSet(BitWidth, MinLeadingZeros);
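// For example, a single !range of [0, 10) on an i32 load means the loaded
// value fits in 4 bits, so the top 28 bits are known zero. A wrapped range
// contains the all-ones value, so it contributes no known leading zeros.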
185 /// Determine which bits of V are known to be either zero or one and return
186 /// them in the KnownZero/KnownOne bit sets.
188 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
189 /// we cannot optimize based on the assumption that it is zero without changing
190 /// it to be an explicit zero. If we don't change it to zero, other code could
191 /// be optimized based on the contradictory assumption that it is non-zero.
192 /// Because instcombine aggressively folds operations with undef args anyway,
193 /// this won't lose us code quality.
195 /// This function is defined on values with integer type, values with pointer
196 /// type (but only if TD is non-null), and vectors of integers. In the case
197 /// where V is a vector, the known zero and known one values are the
198 /// same width as the vector element, and the bit is set only if it is true
199 /// for all of the elements in the vector.
200 void llvm::computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
201 const DataLayout *TD, unsigned Depth) {
202 assert(V && "No Value?");
203 assert(Depth <= MaxDepth && "Limit Search Depth");
204 unsigned BitWidth = KnownZero.getBitWidth();
206 assert((V->getType()->isIntOrIntVectorTy() ||
207 V->getType()->getScalarType()->isPointerTy()) &&
208 "Not integer or pointer type!");
210 TD->getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
211 (!V->getType()->isIntOrIntVectorTy() ||
212 V->getType()->getScalarSizeInBits() == BitWidth) &&
213 KnownZero.getBitWidth() == BitWidth &&
214 KnownOne.getBitWidth() == BitWidth &&
215 "V, KnownOne and KnownZero should have same BitWidth");
217 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
218 // We know all of the bits for a constant!
219 KnownOne = CI->getValue();
220 KnownZero = ~KnownOne;
223 // Null and aggregate-zero are all-zeros.
224 if (isa<ConstantPointerNull>(V) ||
225 isa<ConstantAggregateZero>(V)) {
226 KnownOne.clearAllBits();
227 KnownZero = APInt::getAllOnesValue(BitWidth);
230 // Handle a constant vector by taking the intersection of the known bits of
231 // each element. There is no real need to handle ConstantVector here, because
232 // we don't handle undef in any particularly useful way.
233 if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
234 // We know that CDS must be a vector of integers. Take the intersection of
235 // the known bits of each element.
236 KnownZero.setAllBits(); KnownOne.setAllBits();
237 APInt Elt(KnownZero.getBitWidth(), 0);
238 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
239 Elt = CDS->getElementAsInteger(i);
246 // The address of an aligned GlobalValue has trailing zeros.
247 if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
248 unsigned Align = GV->getAlignment();
249 if (Align == 0 && TD) {
250 if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV)) {
251 Type *ObjectType = GVar->getType()->getElementType();
252 if (ObjectType->isSized()) {
253 // If the object is defined in the current Module, we'll be giving
254 // it the preferred alignment. Otherwise, we have to assume that it
255 // may only have the minimum ABI alignment.
256 if (!GVar->isDeclaration() && !GVar->isWeakForLinker())
257 Align = TD->getPreferredAlignment(GVar);
259 Align = TD->getABITypeAlignment(ObjectType);
264 KnownZero = APInt::getLowBitsSet(BitWidth,
265 countTrailingZeros(Align));
267 KnownZero.clearAllBits();
268 KnownOne.clearAllBits();
271 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
272 // the bits of its aliasee.
273 if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
274 if (GA->mayBeOverridden()) {
275 KnownZero.clearAllBits(); KnownOne.clearAllBits();
277 computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, TD, Depth+1);
282 if (Argument *A = dyn_cast<Argument>(V)) {
283 unsigned Align = A->getType()->isPointerTy() ? A->getParamAlignment() : 0;
285 if (!Align && TD && A->hasStructRetAttr()) {
286 // An sret parameter has at least the ABI alignment of the return type.
287 Type *EltTy = cast<PointerType>(A->getType())->getElementType();
288 if (EltTy->isSized())
289 Align = TD->getABITypeAlignment(EltTy);
293 KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
297 // Start out not knowing anything.
298 KnownZero.clearAllBits(); KnownOne.clearAllBits();
300 if (Depth == MaxDepth)
301 return; // Limit search depth.
303 Operator *I = dyn_cast<Operator>(V);
306 APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
307 switch (I->getOpcode()) {
309 case Instruction::Load:
310 if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
311 computeKnownBitsFromRangeMetadata(*MD, KnownZero);
313 case Instruction::And: {
314 // If either the LHS or the RHS are Zero, the result is zero.
315 computeKnownBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
316 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
318 // Output known-1 bits are only known if set in both the LHS & RHS.
319 KnownOne &= KnownOne2;
320 // Output known-0 are known to be clear if zero in either the LHS | RHS.
321 KnownZero |= KnownZero2;
324 case Instruction::Or: {
325 computeKnownBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
326 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
328 // Output known-0 bits are only known if clear in both the LHS & RHS.
329 KnownZero &= KnownZero2;
330 // Output known-1 are known to be set if set in either the LHS | RHS.
331 KnownOne |= KnownOne2;
334 case Instruction::Xor: {
335 computeKnownBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
336 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
338 // Output known-0 bits are known if clear or set in both the LHS & RHS.
339 APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
340 // Output known-1 are known to be set if set in only one of the LHS, RHS.
341 KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
342 KnownZero = KnownZeroOut;
345 case Instruction::Mul: {
346 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
347 computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW,
348 KnownZero, KnownOne, KnownZero2, KnownOne2, TD, Depth);
351 case Instruction::UDiv: {
352 // For the purposes of computing leading zeros we can conservatively
353 // treat a udiv as a logical right shift by the power of 2 known to
354 // be less than the denominator.
355 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
356 unsigned LeadZ = KnownZero2.countLeadingOnes();
358 KnownOne2.clearAllBits();
359 KnownZero2.clearAllBits();
360 computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, TD, Depth+1);
361 unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
362 if (RHSUnknownLeadingOnes != BitWidth)
363 LeadZ = std::min(BitWidth,
364 LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
366 KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
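// For example, on i8: if the numerator is known to be below 64 (two leading
// zeros) and the denominator has bit 4 known set (so it is at least 16), the
// quotient is below 4, i.e. it has at least six leading zero bits.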
369 case Instruction::Select:
370 computeKnownBits(I->getOperand(2), KnownZero, KnownOne, TD, Depth+1);
371 computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, TD,
374 // Only known if known in both the LHS and RHS.
375 KnownOne &= KnownOne2;
376 KnownZero &= KnownZero2;
378 case Instruction::FPTrunc:
379 case Instruction::FPExt:
380 case Instruction::FPToUI:
381 case Instruction::FPToSI:
382 case Instruction::SIToFP:
383 case Instruction::UIToFP:
384 break; // Can't work with floating point.
385 case Instruction::PtrToInt:
386 case Instruction::IntToPtr:
387 case Instruction::AddrSpaceCast: // Pointers could be different sizes.
388 // We can't handle these if we don't know the pointer size.
390 // FALL THROUGH and handle them the same as zext/trunc.
391 case Instruction::ZExt:
392 case Instruction::Trunc: {
393 Type *SrcTy = I->getOperand(0)->getType();
395 unsigned SrcBitWidth;
396 // Note that we handle pointer operands here because of inttoptr/ptrtoint
397 // which fall through here.
399 SrcBitWidth = TD->getTypeSizeInBits(SrcTy->getScalarType());
401 SrcBitWidth = SrcTy->getScalarSizeInBits();
402 if (!SrcBitWidth) break;
405 assert(SrcBitWidth && "SrcBitWidth can't be zero");
406 KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
407 KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
408 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
409 KnownZero = KnownZero.zextOrTrunc(BitWidth);
410 KnownOne = KnownOne.zextOrTrunc(BitWidth);
411 // Any top bits are known to be zero.
412 if (BitWidth > SrcBitWidth)
413 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
416 case Instruction::BitCast: {
417 Type *SrcTy = I->getOperand(0)->getType();
418 if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
419 // TODO: For now, not handling conversions like:
420 // (bitcast i64 %x to <2 x i32>)
421 !I->getType()->isVectorTy()) {
422 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
427 case Instruction::SExt: {
428 // Compute the bits in the result that are not present in the input.
429 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
431 KnownZero = KnownZero.trunc(SrcBitWidth);
432 KnownOne = KnownOne.trunc(SrcBitWidth);
433 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
434 KnownZero = KnownZero.zext(BitWidth);
435 KnownOne = KnownOne.zext(BitWidth);
437 // If the sign bit of the input is known set or clear, then we know the
438 // top bits of the result.
439 if (KnownZero[SrcBitWidth-1]) // Input sign bit known zero
440 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
441 else if (KnownOne[SrcBitWidth-1]) // Input sign bit known set
442 KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
445 case Instruction::Shl:
446 // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
447 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
448 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
449 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
450 KnownZero <<= ShiftAmt;
451 KnownOne <<= ShiftAmt;
452 KnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt); // low bits known 0
456 case Instruction::LShr:
457 // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
458 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
459 // Compute the new bits that are at the top now.
460 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
462 // Unsigned shift right.
463 computeKnownBits(I->getOperand(0), KnownZero,KnownOne, TD, Depth+1);
464 KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
465 KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
466 // high bits known zero.
467 KnownZero |= APInt::getHighBitsSet(BitWidth, ShiftAmt);
471 case Instruction::AShr:
472 // (ashr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
473 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
474 // Compute the new bits that are at the top now.
475 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
477 // Signed shift right.
478 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
479 KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
480 KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
482 APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
483 if (KnownZero[BitWidth-ShiftAmt-1]) // New bits are known zero.
484 KnownZero |= HighBits;
485 else if (KnownOne[BitWidth-ShiftAmt-1]) // New bits are known one.
486 KnownOne |= HighBits;
490 case Instruction::Sub: {
491 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
492 computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
493 KnownZero, KnownOne, KnownZero2, KnownOne2, TD,
497 case Instruction::Add: {
498 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
499 computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
500 KnownZero, KnownOne, KnownZero2, KnownOne2, TD,
504 case Instruction::SRem:
505 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
506 APInt RA = Rem->getValue().abs();
507 if (RA.isPowerOf2()) {
508 APInt LowBits = RA - 1;
509 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
511 // The low bits of the first operand are unchanged by the srem.
512 KnownZero = KnownZero2 & LowBits;
513 KnownOne = KnownOne2 & LowBits;
515 // If the first operand is non-negative or has all low bits zero, then
516 // the upper bits are all zero.
517 if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
518 KnownZero |= ~LowBits;
520 // If the first operand is negative and not all low bits are zero, then
521 // the upper bits are all one.
522 if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
523 KnownOne |= ~LowBits;
525 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
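// For example, "x srem 8" passes the low three bits of x through unchanged;
// if x is known non-negative the result lies in [0, 7] and the upper bits
// are known zero, and if x is known negative with a known nonzero low part
// the result is negative and the upper bits are known one.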
529 // The sign bit is the LHS's sign bit, except when the result of the
530 // remainder is zero.
531 if (KnownZero.isNonNegative()) {
532 APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
533 computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, TD,
535 // If it's known zero, our sign bit is also zero.
536 if (LHSKnownZero.isNegative())
537 KnownZero.setBit(BitWidth - 1);
541 case Instruction::URem: {
542 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
543 APInt RA = Rem->getValue();
544 if (RA.isPowerOf2()) {
545 APInt LowBits = (RA - 1);
546 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD,
548 KnownZero |= ~LowBits;
554 // Since the result is less than or equal to either operand, any leading
555 // zero bits in either operand must also exist in the result.
556 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
557 computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, TD, Depth+1);
559 unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
560 KnownZero2.countLeadingOnes());
561 KnownOne.clearAllBits();
562 KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
566 case Instruction::Alloca: {
567 AllocaInst *AI = cast<AllocaInst>(V);
568 unsigned Align = AI->getAlignment();
569 if (Align == 0 && TD)
570 Align = TD->getABITypeAlignment(AI->getType()->getElementType());
573 KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
576 case Instruction::GetElementPtr: {
577 // Analyze all of the subscripts of this getelementptr instruction
578 // to determine if we can prove known low zero bits.
579 APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
580 computeKnownBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, TD,
582 unsigned TrailZ = LocalKnownZero.countTrailingOnes();
584 gep_type_iterator GTI = gep_type_begin(I);
585 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
586 Value *Index = I->getOperand(i);
587 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
588 // Handle struct member offset arithmetic.
594 // Handle case when index is vector zeroinitializer
595 Constant *CIndex = cast<Constant>(Index);
596 if (CIndex->isZeroValue())
599 if (CIndex->getType()->isVectorTy())
600 Index = CIndex->getSplatValue();
602 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
603 const StructLayout *SL = TD->getStructLayout(STy);
604 uint64_t Offset = SL->getElementOffset(Idx);
605 TrailZ = std::min<unsigned>(TrailZ,
606 countTrailingZeros(Offset));
608 // Handle array index arithmetic.
609 Type *IndexedTy = GTI.getIndexedType();
610 if (!IndexedTy->isSized()) {
614 unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
615 uint64_t TypeSize = TD ? TD->getTypeAllocSize(IndexedTy) : 1;
616 LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
617 computeKnownBits(Index, LocalKnownZero, LocalKnownOne, TD, Depth+1);
618 TrailZ = std::min(TrailZ,
619 unsigned(countTrailingZeros(TypeSize) +
620 LocalKnownZero.countTrailingOnes()));
624 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ);
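// For example, a GEP that indexes an array of i32 from a 16-byte-aligned
// base pointer produces an address with at least two trailing zero bits:
// the base contributes four, and each variable index contributes
// countTrailingZeros(4) == 2, so the minimum over all operands is 2.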
627 case Instruction::PHI: {
628 PHINode *P = cast<PHINode>(I);
629 // Handle the case of a simple two-predecessor recurrence PHI.
630 // There's a lot more that could theoretically be done here, but
631 // this is sufficient to catch some interesting cases.
632 if (P->getNumIncomingValues() == 2) {
633 for (unsigned i = 0; i != 2; ++i) {
634 Value *L = P->getIncomingValue(i);
635 Value *R = P->getIncomingValue(!i);
636 Operator *LU = dyn_cast<Operator>(L);
639 unsigned Opcode = LU->getOpcode();
640 // Check for operations that have the property that if
641 // both their operands have low zero bits, the result
642 // will have low zero bits.
643 if (Opcode == Instruction::Add ||
644 Opcode == Instruction::Sub ||
645 Opcode == Instruction::And ||
646 Opcode == Instruction::Or ||
647 Opcode == Instruction::Mul) {
648 Value *LL = LU->getOperand(0);
649 Value *LR = LU->getOperand(1);
650 // Find a recurrence.
657 // Ok, we have a PHI of the form L op= R. Check for low
658 // zero bits.
659 computeKnownBits(R, KnownZero2, KnownOne2, TD, Depth+1);
661 // We need to take the minimum number of known bits
662 APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
663 computeKnownBits(L, KnownZero3, KnownOne3, TD, Depth+1);
665 KnownZero = APInt::getLowBitsSet(BitWidth,
666 std::min(KnownZero2.countTrailingOnes(),
667 KnownZero3.countTrailingOnes()));
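// For example, for a counter "%i = phi [ 0, %entry ], [ %i.next, %loop ]"
// with "%i.next = add %i, 4", both the start value 0 and the step 4 have at
// least two known trailing zero bits, so %i is known to be a multiple of 4.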
673 // Unreachable blocks may have zero-operand PHI nodes.
674 if (P->getNumIncomingValues() == 0)
677 // Otherwise take the unions of the known bit sets of the operands,
678 // taking conservative care to avoid excessive recursion.
679 if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
680 // Skip if every incoming value references the PHI itself.
681 if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
684 KnownZero = APInt::getAllOnesValue(BitWidth);
685 KnownOne = APInt::getAllOnesValue(BitWidth);
686 for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i) {
687 // Skip direct self references.
688 if (P->getIncomingValue(i) == P) continue;
690 KnownZero2 = APInt(BitWidth, 0);
691 KnownOne2 = APInt(BitWidth, 0);
692 // Recurse, but cap the recursion to one level, because we don't
693 // want to waste time spinning around in loops.
694 computeKnownBits(P->getIncomingValue(i), KnownZero2, KnownOne2, TD,
696 KnownZero &= KnownZero2;
697 KnownOne &= KnownOne2;
698 // If all bits have been ruled out, there's no need to check
699 // more operands.
700 if (!KnownZero && !KnownOne)
706 case Instruction::Call:
707 case Instruction::Invoke:
708 if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
709 computeKnownBitsFromRangeMetadata(*MD, KnownZero);
710 // If a range metadata is attached to this IntrinsicInst, intersect the
711 // explicit range specified by the metadata and the implicit range of
712 // the intrinsic.
713 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
714 switch (II->getIntrinsicID()) {
716 case Intrinsic::ctlz:
717 case Intrinsic::cttz: {
718 unsigned LowBits = Log2_32(BitWidth)+1;
719 // If this call is undefined for 0, the result will be less than 2^n.
720 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
722 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
725 case Intrinsic::ctpop: {
726 unsigned LowBits = Log2_32(BitWidth)+1;
727 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
730 case Intrinsic::x86_sse42_crc32_64_64:
731 KnownZero |= APInt::getHighBitsSet(64, 32);
736 case Instruction::ExtractValue:
737 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
738 ExtractValueInst *EVI = cast<ExtractValueInst>(I);
739 if (EVI->getNumIndices() != 1) break;
740 if (EVI->getIndices()[0] == 0) {
741 switch (II->getIntrinsicID()) {
743 case Intrinsic::uadd_with_overflow:
744 case Intrinsic::sadd_with_overflow:
745 computeKnownBitsAddSub(true, II->getArgOperand(0),
746 II->getArgOperand(1), false, KnownZero,
747 KnownOne, KnownZero2, KnownOne2, TD, Depth);
749 case Intrinsic::usub_with_overflow:
750 case Intrinsic::ssub_with_overflow:
751 computeKnownBitsAddSub(false, II->getArgOperand(0),
752 II->getArgOperand(1), false, KnownZero,
753 KnownOne, KnownZero2, KnownOne2, TD, Depth);
755 case Intrinsic::umul_with_overflow:
756 case Intrinsic::smul_with_overflow:
757 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1),
758 false, KnownZero, KnownOne,
759 KnownZero2, KnownOne2, TD, Depth);
766 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
769 /// ComputeSignBit - Determine whether the sign bit is known to be zero or
770 /// one. Convenience wrapper around computeKnownBits.
771 void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
772 const DataLayout *TD, unsigned Depth) {
773 unsigned BitWidth = getBitWidth(V->getType(), TD);
779 APInt ZeroBits(BitWidth, 0);
780 APInt OneBits(BitWidth, 0);
781 computeKnownBits(V, ZeroBits, OneBits, TD, Depth);
782 KnownOne = OneBits[BitWidth - 1];
783 KnownZero = ZeroBits[BitWidth - 1];
786 /// isKnownToBeAPowerOfTwo - Return true if the given value is known to have exactly one
787 /// bit set when defined. For vectors return true if every element is known to
788 /// be a power of two when defined. Supports values with integer or pointer
789 /// types and vectors of integers.
790 bool llvm::isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth) {
791 if (Constant *C = dyn_cast<Constant>(V)) {
792 if (C->isNullValue())
794 if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
795 return CI->getValue().isPowerOf2();
796 // TODO: Handle vector constants.
799 // 1 << X is clearly a power of two if the one is not shifted off the end. If
800 // it is shifted off the end then the result is undefined.
801 if (match(V, m_Shl(m_One(), m_Value())))
804 // (signbit) >>l X is clearly a power of two if the one is not shifted off the
805 // bottom. If it is shifted off the bottom then the result is undefined.
806 if (match(V, m_LShr(m_SignBit(), m_Value())))
809 // The remaining tests are all recursive, so bail out if we hit the limit.
810 if (Depth++ == MaxDepth)
813 Value *X = nullptr, *Y = nullptr;
814 // A shift of a power of two is a power of two or zero.
815 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
816 match(V, m_Shr(m_Value(X), m_Value()))))
817 return isKnownToBeAPowerOfTwo(X, /*OrZero*/true, Depth);
819 if (ZExtInst *ZI = dyn_cast<ZExtInst>(V))
820 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth);
822 if (SelectInst *SI = dyn_cast<SelectInst>(V))
823 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth) &&
824 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth);
826 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
827 // A power of two and'd with anything is a power of two or zero.
828 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/true, Depth) ||
829 isKnownToBeAPowerOfTwo(Y, /*OrZero*/true, Depth))
831 // X & (-X) is always a power of two or zero.
832 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
837 // Adding a power-of-two or zero to the same power-of-two or zero yields
838 // either the original power-of-two, a larger power-of-two or zero.
839 if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
840 OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
841 if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
842 if (match(X, m_And(m_Specific(Y), m_Value())) ||
843 match(X, m_And(m_Value(), m_Specific(Y))))
844 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth))
846 if (match(Y, m_And(m_Specific(X), m_Value())) ||
847 match(Y, m_And(m_Value(), m_Specific(X))))
848 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth))
851 unsigned BitWidth = V->getType()->getScalarSizeInBits();
852 APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0);
853 computeKnownBits(X, LHSZeroBits, LHSOneBits, nullptr, Depth);
855 APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0);
856 computeKnownBits(Y, RHSZeroBits, RHSOneBits, nullptr, Depth);
857 // If i8 V is a power of two or zero:
858 // ZeroBits: 1 1 1 0 1 1 1 1
859 // ~ZeroBits: 0 0 0 1 0 0 0 0
860 if ((~(LHSZeroBits & RHSZeroBits)).isPowerOf2())
861 // If OrZero isn't set, we cannot give back a zero result.
862 // Make sure either the LHS or RHS has a bit set.
863 if (OrZero || RHSOneBits.getBoolValue() || LHSOneBits.getBoolValue())
868 // An exact divide or right shift can only shift off zero bits, so the result
869 // is a power of two only if the first operand is a power of two and not
870 // copying a sign bit (sdiv int_min, 2).
871 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
872 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
873 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero, Depth);
879 /// \brief Test whether a GEP's result is known to be non-null.
881 /// Uses properties inherent in a GEP to try to determine whether it is known
882 /// to be non-null.
884 /// Currently this routine does not support vector GEPs.
885 static bool isGEPKnownNonNull(GEPOperator *GEP, const DataLayout *DL,
887 if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
890 // FIXME: Support vector-GEPs.
891 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
893 // If the base pointer is non-null, we cannot walk to a null address with an
894 // inbounds GEP in address space zero.
895 if (isKnownNonZero(GEP->getPointerOperand(), DL, Depth))
898 // Past this, if we don't have DataLayout, we can't do much.
902 // Walk the GEP operands and see if any operand introduces a non-zero offset.
903 // If so, then the GEP cannot produce a null pointer, as doing so would
904 // inherently violate the inbounds contract within address space zero.
905 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
907 // Struct types are easy -- they must always be indexed by a constant.
908 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
909 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
910 unsigned ElementIdx = OpC->getZExtValue();
911 const StructLayout *SL = DL->getStructLayout(STy);
912 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
913 if (ElementOffset > 0)
918 // If we have a zero-sized type, the index doesn't matter. Keep looping.
919 if (DL->getTypeAllocSize(GTI.getIndexedType()) == 0)
922 // Fast path the constant operand case both for efficiency and so we don't
923 // increment Depth when just zipping down an all-constant GEP.
924 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
930 // We post-increment Depth here because while isKnownNonZero increments it
931 // as well, when we pop back up that increment won't persist. We don't want
932 // to recurse 10k times just because we have 10k GEP operands. We don't
933 // bail completely out because we want to handle constant GEPs regardless
934 // of depth.
935 if (Depth++ >= MaxDepth)
938 if (isKnownNonZero(GTI.getOperand(), DL, Depth))
945 /// isKnownNonZero - Return true if the given value is known to be non-zero
946 /// when defined. For vectors return true if every element is known to be
947 /// non-zero when defined. Supports values with integer or pointer type and
948 /// vectors of integers.
949 bool llvm::isKnownNonZero(Value *V, const DataLayout *TD, unsigned Depth) {
950 if (Constant *C = dyn_cast<Constant>(V)) {
951 if (C->isNullValue())
953 if (isa<ConstantInt>(C))
954 // Must be non-zero due to null test above.
956 // TODO: Handle vectors
960 // The remaining tests are all recursive, so bail out if we hit the limit.
961 if (Depth++ >= MaxDepth)
964 // Check for pointer simplifications.
965 if (V->getType()->isPointerTy()) {
966 if (isKnownNonNull(V))
968 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
969 if (isGEPKnownNonNull(GEP, TD, Depth))
973 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), TD);
975 // X | Y != 0 if X != 0 or Y != 0.
976 Value *X = nullptr, *Y = nullptr;
977 if (match(V, m_Or(m_Value(X), m_Value(Y))))
978 return isKnownNonZero(X, TD, Depth) || isKnownNonZero(Y, TD, Depth);
980 // ext X != 0 if X != 0.
981 if (isa<SExtInst>(V) || isa<ZExtInst>(V))
982 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), TD, Depth);
984 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
985 // if the lowest bit is shifted off the end.
986 if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
987 // shl nuw can't remove any non-zero bits.
988 OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
989 if (BO->hasNoUnsignedWrap())
990 return isKnownNonZero(X, TD, Depth);
992 APInt KnownZero(BitWidth, 0);
993 APInt KnownOne(BitWidth, 0);
994 computeKnownBits(X, KnownZero, KnownOne, TD, Depth);
998 // shr X, Y != 0 if X is negative. Note that the value of the shift is not
999 // defined if the sign bit is shifted off the end.
1000 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
1001 // shr exact can only shift out zero bits.
1002 PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
1004 return isKnownNonZero(X, TD, Depth);
1006 bool XKnownNonNegative, XKnownNegative;
1007 ComputeSignBit(X, XKnownNonNegative, XKnownNegative, TD, Depth);
1011 // div exact can only produce a zero if the dividend is zero.
1012 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
1013 return isKnownNonZero(X, TD, Depth);
1016 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1017 bool XKnownNonNegative, XKnownNegative;
1018 bool YKnownNonNegative, YKnownNegative;
1019 ComputeSignBit(X, XKnownNonNegative, XKnownNegative, TD, Depth);
1020 ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, TD, Depth);
1022 // If X and Y are both non-negative (as signed values) then their sum is not
1023 // zero unless both X and Y are zero.
1024 if (XKnownNonNegative && YKnownNonNegative)
1025 if (isKnownNonZero(X, TD, Depth) || isKnownNonZero(Y, TD, Depth))
1028 // If X and Y are both negative (as signed values) then their sum is not
1029 // zero unless both X and Y equal INT_MIN.
1030 if (BitWidth && XKnownNegative && YKnownNegative) {
1031 APInt KnownZero(BitWidth, 0);
1032 APInt KnownOne(BitWidth, 0);
1033 APInt Mask = APInt::getSignedMaxValue(BitWidth);
1034 // The sign bit of X is set. If some other bit is set then X is not equal
1035 // to INT_MIN.
1036 computeKnownBits(X, KnownZero, KnownOne, TD, Depth);
1037 if ((KnownOne & Mask) != 0)
1039 // The sign bit of Y is set. If some other bit is set then Y is not equal
1040 // to INT_MIN.
1041 computeKnownBits(Y, KnownZero, KnownOne, TD, Depth);
1042 if ((KnownOne & Mask) != 0)
1046 // The sum of a non-negative number and a power of two is not zero.
1047 if (XKnownNonNegative && isKnownToBeAPowerOfTwo(Y, /*OrZero*/false, Depth))
1049 if (YKnownNonNegative && isKnownToBeAPowerOfTwo(X, /*OrZero*/false, Depth))
1053 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
1054 OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1055 // If X and Y are non-zero then so is X * Y as long as the multiplication
1056 // does not overflow.
1057 if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
1058 isKnownNonZero(X, TD, Depth) && isKnownNonZero(Y, TD, Depth))
1061 // (C ? X : Y) != 0 if X != 0 and Y != 0.
1062 else if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
1063 if (isKnownNonZero(SI->getTrueValue(), TD, Depth) &&
1064 isKnownNonZero(SI->getFalseValue(), TD, Depth))
1068 if (!BitWidth) return false;
1069 APInt KnownZero(BitWidth, 0);
1070 APInt KnownOne(BitWidth, 0);
1071 computeKnownBits(V, KnownZero, KnownOne, TD, Depth);
1072 return KnownOne != 0;
1075 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
1076 /// this predicate to simplify operations downstream. Mask is known to be zero
1077 /// for bits that V cannot have.
1079 /// This function is defined on values with integer type, values with pointer
1080 /// type (but only if TD is non-null), and vectors of integers. In the case
1081 /// where V is a vector, the mask, known zero, and known one values are the
1082 /// same width as the vector element, and the bit is set only if it is true
1083 /// for all of the elements in the vector.
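///
/// For example, MaskedValueIsZero(V, 7) asks whether the low three bits of
/// V are known zero, i.e. whether V is known to be a multiple of 8.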
1084 bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
1085 const DataLayout *TD, unsigned Depth) {
1086 APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
1087 computeKnownBits(V, KnownZero, KnownOne, TD, Depth);
1088 return (KnownZero & Mask) == Mask;
1093 /// ComputeNumSignBits - Return the number of times the sign bit of the
1094 /// register is replicated into the other bits. We know that at least 1 bit
1095 /// is always equal to the sign bit (itself), but other cases can give us
1096 /// information. For example, immediately after an "ashr X, 2", we know that
1097 /// the top 3 bits are all equal to each other, so we return 3.
1099 /// 'Op' must have a scalar integer type.
1101 unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout *TD,
1103 assert((TD || V->getType()->isIntOrIntVectorTy()) &&
1104 "ComputeNumSignBits requires a DataLayout object to operate "
1105 "on non-integer values!");
1106 Type *Ty = V->getType();
1107 unsigned TyBits = TD ? TD->getTypeSizeInBits(V->getType()->getScalarType()) :
1108 Ty->getScalarSizeInBits();
1110 unsigned FirstAnswer = 1;
1112 // Note that ConstantInt is handled by the general computeKnownBits case
1113 // below.
1116 return 1; // Limit search depth.
1118 Operator *U = dyn_cast<Operator>(V);
1119 switch (Operator::getOpcode(V)) {
1121 case Instruction::SExt:
1122 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
1123 return ComputeNumSignBits(U->getOperand(0), TD, Depth+1) + Tmp;
1125 case Instruction::AShr: {
1126 Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
1127 // ashr X, C -> adds C sign bits. Vectors too.
1129 if (match(U->getOperand(1), m_APInt(ShAmt))) {
1130 Tmp += ShAmt->getZExtValue();
1131 if (Tmp > TyBits) Tmp = TyBits;
1135 case Instruction::Shl: {
1137 if (match(U->getOperand(1), m_APInt(ShAmt))) {
1138 // shl destroys sign bits.
1139 Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
1140 Tmp2 = ShAmt->getZExtValue();
1141 if (Tmp2 >= TyBits || // Bad shift.
1142 Tmp2 >= Tmp) break; // Shifted all sign bits out.
1147 case Instruction::And:
1148 case Instruction::Or:
1149 case Instruction::Xor: // NOT is handled here.
1150 // Logical binary ops preserve the number of sign bits at the worst.
1151 Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
1153 Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
1154 FirstAnswer = std::min(Tmp, Tmp2);
1155 // We computed what we know about the sign bits as our first
1156 // answer. Now proceed to the generic code that uses
1157 // computeKnownBits, and pick whichever answer is better.
1161 case Instruction::Select:
1162 Tmp = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
1163 if (Tmp == 1) return 1; // Early out.
1164 Tmp2 = ComputeNumSignBits(U->getOperand(2), TD, Depth+1);
1165 return std::min(Tmp, Tmp2);
1167 case Instruction::Add:
1168 // Add can have at most one carry bit. Thus we know that the output
1169 // is, at worst, one more bit than the inputs.
1170 Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
1171 if (Tmp == 1) return 1; // Early out.
1173 // Special case decrementing a value (ADD X, -1):
1174 if (ConstantInt *CRHS = dyn_cast<ConstantInt>(U->getOperand(1)))
1175 if (CRHS->isAllOnesValue()) {
1176 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
1177 computeKnownBits(U->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
1179 // If the input is known to be 0 or 1, the output is 0/-1, which is all
1180 // sign bits.
1181 if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
1184 // If we are subtracting one from a positive number, there is no carry
1185 // out of the result.
1186 if (KnownZero.isNegative())
1190 Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
1191 if (Tmp2 == 1) return 1;
1192 return std::min(Tmp, Tmp2)-1;
1194 case Instruction::Sub:
1195 Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
1196 if (Tmp2 == 1) return 1;
1199 if (ConstantInt *CLHS = dyn_cast<ConstantInt>(U->getOperand(0)))
1200 if (CLHS->isNullValue()) {
1201 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
1202 computeKnownBits(U->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
1203 // If the input is known to be 0 or 1, the output is 0/-1, which is all
1204 // sign bits.
1205 if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
1208 // If the input is known to be positive (the sign bit is known clear),
1209 // the output of the NEG has the same number of sign bits as the input.
1210 if (KnownZero.isNegative())
1213 // Otherwise, we treat this like a SUB.
1216 // Sub can have at most one carry bit. Thus we know that the output
1217 // is, at worst, one more bit than the inputs.
1218 Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
1219 if (Tmp == 1) return 1; // Early out.
1220 return std::min(Tmp, Tmp2)-1;
1222 case Instruction::PHI: {
1223 PHINode *PN = cast<PHINode>(U);
1224 // Don't analyze large in-degree PHIs.
1225 if (PN->getNumIncomingValues() > 4) break;
1227 // Take the minimum of all incoming values. This can't infinitely loop
1228 // because of our depth threshold.
1229 Tmp = ComputeNumSignBits(PN->getIncomingValue(0), TD, Depth+1);
1230 for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
1231 if (Tmp == 1) return Tmp;
1233 ComputeNumSignBits(PN->getIncomingValue(i), TD, Depth+1));
1238 case Instruction::Trunc:
1239 // FIXME: it's tricky to do anything useful for this, but it is an important
1240 // case for targets like X86.
1244 // Finally, if we can prove that the top bits of the result are 0's or 1's,
1245 // use this information.
1246 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
1248 computeKnownBits(V, KnownZero, KnownOne, TD, Depth);
1250 if (KnownZero.isNegative()) { // sign bit is 0
1252 } else if (KnownOne.isNegative()) { // sign bit is 1;
1259 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
1260 // the number of identical bits in the top of the input value.
1262 Mask <<= Mask.getBitWidth()-TyBits;
1263 // Return # leading zeros. We use 'min' here in case Val was zero before
1264 // shifting. We don't want to return '64' as for an i32 "0".
1265 return std::max(FirstAnswer, std::min(TyBits, Mask.countLeadingZeros()));
1268 /// ComputeMultiple - This function computes the integer multiple of Base that
1269 /// equals V. If successful, it returns true and returns the multiple in
1270 /// Multiple. If unsuccessful, it returns false. It looks
1271 /// through SExt instructions only if LookThroughSExt is true.
1272 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
1273 bool LookThroughSExt, unsigned Depth) {
1274 const unsigned MaxDepth = 6;
1276 assert(V && "No Value?");
1277 assert(Depth <= MaxDepth && "Limit Search Depth");
1278 assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
1280 Type *T = V->getType();
1282 ConstantInt *CI = dyn_cast<ConstantInt>(V);
1292 ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
1293 Constant *BaseVal = ConstantInt::get(T, Base);
1294 if (CO && CO == BaseVal) {
1296 Multiple = ConstantInt::get(T, 1);
1300 if (CI && CI->getZExtValue() % Base == 0) {
1301 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
1305 if (Depth == MaxDepth) return false; // Limit search depth.
1307 Operator *I = dyn_cast<Operator>(V);
1308 if (!I) return false;
1310 switch (I->getOpcode()) {
1312 case Instruction::SExt:
1313 if (!LookThroughSExt) return false;
1314 // otherwise fall through to ZExt
1315 case Instruction::ZExt:
1316 return ComputeMultiple(I->getOperand(0), Base, Multiple,
1317 LookThroughSExt, Depth+1);
1318 case Instruction::Shl:
1319 case Instruction::Mul: {
1320 Value *Op0 = I->getOperand(0);
1321 Value *Op1 = I->getOperand(1);
1323 if (I->getOpcode() == Instruction::Shl) {
1324 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
1325 if (!Op1CI) return false;
1326 // Turn Op0 << Op1 into Op0 * 2^Op1
1327 APInt Op1Int = Op1CI->getValue();
1328 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
1329 APInt API(Op1Int.getBitWidth(), 0);
1330 API.setBit(BitToSet);
1331 Op1 = ConstantInt::get(V->getContext(), API);
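// For example, "shl i32 %x, 3" is treated as "%x * 8", so a query with
// Base == 8 falls into the multiply handling below and reports %x as the
// multiple.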
1334 Value *Mul0 = nullptr;
1335 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
1336 if (Constant *Op1C = dyn_cast<Constant>(Op1))
1337 if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
1338 if (Op1C->getType()->getPrimitiveSizeInBits() <
1339 MulC->getType()->getPrimitiveSizeInBits())
1340 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
1341 if (Op1C->getType()->getPrimitiveSizeInBits() >
1342 MulC->getType()->getPrimitiveSizeInBits())
1343 MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
1345 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
1346 Multiple = ConstantExpr::getMul(MulC, Op1C);
1350 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
1351 if (Mul0CI->getValue() == 1) {
1352 // V == Base * Op1, so return Op1
1358 Value *Mul1 = nullptr;
1359 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
1360 if (Constant *Op0C = dyn_cast<Constant>(Op0))
1361 if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
1362 if (Op0C->getType()->getPrimitiveSizeInBits() <
1363 MulC->getType()->getPrimitiveSizeInBits())
1364 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
1365 if (Op0C->getType()->getPrimitiveSizeInBits() >
1366 MulC->getType()->getPrimitiveSizeInBits())
1367 MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
1369 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
1370 Multiple = ConstantExpr::getMul(MulC, Op0C);
1374 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
1375 if (Mul1CI->getValue() == 1) {
1376 // V == Base * Op0, so return Op0
1384 // We could not determine if V is a multiple of Base.
1388 /// CannotBeNegativeZero - Return true if we can prove that the specified FP
1389 /// value is never equal to -0.0.
1391 /// NOTE: this function will need to be revisited when we support non-default
1392 /// rounding modes!
1394 bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
1395 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
1396 return !CFP->getValueAPF().isNegZero();
1399 return 1; // Limit search depth.
1401 const Operator *I = dyn_cast<Operator>(V);
1402 if (!I) return false;
1404 // Check if the nsz fast-math flag is set
1405 if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
1406 if (FPO->hasNoSignedZeros())
1409 // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
1410 if (I->getOpcode() == Instruction::FAdd)
1411 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1)))
1412 if (CFP->isNullValue())
1415 // sitofp and uitofp turn into +0.0 for zero.
1416 if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
1419 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1420 // sqrt(-0.0) = -0.0, no other negative results are possible.
1421 if (II->getIntrinsicID() == Intrinsic::sqrt)
1422 return CannotBeNegativeZero(II->getArgOperand(0), Depth+1);
1424 if (const CallInst *CI = dyn_cast<CallInst>(I))
1425 if (const Function *F = CI->getCalledFunction()) {
1426 if (F->isDeclaration()) {
1427 // abs(x) != -0.0
1428 if (F->getName() == "abs") return true;
1429 // fabs[lf](x) != -0.0
1430 if (F->getName() == "fabs") return true;
1431 if (F->getName() == "fabsf") return true;
1432 if (F->getName() == "fabsl") return true;
1433 if (F->getName() == "sqrt" || F->getName() == "sqrtf" ||
1434 F->getName() == "sqrtl")
1435 return CannotBeNegativeZero(CI->getArgOperand(0), Depth+1);
1442 /// isBytewiseValue - If the specified value can be set by repeating the same
1443 /// byte in memory, return the i8 value that it is represented with. This is
1444 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
1445 /// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
1446 /// byte store (e.g. i16 0x1234), return null.
1447 Value *llvm::isBytewiseValue(Value *V) {
1448 // All byte-wide stores are splatable, even of arbitrary variables.
1449 if (V->getType()->isIntegerTy(8)) return V;
1451 // Handle 'null' ConstantAggregateZero etc.
1452 if (Constant *C = dyn_cast<Constant>(V))
1453 if (C->isNullValue())
1454 return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
1456 // Constant float and double values can be handled as integer values if the
1457 // corresponding integer value is "byteable". An important case is 0.0.
1458 if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
1459 if (CFP->getType()->isFloatTy())
1460 V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
1461 if (CFP->getType()->isDoubleTy())
1462 V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
1463 // Don't handle long double formats, which have strange constraints.
1466 // We can handle constant integers that are power of two in size and a
1467 // multiple of 8 bits.
1468 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
1469 unsigned Width = CI->getBitWidth();
1470 if (isPowerOf2_32(Width) && Width > 8) {
1471 // We can handle this value if the recursive binary decomposition is the
1472 // same at all levels.
1473 APInt Val = CI->getValue();
1475 while (Val.getBitWidth() != 8) {
1476 unsigned NextWidth = Val.getBitWidth()/2;
1477 Val2 = Val.lshr(NextWidth);
1478 Val2 = Val2.trunc(Val.getBitWidth()/2);
1479 Val = Val.trunc(Val.getBitWidth()/2);
1481 // If the top/bottom halves aren't the same, reject it.
1485 return ConstantInt::get(V->getContext(), Val);
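// For example, i32 0xF0F0F0F0 splits into equal halves 0xF0F0/0xF0F0 and
// then 0xF0/0xF0, so the splat byte 0xF0 is returned; i16 0x1234 is
// rejected at the first split because 0x12 != 0x34.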
1489 // A ConstantDataArray/Vector is splatable if all its members are equal and
1490 // also splatable.
1491 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
1492 Value *Elt = CA->getElementAsConstant(0);
1493 Value *Val = isBytewiseValue(Elt);
1497 for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
1498 if (CA->getElementAsConstant(I) != Elt)
1504 // Conceptually, we could handle things like:
1505 // %a = zext i8 %X to i16
1506 // %b = shl i16 %a, 8
1507 // %c = or i16 %a, %b
1508 // but until there is an example that actually needs this, it doesn't seem
1509 // worth worrying about.
1514 // This is the recursive version of BuildSubAggregate. It takes a few different
1515 // arguments. Idxs is the index within the nested struct From that we are
1516 // looking at now (which is of type IndexedType). IdxSkip is the number of
1517 // indices from Idxs that should be left out when inserting into the resulting
1518 // struct. To is the result struct built so far, new insertvalue instructions
1519 // build on that.
1520 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
1521 SmallVectorImpl<unsigned> &Idxs,
1523 Instruction *InsertBefore) {
1524 llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType);
1526 // Save the original To argument so we can modify it
1528 // General case, the type indexed by Idxs is a struct
1529 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1530 // Process each struct element recursively
1533 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
1537 // Couldn't find any inserted value for this index? Cleanup
1538 while (PrevTo != OrigTo) {
1539 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
1540 PrevTo = Del->getAggregateOperand();
1541 Del->eraseFromParent();
1543 // Stop processing elements
1547 // If we successfully found a value for each of our subaggregates
1551 // Base case, the type indexed by SourceIdxs is not a struct, or not all of
1552 // the struct's elements had a value that was inserted directly. In the latter
1553 // case, perhaps we can't determine each of the subelements individually, but
1554 // we might be able to find the complete struct somewhere.
1556 // Find the value that is at that particular spot
1557 Value *V = FindInsertedValue(From, Idxs);
1562 // Insert the value in the new (sub) aggregate
1563 return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
1564 "tmp", InsertBefore);
1567 // This helper takes a nested struct and extracts a part of it (which is again a
1568 // struct) into a new value. For example, given the struct:
1569 // { a, { b, { c, d }, e } }
1570 // and the indices "1, 1" this returns
1571 // { c, d }.
1573 // It does this by inserting an insertvalue for each element in the resulting
1574 // struct, as opposed to just inserting a single struct. This will only work if
1575 // each of the elements of the substruct is known (i.e., inserted into From by an
1576 // insertvalue instruction somewhere).
1578 // All inserted insertvalue instructions are inserted before InsertBefore
1579 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
1580 Instruction *InsertBefore) {
1581 assert(InsertBefore && "Must have someplace to insert!");
1582 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
1584 Value *To = UndefValue::get(IndexedType);
1585 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
1586 unsigned IdxSkip = Idxs.size();
1588 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
1591 /// FindInsertedValue - Given an aggregate and a sequence of indices, see if
1592 /// the scalar value indexed is already around as a register, for example if it
1593 /// were inserted directly into the aggregate.
1595 /// If InsertBefore is not null, this function will duplicate (modified)
1596 /// insertvalues when a part of a nested struct is extracted.
1597 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
1598 Instruction *InsertBefore) {
1599 // Nothing to index? Just return V then (this is useful at the end of our
1600 // recursion).
1601 if (idx_range.empty())
1603 // We have indices, so V should have an indexable type.
1604 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
1605 "Not looking at a struct or array?");
1606 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
1607 "Invalid indices for type?");
1609 if (Constant *C = dyn_cast<Constant>(V)) {
1610 C = C->getAggregateElement(idx_range[0]);
1611 if (!C) return nullptr;
1612 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
1615 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
1616 // Loop the indices for the insertvalue instruction in parallel with the
1617 // requested indices
1618 const unsigned *req_idx = idx_range.begin();
1619 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
1620 i != e; ++i, ++req_idx) {
1621 if (req_idx == idx_range.end()) {
1622 // We can't handle this without inserting insertvalues
  if (!InsertBefore)
    return nullptr;
1626 // The requested index identifies a part of a nested aggregate. Handle
1627 // this specially. For example,
1628 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
1629 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
1630 // %C = extractvalue {i32, { i32, i32 } } %B, 1
1631 // This can be changed into
1632 // %A = insertvalue {i32, i32 } undef, i32 10, 0
1633 // %C = insertvalue {i32, i32 } %A, i32 11, 1
1634 // which allows the unused 0,0 element from the nested struct to be
// removed.
1636 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx), InsertBefore);
1640 // This insertvalue inserts something other than what we are looking for.
1641 // See if the (aggregate) value it was inserted into has the value we are
1642 // looking for, then.
1644 return FindInsertedValue(I->getAggregateOperand(), idx_range, InsertBefore);
1647 // If we end up here, the indices of the insertvalue match with those
1648 // requested (though possibly only partially). Now we recursively look at
1649 // the inserted value, passing any remaining indices.
1650 return FindInsertedValue(I->getInsertedValueOperand(),
1651 makeArrayRef(req_idx, idx_range.end()), InsertBefore);
  }
1655 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
1656 // If we're extracting a value from an aggregate that was extracted from
1657 // something else, we can extract from that something else directly instead.
1658 // However, we will need to chain I's indices with the requested indices.
1660 // Calculate the number of indices required
1661 unsigned size = I->getNumIndices() + idx_range.size();
1662 // Allocate some space to put the new indices in
1663 SmallVector<unsigned, 5> Idxs;
1665 // Add indices from the extract value instruction
1666 Idxs.append(I->idx_begin(), I->idx_end());
1668 // Add requested indices
1669 Idxs.append(idx_range.begin(), idx_range.end());
1671 assert(Idxs.size() == size
1672 && "Number of indices added not correct?");
1674 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
1676 // Otherwise, we don't know (for example, when extracting from a function
1677 // return value or a load instruction).
  return nullptr;
}
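// Illustrative sketch (editor-added, not part of the original file): a minimal
// caller of FindInsertedValue. The helper name and parameters are hypothetical.
static Value *exampleLookupFirstField(Value *Agg, Instruction *InsertPt) {
  // For IR such as
  //   %agg = insertvalue { i32, i32 } undef, i32 7, 0
  // asking for index 0 returns the i32 value 7; with a non-null InsertPt the
  // function may also synthesize insertvalues to rebuild a nested sub-struct.
  unsigned Idxs[] = {0};
  return FindInsertedValue(Agg, Idxs, InsertPt);
}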
1681 /// GetPointerBaseWithConstantOffset - Analyze the specified pointer to see if
1682 /// it can be expressed as a base pointer plus a constant offset. Return the
1683 /// base and offset to the caller.
1684 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
1685 const DataLayout *DL) {
1686 // Without DataLayout, conservatively assume 64-bit offsets, which is
1687 // the widest we support.
1688 unsigned BitWidth = DL ? DL->getPointerTypeSizeInBits(Ptr->getType()) : 64;
1689 APInt ByteOffset(BitWidth, 0);
1691 if (Ptr->getType()->isVectorTy())
1694 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
1696 APInt GEPOffset(BitWidth, 0);
1697 if (!GEP->accumulateConstantOffset(*DL, GEPOffset))
1700 ByteOffset += GEPOffset;
1703 Ptr = GEP->getPointerOperand();
1704 } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
1705 Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
1706 Ptr = cast<Operator>(Ptr)->getOperand(0);
1707 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
1708 if (GA->mayBeOverridden())
1710 Ptr = GA->getAliasee();
1715 Offset = ByteOffset.getSExtValue();
  return Ptr;
}
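// Illustrative sketch (editor-added, not part of the original file): splitting
// a pointer into a base and a constant byte offset. The helper name is
// hypothetical.
static Value *exampleSplitPointer(Value *Ptr, const DataLayout *DL) {
  // For IR such as (old-style getelementptr syntax)
  //   %p = getelementptr inbounds i8* %base, i64 16
  // this yields %base with ByteOff == 16, provided DL is available so the GEP
  // offset can be accumulated.
  int64_t ByteOff = 0;
  Value *Base = GetPointerBaseWithConstantOffset(Ptr, ByteOff, DL);
  return Base;
}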
1720 /// getConstantStringInfo - This function looks for a null-terminated C string
1721 /// pointed to by V. If one is found, it returns true and stores the string in
1722 /// Str; otherwise it returns false.
1723 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
1724 uint64_t Offset, bool TrimAtNul) {
1727 // Look through bitcast instructions and geps.
1728 V = V->stripPointerCasts();
1730 // If the value is a GEP instruction or constant expression, treat it as an offset.
1732 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
1733 // Make sure the GEP has exactly three arguments.
1734 if (GEP->getNumOperands() != 3)
1737 // Make sure the GEP's base operand points to an array of i8.
1738 PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
1739 ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
1740 if (!AT || !AT->getElementType()->isIntegerTy(8))
1743 // Check to make sure that the first operand of the GEP is an integer and
1744 // has value 0 so that we are sure we're indexing into the initializer.
1745 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
1746 if (!FirstIdx || !FirstIdx->isZero())
1749 // If the second index isn't a ConstantInt, then this is a variable index
1750 // into the array. If this occurs, we can't say anything meaningful about the string.
1752 uint64_t StartIdx = 0;
1753 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
1754 StartIdx = CI->getZExtValue();
1757 return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx+Offset);
1760 // The GEP instruction, constant or instruction, must reference a global
1761 // variable that is a constant and is initialized. The referenced constant
1762 // initializer is the array that we'll use for optimization.
1763 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
1764 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
1767 // Handle the all-zeros case
1768 if (GV->getInitializer()->isNullValue()) {
1769 // This is a degenerate case. The initializer is constant zero so the
1770 // length of the string must be zero.
  Str = "";
  return true;
}
1775 // Must be a ConstantDataArray.
1776 const ConstantDataArray *Array =
1777 dyn_cast<ConstantDataArray>(GV->getInitializer());
1778 if (!Array || !Array->isString())
1781 // Get the number of elements in the array
1782 uint64_t NumElts = Array->getType()->getArrayNumElements();
1784 // Start out with the entire array in the StringRef.
1785 Str = Array->getAsString();
1787 if (Offset > NumElts) return false;
1790 // Skip over 'offset' bytes.
1791 Str = Str.substr(Offset);
if (TrimAtNul) {
1794 // Trim off the \0 and anything after it. If the array is not nul
1795 // terminated, we just return the whole end of string. The client may know
1796 // some other way that the string is length-bound.
1797 Str = Str.substr(0, Str.find('\0'));
}
return true;
}
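// Illustrative sketch (editor-added, not part of the original file): typical
// use of getConstantStringInfo. The helper name is hypothetical.
static bool exampleReadConstantString(const Value *V, StringRef &Out) {
  // For a global such as
  //   @msg = private unnamed_addr constant [6 x i8] c"hello\00"
  // this returns true and sets Out to "hello"; the terminating nul and any
  // bytes after it are dropped because TrimAtNul is true.
  return getConstantStringInfo(V, Out, /*Offset=*/0, /*TrimAtNul=*/true);
}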
1802 // These next two are very similar to the above, but also look through PHI nodes.
1804 // TODO: See if we can integrate these two together.
1806 /// GetStringLengthH - If we can compute the length of the string pointed to by
1807 /// the specified pointer, return 'len+1'. If we can't, return 0.
1808 static uint64_t GetStringLengthH(Value *V, SmallPtrSetImpl<PHINode*> &PHIs) {
1809 // Look through noop bitcast instructions.
1810 V = V->stripPointerCasts();
1812 // If this is a PHI node, there are two cases: either we have already seen it or we haven't.
1814 if (PHINode *PN = dyn_cast<PHINode>(V)) {
1815 if (!PHIs.insert(PN))
1816 return ~0ULL; // already in the set.
1818 // If it was new, see if all the input strings are the same length.
1819 uint64_t LenSoFar = ~0ULL;
1820 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1821 uint64_t Len = GetStringLengthH(PN->getIncomingValue(i), PHIs);
1822 if (Len == 0) return 0; // Unknown length -> unknown.
1824 if (Len == ~0ULL) continue;
1826 if (Len != LenSoFar && LenSoFar != ~0ULL)
1827 return 0; // Disagree -> unknown.
  LenSoFar = Len;
}
1831 // Success, all agree.
return LenSoFar;
}
1835 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
1836 if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
1837 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs);
1838 if (Len1 == 0) return 0;
1839 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs);
1840 if (Len2 == 0) return 0;
1841 if (Len1 == ~0ULL) return Len2;
1842 if (Len2 == ~0ULL) return Len1;
1843 if (Len1 != Len2) return 0;
  return Len1;
}
1847 // Otherwise, see if we can read the string.
StringRef StrData;
1849 if (!getConstantStringInfo(V, StrData))
  return 0;
1852 return StrData.size()+1;
1855 /// GetStringLength - If we can compute the length of the string pointed to by
1856 /// the specified pointer, return 'len+1'. If we can't, return 0.
1857 uint64_t llvm::GetStringLength(Value *V) {
1858 if (!V->getType()->isPointerTy()) return 0;
1860 SmallPtrSet<PHINode*, 32> PHIs;
1861 uint64_t Len = GetStringLengthH(V, PHIs);
1862 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
1863 // an empty string as a length.
1864 return Len == ~0ULL ? 1 : Len;
}
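// Illustrative sketch (editor-added, not part of the original file): querying
// the length of a constant C string. The helper name is hypothetical.
static bool exampleHasKnownStrlen(Value *Str, uint64_t &LenPlusNul) {
  // For a pointer to c"foo\00" this sets LenPlusNul to 4 (three characters
  // plus the terminating nul); 0 means the length is unknown.
  LenPlusNul = GetStringLength(Str);
  return LenPlusNul != 0;
}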
Value *
1868 llvm::GetUnderlyingObject(Value *V, const DataLayout *TD, unsigned MaxLookup) {
1869 if (!V->getType()->isPointerTy()) return V;
1871 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
1872 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
1873 V = GEP->getPointerOperand();
1874 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
1875 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
1876 V = cast<Operator>(V)->getOperand(0);
1877 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1878 if (GA->mayBeOverridden()) return V;
1880 V = GA->getAliasee();
1882 // See if InstructionSimplify knows any relevant tricks.
1883 if (Instruction *I = dyn_cast<Instruction>(V))
1884 // TODO: Acquire a DominatorTree and use it.
1885 if (Value *Simplified = SimplifyInstruction(I, TD, nullptr)) {
1892 assert(V->getType()->isPointerTy() && "Unexpected operand type!");
void
1898 llvm::GetUnderlyingObjects(Value *V,
1899 SmallVectorImpl<Value *> &Objects,
1900 const DataLayout *TD,
1901 unsigned MaxLookup) {
1902 SmallPtrSet<Value *, 4> Visited;
1903 SmallVector<Value *, 4> Worklist;
1904 Worklist.push_back(V);
do {
1906 Value *P = Worklist.pop_back_val();
1907 P = GetUnderlyingObject(P, TD, MaxLookup);
1909 if (!Visited.insert(P)) continue;
1912 if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
1913 Worklist.push_back(SI->getTrueValue());
1914 Worklist.push_back(SI->getFalseValue());
  continue;
}
1918 if (PHINode *PN = dyn_cast<PHINode>(P)) {
1919 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
1920 Worklist.push_back(PN->getIncomingValue(i));
  continue;
}
1924 Objects.push_back(P);
1925 } while (!Worklist.empty());
}
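// Illustrative sketch (editor-added, not part of the original file): collecting
// every object a pointer may be based on, looking through selects and PHIs.
// The helper name is hypothetical.
static bool examplePointsOnlyIntoOneAlloca(Value *Ptr, const DataLayout *DL) {
  SmallVector<Value *, 4> Objs;
  GetUnderlyingObjects(Ptr, Objs, DL, /*MaxLookup=*/6);
  // A select like "%p = select i1 %c, i8* %a, i8* %b" contributes the
  // underlying objects of both %a and %b, so Objs may hold several values.
  return Objs.size() == 1 && isa<AllocaInst>(Objs[0]);
}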
1928 /// onlyUsedByLifetimeMarkers - Return true if the only users of this pointer
1929 /// are lifetime markers.
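///
/// For illustration (editor-added): a pointer %p whose only users are
///   call void @llvm.lifetime.start(i64 4, i8* %p)
///   call void @llvm.lifetime.end(i64 4, i8* %p)
/// satisfies this predicate; any other user makes it return false.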
1931 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
1932 for (const User *U : V->users()) {
1933 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
1934 if (!II) return false;
1936 if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
1937 II->getIntrinsicID() != Intrinsic::lifetime_end)
      return false;
  }
  return true;
}
1943 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
1944 const DataLayout *TD) {
1945 const Operator *Inst = dyn_cast<Operator>(V);
  if (!Inst)
    return false;
1949 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
1950 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
    if (C->canTrap())
      return false;
1954 switch (Inst->getOpcode()) {
default:
  return true;
1957 case Instruction::UDiv:
1958 case Instruction::URem:
1959 // x / y is undefined if y == 0, but calculations like x / 3 are safe.
1960 return isKnownNonZero(Inst->getOperand(1), TD);
1961 case Instruction::SDiv:
1962 case Instruction::SRem: {
1963 Value *Op = Inst->getOperand(1);
1964 // x / y is undefined if y == 0
1965 if (!isKnownNonZero(Op, TD)) return false;
1967 // x / y might be undefined if y == -1
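// Illustrative note (editor-added): "sdiv i32 -2147483648, -1" overflows, so
// besides y != 0 we must also rule out y == -1; a sign bit of y known to be
// zero is sufficient for that.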
1968 unsigned BitWidth = getBitWidth(Op->getType(), TD);
1971 APInt KnownZero(BitWidth, 0);
1972 APInt KnownOne(BitWidth, 0);
1973 computeKnownBits(Op, KnownZero, KnownOne, TD);
  // Safe only if the divisor cannot be -1, i.e. its sign bit is known zero.
  return KnownZero.isNegative();
}
1976 case Instruction::Load: {
1977 const LoadInst *LI = cast<LoadInst>(Inst);
1978 if (!LI->isUnordered() ||
1979 // Speculative load may create a race that did not exist in the source.
1980 LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
  return false;
1982 return LI->getPointerOperand()->isDereferenceablePointer(TD);
1984 case Instruction::Call: {
1985 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
1986 switch (II->getIntrinsicID()) {
1987 // These synthetic intrinsics have no side-effects and just mark
1988 // information about their operands.
1989 // FIXME: There are other no-op synthetic instructions that potentially
1990 // should be considered at least *safe* to speculate...
1991 case Intrinsic::dbg_declare:
1992 case Intrinsic::dbg_value:
  return true;
1995 case Intrinsic::bswap:
1996 case Intrinsic::ctlz:
1997 case Intrinsic::ctpop:
1998 case Intrinsic::cttz:
1999 case Intrinsic::objectsize:
2000 case Intrinsic::sadd_with_overflow:
2001 case Intrinsic::smul_with_overflow:
2002 case Intrinsic::ssub_with_overflow:
2003 case Intrinsic::uadd_with_overflow:
2004 case Intrinsic::umul_with_overflow:
2005 case Intrinsic::usub_with_overflow:
2007 // Sqrt should be OK, since the llvm sqrt intrinsic isn't defined to set
2008 // errno like libm sqrt would.
2009 case Intrinsic::sqrt:
2010 case Intrinsic::fma:
2011 case Intrinsic::fmuladd:
2012 case Intrinsic::fabs:
  return true;
2014 // TODO: some fp intrinsics are marked as having the same error handling
2015 // as libm. They're safe to speculate when they won't error.
2016 // TODO: are convert_{from,to}_fp16 safe?
2017 // TODO: can we list target-specific intrinsics here?
2021 return false; // The called function could have undefined behavior or
2022 // side-effects, even if marked readnone nounwind.
2024 case Instruction::VAArg:
2025 case Instruction::Alloca:
2026 case Instruction::Invoke:
2027 case Instruction::PHI:
2028 case Instruction::Store:
2029 case Instruction::Ret:
2030 case Instruction::Br:
2031 case Instruction::IndirectBr:
2032 case Instruction::Switch:
2033 case Instruction::Unreachable:
2034 case Instruction::Fence:
2035 case Instruction::LandingPad:
2036 case Instruction::AtomicRMW:
2037 case Instruction::AtomicCmpXchg:
2038 case Instruction::Resume:
2039 return false; // Misc instructions which have effects
2043 /// isKnownNonNull - Return true if we know that the specified value is never null.
2045 bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) {
2046 // Alloca never returns null, malloc might.
2047 if (isa<AllocaInst>(V)) return true;
2049 // A byval, inalloca, or nonnull argument is never null.
2050 if (const Argument *A = dyn_cast<Argument>(V))
2051 return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr();
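// Illustrative note (editor-added): for "define void @f(i32* nonnull %p, i32* byval %q)",
// both %p and %q are known non-null inside @f.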
2053 // Global values are not null unless extern weak.
2054 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2055 return !GV->hasExternalWeakLinkage();
2057 if (ImmutableCallSite CS = V)
2058 if (CS.isReturnNonNull()) return true;
2061 // operator new never returns null.
2062 if (isOperatorNewLikeFn(V, TLI, /*LookThroughBitCast=*/true))
  return true;

return false;
}