1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
// This file contains routines that help analyze properties that chains of
// computations have.
//
13 //===----------------------------------------------------------------------===//
15 #include "llvm/Analysis/ValueTracking.h"
16 #include "llvm/ADT/SmallPtrSet.h"
17 #include "llvm/Analysis/InstructionSimplify.h"
18 #include "llvm/Analysis/MemoryBuiltins.h"
19 #include "llvm/IR/CallSite.h"
20 #include "llvm/IR/ConstantRange.h"
21 #include "llvm/IR/Constants.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/GetElementPtrTypeIterator.h"
24 #include "llvm/IR/GlobalAlias.h"
25 #include "llvm/IR/GlobalVariable.h"
26 #include "llvm/IR/Instructions.h"
27 #include "llvm/IR/IntrinsicInst.h"
28 #include "llvm/IR/LLVMContext.h"
29 #include "llvm/IR/Metadata.h"
30 #include "llvm/IR/Operator.h"
31 #include "llvm/IR/PatternMatch.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/MathExtras.h"
using namespace llvm;
using namespace llvm::PatternMatch;
38 const unsigned MaxDepth = 6;
40 /// getBitWidth - Returns the bitwidth of the given scalar or pointer type (if
41 /// unknown returns 0). For vector types, returns the element type's bitwidth.
42 static unsigned getBitWidth(Type *Ty, const DataLayout *TD) {
if (unsigned BitWidth = Ty->getScalarSizeInBits())
  return BitWidth;

return TD ? TD->getPointerTypeSizeInBits(Ty) : 0;
49 static void computeKnownBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
50 APInt &KnownZero, APInt &KnownOne,
51 APInt &KnownZero2, APInt &KnownOne2,
52 const DataLayout *TD, unsigned Depth) {
54 if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
// We know that the top bits of C-X are clear if X contains fewer bits
56 // than C (i.e. no wrap-around can happen). For example, 20-X is
57 // positive if we can prove that X is >= 0 and < 16.
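// Illustrative example (not in the original source): with 8-bit values,
// C = 20 gives NLZ = countLeadingZeros(21) = 3, so MaskV covers the top
// 4 bits. If X is known to be in [0, 16), those 4 bits of X are known
// zero, and 20 - X lies in (4, 20], so its top 3 bits are known zero.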
58 if (!CLHS->getValue().isNegative()) {
59 unsigned BitWidth = KnownZero.getBitWidth();
60 unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
61 // NLZ can't be BitWidth with no sign bit
62 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
63 llvm::computeKnownBits(Op1, KnownZero2, KnownOne2, TD, Depth+1);
// If all of the MaskV bits are known to be zero, then we know the
// output top bits are zero, because we now know that the output is
// positive.
68 if ((KnownZero2 & MaskV) == MaskV) {
69 unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
70 // Top bits known zero.
71 KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
77 unsigned BitWidth = KnownZero.getBitWidth();
79 // If one of the operands has trailing zeros, then the bits that the
80 // other operand has in those bit positions will be preserved in the
81 // result. For an add, this works with either operand. For a subtract,
82 // this only works if the known zeros are in the right operand.
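// Illustrative example (not in the original): if Op0 is known to have its
// low 4 bits zero, then the low 4 bits of Op0 + Op1 are exactly the low
// 4 bits of Op1, since no carry can propagate into them from below.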
83 APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
84 llvm::computeKnownBits(Op0, LHSKnownZero, LHSKnownOne, TD, Depth+1);
85 unsigned LHSKnownZeroOut = LHSKnownZero.countTrailingOnes();
87 llvm::computeKnownBits(Op1, KnownZero2, KnownOne2, TD, Depth+1);
88 unsigned RHSKnownZeroOut = KnownZero2.countTrailingOnes();
90 // Determine which operand has more trailing zeros, and use that
91 // many bits from the other operand.
92 if (LHSKnownZeroOut > RHSKnownZeroOut) {
94 APInt Mask = APInt::getLowBitsSet(BitWidth, LHSKnownZeroOut);
95 KnownZero |= KnownZero2 & Mask;
96 KnownOne |= KnownOne2 & Mask;
98 // If the known zeros are in the left operand for a subtract,
99 // fall back to the minimum known zeros in both operands.
KnownZero |= APInt::getLowBitsSet(BitWidth,
                                  std::min(LHSKnownZeroOut,
                                           RHSKnownZeroOut));
104 } else if (RHSKnownZeroOut >= LHSKnownZeroOut) {
105 APInt Mask = APInt::getLowBitsSet(BitWidth, RHSKnownZeroOut);
106 KnownZero |= LHSKnownZero & Mask;
107 KnownOne |= LHSKnownOne & Mask;
110 // Are we still trying to solve for the sign bit?
111 if (!KnownZero.isNegative() && !KnownOne.isNegative()) {
114 // Adding two positive numbers can't wrap into negative
115 if (LHSKnownZero.isNegative() && KnownZero2.isNegative())
116 KnownZero |= APInt::getSignBit(BitWidth);
117 // and adding two negative numbers can't wrap into positive.
118 else if (LHSKnownOne.isNegative() && KnownOne2.isNegative())
119 KnownOne |= APInt::getSignBit(BitWidth);
121 // Subtracting a negative number from a positive one can't wrap
122 if (LHSKnownZero.isNegative() && KnownOne2.isNegative())
123 KnownZero |= APInt::getSignBit(BitWidth);
124 // neither can subtracting a positive number from a negative one.
125 else if (LHSKnownOne.isNegative() && KnownZero2.isNegative())
126 KnownOne |= APInt::getSignBit(BitWidth);
132 static void computeKnownBitsMul(Value *Op0, Value *Op1, bool NSW,
133 APInt &KnownZero, APInt &KnownOne,
134 APInt &KnownZero2, APInt &KnownOne2,
135 const DataLayout *TD, unsigned Depth) {
136 unsigned BitWidth = KnownZero.getBitWidth();
137 computeKnownBits(Op1, KnownZero, KnownOne, TD, Depth+1);
138 computeKnownBits(Op0, KnownZero2, KnownOne2, TD, Depth+1);
140 bool isKnownNegative = false;
141 bool isKnownNonNegative = false;
142 // If the multiplication is known not to overflow, compute the sign bit.
145 // The product of a number with itself is non-negative.
146 isKnownNonNegative = true;
148 bool isKnownNonNegativeOp1 = KnownZero.isNegative();
149 bool isKnownNonNegativeOp0 = KnownZero2.isNegative();
150 bool isKnownNegativeOp1 = KnownOne.isNegative();
151 bool isKnownNegativeOp0 = KnownOne2.isNegative();
152 // The product of two numbers with the same sign is non-negative.
153 isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
154 (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
// The product of a negative number and a non-negative number is either
// negative or zero.
157 if (!isKnownNonNegative)
158 isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
159 isKnownNonZero(Op0, TD, Depth)) ||
160 (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
161 isKnownNonZero(Op1, TD, Depth));
165 // If low bits are zero in either operand, output low known-0 bits.
// Also compute a conservative estimate for high known-0 bits.
167 // More trickiness is possible, but this is sufficient for the
168 // interesting case of alignment computation.
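// Illustrative example (not in the original): multiplying a value with 2
// known trailing zeros by one with 3 known trailing zeros (e.g. 12 * 8)
// yields at least 2 + 3 = 5 trailing zeros (12 * 8 = 96 = 0b1100000).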
169 KnownOne.clearAllBits();
170 unsigned TrailZ = KnownZero.countTrailingOnes() +
171 KnownZero2.countTrailingOnes();
172 unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
173 KnownZero2.countLeadingOnes(),
174 BitWidth) - BitWidth;
176 TrailZ = std::min(TrailZ, BitWidth);
177 LeadZ = std::min(LeadZ, BitWidth);
178 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
179 APInt::getHighBitsSet(BitWidth, LeadZ);
181 // Only make use of no-wrap flags if we failed to compute the sign bit
182 // directly. This matters if the multiplication always overflows, in
183 // which case we prefer to follow the result of the direct computation,
184 // though as the program is invoking undefined behaviour we can choose
185 // whatever we like here.
186 if (isKnownNonNegative && !KnownOne.isNegative())
187 KnownZero.setBit(BitWidth - 1);
188 else if (isKnownNegative && !KnownZero.isNegative())
189 KnownOne.setBit(BitWidth - 1);
192 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
194 unsigned BitWidth = KnownZero.getBitWidth();
195 unsigned NumRanges = Ranges.getNumOperands() / 2;
196 assert(NumRanges >= 1);
198 // Use the high end of the ranges to find leading zeros.
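// Illustrative example (not in the original): a single !range pair
// [0, 16) on an i8 value means every possible value fits in 4 bits,
// so the top 4 bits are reported as known zero.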
199 unsigned MinLeadingZeros = BitWidth;
200 for (unsigned i = 0; i < NumRanges; ++i) {
201 ConstantInt *Lower = cast<ConstantInt>(Ranges.getOperand(2*i + 0));
202 ConstantInt *Upper = cast<ConstantInt>(Ranges.getOperand(2*i + 1));
203 ConstantRange Range(Lower->getValue(), Upper->getValue());
204 if (Range.isWrappedSet())
205 MinLeadingZeros = 0; // -1 has no zeros
206 unsigned LeadingZeros = (Upper->getValue() - 1).countLeadingZeros();
207 MinLeadingZeros = std::min(LeadingZeros, MinLeadingZeros);
210 KnownZero = APInt::getHighBitsSet(BitWidth, MinLeadingZeros);
213 /// Determine which bits of V are known to be either zero or one and return
214 /// them in the KnownZero/KnownOne bit sets.
216 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
217 /// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero. If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
220 /// Because instcombine aggressively folds operations with undef args anyway,
221 /// this won't lose us code quality.
223 /// This function is defined on values with integer type, values with pointer
224 /// type (but only if TD is non-null), and vectors of integers. In the case
/// where V is a vector, the known zero and known one values are the
/// same width as the vector element, and the bit is set only if it is true
227 /// for all of the elements in the vector.
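///
/// Illustrative use (a sketch, not part of the original comment; BW stands
/// for the value's bit width and the Depth parameter is left at its default):
///   APInt KnownZero(BW, 0), KnownOne(BW, 0);
///   computeKnownBits(V, KnownZero, KnownOne, TD);
///   bool LowBitKnownClear = KnownZero[0];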
228 void llvm::computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
229 const DataLayout *TD, unsigned Depth) {
230 assert(V && "No Value?");
231 assert(Depth <= MaxDepth && "Limit Search Depth");
232 unsigned BitWidth = KnownZero.getBitWidth();
234 assert((V->getType()->isIntOrIntVectorTy() ||
235 V->getType()->getScalarType()->isPointerTy()) &&
236 "Not integer or pointer type!");
assert((!TD ||
        TD->getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
239 (!V->getType()->isIntOrIntVectorTy() ||
240 V->getType()->getScalarSizeInBits() == BitWidth) &&
241 KnownZero.getBitWidth() == BitWidth &&
242 KnownOne.getBitWidth() == BitWidth &&
243 "V, KnownOne and KnownZero should have same BitWidth");
245 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
246 // We know all of the bits for a constant!
247 KnownOne = CI->getValue();
248 KnownZero = ~KnownOne;
251 // Null and aggregate-zero are all-zeros.
252 if (isa<ConstantPointerNull>(V) ||
253 isa<ConstantAggregateZero>(V)) {
254 KnownOne.clearAllBits();
255 KnownZero = APInt::getAllOnesValue(BitWidth);
258 // Handle a constant vector by taking the intersection of the known bits of
259 // each element. There is no real need to handle ConstantVector here, because
260 // we don't handle undef in any particularly useful way.
261 if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
// We know that CDS must be a vector of integers. Take the intersection of
// each element's known bits.
264 KnownZero.setAllBits(); KnownOne.setAllBits();
265 APInt Elt(KnownZero.getBitWidth(), 0);
266 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
267 Elt = CDS->getElementAsInteger(i);
274 // The address of an aligned GlobalValue has trailing zeros.
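// Illustrative example (not in the original): a global known to be
// 16-byte aligned has its low log2(16) = 4 address bits known zero.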
275 if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
276 unsigned Align = GV->getAlignment();
277 if (Align == 0 && TD) {
278 if (GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV)) {
279 Type *ObjectType = GVar->getType()->getElementType();
280 if (ObjectType->isSized()) {
281 // If the object is defined in the current Module, we'll be giving
282 // it the preferred alignment. Otherwise, we have to assume that it
283 // may only have the minimum ABI alignment.
284 if (!GVar->isDeclaration() && !GVar->isWeakForLinker())
285 Align = TD->getPreferredAlignment(GVar);
287 Align = TD->getABITypeAlignment(ObjectType);
292 KnownZero = APInt::getLowBitsSet(BitWidth,
293 countTrailingZeros(Align));
295 KnownZero.clearAllBits();
296 KnownOne.clearAllBits();
299 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
300 // the bits of its aliasee.
301 if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
302 if (GA->mayBeOverridden()) {
303 KnownZero.clearAllBits(); KnownOne.clearAllBits();
305 computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, TD, Depth+1);
310 if (Argument *A = dyn_cast<Argument>(V)) {
311 unsigned Align = A->getType()->isPointerTy() ? A->getParamAlignment() : 0;
313 if (!Align && TD && A->hasStructRetAttr()) {
314 // An sret parameter has at least the ABI alignment of the return type.
315 Type *EltTy = cast<PointerType>(A->getType())->getElementType();
316 if (EltTy->isSized())
317 Align = TD->getABITypeAlignment(EltTy);
321 KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
325 // Start out not knowing anything.
326 KnownZero.clearAllBits(); KnownOne.clearAllBits();
328 if (Depth == MaxDepth)
329 return; // Limit search depth.
331 Operator *I = dyn_cast<Operator>(V);
334 APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
335 switch (I->getOpcode()) {
337 case Instruction::Load:
338 if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
339 computeKnownBitsFromRangeMetadata(*MD, KnownZero);
341 case Instruction::And: {
342 // If either the LHS or the RHS are Zero, the result is zero.
343 computeKnownBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
344 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
346 // Output known-1 bits are only known if set in both the LHS & RHS.
347 KnownOne &= KnownOne2;
348 // Output known-0 are known to be clear if zero in either the LHS | RHS.
349 KnownZero |= KnownZero2;
352 case Instruction::Or: {
353 computeKnownBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
354 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
356 // Output known-0 bits are only known if clear in both the LHS & RHS.
357 KnownZero &= KnownZero2;
358 // Output known-1 are known to be set if set in either the LHS | RHS.
359 KnownOne |= KnownOne2;
362 case Instruction::Xor: {
363 computeKnownBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
364 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
366 // Output known-0 bits are known if clear or set in both the LHS & RHS.
367 APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
368 // Output known-1 are known to be set if set in only one of the LHS, RHS.
369 KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
370 KnownZero = KnownZeroOut;
373 case Instruction::Mul: {
374 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
375 computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW,
376 KnownZero, KnownOne, KnownZero2, KnownOne2, TD, Depth);
379 case Instruction::UDiv: {
380 // For the purposes of computing leading zeros we can conservatively
381 // treat a udiv as a logical right shift by the power of 2 known to
382 // be less than the denominator.
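// Illustrative example (not in the original): if the numerator has 8 known
// leading zero bits and the denominator's highest known one bit is at
// position 3 (so it is at least 8), the quotient has at least 8 + 3 = 11
// known leading zeros.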
383 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
384 unsigned LeadZ = KnownZero2.countLeadingOnes();
386 KnownOne2.clearAllBits();
387 KnownZero2.clearAllBits();
388 computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, TD, Depth+1);
389 unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
390 if (RHSUnknownLeadingOnes != BitWidth)
391 LeadZ = std::min(BitWidth,
392 LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
394 KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
397 case Instruction::Select:
398 computeKnownBits(I->getOperand(2), KnownZero, KnownOne, TD, Depth+1);
399 computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, TD,
402 // Only known if known in both the LHS and RHS.
403 KnownOne &= KnownOne2;
404 KnownZero &= KnownZero2;
406 case Instruction::FPTrunc:
407 case Instruction::FPExt:
408 case Instruction::FPToUI:
409 case Instruction::FPToSI:
410 case Instruction::SIToFP:
411 case Instruction::UIToFP:
412 break; // Can't work with floating point.
413 case Instruction::PtrToInt:
414 case Instruction::IntToPtr:
415 case Instruction::AddrSpaceCast: // Pointers could be different sizes.
416 // We can't handle these if we don't know the pointer size.
418 // FALL THROUGH and handle them the same as zext/trunc.
419 case Instruction::ZExt:
420 case Instruction::Trunc: {
421 Type *SrcTy = I->getOperand(0)->getType();
423 unsigned SrcBitWidth;
424 // Note that we handle pointer operands here because of inttoptr/ptrtoint
425 // which fall through here.
427 SrcBitWidth = TD->getTypeSizeInBits(SrcTy->getScalarType());
429 SrcBitWidth = SrcTy->getScalarSizeInBits();
430 if (!SrcBitWidth) break;
433 assert(SrcBitWidth && "SrcBitWidth can't be zero");
434 KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
435 KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
436 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
437 KnownZero = KnownZero.zextOrTrunc(BitWidth);
438 KnownOne = KnownOne.zextOrTrunc(BitWidth);
439 // Any top bits are known to be zero.
440 if (BitWidth > SrcBitWidth)
441 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
444 case Instruction::BitCast: {
445 Type *SrcTy = I->getOperand(0)->getType();
446 if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
447 // TODO: For now, not handling conversions like:
448 // (bitcast i64 %x to <2 x i32>)
449 !I->getType()->isVectorTy()) {
450 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
455 case Instruction::SExt: {
456 // Compute the bits in the result that are not present in the input.
457 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
459 KnownZero = KnownZero.trunc(SrcBitWidth);
460 KnownOne = KnownOne.trunc(SrcBitWidth);
461 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
462 KnownZero = KnownZero.zext(BitWidth);
463 KnownOne = KnownOne.zext(BitWidth);
465 // If the sign bit of the input is known set or clear, then we know the
466 // top bits of the result.
467 if (KnownZero[SrcBitWidth-1]) // Input sign bit known zero
468 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
469 else if (KnownOne[SrcBitWidth-1]) // Input sign bit known set
470 KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
473 case Instruction::Shl:
474 // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
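// Illustrative example (not in the original): for shl X, 3 the known bits
// of X simply move up by 3 positions and the low 3 result bits are known
// to be zero.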
475 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
476 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
477 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
478 KnownZero <<= ShiftAmt;
479 KnownOne <<= ShiftAmt;
480 KnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt); // low bits known 0
484 case Instruction::LShr:
485 // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
486 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
487 // Compute the new bits that are at the top now.
488 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
490 // Unsigned shift right.
491 computeKnownBits(I->getOperand(0), KnownZero,KnownOne, TD, Depth+1);
492 KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
493 KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
494 // high bits known zero.
495 KnownZero |= APInt::getHighBitsSet(BitWidth, ShiftAmt);
499 case Instruction::AShr:
500 // (ashr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
501 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
502 // Compute the new bits that are at the top now.
503 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
505 // Signed shift right.
506 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
507 KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
508 KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
510 APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
511 if (KnownZero[BitWidth-ShiftAmt-1]) // New bits are known zero.
512 KnownZero |= HighBits;
513 else if (KnownOne[BitWidth-ShiftAmt-1]) // New bits are known one.
514 KnownOne |= HighBits;
518 case Instruction::Sub: {
519 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
520 computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
521 KnownZero, KnownOne, KnownZero2, KnownOne2, TD,
525 case Instruction::Add: {
526 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
527 computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
528 KnownZero, KnownOne, KnownZero2, KnownOne2, TD,
532 case Instruction::SRem:
533 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
534 APInt RA = Rem->getValue().abs();
535 if (RA.isPowerOf2()) {
536 APInt LowBits = RA - 1;
537 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
539 // The low bits of the first operand are unchanged by the srem.
540 KnownZero = KnownZero2 & LowBits;
541 KnownOne = KnownOne2 & LowBits;
543 // If the first operand is non-negative or has all low bits zero, then
544 // the upper bits are all zero.
545 if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
546 KnownZero |= ~LowBits;
548 // If the first operand is negative and not all low bits are zero, then
549 // the upper bits are all one.
550 if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
551 KnownOne |= ~LowBits;
553 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
557 // The sign bit is the LHS's sign bit, except when the result of the
558 // remainder is zero.
559 if (KnownZero.isNonNegative()) {
560 APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
561 computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, TD,
563 // If it's known zero, our sign bit is also zero.
564 if (LHSKnownZero.isNegative())
565 KnownZero.setBit(BitWidth - 1);
569 case Instruction::URem: {
570 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
571 APInt RA = Rem->getValue();
572 if (RA.isPowerOf2()) {
573 APInt LowBits = (RA - 1);
574 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD,
576 KnownZero |= ~LowBits;
582 // Since the result is less than or equal to either operand, any leading
583 // zero bits in either operand must also exist in the result.
584 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
585 computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, TD, Depth+1);
587 unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
588 KnownZero2.countLeadingOnes());
589 KnownOne.clearAllBits();
590 KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
594 case Instruction::Alloca: {
595 AllocaInst *AI = cast<AllocaInst>(V);
596 unsigned Align = AI->getAlignment();
597 if (Align == 0 && TD)
598 Align = TD->getABITypeAlignment(AI->getType()->getElementType());
601 KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
604 case Instruction::GetElementPtr: {
605 // Analyze all of the subscripts of this getelementptr instruction
606 // to determine if we can prove known low zero bits.
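// Illustrative example (not in the original): a GEP from a 16-byte aligned
// base with one arbitrary index into an array of i32 yields
// min(4, countTrailingZeros(4) + 0) = 2 known low zero bits.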
607 APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
608 computeKnownBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, TD,
610 unsigned TrailZ = LocalKnownZero.countTrailingOnes();
612 gep_type_iterator GTI = gep_type_begin(I);
613 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
614 Value *Index = I->getOperand(i);
615 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
616 // Handle struct member offset arithmetic.
622 // Handle case when index is vector zeroinitializer
623 Constant *CIndex = cast<Constant>(Index);
624 if (CIndex->isZeroValue())
627 if (CIndex->getType()->isVectorTy())
628 Index = CIndex->getSplatValue();
630 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
631 const StructLayout *SL = TD->getStructLayout(STy);
632 uint64_t Offset = SL->getElementOffset(Idx);
633 TrailZ = std::min<unsigned>(TrailZ,
634 countTrailingZeros(Offset));
636 // Handle array index arithmetic.
637 Type *IndexedTy = GTI.getIndexedType();
638 if (!IndexedTy->isSized()) {
642 unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
643 uint64_t TypeSize = TD ? TD->getTypeAllocSize(IndexedTy) : 1;
644 LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
645 computeKnownBits(Index, LocalKnownZero, LocalKnownOne, TD, Depth+1);
646 TrailZ = std::min(TrailZ,
647 unsigned(countTrailingZeros(TypeSize) +
648 LocalKnownZero.countTrailingOnes()));
652 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ);
655 case Instruction::PHI: {
656 PHINode *P = cast<PHINode>(I);
657 // Handle the case of a simple two-predecessor recurrence PHI.
658 // There's a lot more that could theoretically be done here, but
659 // this is sufficient to catch some interesting cases.
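// Illustrative example (not in the original; %iv and %iv.next are
// hypothetical names):
//   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
//   %iv.next = add i32 %iv, 4
// Both incoming values keep their low two bits zero, so %iv is known to
// be a multiple of 4.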
660 if (P->getNumIncomingValues() == 2) {
661 for (unsigned i = 0; i != 2; ++i) {
662 Value *L = P->getIncomingValue(i);
663 Value *R = P->getIncomingValue(!i);
664 Operator *LU = dyn_cast<Operator>(L);
667 unsigned Opcode = LU->getOpcode();
668 // Check for operations that have the property that if
669 // both their operands have low zero bits, the result
670 // will have low zero bits.
671 if (Opcode == Instruction::Add ||
672 Opcode == Instruction::Sub ||
673 Opcode == Instruction::And ||
674 Opcode == Instruction::Or ||
675 Opcode == Instruction::Mul) {
676 Value *LL = LU->getOperand(0);
677 Value *LR = LU->getOperand(1);
678 // Find a recurrence.
// Ok, we have a PHI of the form L op= R. Check for low
// zero bits.
687 computeKnownBits(R, KnownZero2, KnownOne2, TD, Depth+1);
689 // We need to take the minimum number of known bits
690 APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
691 computeKnownBits(L, KnownZero3, KnownOne3, TD, Depth+1);
693 KnownZero = APInt::getLowBitsSet(BitWidth,
694 std::min(KnownZero2.countTrailingOnes(),
695 KnownZero3.countTrailingOnes()));
701 // Unreachable blocks may have zero-operand PHI nodes.
702 if (P->getNumIncomingValues() == 0)
705 // Otherwise take the unions of the known bit sets of the operands,
706 // taking conservative care to avoid excessive recursion.
707 if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
// Skip if every incoming value refers back to the PHI itself.
709 if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
712 KnownZero = APInt::getAllOnesValue(BitWidth);
713 KnownOne = APInt::getAllOnesValue(BitWidth);
714 for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i) {
715 // Skip direct self references.
716 if (P->getIncomingValue(i) == P) continue;
718 KnownZero2 = APInt(BitWidth, 0);
719 KnownOne2 = APInt(BitWidth, 0);
720 // Recurse, but cap the recursion to one level, because we don't
721 // want to waste time spinning around in loops.
722 computeKnownBits(P->getIncomingValue(i), KnownZero2, KnownOne2, TD,
724 KnownZero &= KnownZero2;
725 KnownOne &= KnownOne2;
// If all bits have been ruled out, there's no need to check
// more operands.
728 if (!KnownZero && !KnownOne)
734 case Instruction::Call:
735 case Instruction::Invoke:
736 if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
737 computeKnownBitsFromRangeMetadata(*MD, KnownZero);
// If range metadata is attached to this IntrinsicInst, intersect the
// explicit range specified by the metadata and the implicit range of
// the intrinsic.
741 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
742 switch (II->getIntrinsicID()) {
744 case Intrinsic::ctlz:
745 case Intrinsic::cttz: {
746 unsigned LowBits = Log2_32(BitWidth)+1;
747 // If this call is undefined for 0, the result will be less than 2^n.
748 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
750 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
753 case Intrinsic::ctpop: {
754 unsigned LowBits = Log2_32(BitWidth)+1;
755 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
758 case Intrinsic::x86_sse42_crc32_64_64:
759 KnownZero |= APInt::getHighBitsSet(64, 32);
764 case Instruction::ExtractValue:
765 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
766 ExtractValueInst *EVI = cast<ExtractValueInst>(I);
767 if (EVI->getNumIndices() != 1) break;
768 if (EVI->getIndices()[0] == 0) {
769 switch (II->getIntrinsicID()) {
771 case Intrinsic::uadd_with_overflow:
772 case Intrinsic::sadd_with_overflow:
773 computeKnownBitsAddSub(true, II->getArgOperand(0),
774 II->getArgOperand(1), false, KnownZero,
775 KnownOne, KnownZero2, KnownOne2, TD, Depth);
777 case Intrinsic::usub_with_overflow:
778 case Intrinsic::ssub_with_overflow:
779 computeKnownBitsAddSub(false, II->getArgOperand(0),
780 II->getArgOperand(1), false, KnownZero,
781 KnownOne, KnownZero2, KnownOne2, TD, Depth);
783 case Intrinsic::umul_with_overflow:
784 case Intrinsic::smul_with_overflow:
785 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1),
786 false, KnownZero, KnownOne,
787 KnownZero2, KnownOne2, TD, Depth);
794 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
797 /// ComputeSignBit - Determine whether the sign bit is known to be zero or
798 /// one. Convenience wrapper around computeKnownBits.
799 void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
800 const DataLayout *TD, unsigned Depth) {
801 unsigned BitWidth = getBitWidth(V->getType(), TD);
807 APInt ZeroBits(BitWidth, 0);
808 APInt OneBits(BitWidth, 0);
809 computeKnownBits(V, ZeroBits, OneBits, TD, Depth);
810 KnownOne = OneBits[BitWidth - 1];
811 KnownZero = ZeroBits[BitWidth - 1];
/// isKnownToBeAPowerOfTwo - Return true if the given value is known to have
/// exactly one bit set when defined. For vectors return true if every element
/// is known to be a power of two when defined. Supports values with integer
/// or pointer types and vectors of integers.
818 bool llvm::isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth) {
819 if (Constant *C = dyn_cast<Constant>(V)) {
820 if (C->isNullValue())
822 if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
823 return CI->getValue().isPowerOf2();
824 // TODO: Handle vector constants.
827 // 1 << X is clearly a power of two if the one is not shifted off the end. If
828 // it is shifted off the end then the result is undefined.
829 if (match(V, m_Shl(m_One(), m_Value())))
832 // (signbit) >>l X is clearly a power of two if the one is not shifted off the
833 // bottom. If it is shifted off the bottom then the result is undefined.
834 if (match(V, m_LShr(m_SignBit(), m_Value())))
837 // The remaining tests are all recursive, so bail out if we hit the limit.
838 if (Depth++ == MaxDepth)
841 Value *X = nullptr, *Y = nullptr;
842 // A shift of a power of two is a power of two or zero.
843 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
844 match(V, m_Shr(m_Value(X), m_Value()))))
845 return isKnownToBeAPowerOfTwo(X, /*OrZero*/true, Depth);
847 if (ZExtInst *ZI = dyn_cast<ZExtInst>(V))
848 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth);
850 if (SelectInst *SI = dyn_cast<SelectInst>(V))
851 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth) &&
852 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth);
854 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
855 // A power of two and'd with anything is a power of two or zero.
856 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/true, Depth) ||
857 isKnownToBeAPowerOfTwo(Y, /*OrZero*/true, Depth))
859 // X & (-X) is always a power of two or zero.
860 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
865 // Adding a power-of-two or zero to the same power-of-two or zero yields
866 // either the original power-of-two, a larger power-of-two or zero.
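// Illustrative example (not in the original): with i8 values, 8 + 0 = 8,
// 8 + 8 = 16, and with wrapping 128 + 128 = 0; all three results are a
// power of two or zero.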
867 if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
868 OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
869 if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
870 if (match(X, m_And(m_Specific(Y), m_Value())) ||
871 match(X, m_And(m_Value(), m_Specific(Y))))
872 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth))
874 if (match(Y, m_And(m_Specific(X), m_Value())) ||
875 match(Y, m_And(m_Value(), m_Specific(X))))
876 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth))
879 unsigned BitWidth = V->getType()->getScalarSizeInBits();
880 APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0);
881 computeKnownBits(X, LHSZeroBits, LHSOneBits, nullptr, Depth);
883 APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0);
884 computeKnownBits(Y, RHSZeroBits, RHSOneBits, nullptr, Depth);
885 // If i8 V is a power of two or zero:
886 // ZeroBits: 1 1 1 0 1 1 1 1
887 // ~ZeroBits: 0 0 0 1 0 0 0 0
888 if ((~(LHSZeroBits & RHSZeroBits)).isPowerOf2())
889 // If OrZero isn't set, we cannot give back a zero result.
890 // Make sure either the LHS or RHS has a bit set.
891 if (OrZero || RHSOneBits.getBoolValue() || LHSOneBits.getBoolValue())
896 // An exact divide or right shift can only shift off zero bits, so the result
897 // is a power of two only if the first operand is a power of two and not
898 // copying a sign bit (sdiv int_min, 2).
899 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
900 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
901 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero, Depth);
907 /// \brief Test whether a GEP's result is known to be non-null.
/// Uses properties inherent in a GEP to try to determine whether it is known
/// to be non-null.
///
912 /// Currently this routine does not support vector GEPs.
913 static bool isGEPKnownNonNull(GEPOperator *GEP, const DataLayout *DL,
915 if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
918 // FIXME: Support vector-GEPs.
919 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
921 // If the base pointer is non-null, we cannot walk to a null address with an
922 // inbounds GEP in address space zero.
923 if (isKnownNonZero(GEP->getPointerOperand(), DL, Depth))
926 // Past this, if we don't have DataLayout, we can't do much.
930 // Walk the GEP operands and see if any operand introduces a non-zero offset.
931 // If so, then the GEP cannot produce a null pointer, as doing so would
932 // inherently violate the inbounds contract within address space zero.
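// Illustrative example (not in the original): an inbounds GEP in address
// space zero that selects a struct field at a non-zero byte offset cannot
// yield null, even when nothing is known about the base pointer itself.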
933 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
935 // Struct types are easy -- they must always be indexed by a constant.
936 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
937 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
938 unsigned ElementIdx = OpC->getZExtValue();
939 const StructLayout *SL = DL->getStructLayout(STy);
940 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
941 if (ElementOffset > 0)
946 // If we have a zero-sized type, the index doesn't matter. Keep looping.
947 if (DL->getTypeAllocSize(GTI.getIndexedType()) == 0)
950 // Fast path the constant operand case both for efficiency and so we don't
951 // increment Depth when just zipping down an all-constant GEP.
952 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
958 // We post-increment Depth here because while isKnownNonZero increments it
959 // as well, when we pop back up that increment won't persist. We don't want
960 // to recurse 10k times just because we have 10k GEP operands. We don't
961 // bail completely out because we want to handle constant GEPs regardless
963 if (Depth++ >= MaxDepth)
966 if (isKnownNonZero(GTI.getOperand(), DL, Depth))
973 /// isKnownNonZero - Return true if the given value is known to be non-zero
974 /// when defined. For vectors return true if every element is known to be
975 /// non-zero when defined. Supports values with integer or pointer type and
976 /// vectors of integers.
977 bool llvm::isKnownNonZero(Value *V, const DataLayout *TD, unsigned Depth) {
978 if (Constant *C = dyn_cast<Constant>(V)) {
979 if (C->isNullValue())
981 if (isa<ConstantInt>(C))
982 // Must be non-zero due to null test above.
984 // TODO: Handle vectors
988 // The remaining tests are all recursive, so bail out if we hit the limit.
989 if (Depth++ >= MaxDepth)
992 // Check for pointer simplifications.
993 if (V->getType()->isPointerTy()) {
994 if (isKnownNonNull(V))
996 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
997 if (isGEPKnownNonNull(GEP, TD, Depth))
1001 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), TD);
1003 // X | Y != 0 if X != 0 or Y != 0.
1004 Value *X = nullptr, *Y = nullptr;
1005 if (match(V, m_Or(m_Value(X), m_Value(Y))))
1006 return isKnownNonZero(X, TD, Depth) || isKnownNonZero(Y, TD, Depth);
1008 // ext X != 0 if X != 0.
1009 if (isa<SExtInst>(V) || isa<ZExtInst>(V))
1010 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), TD, Depth);
1012 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
1013 // if the lowest bit is shifted off the end.
1014 if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
1015 // shl nuw can't remove any non-zero bits.
1016 OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1017 if (BO->hasNoUnsignedWrap())
1018 return isKnownNonZero(X, TD, Depth);
1020 APInt KnownZero(BitWidth, 0);
1021 APInt KnownOne(BitWidth, 0);
1022 computeKnownBits(X, KnownZero, KnownOne, TD, Depth);
1026 // shr X, Y != 0 if X is negative. Note that the value of the shift is not
1027 // defined if the sign bit is shifted off the end.
1028 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
1029 // shr exact can only shift out zero bits.
1030 PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
1032 return isKnownNonZero(X, TD, Depth);
1034 bool XKnownNonNegative, XKnownNegative;
1035 ComputeSignBit(X, XKnownNonNegative, XKnownNegative, TD, Depth);
1039 // div exact can only produce a zero if the dividend is zero.
1040 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
1041 return isKnownNonZero(X, TD, Depth);
1044 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1045 bool XKnownNonNegative, XKnownNegative;
1046 bool YKnownNonNegative, YKnownNegative;
1047 ComputeSignBit(X, XKnownNonNegative, XKnownNegative, TD, Depth);
1048 ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, TD, Depth);
1050 // If X and Y are both non-negative (as signed values) then their sum is not
1051 // zero unless both X and Y are zero.
1052 if (XKnownNonNegative && YKnownNonNegative)
1053 if (isKnownNonZero(X, TD, Depth) || isKnownNonZero(Y, TD, Depth))
1056 // If X and Y are both negative (as signed values) then their sum is not
1057 // zero unless both X and Y equal INT_MIN.
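// Illustrative example (not in the original): for i8, (-128) + (-128)
// wraps to 0, but any other pair of negative values sums to a non-zero
// result modulo 256.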
1058 if (BitWidth && XKnownNegative && YKnownNegative) {
1059 APInt KnownZero(BitWidth, 0);
1060 APInt KnownOne(BitWidth, 0);
1061 APInt Mask = APInt::getSignedMaxValue(BitWidth);
// The sign bit of X is set. If some other bit is set then X is not equal
// to INT_MIN.
1064 computeKnownBits(X, KnownZero, KnownOne, TD, Depth);
1065 if ((KnownOne & Mask) != 0)
// The sign bit of Y is set. If some other bit is set then Y is not equal
// to INT_MIN.
1069 computeKnownBits(Y, KnownZero, KnownOne, TD, Depth);
1070 if ((KnownOne & Mask) != 0)
1074 // The sum of a non-negative number and a power of two is not zero.
1075 if (XKnownNonNegative && isKnownToBeAPowerOfTwo(Y, /*OrZero*/false, Depth))
1077 if (YKnownNonNegative && isKnownToBeAPowerOfTwo(X, /*OrZero*/false, Depth))
1081 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
1082 OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1083 // If X and Y are non-zero then so is X * Y as long as the multiplication
1084 // does not overflow.
1085 if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
1086 isKnownNonZero(X, TD, Depth) && isKnownNonZero(Y, TD, Depth))
1089 // (C ? X : Y) != 0 if X != 0 and Y != 0.
1090 else if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
1091 if (isKnownNonZero(SI->getTrueValue(), TD, Depth) &&
1092 isKnownNonZero(SI->getFalseValue(), TD, Depth))
1096 if (!BitWidth) return false;
1097 APInt KnownZero(BitWidth, 0);
1098 APInt KnownOne(BitWidth, 0);
1099 computeKnownBits(V, KnownZero, KnownOne, TD, Depth);
1100 return KnownOne != 0;
1103 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
1104 /// this predicate to simplify operations downstream. Mask is known to be zero
1105 /// for bits that V cannot have.
1107 /// This function is defined on values with integer type, values with pointer
1108 /// type (but only if TD is non-null), and vectors of integers. In the case
1109 /// where V is a vector, the mask, known zero, and known one values are the
1110 /// same width as the vector element, and the bit is set only if it is true
1111 /// for all of the elements in the vector.
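///
/// Illustrative use (a sketch, not part of the original comment; BW stands
/// for the value's bit width):
///   MaskedValueIsZero(V, APInt::getLowBitsSet(BW, 2), TD)
/// returns true when the two low bits of V are known zero, i.e. when V is
/// known to be a multiple of 4.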
1112 bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
1113 const DataLayout *TD, unsigned Depth) {
1114 APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
1115 computeKnownBits(V, KnownZero, KnownOne, TD, Depth);
1116 return (KnownZero & Mask) == Mask;
1121 /// ComputeNumSignBits - Return the number of times the sign bit of the
1122 /// register is replicated into the other bits. We know that at least 1 bit
1123 /// is always equal to the sign bit (itself), but other cases can give us
1124 /// information. For example, immediately after an "ashr X, 2", we know that
1125 /// the top 3 bits are all equal to each other, so we return 3.
/// 'V' must have a scalar integer type.
1129 unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout *TD,
1131 assert((TD || V->getType()->isIntOrIntVectorTy()) &&
1132 "ComputeNumSignBits requires a DataLayout object to operate "
1133 "on non-integer values!");
1134 Type *Ty = V->getType();
1135 unsigned TyBits = TD ? TD->getTypeSizeInBits(V->getType()->getScalarType()) :
1136 Ty->getScalarSizeInBits();
1138 unsigned FirstAnswer = 1;
// Note that ConstantInt is handled by the general computeKnownBits case
// below.
1144 return 1; // Limit search depth.
1146 Operator *U = dyn_cast<Operator>(V);
1147 switch (Operator::getOpcode(V)) {
1149 case Instruction::SExt:
1150 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
1151 return ComputeNumSignBits(U->getOperand(0), TD, Depth+1) + Tmp;
1153 case Instruction::AShr: {
1154 Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
1155 // ashr X, C -> adds C sign bits. Vectors too.
1157 if (match(U->getOperand(1), m_APInt(ShAmt))) {
1158 Tmp += ShAmt->getZExtValue();
1159 if (Tmp > TyBits) Tmp = TyBits;
1163 case Instruction::Shl: {
1165 if (match(U->getOperand(1), m_APInt(ShAmt))) {
1166 // shl destroys sign bits.
1167 Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
1168 Tmp2 = ShAmt->getZExtValue();
1169 if (Tmp2 >= TyBits || // Bad shift.
1170 Tmp2 >= Tmp) break; // Shifted all sign bits out.
1175 case Instruction::And:
1176 case Instruction::Or:
1177 case Instruction::Xor: // NOT is handled here.
1178 // Logical binary ops preserve the number of sign bits at the worst.
1179 Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
1181 Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
1182 FirstAnswer = std::min(Tmp, Tmp2);
1183 // We computed what we know about the sign bits as our first
1184 // answer. Now proceed to the generic code that uses
1185 // computeKnownBits, and pick whichever answer is better.
1189 case Instruction::Select:
1190 Tmp = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
1191 if (Tmp == 1) return 1; // Early out.
1192 Tmp2 = ComputeNumSignBits(U->getOperand(2), TD, Depth+1);
1193 return std::min(Tmp, Tmp2);
1195 case Instruction::Add:
1196 // Add can have at most one carry bit. Thus we know that the output
1197 // is, at worst, one more bit than the inputs.
1198 Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
1199 if (Tmp == 1) return 1; // Early out.
1201 // Special case decrementing a value (ADD X, -1):
1202 if (ConstantInt *CRHS = dyn_cast<ConstantInt>(U->getOperand(1)))
1203 if (CRHS->isAllOnesValue()) {
1204 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
1205 computeKnownBits(U->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
// If the input is known to be 0 or 1, the output is 0/-1, which is all
// sign bits set.
1209 if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
1212 // If we are subtracting one from a positive number, there is no carry
1213 // out of the result.
1214 if (KnownZero.isNegative())
1218 Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
1219 if (Tmp2 == 1) return 1;
1220 return std::min(Tmp, Tmp2)-1;
1222 case Instruction::Sub:
1223 Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
1224 if (Tmp2 == 1) return 1;
1227 if (ConstantInt *CLHS = dyn_cast<ConstantInt>(U->getOperand(0)))
1228 if (CLHS->isNullValue()) {
1229 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
1230 computeKnownBits(U->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
// If the input is known to be 0 or 1, the output is 0/-1, which is all
// sign bits set.
1233 if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
1236 // If the input is known to be positive (the sign bit is known clear),
1237 // the output of the NEG has the same number of sign bits as the input.
1238 if (KnownZero.isNegative())
1241 // Otherwise, we treat this like a SUB.
1244 // Sub can have at most one carry bit. Thus we know that the output
1245 // is, at worst, one more bit than the inputs.
1246 Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
1247 if (Tmp == 1) return 1; // Early out.
1248 return std::min(Tmp, Tmp2)-1;
1250 case Instruction::PHI: {
1251 PHINode *PN = cast<PHINode>(U);
1252 // Don't analyze large in-degree PHIs.
1253 if (PN->getNumIncomingValues() > 4) break;
1255 // Take the minimum of all incoming values. This can't infinitely loop
1256 // because of our depth threshold.
1257 Tmp = ComputeNumSignBits(PN->getIncomingValue(0), TD, Depth+1);
1258 for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
1259 if (Tmp == 1) return Tmp;
1261 ComputeNumSignBits(PN->getIncomingValue(i), TD, Depth+1));
1266 case Instruction::Trunc:
1267 // FIXME: it's tricky to do anything useful for this, but it is an important
1268 // case for targets like X86.
1272 // Finally, if we can prove that the top bits of the result are 0's or 1's,
1273 // use this information.
1274 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
1276 computeKnownBits(V, KnownZero, KnownOne, TD, Depth);
1278 if (KnownZero.isNegative()) { // sign bit is 0
1280 } else if (KnownOne.isNegative()) { // sign bit is 1;
1287 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
1288 // the number of identical bits in the top of the input value.
1290 Mask <<= Mask.getBitWidth()-TyBits;
1291 // Return # leading zeros. We use 'min' here in case Val was zero before
1292 // shifting. We don't want to return '64' as for an i32 "0".
1293 return std::max(FirstAnswer, std::min(TyBits, Mask.countLeadingZeros()));
1296 /// ComputeMultiple - This function computes the integer multiple of Base that
1297 /// equals V. If successful, it returns true and returns the multiple in
1298 /// Multiple. If unsuccessful, it returns false. It looks
1299 /// through SExt instructions only if LookThroughSExt is true.
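///
/// Illustrative use (a sketch, not part of the original comment; %x is a
/// hypothetical non-constant value): for V = shl i32 %x, 3 and Base = 8,
/// the call succeeds with Multiple bound to %x, since V == 8 * %x.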
1300 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
1301 bool LookThroughSExt, unsigned Depth) {
1302 const unsigned MaxDepth = 6;
1304 assert(V && "No Value?");
1305 assert(Depth <= MaxDepth && "Limit Search Depth");
1306 assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
1308 Type *T = V->getType();
1310 ConstantInt *CI = dyn_cast<ConstantInt>(V);
1320 ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
1321 Constant *BaseVal = ConstantInt::get(T, Base);
1322 if (CO && CO == BaseVal) {
1324 Multiple = ConstantInt::get(T, 1);
1328 if (CI && CI->getZExtValue() % Base == 0) {
1329 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
1333 if (Depth == MaxDepth) return false; // Limit search depth.
1335 Operator *I = dyn_cast<Operator>(V);
1336 if (!I) return false;
1338 switch (I->getOpcode()) {
1340 case Instruction::SExt:
1341 if (!LookThroughSExt) return false;
1342 // otherwise fall through to ZExt
1343 case Instruction::ZExt:
1344 return ComputeMultiple(I->getOperand(0), Base, Multiple,
1345 LookThroughSExt, Depth+1);
1346 case Instruction::Shl:
1347 case Instruction::Mul: {
1348 Value *Op0 = I->getOperand(0);
1349 Value *Op1 = I->getOperand(1);
1351 if (I->getOpcode() == Instruction::Shl) {
1352 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
1353 if (!Op1CI) return false;
1354 // Turn Op0 << Op1 into Op0 * 2^Op1
1355 APInt Op1Int = Op1CI->getValue();
1356 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
1357 APInt API(Op1Int.getBitWidth(), 0);
1358 API.setBit(BitToSet);
1359 Op1 = ConstantInt::get(V->getContext(), API);
1362 Value *Mul0 = nullptr;
1363 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
1364 if (Constant *Op1C = dyn_cast<Constant>(Op1))
1365 if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
1366 if (Op1C->getType()->getPrimitiveSizeInBits() <
1367 MulC->getType()->getPrimitiveSizeInBits())
1368 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
1369 if (Op1C->getType()->getPrimitiveSizeInBits() >
1370 MulC->getType()->getPrimitiveSizeInBits())
1371 MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
1373 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
1374 Multiple = ConstantExpr::getMul(MulC, Op1C);
1378 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
1379 if (Mul0CI->getValue() == 1) {
1380 // V == Base * Op1, so return Op1
1386 Value *Mul1 = nullptr;
1387 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
1388 if (Constant *Op0C = dyn_cast<Constant>(Op0))
1389 if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
1390 if (Op0C->getType()->getPrimitiveSizeInBits() <
1391 MulC->getType()->getPrimitiveSizeInBits())
1392 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
1393 if (Op0C->getType()->getPrimitiveSizeInBits() >
1394 MulC->getType()->getPrimitiveSizeInBits())
1395 MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
1397 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
1398 Multiple = ConstantExpr::getMul(MulC, Op0C);
1402 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
1403 if (Mul1CI->getValue() == 1) {
1404 // V == Base * Op0, so return Op0
1412 // We could not determine if V is a multiple of Base.
1416 /// CannotBeNegativeZero - Return true if we can prove that the specified FP
1417 /// value is never equal to -0.0.
/// NOTE: this function will need to be revisited when we support non-default
/// rounding modes!
///
1422 bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
1423 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
1424 return !CFP->getValueAPF().isNegZero();
1427 return 1; // Limit search depth.
1429 const Operator *I = dyn_cast<Operator>(V);
1430 if (!I) return false;
1432 // Check if the nsz fast-math flag is set
1433 if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
1434 if (FPO->hasNoSignedZeros())
1437 // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
1438 if (I->getOpcode() == Instruction::FAdd)
1439 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1)))
1440 if (CFP->isNullValue())
1443 // sitofp and uitofp turn into +0.0 for zero.
1444 if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
1447 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1448 // sqrt(-0.0) = -0.0, no other negative results are possible.
1449 if (II->getIntrinsicID() == Intrinsic::sqrt)
1450 return CannotBeNegativeZero(II->getArgOperand(0), Depth+1);
1452 if (const CallInst *CI = dyn_cast<CallInst>(I))
1453 if (const Function *F = CI->getCalledFunction()) {
1454 if (F->isDeclaration()) {
1456 if (F->getName() == "abs") return true;
1457 // fabs[lf](x) != -0.0
1458 if (F->getName() == "fabs") return true;
1459 if (F->getName() == "fabsf") return true;
1460 if (F->getName() == "fabsl") return true;
1461 if (F->getName() == "sqrt" || F->getName() == "sqrtf" ||
1462 F->getName() == "sqrtl")
1463 return CannotBeNegativeZero(CI->getArgOperand(0), Depth+1);
1470 /// isBytewiseValue - If the specified value can be set by repeating the same
1471 /// byte in memory, return the i8 value that it is represented with. This is
1472 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
1473 /// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
1474 /// byte store (e.g. i16 0x1234), return null.
1475 Value *llvm::isBytewiseValue(Value *V) {
1476 // All byte-wide stores are splatable, even of arbitrary variables.
1477 if (V->getType()->isIntegerTy(8)) return V;
1479 // Handle 'null' ConstantArrayZero etc.
1480 if (Constant *C = dyn_cast<Constant>(V))
1481 if (C->isNullValue())
1482 return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
1484 // Constant float and double values can be handled as integer values if the
1485 // corresponding integer value is "byteable". An important case is 0.0.
1486 if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
1487 if (CFP->getType()->isFloatTy())
1488 V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
1489 if (CFP->getType()->isDoubleTy())
1490 V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
1491 // Don't handle long double formats, which have strange constraints.
// We can handle constant integers that are a power of two in size and a
// multiple of 8 bits.
1496 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
1497 unsigned Width = CI->getBitWidth();
1498 if (isPowerOf2_32(Width) && Width > 8) {
1499 // We can handle this value if the recursive binary decomposition is the
1500 // same at all levels.
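// Illustrative example (not in the original): i32 0xABABABAB splits into
// 0xABAB / 0xABAB, then 0xAB / 0xAB, so it splats to the byte 0xAB;
// i32 0xABABABCD differs between its halves at the first level and is
// rejected.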
1501 APInt Val = CI->getValue();
1503 while (Val.getBitWidth() != 8) {
1504 unsigned NextWidth = Val.getBitWidth()/2;
1505 Val2 = Val.lshr(NextWidth);
1506 Val2 = Val2.trunc(Val.getBitWidth()/2);
1507 Val = Val.trunc(Val.getBitWidth()/2);
1509 // If the top/bottom halves aren't the same, reject it.
1513 return ConstantInt::get(V->getContext(), Val);
// A ConstantDataArray/Vector is splatable if all its members are equal and
// also splatable.
1519 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
1520 Value *Elt = CA->getElementAsConstant(0);
1521 Value *Val = isBytewiseValue(Elt);
1525 for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
1526 if (CA->getElementAsConstant(I) != Elt)
1532 // Conceptually, we could handle things like:
1533 // %a = zext i8 %X to i16
1534 // %b = shl i16 %a, 8
1535 // %c = or i16 %a, %b
1536 // but until there is an example that actually needs this, it doesn't seem
1537 // worth worrying about.
1542 // This is the recursive version of BuildSubAggregate. It takes a few different
1543 // arguments. Idxs is the index within the nested struct From that we are
1544 // looking at now (which is of type IndexedType). IdxSkip is the number of
1545 // indices from Idxs that should be left out when inserting into the resulting
// struct. To is the result struct built so far, new insertvalue instructions
// build on that.
1548 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
1549 SmallVectorImpl<unsigned> &Idxs,
1551 Instruction *InsertBefore) {
1552 llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType);
1554 // Save the original To argument so we can modify it
1556 // General case, the type indexed by Idxs is a struct
1557 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1558 // Process each struct element recursively
1561 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
1565 // Couldn't find any inserted value for this index? Cleanup
1566 while (PrevTo != OrigTo) {
1567 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
1568 PrevTo = Del->getAggregateOperand();
1569 Del->eraseFromParent();
1571 // Stop processing elements
1575 // If we successfully found a value for each of our subaggregates
1579 // Base case, the type indexed by SourceIdxs is not a struct, or not all of
1580 // the struct's elements had a value that was inserted directly. In the latter
1581 // case, perhaps we can't determine each of the subelements individually, but
1582 // we might be able to find the complete struct somewhere.
1584 // Find the value that is at that particular spot
1585 Value *V = FindInsertedValue(From, Idxs);
// Insert the value in the new (sub) aggregate
1591 return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
1592 "tmp", InsertBefore);
1595 // This helper takes a nested struct and extracts a part of it (which is again a
1596 // struct) into a new value. For example, given the struct:
1597 // { a, { b, { c, d }, e } }
// and the indices "1, 1" this returns
// { c, d }.
//
1601 // It does this by inserting an insertvalue for each element in the resulting
1602 // struct, as opposed to just inserting a single struct. This will only work if
// each of the elements of the substruct is known (i.e., inserted into From by an
1604 // insertvalue instruction somewhere).
1606 // All inserted insertvalue instructions are inserted before InsertBefore
1607 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
1608 Instruction *InsertBefore) {
1609 assert(InsertBefore && "Must have someplace to insert!");
1610 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
1612 Value *To = UndefValue::get(IndexedType);
1613 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
1614 unsigned IdxSkip = Idxs.size();
1616 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
/// FindInsertedValue - Given an aggregate and a sequence of indices, see if
/// the scalar value indexed is already around as a register, for example if it
/// were inserted directly into the aggregate.
///
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
                               Instruction *InsertBefore) {
  // Nothing to index? Just return V then (this is useful at the end of our
  // recursion).
  if (idx_range.empty())
    return V;
  // We have indices, so V should have an indexable type.
  assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
         "Not looking at a struct or array?");
  assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
         "Invalid indices for type?");

  if (Constant *C = dyn_cast<Constant>(V)) {
    C = C->getAggregateElement(idx_range[0]);
    if (!C) return nullptr;
    return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
  }

  if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
    // Loop over the indices of the insertvalue instruction in parallel with the
    // requested indices.
    const unsigned *req_idx = idx_range.begin();
    for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
         i != e; ++i, ++req_idx) {
      if (req_idx == idx_range.end()) {
        // We can't handle this without inserting insertvalues.
        if (!InsertBefore)
          return nullptr;

        // The requested index identifies a part of a nested aggregate. Handle
        // this specially. For example,
        //   %A = insertvalue { i32, { i32, i32 } } undef, i32 10, 1, 0
        //   %B = insertvalue { i32, { i32, i32 } } %A, i32 11, 1, 1
        //   %C = extractvalue { i32, { i32, i32 } } %B, 1
        // can be changed into
        //   %A = insertvalue { i32, i32 } undef, i32 10, 0
        //   %C = insertvalue { i32, i32 } %A, i32 11, 1
        // which allows the unused 0,0 element from the nested struct to be
        // removed.
        return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
                                 InsertBefore);
      }

      // This insertvalue inserts something other than what we are looking for.
      // See if the (aggregate) value inserted into has the value we are
      // looking for, then.
      if (*req_idx != *i)
        return FindInsertedValue(I->getAggregateOperand(), idx_range,
                                 InsertBefore);
    }
    // If we end up here, the indices of the insertvalue match with those
    // requested (though possibly only partially). Now we recursively look at
    // the inserted value, passing any remaining indices.
    return FindInsertedValue(I->getInsertedValueOperand(),
                             makeArrayRef(req_idx, idx_range.end()),
                             InsertBefore);
  }

  if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
    // If we're extracting a value from an aggregate that was extracted from
    // something else, we can extract from that something else directly instead.
    // However, we will need to chain I's indices with the requested indices.

    // Calculate the number of indices required.
    unsigned size = I->getNumIndices() + idx_range.size();
    // Allocate some space to put the new indices in.
    SmallVector<unsigned, 5> Idxs;
    Idxs.reserve(size);
    // Add indices from the extractvalue instruction.
    Idxs.append(I->idx_begin(), I->idx_end());

    // Add the requested indices.
    Idxs.append(idx_range.begin(), idx_range.end());

    assert(Idxs.size() == size &&
           "Number of indices added not correct?");

    return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
  }
  // Otherwise, we don't know (such as, extracting from a function return value
  // or load instruction).
  return nullptr;
}
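
// For illustration (hypothetical IR): given
//   %A = insertvalue { i32, { i32, i32 } } undef, i32 10, 1, 0
//   %B = insertvalue { i32, { i32, i32 } } %A, i32 11, 1, 1
// FindInsertedValue(%B, {1, 1}) returns the scalar i32 11 directly, while
// FindInsertedValue(%B, {1}) needs a non-null InsertBefore so it can
// materialize the { i32, i32 } sub-struct with new insertvalue instructions.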
/// GetPointerBaseWithConstantOffset - Analyze the specified pointer to see if
/// it can be expressed as a base pointer plus a constant offset. Return the
/// base and offset to the caller.
Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                              const DataLayout *DL) {
  // Without DataLayout, conservatively assume 64-bit offsets, which is
  // the widest we support.
  unsigned BitWidth = DL ? DL->getPointerTypeSizeInBits(Ptr->getType()) : 64;
  APInt ByteOffset(BitWidth, 0);
  while (1) {
    if (Ptr->getType()->isVectorTy())
      break;

    if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
      // Accumulating a constant offset requires DataLayout.
      if (!DL)
        break;

      APInt GEPOffset(BitWidth, 0);
      if (!GEP->accumulateConstantOffset(*DL, GEPOffset))
        break;

      ByteOffset += GEPOffset;

      Ptr = GEP->getPointerOperand();
    } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
               Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
      Ptr = cast<Operator>(Ptr)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
      if (GA->mayBeOverridden())
        break;
      Ptr = GA->getAliasee();
    } else {
      break;
    }
  }
  Offset = ByteOffset.getSExtValue();
  return Ptr;
}
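
// For illustration (hypothetical IR, with a DataLayout using 64-bit pointers):
//   %p = getelementptr inbounds i8* %base, i64 16
//   %q = bitcast i8* %p to i32*
// GetPointerBaseWithConstantOffset(%q, Offset, DL) returns %base with
// Offset == 16; without DataLayout the walk stops at the GEP and the
// original offset of 0 is reported.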
/// getConstantStringInfo - This function computes the length of a
/// null-terminated C string pointed to by V. If successful, it returns true
/// and returns the string in Str. If unsuccessful, it returns false.
bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
                                 uint64_t Offset, bool TrimAtNul) {
  // Look through bitcast instructions and geps.
  V = V->stripPointerCasts();

  // If the value is a GEP instruction or constant expression, treat it as an
  // offset.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    // Make sure the GEP has exactly three arguments.
    if (GEP->getNumOperands() != 3)
      return false;

    // Make sure the index-ee is a pointer to an array of i8.
    PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
    ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
    if (!AT || !AT->getElementType()->isIntegerTy(8))
      return false;

    // Check to make sure that the first operand of the GEP is an integer and
    // has value 0 so that we are sure we're indexing into the initializer.
    const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
    if (!FirstIdx || !FirstIdx->isZero())
      return false;

    // If the second index isn't a ConstantInt, then this is a variable index
    // into the array. If this occurs, we can't say anything meaningful about
    // the string.
    uint64_t StartIdx = 0;
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
      StartIdx = CI->getZExtValue();
    else
      return false;
    // Propagate TrimAtNul so a caller asking for the raw array is honored.
    return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx + Offset,
                                 TrimAtNul);
  }

  // The GEP, whether a constant expression or an instruction, must reference a
  // global variable that is a constant and is initialized. The referenced
  // constant initializer is the array that we'll use for optimization.
  const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return false;

  // Handle the all-zeros case.
  if (GV->getInitializer()->isNullValue()) {
    // This is a degenerate case. The initializer is constant zero so the
    // length of the string must be zero.
    Str = "";
    return true;
  }

  // Must be a ConstantDataArray.
  const ConstantDataArray *Array =
      dyn_cast<ConstantDataArray>(GV->getInitializer());
  if (!Array || !Array->isString())
    return false;

  // Get the number of elements in the array.
  uint64_t NumElts = Array->getType()->getArrayNumElements();

  // Start out with the entire array in the StringRef.
  Str = Array->getAsString();

  if (Offset > NumElts)
    return false;

  // Skip over 'Offset' bytes.
  Str = Str.substr(Offset);

  if (TrimAtNul) {
    // Trim off the \0 and anything after it. If the array is not nul
    // terminated, we just return the whole end of string. The client may know
    // some other way that the string is length-bound.
    Str = Str.substr(0, Str.find('\0'));
  }
  return true;
}
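
// For illustration (hypothetical IR):
//   @str = private unnamed_addr constant [6 x i8] c"hello\00"
// getConstantStringInfo(@str, Str) sets Str = "hello" and returns true; with
// Offset == 2 it sets Str = "llo", and with TrimAtNul == false the trailing
// '\0' is kept in the returned StringRef.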
// These next two are very similar to the above, but also look through PHI
// nodes.
// TODO: See if we can integrate these two together.

/// GetStringLengthH - If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'. If we can't, return 0.
static uint64_t GetStringLengthH(Value *V, SmallPtrSet<PHINode*, 32> &PHIs) {
  // Look through noop bitcast instructions.
  V = V->stripPointerCasts();

  // If this is a PHI node, there are two cases: either we have already seen it
  // or we haven't.
  if (PHINode *PN = dyn_cast<PHINode>(V)) {
    if (!PHIs.insert(PN))
      return ~0ULL; // already in the set.

    // If it was new, see if all the input strings are the same length.
    uint64_t LenSoFar = ~0ULL;
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      uint64_t Len = GetStringLengthH(PN->getIncomingValue(i), PHIs);
      if (Len == 0) return 0; // Unknown length -> unknown.

      if (Len == ~0ULL) continue;

      if (Len != LenSoFar && LenSoFar != ~0ULL)
        return 0; // Disagree -> unknown.
      LenSoFar = Len;
    }

    // Success, all agree.
    return LenSoFar;
  }

  // strlen(select(c, x, y)) -> strlen(x) if it equals strlen(y), else unknown.
  if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
    uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs);
    if (Len1 == 0) return 0;
    uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs);
    if (Len2 == 0) return 0;
    if (Len1 == ~0ULL) return Len2;
    if (Len2 == ~0ULL) return Len1;
    if (Len1 != Len2) return 0;
    return Len1;
  }

  // Otherwise, see if we can read the string.
  StringRef StrData;
  if (!getConstantStringInfo(V, StrData))
    return 0;

  return StrData.size() + 1;
}
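
// For illustration (hypothetical): if %s selects between pointers to two
// constant strings "abc\0" and "xyz\0", GetStringLengthH(%s, PHIs) returns 4
// (strlen + 1) because both arms agree; if the arms had different lengths it
// would return 0 (unknown).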
/// GetStringLength - If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'. If we can't, return 0.
uint64_t llvm::GetStringLength(Value *V) {
  if (!V->getType()->isPointerTy()) return 0;

  SmallPtrSet<PHINode*, 32> PHIs;
  uint64_t Len = GetStringLengthH(V, PHIs);
  // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
  // an empty string as a length.
  return Len == ~0ULL ? 1 : Len;
}
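
// For illustration (hypothetical): GetStringLength on a pointer to the global
//   @str = constant [6 x i8] c"hello\00"
// returns 6 (strlen("hello") + 1); on a value that doesn't point to a constant
// string, or on a non-pointer value, it returns 0.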
Value *llvm::GetUnderlyingObject(Value *V, const DataLayout *TD,
                                 unsigned MaxLookup) {
  if (!V->getType()->isPointerTy())
    return V;
  for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast ||
               Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->mayBeOverridden())
        return V;
      V = GA->getAliasee();
    } else {
      // See if InstructionSimplify knows any relevant tricks.
      if (Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Acquire a DominatorTree and use it.
        if (Value *Simplified = SimplifyInstruction(I, TD, nullptr)) {
          V = Simplified;
          continue;
        }

      return V;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  }
  return V;
}
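
// For illustration (hypothetical IR): for
//   %a = alloca [16 x i8]
//   %p = getelementptr inbounds [16 x i8]* %a, i32 0, i32 4
//   %q = bitcast i8* %p to i32*
// GetUnderlyingObject(%q, TD) walks back through the bitcast and the GEP and
// returns the alloca %a, subject to the MaxLookup limit.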
void llvm::GetUnderlyingObjects(Value *V,
                                SmallVectorImpl<Value *> &Objects,
                                const DataLayout *TD,
                                unsigned MaxLookup) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    Value *P = Worklist.pop_back_val();
    P = GetUnderlyingObject(P, TD, MaxLookup);

    if (!Visited.insert(P))
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Worklist.push_back(PN->getIncomingValue(i));
      continue;
    }

    Objects.push_back(P);
  } while (!Worklist.empty());
}
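
// For illustration (hypothetical IR): if %p is a phi of two pointers that
// ultimately come from %a = alloca i32 and from a global @g, then
// GetUnderlyingObjects(%p, Objects, TD) leaves both %a and @g in Objects,
// whereas GetUnderlyingObject alone would stop at the phi.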
/// onlyUsedByLifetimeMarkers - Return true if the only users of this pointer
/// are lifetime markers.
bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
  for (const User *U : V->users()) {
    const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    if (!II) return false;

    if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
        II->getIntrinsicID() != Intrinsic::lifetime_end)
      return false;
  }
  return true;
}
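
// For illustration (hypothetical IR): a pointer %p whose only uses are
//   call void @llvm.lifetime.start(i64 16, i8* %p)
//   call void @llvm.lifetime.end(i64 16, i8* %p)
// is onlyUsedByLifetimeMarkers; a single load or store of %p makes it false.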
bool llvm::isSafeToSpeculativelyExecute(const Value *V,
                                        const DataLayout *TD) {
  const Operator *Inst = dyn_cast<Operator>(V);
  if (!Inst)
    return false;

  for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
    if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
      if (C->canTrap())
        return false;

  switch (Inst->getOpcode()) {
  default:
    return true;
  case Instruction::UDiv:
  case Instruction::URem:
    // x / y is undefined if y == 0, but calculations like x / 3 are safe.
    return isKnownNonZero(Inst->getOperand(1), TD);
  case Instruction::SDiv:
  case Instruction::SRem: {
    Value *Op = Inst->getOperand(1);
    // x / y is undefined if y == 0.
    if (!isKnownNonZero(Op, TD))
      return false;

    // x / y might be undefined if y == -1.
    unsigned BitWidth = getBitWidth(Op->getType(), TD);
    if (BitWidth == 0)
      return false;

    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    computeKnownBits(Op, KnownZero, KnownOne, TD);
    // Safe only if the divisor's sign bit is known clear, so it can't be -1.
    return !!KnownZero[BitWidth - 1];
  }
  case Instruction::Load: {
    const LoadInst *LI = cast<LoadInst>(Inst);
    if (!LI->isUnordered() ||
        // Speculative load may create a race that did not exist in the source.
        LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
      return false;
    return LI->getPointerOperand()->isDereferenceablePointer(TD);
  }
  case Instruction::Call: {
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      switch (II->getIntrinsicID()) {
      // These synthetic intrinsics have no side-effects and just mark
      // information about their operands.
      // FIXME: There are other no-op synthetic instructions that potentially
      // should be considered at least *safe* to speculate...
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
        return true;

      case Intrinsic::bswap:
      case Intrinsic::ctlz:
      case Intrinsic::ctpop:
      case Intrinsic::cttz:
      case Intrinsic::objectsize:
      case Intrinsic::sadd_with_overflow:
      case Intrinsic::smul_with_overflow:
      case Intrinsic::ssub_with_overflow:
      case Intrinsic::uadd_with_overflow:
      case Intrinsic::umul_with_overflow:
      case Intrinsic::usub_with_overflow:
        return true;
      // Sqrt should be OK, since the llvm sqrt intrinsic isn't defined to set
      // errno like libm sqrt would.
      case Intrinsic::sqrt:
      case Intrinsic::fma:
      case Intrinsic::fmuladd:
        return true;
      // TODO: some fp intrinsics are marked as having the same error handling
      // as libm. They're safe to speculate when they won't error.
      // TODO: are convert_{from,to}_fp16 safe?
      // TODO: can we list target-specific intrinsics here?
      default:
        break;
      }
    }
    return false; // The called function could have undefined behavior or
                  // side-effects, even if marked readnone nounwind.
  }
  case Instruction::VAArg:
  case Instruction::Alloca:
  case Instruction::Invoke:
  case Instruction::PHI:
  case Instruction::Store:
  case Instruction::Ret:
  case Instruction::Br:
  case Instruction::IndirectBr:
  case Instruction::Switch:
  case Instruction::Unreachable:
  case Instruction::Fence:
  case Instruction::LandingPad:
  case Instruction::AtomicRMW:
  case Instruction::AtomicCmpXchg:
  case Instruction::Resume:
    return false; // Misc instructions which have effects
  }
}
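
// For illustration (hypothetical IR):
//   %q = udiv i32 %x, 3   ; safe to speculate: the divisor is known non-zero
//   %r = udiv i32 %x, %y  ; not safe unless %y can be proven non-zero
//   %v = load i32* %p     ; safe only if %p is known dereferenceable and the
//                         ; load is unordered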
/// isKnownNonNull - Return true if we know that the specified value is never
/// null.
bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) {
  // Alloca never returns null, malloc might.
  if (isa<AllocaInst>(V)) return true;

  // A byval, inalloca, or nonnull argument is never null.
  if (const Argument *A = dyn_cast<Argument>(V))
    return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr();

  // Global values are not null unless extern weak.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
    return !GV->hasExternalWeakLinkage();

  // A call whose return value is marked nonnull is never null.
  if (ImmutableCallSite CS = V)
    if (CS.isReturnNonNull())
      return true;

  // operator new never returns null.
  if (isOperatorNewLikeFn(V, TLI, /*LookThroughBitCast=*/true))