//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/GlobalVariable.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include <cstring>
using namespace llvm;
/// getOpcode - If this is an Instruction or a ConstantExpr, return the
/// opcode value.  Otherwise return UserOp1.
static unsigned getOpcode(const Value *V) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getOpcode();
  if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    return CE->getOpcode();
  // Use UserOp1 to mean there's no opcode.
  return Instruction::UserOp1;
}
/// ComputeMaskedBits - Determine which of the bits specified in Mask are
/// known to be either zero or one and return them in the KnownZero/KnownOne
/// bit sets.  This code only analyzes bits in Mask, in order to short-circuit
/// processing.
/// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero.  If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
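///
/// A minimal usage sketch (illustrative only; assumes an i8 Value *X and no
/// TargetData):
///   APInt Mask = APInt::getAllOnesValue(8);
///   APInt KnownZero(8, 0), KnownOne(8, 0);
///   ComputeMaskedBits(X, Mask, KnownZero, KnownOne, 0);
///   // For X = and i8 %y, 15, KnownZero now has the top four bits set.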
void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
                             APInt &KnownZero, APInt &KnownOne,
                             TargetData *TD, unsigned Depth) {
  assert(V && "No Value?");
  assert(Depth <= 6 && "Limit Search Depth");
  uint32_t BitWidth = Mask.getBitWidth();
  assert((V->getType()->isInteger() || isa<PointerType>(V->getType())) &&
         "Not integer or pointer type!");
  assert((!TD || TD->getTypeSizeInBits(V->getType()) == BitWidth) &&
         (!isa<IntegerType>(V->getType()) ||
          V->getType()->getPrimitiveSizeInBits() == BitWidth) &&
         KnownZero.getBitWidth() == BitWidth &&
         KnownOne.getBitWidth() == BitWidth &&
         "V, Mask, KnownOne and KnownZero should have same BitWidth");
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    // We know all of the bits for a constant!
    KnownOne = CI->getValue() & Mask;
    KnownZero = ~KnownOne & Mask;
    return;
  }
  // Null is all-zeros.
  if (isa<ConstantPointerNull>(V)) {
    KnownOne.clear();
    KnownZero = Mask;
    return;
  }
  // The address of an aligned GlobalValue has trailing zeros.
  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    unsigned Align = GV->getAlignment();
    if (Align == 0 && TD && GV->getType()->getElementType()->isSized())
      Align = TD->getPrefTypeAlignment(GV->getType()->getElementType());
    if (Align > 0)
      KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
                                              CountTrailingZeros_32(Align));
    else
      KnownZero.clear();
    KnownOne.clear();
    return;
  }

  KnownZero.clear(); KnownOne.clear();   // Start out not knowing anything.

  if (Depth == 6 || Mask == 0)
    return;  // Limit search depth.

  User *I = dyn_cast<User>(V);
  if (!I) return;
  APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
  switch (getOpcode(I)) {
  default: break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD, Depth+1);
    APInt Mask2(Mask & ~KnownZero);
    ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    return;
  }
  case Instruction::Or: {
    ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD, Depth+1);
    APInt Mask2(Mask & ~KnownOne);
    ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    return;
  }
  case Instruction::Xor: {
    ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD, Depth+1);
    ComputeMaskedBits(I->getOperand(0), Mask, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
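    // Worked example (illustrative, i4 values): if the LHS is known to be
    // 0b01?? and the RHS 0b00??, then bit 3 (0^0) and bit 2 (1^0) of the
    // xor are known: KnownZero gets bit 3 and KnownOne gets bit 2.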
    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    return;
  }
  case Instruction::Mul: {
    APInt Mask2 = APInt::getAllOnesValue(BitWidth);
    ComputeMaskedBits(I->getOperand(1), Mask2, KnownZero, KnownOne, TD,
                      Depth+1);
    ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If low bits are zero in either operand, output low known-0 bits.
    // Also compute a conservative estimate for high known-0 bits.
    // More trickiness is possible, but this is sufficient for the
    // interesting case of alignment computation.
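    // Worked example (illustrative, i8): if one operand ends in two known
    // zero bits (a multiple of 4) and the other in one (a multiple of 2),
    // the product is a multiple of 8, so TrailZ = 2 + 1 = 3 below.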
    KnownOne.clear();
    unsigned TrailZ = KnownZero.countTrailingOnes() +
                      KnownZero2.countTrailingOnes();
    unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
                              KnownZero2.countLeadingOnes(),
                              BitWidth) - BitWidth;

    TrailZ = std::min(TrailZ, BitWidth);
    LeadZ = std::min(LeadZ, BitWidth);
    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
                APInt::getHighBitsSet(BitWidth, LeadZ);
    KnownZero &= Mask;
    return;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
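    // Intuition (illustrative, i8): if the numerator has 3 known leading
    // zeros (so it is < 32) and the denominator is known to be >= 8, the
    // quotient is < 4, i.e. it gains at least two more leading zeros.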
    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
    ComputeMaskedBits(I->getOperand(0),
                      AllOnes, KnownZero2, KnownOne2, TD, Depth+1);
    unsigned LeadZ = KnownZero2.countLeadingOnes();

    KnownOne2.clear();
    KnownZero2.clear();
    ComputeMaskedBits(I->getOperand(1),
                      AllOnes, KnownZero2, KnownOne2, TD, Depth+1);
    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
    if (RHSUnknownLeadingOnes != BitWidth)
      LeadZ = std::min(BitWidth,
                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);

    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ) & Mask;
    return;
  }
  case Instruction::Select:
    ComputeMaskedBits(I->getOperand(2), Mask, KnownZero, KnownOne, TD, Depth+1);
    ComputeMaskedBits(I->getOperand(1), Mask, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    return;
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    return; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // We can't handle these if we don't know the pointer size.
    if (!TD) return;
    // FALL THROUGH and handle them the same as zext/trunc.
  case Instruction::ZExt:
  case Instruction::Trunc: {
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    const Type *SrcTy = I->getOperand(0)->getType();
    uint32_t SrcBitWidth = TD ?
      TD->getTypeSizeInBits(SrcTy) :
      SrcTy->getPrimitiveSizeInBits();
    APInt MaskIn(Mask);
    MaskIn.zextOrTrunc(SrcBitWidth);
    KnownZero.zextOrTrunc(SrcBitWidth);
    KnownOne.zextOrTrunc(SrcBitWidth);
    ComputeMaskedBits(I->getOperand(0), MaskIn, KnownZero, KnownOne, TD,
                      Depth+1);
    KnownZero.zextOrTrunc(BitWidth);
    KnownOne.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    return;
  }
  case Instruction::BitCast: {
    const Type *SrcTy = I->getOperand(0)->getType();
    if (SrcTy->isInteger() || isa<PointerType>(SrcTy)) {
      ComputeMaskedBits(I->getOperand(0), Mask, KnownZero, KnownOne, TD,
                        Depth+1);
      return;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    const IntegerType *SrcTy = cast<IntegerType>(I->getOperand(0)->getType());
    uint32_t SrcBitWidth = SrcTy->getBitWidth();

    APInt MaskIn(Mask);
    MaskIn.trunc(SrcBitWidth);
    KnownZero.trunc(SrcBitWidth);
    KnownOne.trunc(SrcBitWidth);
    ComputeMaskedBits(I->getOperand(0), MaskIn, KnownZero, KnownOne, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero.zext(BitWidth);
    KnownOne.zext(BitWidth);

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
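    // e.g. (illustrative): for %r = sext i8 %x to i32, if bit 7 of %x is
    // known zero, all 24 extended bits of %r are known zero too; if it is
    // known one, they are all known one.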
    if (KnownZero[SrcBitWidth-1])             // Input sign bit known zero
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    else if (KnownOne[SrcBitWidth-1])         // Input sign bit known set
      KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    return;
  }
  case Instruction::Shl:
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
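    // Concrete instance (illustrative): for shl i8 %x, 3, a mask bit at
    // position 5 of the result corresponds to bit 2 of %x, and the low 3
    // result bits are always zero.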
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
      APInt Mask2(Mask.lshr(ShiftAmt));
      ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                        Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero <<= ShiftAmt;
      KnownOne  <<= ShiftAmt;
      KnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt); // low bits known 0
      return;
    }
    break;
  case Instruction::LShr:
    // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // Compute the new bits that are at the top now.
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);

      // Unsigned shift right.
      APInt Mask2(Mask.shl(ShiftAmt));
      ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                        Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
      KnownOne  = APIntOps::lshr(KnownOne, ShiftAmt);
      // high bits known zero.
      KnownZero |= APInt::getHighBitsSet(BitWidth, ShiftAmt);
      return;
    }
    break;
  case Instruction::AShr:
    // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // Compute the new bits that are at the top now.
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);

      // Signed shift right.
      APInt Mask2(Mask.shl(ShiftAmt));
      ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                        Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
      KnownOne  = APIntOps::lshr(KnownOne, ShiftAmt);

      APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
      if (KnownZero[BitWidth-ShiftAmt-1])      // New bits are known zero.
        KnownZero |= HighBits;
      else if (KnownOne[BitWidth-ShiftAmt-1])  // New bits are known one.
        KnownOne |= HighBits;
      return;
    }
    break;
  case Instruction::Sub: {
    if (ConstantInt *CLHS = dyn_cast<ConstantInt>(I->getOperand(0))) {
      // We know that the top bits of C-X are clear if X contains fewer bits
      // than C (i.e. no wrap-around can happen).  For example, 20-X is
      // positive if we can prove that X is >= 0 and < 16.
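      // Worked instance (illustrative, i8): C = 20, so C+1 = 21 = 0b00010101
      // has NLZ = 3 leading zeros.  If the top 3+1 bits of X are known zero
      // (X < 16), then 20-X lies in [5,20] and its top 3 bits are zero.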
      if (!CLHS->getValue().isNegative()) {
        unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
        // NLZ can't be BitWidth with no sign bit
        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
        ComputeMaskedBits(I->getOperand(1), MaskV, KnownZero2, KnownOne2,
                          TD, Depth+1);

        // If all of the MaskV bits are known to be zero, then we know the
        // output top bits are zero, because we now know that the output is
        // from [0-C].
        if ((KnownZero2 & MaskV) == MaskV) {
          unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
          // Top bits known zero.
          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2) & Mask;
        }
      }
    }
  }
  // fall through
  case Instruction::Add: {
    // Output known-0 bits are known if clear or set in both the low clear bits
    // common to both LHS & RHS.  For example, 8+(X<<3) is known to have the
    // low 3 bits clear.
    APInt Mask2 = APInt::getLowBitsSet(BitWidth, Mask.countTrailingOnes());
    ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
    unsigned KnownZeroOut = KnownZero2.countTrailingOnes();

    ComputeMaskedBits(I->getOperand(1), Mask2, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
    KnownZeroOut = std::min(KnownZeroOut,
                            KnownZero2.countTrailingOnes());

    KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut);
    return;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue();
      if (RA.isPowerOf2() || (-RA).isPowerOf2()) {
        APInt LowBits = RA.isStrictlyPositive() ? (RA - 1) : ~RA;
        APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
        ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                          Depth+1);

        // If the sign bit of the first operand is zero, the sign bit of
        // the result is zero. If the first operand has no one bits below
        // the second operand's single 1 bit, its sign will be zero.
        if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
          KnownZero2 |= ~LowBits;

        KnownZero |= KnownZero2 & Mask;

        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      }
    }
    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        APInt Mask2 = LowBits & Mask;
        KnownZero |= ~LowBits & Mask;
        ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                          Depth+1);
        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
    ComputeMaskedBits(I->getOperand(0), AllOnes, KnownZero, KnownOne,
                      TD, Depth+1);
    ComputeMaskedBits(I->getOperand(1), AllOnes, KnownZero2, KnownOne2,
                      TD, Depth+1);

    uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
    KnownOne.clear();
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & Mask;
    break;
  }
  case Instruction::Alloca:
  case Instruction::Malloc: {
    AllocationInst *AI = cast<AllocationInst>(V);
    unsigned Align = AI->getAlignment();
    if (Align == 0 && TD) {
      if (isa<AllocaInst>(AI))
        Align = TD->getPrefTypeAlignment(AI->getType()->getElementType());
      else if (isa<MallocInst>(AI)) {
        // Malloc returns maximally aligned memory.
        Align = TD->getABITypeAlignment(AI->getType()->getElementType());
        Align = std::max(Align,
                         (unsigned)TD->getABITypeAlignment(Type::DoubleTy));
        Align = std::max(Align,
                         (unsigned)TD->getABITypeAlignment(Type::Int64Ty));
      }
    }

    if (Align > 0)
      KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
                                              CountTrailingZeros_32(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    APInt LocalMask = APInt::getAllOnesValue(BitWidth);
    APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
    ComputeMaskedBits(I->getOperand(0), LocalMask,
                      LocalKnownZero, LocalKnownOne, TD, Depth+1);
    unsigned TrailZ = LocalKnownZero.countTrailingOnes();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        // Handle struct member offset arithmetic.
        if (!TD) return;
        const StructLayout *SL = TD->getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min(TrailZ,
                          CountTrailingZeros_64(Offset));
      } else {
        // Handle array index arithmetic.
        const Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) return;
        unsigned GEPOpiBits = Index->getType()->getPrimitiveSizeInBits();
        uint64_t TypeSize = TD ? TD->getABITypeSize(IndexedTy) : 1;
        LocalMask = APInt::getAllOnesValue(GEPOpiBits);
        LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
        ComputeMaskedBits(Index, LocalMask,
                          LocalKnownZero, LocalKnownOne, TD, Depth+1);
        TrailZ = std::min(TrailZ,
                          CountTrailingZeros_64(TypeSize) +
                          LocalKnownZero.countTrailingOnes());
      }
    }

    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) & Mask;
    break;
  }
  case Instruction::PHI: {
    PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        User *LU = dyn_cast<User>(L);
        if (!LU)
          continue;
        unsigned Opcode = getOpcode(LU);
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
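        // e.g. (illustrative IR recurrence):
        //   %i      = phi i32 [ 0, %entry ], [ %i.next, %loop ]
        //   %i.next = add i32 %i, 4
        // The start value 0 and the step 4 both end in two zero bits, so
        // %i is always a multiple of 4.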
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R.  Check for low
          // zero bits.
          APInt Mask2 = APInt::getAllOnesValue(BitWidth);
          ComputeMaskedBits(R, Mask2, KnownZero2, KnownOne2, TD, Depth+1);
          Mask2 = APInt::getLowBitsSet(BitWidth,
                                       KnownZero2.countTrailingOnes());
          KnownOne2.clear();
          KnownZero2.clear();
          ComputeMaskedBits(L, Mask2, KnownZero2, KnownOne2, TD, Depth+1);
          KnownZero = Mask &
                      APInt::getLowBitsSet(BitWidth,
                                           KnownZero2.countTrailingOnes());
          break;
        }
      }
    }
    break;
  }
  case Instruction::Call:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::ctpop:
      case Intrinsic::ctlz:
      case Intrinsic::cttz: {
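        // These intrinsics return a value in [0, BitWidth], so only the low
        // Log2(BitWidth)+1 bits can be set.  e.g. (illustrative) for i32 the
        // result fits in 6 bits, leaving the top 26 bits known zero.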
        unsigned LowBits = Log2_32(BitWidth)+1;
        KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
        break;
      }
      }
    }
    break;
  }
}
/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero.  We use
/// this predicate to simplify operations downstream.  Mask is known to be zero
/// for bits that V cannot have.
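///
/// A minimal usage sketch (illustrative): to test whether an i32 value is a
/// multiple of 4, ask whether its low two bits are known zero:
///   if (MaskedValueIsZero(V, APInt(32, 3), TD))
///     ...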
bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
                             TargetData *TD, unsigned Depth) {
  APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD, Depth);
  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  return (KnownZero & Mask) == Mask;
}
/// ComputeNumSignBits - Return the number of times the sign bit of the
/// register is replicated into the other bits.  We know that at least 1 bit
/// is always equal to the sign bit (itself), but other cases can give us
/// information.  For example, immediately after an "ashr X, 2", we know that
/// the top 3 bits are all equal to each other, so we return 3.
///
/// 'Op' must have a scalar integer type.
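///
/// e.g. (illustrative): the i16 constant -8 is 0xFFF8, whose top 13 bits all
/// equal the sign bit, so this returns 13 for it.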
///
unsigned llvm::ComputeNumSignBits(Value *V, TargetData *TD, unsigned Depth) {
  const IntegerType *Ty = cast<IntegerType>(V->getType());
  unsigned TyBits = Ty->getBitWidth();
  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;

  // Note that ConstantInt is handled by the general ComputeMaskedBits case
  // below.

  if (Depth == 6)
    return 1;  // Limit search depth.

  User *U = dyn_cast<User>(V);
  switch (getOpcode(V)) {
  default: break;
  case Instruction::SExt:
    Tmp = TyBits-cast<IntegerType>(U->getOperand(0)->getType())->getBitWidth();
    return ComputeNumSignBits(U->getOperand(0), TD, Depth+1) + Tmp;

  case Instruction::AShr:
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    // ashr X, C   -> adds C sign bits.
    if (ConstantInt *C = dyn_cast<ConstantInt>(U->getOperand(1))) {
      Tmp += C->getZExtValue();
      if (Tmp > TyBits) Tmp = TyBits;
    }
    return Tmp;
  case Instruction::Shl:
    if (ConstantInt *C = dyn_cast<ConstantInt>(U->getOperand(1))) {
      // shl destroys sign bits.
      Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
      if (C->getZExtValue() >= TyBits ||      // Bad shift.
          C->getZExtValue() >= Tmp) break;    // Shifted all sign bits out.
      return Tmp - C->getZExtValue();
    }
    break;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:    // NOT is handled here.
    // Logical binary ops preserve the number of sign bits at the worst.
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    if (Tmp != 1) {
      Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
      FirstAnswer = std::min(Tmp, Tmp2);
      // We computed what we know about the sign bits as our first
      // answer. Now proceed to the generic code that uses
      // ComputeMaskedBits, and pick whichever answer is better.
    }
    break;
  case Instruction::Select:
    Tmp = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    Tmp2 = ComputeNumSignBits(U->getOperand(2), TD, Depth+1);
    return std::min(Tmp, Tmp2);
  case Instruction::Add:
    // Add can have at most one carry bit.  Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    if (Tmp == 1) return 1;  // Early out.

    // Special case decrementing a value (ADD X, -1):
    if (ConstantInt *CRHS = dyn_cast<ConstantInt>(U->getOperand(1)))
      if (CRHS->isAllOnesValue()) {
        APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
        APInt Mask = APInt::getAllOnesValue(TyBits);
        ComputeMaskedBits(U->getOperand(0), Mask, KnownZero, KnownOne, TD,
                          Depth+1);

        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero | APInt(TyBits, 1)) == Mask)
          return TyBits;

        // If we are subtracting one from a positive number, there is no carry
        // out of the result.
        if (KnownZero.isNegative())
          return Tmp;
      }

    Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
    if (Tmp2 == 1) return 1;
    return std::min(Tmp, Tmp2)-1;
  case Instruction::Sub:
    Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
    if (Tmp2 == 1) return 1;

    // Handle NEG.
    if (ConstantInt *CLHS = dyn_cast<ConstantInt>(U->getOperand(0)))
      if (CLHS->isNullValue()) {
        APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
        APInt Mask = APInt::getAllOnesValue(TyBits);
        ComputeMaskedBits(U->getOperand(1), Mask, KnownZero, KnownOne,
                          TD, Depth+1);
        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero | APInt(TyBits, 1)) == Mask)
          return TyBits;

        // If the input is known to be positive (the sign bit is known clear),
        // the output of the NEG has the same number of sign bits as the input.
        if (KnownZero.isNegative())
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit.  Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    return std::min(Tmp, Tmp2)-1;
  case Instruction::Trunc:
    // FIXME: it's tricky to do anything useful for this, but it is an
    // important case for targets like X86.
    break;
  }
  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.
  APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
  APInt Mask = APInt::getAllOnesValue(TyBits);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD, Depth);

  if (KnownZero.isNegative()) {        // sign bit is 0
    Mask = KnownZero;
  } else if (KnownOne.isNegative()) {  // sign bit is 1
    Mask = KnownOne;
  } else {
    // Nothing known.
    return FirstAnswer;
  }

  // Okay, we know that the sign bit in Mask is set.  Use CLZ to determine
  // the number of identical bits in the top of the input value.
  Mask = ~Mask;
  Mask <<= Mask.getBitWidth()-TyBits;
  // Return # leading zeros.  We use 'min' here in case Val was zero before
  // shifting.  We don't want to return '64' as for an i32 "0".
  return std::max(FirstAnswer, std::min(TyBits, Mask.countLeadingZeros()));
}
/// CannotBeNegativeZero - Return true if we can prove that the specified FP
/// value is never equal to -0.0.
///
/// NOTE: this function will need to be revisited when we support non-default
/// rounding modes!
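///
/// e.g. (illustrative): for %r = add double %x, 0.0 this returns true, since
/// under the default rounding mode even -0.0 + 0.0 produces +0.0, so the sum
/// can never be -0.0.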
///
bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
    return !CFP->getValueAPF().isNegZero();

  if (Depth == 6)
    return false;  // Limit search depth; we can't prove anything here.

  const Instruction *I = dyn_cast<Instruction>(V);
  if (I == 0) return false;

  // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
  if (I->getOpcode() == Instruction::Add &&
      isa<ConstantFP>(I->getOperand(1)) &&
      cast<ConstantFP>(I->getOperand(1))->isNullValue())
    return true;

  // sitofp and uitofp turn into +0.0 for zero.
  if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
    return true;

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    // sqrt(-0.0) = -0.0, no other negative results are possible.
    if (II->getIntrinsicID() == Intrinsic::sqrt)
      return CannotBeNegativeZero(II->getOperand(1), Depth+1);

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (const Function *F = CI->getCalledFunction()) {
      if (F->isDeclaration()) {
        switch (F->getNameLen()) {
        case 3:  // abs(x) != -0.0
          if (!strcmp(F->getNameStart(), "abs")) return true;
          break;
        case 4:  // abs[lf](x) != -0.0
          if (!strcmp(F->getNameStart(), "absf")) return true;
          if (!strcmp(F->getNameStart(), "absl")) return true;
          break;
        }
      }
    }

  return false;
}
// This is the recursive version of BuildSubAggregate. It takes a few different
// arguments. Idxs is the index within the nested struct From that we are
// looking at now (which is of type IndexedType). IdxSkip is the number of
// indices from Idxs that should be left out when inserting into the resulting
// struct. To is the result struct built so far, new insertvalue instructions
// build on that.
Value *BuildSubAggregate(Value *From, Value* To, const Type *IndexedType,
                         SmallVector<unsigned, 10> &Idxs,
                         unsigned IdxSkip,
                         Instruction *InsertBefore) {
  const llvm::StructType *STy = llvm::dyn_cast<llvm::StructType>(IndexedType);
  if (STy) {
    // Save the original To argument so we can modify it
    Value *OrigTo = To;
    // General case, the type indexed by Idxs is a struct
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      // Process each struct element recursively
      Idxs.push_back(i);
      Value *PrevTo = To;
      To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
                             InsertBefore);
      Idxs.pop_back();
      if (!To) {
        // Couldn't find any inserted value for this index? Cleanup
        while (PrevTo != OrigTo) {
          InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
          PrevTo = Del->getAggregateOperand();
          Del->eraseFromParent();
        }
        // Stop processing elements
        break;
      }
    }
    // If we successfully found a value for each of our subaggregates
    if (To)
      return To;
  }
  // Base case, the type indexed by SourceIdxs is not a struct, or not all of
  // the struct's elements had a value that was inserted directly. In the
  // latter case, perhaps we can't determine each of the subelements
  // individually, but we might be able to find the complete struct somewhere.

  // Find the value that is at that particular spot
  Value *V = FindInsertedValue(From, Idxs.begin(), Idxs.end());

  if (!V)
    return NULL;

  // Insert the value in the new (sub) aggregate
  return llvm::InsertValueInst::Create(To, V, Idxs.begin() + IdxSkip,
                                       Idxs.end(), "tmp", InsertBefore);
}
// This helper takes a nested struct and extracts a part of it (which is again a
// struct) into a new value. For example, given the struct:
// { a, { b, { c, d }, e } }
// and the indices "1, 1" this returns
// { c, d }.
//
// It does this by inserting an insertvalue for each element in the resulting
// struct, as opposed to just inserting a single struct. This will only work if
// each of the elements of the substruct are known (i.e., inserted into From by
// an insertvalue instruction somewhere).
//
// All inserted insertvalue instructions are inserted before InsertBefore
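//
// A hypothetical IR sketch of the result for the example above (value names
// and element types are illustrative only):
//   %tmp  = insertvalue { i32, i32 } undef, i32 %c, 0
//   %tmp2 = insertvalue { i32, i32 } %tmp, i32 %d, 1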
Value *BuildSubAggregate(Value *From, const unsigned *idx_begin,
                         const unsigned *idx_end, Instruction *InsertBefore) {
  assert(InsertBefore && "Must have someplace to insert!");
  const Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
                                                             idx_begin,
                                                             idx_end);
  Value *To = UndefValue::get(IndexedType);
  SmallVector<unsigned, 10> Idxs(idx_begin, idx_end);
  unsigned IdxSkip = Idxs.size();

  return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
}
/// FindInsertedValue - Given an aggregate and a sequence of indices, see if
/// the scalar value indexed is already around as a register, for example if it
/// were inserted directly into the aggregate.
///
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
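///
/// e.g. (illustrative): after
///   %x = insertvalue { i32, i32 } undef, i32 5, 0
/// asking for index 0 of %x returns the i32 5 directly, with no extractvalue
/// needed.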
Value *llvm::FindInsertedValue(Value *V, const unsigned *idx_begin,
                               const unsigned *idx_end,
                               Instruction *InsertBefore) {
  // Nothing to index? Just return V then (this is useful at the end of our
  // recursion)
  if (idx_begin == idx_end)
    return V;
  // We have indices, so V should have an indexable type
  assert((isa<StructType>(V->getType()) || isa<ArrayType>(V->getType()))
         && "Not looking at a struct or array?");
  assert(ExtractValueInst::getIndexedType(V->getType(), idx_begin, idx_end)
         && "Invalid indices for type?");
  const CompositeType *PTy = cast<CompositeType>(V->getType());

  if (isa<UndefValue>(V))
    return UndefValue::get(ExtractValueInst::getIndexedType(PTy,
                                                            idx_begin,
                                                            idx_end));
  else if (isa<ConstantAggregateZero>(V))
    return Constant::getNullValue(ExtractValueInst::getIndexedType(PTy,
                                                                   idx_begin,
                                                                   idx_end));
  else if (Constant *C = dyn_cast<Constant>(V)) {
    if (isa<ConstantArray>(C) || isa<ConstantStruct>(C))
      // Recursively process this constant
      return FindInsertedValue(C->getOperand(*idx_begin), idx_begin + 1,
                               idx_end, InsertBefore);
  } else if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
    // Loop the indices for the insertvalue instruction in parallel with the
    // requested indices
    const unsigned *req_idx = idx_begin;
    for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
         i != e; ++i, ++req_idx) {
      if (req_idx == idx_end) {
        if (InsertBefore)
          // The requested index identifies a part of a nested aggregate. Handle
          // this specially. For example,
          // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
          // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
          // %C = extractvalue {i32, { i32, i32 } } %B, 1
          // This can be changed into
          // %A = insertvalue {i32, i32 } undef, i32 10, 0
          // %C = insertvalue {i32, i32 } %A, i32 11, 1
          // which allows the unused 0,0 element from the nested struct to be
          // removed.
          return BuildSubAggregate(V, idx_begin, req_idx, InsertBefore);
        else
          // We can't handle this without inserting insertvalues
          return 0;
      }

      // This insert value inserts something other than what we are looking
      // for. See if the (aggregate) value inserted into has the value we are
      // looking for, then.
      if (*req_idx != *i)
        return FindInsertedValue(I->getAggregateOperand(), idx_begin, idx_end,
                                 InsertBefore);
    }
    // If we end up here, the indices of the insertvalue match with those
    // requested (though possibly only partially). Now we recursively look at
    // the inserted value, passing any remaining indices.
    return FindInsertedValue(I->getInsertedValueOperand(), req_idx, idx_end,
                             InsertBefore);
  } else if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
    // If we're extracting a value from an aggregate that was extracted from
    // something else, we can extract from that something else directly
    // instead. However, we will need to chain I's indices with the requested
    // indices.

    // Calculate the number of indices required
    unsigned size = I->getNumIndices() + (idx_end - idx_begin);
    // Allocate some space to put the new indices in
    SmallVector<unsigned, 5> Idxs;
    Idxs.reserve(size);

    // Add indices from the extract value instruction
    for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
         i != e; ++i)
      Idxs.push_back(*i);

    // Add requested indices
    for (const unsigned *i = idx_begin, *e = idx_end; i != e; ++i)
      Idxs.push_back(*i);

    assert(Idxs.size() == size
           && "Number of indices added not correct?");

    return FindInsertedValue(I->getAggregateOperand(), Idxs.begin(),
                             Idxs.end(), InsertBefore);
  }
  // Otherwise, we don't know (such as, extracting from a function return value
  // or load instruction)
  return 0;
}
/// GetConstantStringInfo - This function extracts the constant,
/// null-terminated C string pointed to by V.  If successful, it returns true
/// and returns the string in Str.  If unsuccessful, it returns false.
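///
/// A minimal usage sketch (illustrative, assuming GV is a constant global
/// initialized to c"hi\00"):
///   std::string Str;
///   if (GetConstantStringInfo(GV, Str, 0, true))
///     ... // Str == "hi"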
bool llvm::GetConstantStringInfo(Value *V, std::string &Str, uint64_t Offset,
                                 bool StopAtNul) {
  // If V is NULL then return false.
  if (V == NULL) return false;

  // Look through bitcast instructions.
  if (BitCastInst *BCI = dyn_cast<BitCastInst>(V))
    return GetConstantStringInfo(BCI->getOperand(0), Str, Offset, StopAtNul);

  // If the value is not a GEP instruction nor a constant expression with a
  // GEP instruction, then return false because ConstantArray can't occur
  // on scalar variables.
  User *GEP = 0;
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(V)) {
    GEP = GEPI;
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast)
      return GetConstantStringInfo(CE->getOperand(0), Str, Offset, StopAtNul);
    if (CE->getOpcode() != Instruction::GetElementPtr)
      return false;
    GEP = CE;
  }

  if (GEP) {
    // Make sure the GEP has exactly three arguments.
    if (GEP->getNumOperands() != 3)
      return false;

    // Make sure the index-ee is a pointer to array of i8.
    const PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
    const ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
    if (AT == 0 || AT->getElementType() != Type::Int8Ty)
      return false;

    // Check to make sure that the first operand of the GEP is an integer and
    // has value 0 so that we are sure we're indexing into the initializer.
    ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
    if (FirstIdx == 0 || !FirstIdx->isZero())
      return false;

    // If the second index isn't a ConstantInt, then this is a variable index
    // into the array.  If this occurs, we can't say anything meaningful about
    // the string.
    uint64_t StartIdx = 0;
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
      StartIdx = CI->getZExtValue();
    else
      return false;
    return GetConstantStringInfo(GEP->getOperand(0), Str, StartIdx+Offset,
                                 StopAtNul);
  }

  // The GEP instruction, constant or instruction, must reference a global
  // variable that is a constant and is initialized. The referenced constant
  // initializer is the array that we'll use for optimization.
  GlobalVariable* GV = dyn_cast<GlobalVariable>(V);
  if (!GV || !GV->isConstant() || !GV->hasInitializer())
    return false;
  Constant *GlobalInit = GV->getInitializer();

  // Handle the ConstantAggregateZero case
  if (isa<ConstantAggregateZero>(GlobalInit)) {
    // This is a degenerate case. The initializer is constant zero so the
    // length of the string must be zero.
    Str.clear();
    return true;
  }

  // Must be a Constant Array
  ConstantArray *Array = dyn_cast<ConstantArray>(GlobalInit);
  if (Array == 0 || Array->getType()->getElementType() != Type::Int8Ty)
    return false;

  // Get the number of elements in the array
  uint64_t NumElts = Array->getType()->getNumElements();

  if (Offset > NumElts)
    return false;

  // Traverse the constant array from 'Offset' which is the place the GEP refers
  // to in the array.
  Str.reserve(NumElts-Offset);
  for (unsigned i = Offset; i != NumElts; ++i) {
    Constant *Elt = Array->getOperand(i);
    ConstantInt *CI = dyn_cast<ConstantInt>(Elt);
    if (!CI)  // This array isn't suitable, non-int initializer.
      return false;
    if (StopAtNul && CI->isZero())
      return true;  // we found end of string, success!
    Str += (char)CI->getZExtValue();
  }

  // The array isn't null terminated, but maybe this is a memcpy, not a strcpy.
  return true;
}