//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//
15 #include "llvm/Analysis/ValueTracking.h"
16 #include "llvm/Constants.h"
17 #include "llvm/Instructions.h"
18 #include "llvm/GlobalVariable.h"
19 #include "llvm/GlobalAlias.h"
20 #include "llvm/IntrinsicInst.h"
21 #include "llvm/LLVMContext.h"
22 #include "llvm/Operator.h"
23 #include "llvm/Target/TargetData.h"
24 #include "llvm/Support/GetElementPtrTypeIterator.h"
25 #include "llvm/Support/MathExtras.h"
26 #include "llvm/ADT/SmallPtrSet.h"
/// ComputeMaskedBits - Determine which of the bits specified in Mask are
/// known to be either zero or one and return them in the KnownZero/KnownOne
/// bit sets.  This code only analyzes bits in Mask, in order to short-circuit
/// as much work as possible.
///
/// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero.  If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
///
/// This function is defined on values with integer type, values with pointer
/// type (but only if TD is non-null), and vectors of integers.  In the case
/// where V is a vector, the mask, known zero, and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
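///
/// For example (illustrative): if V is known to be a multiple of four and
/// Mask is all ones, the two low bits come back set in KnownZero; bits that
/// cannot be proven either way are left clear in both KnownZero and KnownOne.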
46 void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
47 APInt &KnownZero, APInt &KnownOne,
48 const TargetData *TD, unsigned Depth) {
49 const unsigned MaxDepth = 6;
50 assert(V && "No Value?");
51 assert(Depth <= MaxDepth && "Limit Search Depth");
52 unsigned BitWidth = Mask.getBitWidth();
assert((V->getType()->isIntOrIntVectorTy() || V->getType()->isPointerTy())
       && "Not integer or pointer type!");
assert((!TD ||
        TD->getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
       (!V->getType()->isIntOrIntVectorTy() ||
        V->getType()->getScalarSizeInBits() == BitWidth) &&
       KnownZero.getBitWidth() == BitWidth &&
       KnownOne.getBitWidth() == BitWidth &&
       "V, Mask, KnownOne and KnownZero should have same BitWidth");
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
  // We know all of the bits for a constant!
  KnownOne = CI->getValue() & Mask;
  KnownZero = ~KnownOne & Mask;
  return;
}
// Null and aggregate-zero are all-zeros.
if (isa<ConstantPointerNull>(V) ||
    isa<ConstantAggregateZero>(V)) {
  KnownZero = Mask;
  KnownOne.clear();
  return;
}
// Handle a constant vector by taking the intersection of the known bits of
// each element.
78 if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
79 KnownZero.set(); KnownOne.set();
80 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
81 APInt KnownZero2(BitWidth, 0), KnownOne2(BitWidth, 0);
82 ComputeMaskedBits(CV->getOperand(i), Mask, KnownZero2, KnownOne2,
84 KnownZero &= KnownZero2;
85 KnownOne &= KnownOne2;
89 // The address of an aligned GlobalValue has trailing zeros.
90 if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
91 unsigned Align = GV->getAlignment();
92 if (Align == 0 && TD && GV->getType()->getElementType()->isSized()) {
93 const Type *ObjectType = GV->getType()->getElementType();
94 // If the object is defined in the current Module, we'll be giving
95 // it the preferred alignment. Otherwise, we have to assume that it
96 // may only have the minimum ABI alignment.
97 if (!GV->isDeclaration() && !GV->mayBeOverridden())
98 Align = TD->getPrefTypeAlignment(ObjectType);
100 Align = TD->getABITypeAlignment(ObjectType);
103 KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
104 CountTrailingZeros_32(Align));
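// For example, a global known to be 16-byte aligned has its low four address
// bits known to be zero.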
110 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
111 // the bits of its aliasee.
if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
  if (GA->mayBeOverridden()) {
    KnownZero.clear(); KnownOne.clear();
  } else {
    ComputeMaskedBits(GA->getAliasee(), Mask, KnownZero, KnownOne,
                      TD, Depth+1);
  }
  return;
}
122 KnownZero.clear(); KnownOne.clear(); // Start out not knowing anything.
124 if (Depth == MaxDepth || Mask == 0)
125 return; // Limit search depth.
Operator *I = dyn_cast<Operator>(V);
if (I == 0) return;
130 APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
131 switch (I->getOpcode()) {
133 case Instruction::And: {
134 // If either the LHS or the RHS are Zero, the result is zero.
135 ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD, Depth+1);
136 APInt Mask2(Mask & ~KnownZero);
ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                  Depth+1);
139 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
140 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
142 // Output known-1 bits are only known if set in both the LHS & RHS.
143 KnownOne &= KnownOne2;
// Output known-0 bits are known to be clear if zero in either the LHS or RHS.
145 KnownZero |= KnownZero2;
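// For example, in an 8-bit AND where the RHS is the constant 0x0F and nothing
// is known about the LHS, the result has its top four bits known zero and no
// bits known one.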
148 case Instruction::Or: {
149 ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD, Depth+1);
150 APInt Mask2(Mask & ~KnownOne);
ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                  Depth+1);
153 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
154 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
156 // Output known-0 bits are only known if clear in both the LHS & RHS.
157 KnownZero &= KnownZero2;
// Output known-1 bits are known to be set if set in either the LHS or RHS.
159 KnownOne |= KnownOne2;
162 case Instruction::Xor: {
163 ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD, Depth+1);
ComputeMaskedBits(I->getOperand(0), Mask, KnownZero2, KnownOne2, TD,
                  Depth+1);
166 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
167 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
169 // Output known-0 bits are known if clear or set in both the LHS & RHS.
170 APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
// Output known-1 bits are known to be set if set in exactly one of the LHS and RHS.
172 KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
173 KnownZero = KnownZeroOut;
176 case Instruction::Mul: {
177 APInt Mask2 = APInt::getAllOnesValue(BitWidth);
178 ComputeMaskedBits(I->getOperand(1), Mask2, KnownZero, KnownOne, TD,Depth+1);
ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                  Depth+1);
181 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
182 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
184 // If low bits are zero in either operand, output low known-0 bits.
// Also compute a conservative estimate for high known-0 bits.
186 // More trickiness is possible, but this is sufficient for the
187 // interesting case of alignment computation.
189 unsigned TrailZ = KnownZero.countTrailingOnes() +
190 KnownZero2.countTrailingOnes();
191 unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
192 KnownZero2.countLeadingOnes(),
193 BitWidth) - BitWidth;
195 TrailZ = std::min(TrailZ, BitWidth);
196 LeadZ = std::min(LeadZ, BitWidth);
197 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
198 APInt::getHighBitsSet(BitWidth, LeadZ);
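// For example, multiplying a value with two known trailing zero bits by one
// with three known trailing zero bits yields at least five trailing zero bits
// (a multiple of 4 times a multiple of 8 is a multiple of 32).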
202 case Instruction::UDiv: {
203 // For the purposes of computing leading zeros we can conservatively
204 // treat a udiv as a logical right shift by the power of 2 known to
205 // be less than the denominator.
206 APInt AllOnes = APInt::getAllOnesValue(BitWidth);
207 ComputeMaskedBits(I->getOperand(0),
208 AllOnes, KnownZero2, KnownOne2, TD, Depth+1);
209 unsigned LeadZ = KnownZero2.countLeadingOnes();
213 ComputeMaskedBits(I->getOperand(1),
214 AllOnes, KnownZero2, KnownOne2, TD, Depth+1);
215 unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
216 if (RHSUnknownLeadingOnes != BitWidth)
217 LeadZ = std::min(BitWidth,
218 LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
220 KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ) & Mask;
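// For example, if the divisor has a known one bit at position 4 (so it is at
// least 16), the quotient has at least four more leading zero bits than the
// numerator does.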
223 case Instruction::Select:
224 ComputeMaskedBits(I->getOperand(2), Mask, KnownZero, KnownOne, TD, Depth+1);
ComputeMaskedBits(I->getOperand(1), Mask, KnownZero2, KnownOne2, TD,
                  Depth+1);
227 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
228 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
230 // Only known if known in both the LHS and RHS.
231 KnownOne &= KnownOne2;
232 KnownZero &= KnownZero2;
234 case Instruction::FPTrunc:
235 case Instruction::FPExt:
236 case Instruction::FPToUI:
237 case Instruction::FPToSI:
238 case Instruction::SIToFP:
239 case Instruction::UIToFP:
240 return; // Can't work with floating point.
241 case Instruction::PtrToInt:
242 case Instruction::IntToPtr:
// We can't handle these if we don't know the pointer size.
if (!TD) return;
245 // FALL THROUGH and handle them the same as zext/trunc.
246 case Instruction::ZExt:
247 case Instruction::Trunc: {
248 const Type *SrcTy = I->getOperand(0)->getType();
250 unsigned SrcBitWidth;
251 // Note that we handle pointer operands here because of inttoptr/ptrtoint
252 // which fall through here.
253 if (SrcTy->isPointerTy())
254 SrcBitWidth = TD->getTypeSizeInBits(SrcTy);
else
  SrcBitWidth = SrcTy->getScalarSizeInBits();
APInt MaskIn(Mask);
MaskIn.zextOrTrunc(SrcBitWidth);
260 KnownZero.zextOrTrunc(SrcBitWidth);
261 KnownOne.zextOrTrunc(SrcBitWidth);
ComputeMaskedBits(I->getOperand(0), MaskIn, KnownZero, KnownOne, TD,
                  Depth+1);
264 KnownZero.zextOrTrunc(BitWidth);
265 KnownOne.zextOrTrunc(BitWidth);
266 // Any top bits are known to be zero.
267 if (BitWidth > SrcBitWidth)
268 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
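// For example, for "zext i8 %x to i32" the top 24 bits of the result are
// known zero.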
271 case Instruction::BitCast: {
272 const Type *SrcTy = I->getOperand(0)->getType();
273 if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
274 // TODO: For now, not handling conversions like:
275 // (bitcast i64 %x to <2 x i32>)
276 !I->getType()->isVectorTy()) {
ComputeMaskedBits(I->getOperand(0), Mask, KnownZero, KnownOne, TD,
                  Depth+1);
283 case Instruction::SExt: {
284 // Compute the bits in the result that are not present in the input.
285 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
APInt MaskIn(Mask);
MaskIn.trunc(SrcBitWidth);
289 KnownZero.trunc(SrcBitWidth);
290 KnownOne.trunc(SrcBitWidth);
ComputeMaskedBits(I->getOperand(0), MaskIn, KnownZero, KnownOne, TD,
                  Depth+1);
293 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
294 KnownZero.zext(BitWidth);
295 KnownOne.zext(BitWidth);
297 // If the sign bit of the input is known set or clear, then we know the
298 // top bits of the result.
299 if (KnownZero[SrcBitWidth-1]) // Input sign bit known zero
300 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
301 else if (KnownOne[SrcBitWidth-1]) // Input sign bit known set
302 KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
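// For example, for "sext i8 %x to i32", if bit 7 of %x is known zero then all
// 24 newly created high bits are known zero; if it is known one they are all
// known one.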
305 case Instruction::Shl:
306 // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
307 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
308 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
309 APInt Mask2(Mask.lshr(ShiftAmt));
ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                  Depth+1);
312 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
313 KnownZero <<= ShiftAmt;
314 KnownOne <<= ShiftAmt;
315 KnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt); // low bits known 0
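// For example, "shl i32 %x, 4" always has its low four bits known zero, and
// any bits known about %x simply shift up with it.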
319 case Instruction::LShr:
320 // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
321 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
322 // Compute the new bits that are at the top now.
323 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
325 // Unsigned shift right.
326 APInt Mask2(Mask.shl(ShiftAmt));
ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                  Depth+1);
329 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
330 KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
331 KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
332 // high bits known zero.
333 KnownZero |= APInt::getHighBitsSet(BitWidth, ShiftAmt);
337 case Instruction::AShr:
338 // (ashr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
339 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
340 // Compute the new bits that are at the top now.
341 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
343 // Signed shift right.
344 APInt Mask2(Mask.shl(ShiftAmt));
ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                  Depth+1);
347 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
348 KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
349 KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
351 APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
352 if (KnownZero[BitWidth-ShiftAmt-1]) // New bits are known zero.
353 KnownZero |= HighBits;
354 else if (KnownOne[BitWidth-ShiftAmt-1]) // New bits are known one.
355 KnownOne |= HighBits;
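// For example, if the shifted-in sign bit is known zero, the vacated high
// bits are all known zero, so the ashr behaves exactly like an lshr here.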
359 case Instruction::Sub: {
360 if (ConstantInt *CLHS = dyn_cast<ConstantInt>(I->getOperand(0))) {
// We know that the top bits of C-X are clear if X contains fewer bits
362 // than C (i.e. no wrap-around can happen). For example, 20-X is
363 // positive if we can prove that X is >= 0 and < 16.
364 if (!CLHS->getValue().isNegative()) {
365 unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
// NLZ can't be BitWidth, because the sign bit of CLHS is known clear.
367 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
ComputeMaskedBits(I->getOperand(1), MaskV, KnownZero2, KnownOne2,
                  TD, Depth+1);
// If all of the MaskV bits are known to be zero, then we know the
// output top bits are zero, because we now know that the output is
// non-negative.
374 if ((KnownZero2 & MaskV) == MaskV) {
375 unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
376 // Top bits known zero.
377 KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2) & Mask;
383 case Instruction::Add: {
384 // If one of the operands has trailing zeros, then the bits that the
385 // other operand has in those bit positions will be preserved in the
386 // result. For an add, this works with either operand. For a subtract,
387 // this only works if the known zeros are in the right operand.
388 APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
389 APInt Mask2 = APInt::getLowBitsSet(BitWidth,
390 BitWidth - Mask.countLeadingZeros());
ComputeMaskedBits(I->getOperand(0), Mask2, LHSKnownZero, LHSKnownOne, TD,
                  Depth+1);
393 assert((LHSKnownZero & LHSKnownOne) == 0 &&
394 "Bits known to be one AND zero?");
395 unsigned LHSKnownZeroOut = LHSKnownZero.countTrailingOnes();
ComputeMaskedBits(I->getOperand(1), Mask2, KnownZero2, KnownOne2, TD,
                  Depth+1);
399 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
400 unsigned RHSKnownZeroOut = KnownZero2.countTrailingOnes();
402 // Determine which operand has more trailing zeros, and use that
403 // many bits from the other operand.
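// For example, if the RHS has its low four bits known zero, the low four bits
// of the sum equal the low four bits of the LHS, so any known bits of the LHS
// in that range carry over to the result.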
404 if (LHSKnownZeroOut > RHSKnownZeroOut) {
405 if (I->getOpcode() == Instruction::Add) {
406 APInt Mask = APInt::getLowBitsSet(BitWidth, LHSKnownZeroOut);
407 KnownZero |= KnownZero2 & Mask;
408 KnownOne |= KnownOne2 & Mask;
} else {
  // If the known zeros are in the left operand for a subtract,
  // fall back to the minimum known zeros in both operands.
  KnownZero |= APInt::getLowBitsSet(BitWidth,
                                    std::min(LHSKnownZeroOut,
                                             RHSKnownZeroOut));
}
416 } else if (RHSKnownZeroOut >= LHSKnownZeroOut) {
417 APInt Mask = APInt::getLowBitsSet(BitWidth, RHSKnownZeroOut);
418 KnownZero |= LHSKnownZero & Mask;
419 KnownOne |= LHSKnownOne & Mask;
423 case Instruction::SRem:
424 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
425 APInt RA = Rem->getValue().abs();
426 if (RA.isPowerOf2()) {
427 APInt LowBits = RA - 1;
428 APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                  Depth+1);
432 // The low bits of the first operand are unchanged by the srem.
433 KnownZero = KnownZero2 & LowBits;
434 KnownOne = KnownOne2 & LowBits;
436 // If the first operand is non-negative or has all low bits zero, then
437 // the upper bits are all zero.
438 if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
439 KnownZero |= ~LowBits;
441 // If the first operand is negative and not all low bits are zero, then
442 // the upper bits are all one.
443 if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
444 KnownOne |= ~LowBits;
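// For example, for "X srem 8" the remainder lies in (-8, 8), so only the low
// three bits and the sign can vary; if X is known non-negative, every bit
// above bit 2 is known zero.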
449 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
453 case Instruction::URem: {
454 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
455 APInt RA = Rem->getValue();
456 if (RA.isPowerOf2()) {
457 APInt LowBits = (RA - 1);
458 APInt Mask2 = LowBits & Mask;
459 KnownZero |= ~LowBits & Mask;
ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                  Depth+1);
462 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
467 // Since the result is less than or equal to either operand, any leading
468 // zero bits in either operand must also exist in the result.
469 APInt AllOnes = APInt::getAllOnesValue(BitWidth);
ComputeMaskedBits(I->getOperand(0), AllOnes, KnownZero, KnownOne,
                  TD, Depth+1);
ComputeMaskedBits(I->getOperand(1), AllOnes, KnownZero2, KnownOne2,
                  TD, Depth+1);
475 unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
476 KnownZero2.countLeadingOnes());
478 KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & Mask;
482 case Instruction::Alloca: {
483 AllocaInst *AI = cast<AllocaInst>(V);
484 unsigned Align = AI->getAlignment();
485 if (Align == 0 && TD)
486 Align = TD->getABITypeAlignment(AI->getType()->getElementType());
489 KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
490 CountTrailingZeros_32(Align));
493 case Instruction::GetElementPtr: {
494 // Analyze all of the subscripts of this getelementptr instruction
495 // to determine if we can prove known low zero bits.
496 APInt LocalMask = APInt::getAllOnesValue(BitWidth);
497 APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
498 ComputeMaskedBits(I->getOperand(0), LocalMask,
499 LocalKnownZero, LocalKnownOne, TD, Depth+1);
500 unsigned TrailZ = LocalKnownZero.countTrailingOnes();
502 gep_type_iterator GTI = gep_type_begin(I);
503 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
504 Value *Index = I->getOperand(i);
505 if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
506 // Handle struct member offset arithmetic.
508 const StructLayout *SL = TD->getStructLayout(STy);
509 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
510 uint64_t Offset = SL->getElementOffset(Idx);
511 TrailZ = std::min(TrailZ,
512 CountTrailingZeros_64(Offset));
514 // Handle array index arithmetic.
515 const Type *IndexedTy = GTI.getIndexedType();
516 if (!IndexedTy->isSized()) return;
517 unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
518 uint64_t TypeSize = TD ? TD->getTypeAllocSize(IndexedTy) : 1;
519 LocalMask = APInt::getAllOnesValue(GEPOpiBits);
520 LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
521 ComputeMaskedBits(Index, LocalMask,
522 LocalKnownZero, LocalKnownOne, TD, Depth+1);
523 TrailZ = std::min(TrailZ,
524 unsigned(CountTrailingZeros_64(TypeSize) +
525 LocalKnownZero.countTrailingOnes()));
529 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) & Mask;
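// For example, indexing an array of i32 contributes at least two trailing
// zero bits for that index (the element size is 4 bytes); the known alignment
// of the result is the minimum over the base pointer and every index.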
532 case Instruction::PHI: {
533 PHINode *P = cast<PHINode>(I);
534 // Handle the case of a simple two-predecessor recurrence PHI.
535 // There's a lot more that could theoretically be done here, but
536 // this is sufficient to catch some interesting cases.
537 if (P->getNumIncomingValues() == 2) {
538 for (unsigned i = 0; i != 2; ++i) {
539 Value *L = P->getIncomingValue(i);
540 Value *R = P->getIncomingValue(!i);
541 Operator *LU = dyn_cast<Operator>(L);
544 unsigned Opcode = LU->getOpcode();
545 // Check for operations that have the property that if
546 // both their operands have low zero bits, the result
547 // will have low zero bits.
548 if (Opcode == Instruction::Add ||
549 Opcode == Instruction::Sub ||
550 Opcode == Instruction::And ||
551 Opcode == Instruction::Or ||
552 Opcode == Instruction::Mul) {
553 Value *LL = LU->getOperand(0);
554 Value *LR = LU->getOperand(1);
555 // Find a recurrence.
// Ok, we have a PHI of the form L op= R.  Check for low
// zero bits.
564 APInt Mask2 = APInt::getAllOnesValue(BitWidth);
565 ComputeMaskedBits(R, Mask2, KnownZero2, KnownOne2, TD, Depth+1);
566 Mask2 = APInt::getLowBitsSet(BitWidth,
567 KnownZero2.countTrailingOnes());
569 // We need to take the minimum number of known bits
570 APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
571 ComputeMaskedBits(L, Mask2, KnownZero3, KnownOne3, TD, Depth+1);
574 APInt::getLowBitsSet(BitWidth,
575 std::min(KnownZero2.countTrailingOnes(),
576 KnownZero3.countTrailingOnes()));
// Otherwise take the intersection of the known bit sets of the incoming
// values, taking conservative care to avoid excessive recursion.
584 if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
585 KnownZero = APInt::getAllOnesValue(BitWidth);
586 KnownOne = APInt::getAllOnesValue(BitWidth);
587 for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i) {
588 // Skip direct self references.
589 if (P->getIncomingValue(i) == P) continue;
591 KnownZero2 = APInt(BitWidth, 0);
592 KnownOne2 = APInt(BitWidth, 0);
593 // Recurse, but cap the recursion to one level, because we don't
594 // want to waste time spinning around in loops.
595 ComputeMaskedBits(P->getIncomingValue(i), KnownZero | KnownOne,
596 KnownZero2, KnownOne2, TD, MaxDepth-1);
597 KnownZero &= KnownZero2;
598 KnownOne &= KnownOne2;
// If all bits have been ruled out, there's no need to check
// any more incoming values.
601 if (!KnownZero && !KnownOne)
607 case Instruction::Call:
608 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
609 switch (II->getIntrinsicID()) {
611 case Intrinsic::ctpop:
612 case Intrinsic::ctlz:
613 case Intrinsic::cttz: {
614 unsigned LowBits = Log2_32(BitWidth)+1;
615 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
624 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
625 /// this predicate to simplify operations downstream. Mask is known to be zero
626 /// for bits that V cannot have.
628 /// This function is defined on values with integer type, values with pointer
629 /// type (but only if TD is non-null), and vectors of integers. In the case
630 /// where V is a vector, the mask, known zero, and known one values are the
631 /// same width as the vector element, and the bit is set only if it is true
632 /// for all of the elements in the vector.
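///
/// For example (illustrative),
///   MaskedValueIsZero(V, APInt::getLowBitsSet(BW, 2), TD)
/// returns true when the low two bits of V are known to be zero, e.g. when V
/// is known to be a multiple of four.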
633 bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
634 const TargetData *TD, unsigned Depth) {
635 APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
636 ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD, Depth);
637 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
return (KnownZero & Mask) == Mask;
}
643 /// ComputeNumSignBits - Return the number of times the sign bit of the
644 /// register is replicated into the other bits. We know that at least 1 bit
645 /// is always equal to the sign bit (itself), but other cases can give us
646 /// information. For example, immediately after an "ashr X, 2", we know that
647 /// the top 3 bits are all equal to each other, so we return 3.
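/// Likewise, for a value produced by "sext i16 %x to i32" it returns at least
/// 17, since the sixteen copied bits plus the original sign bit are all equal.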
/// 'V' must have a scalar integer type.
unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD,
                                  unsigned Depth) {
653 assert((TD || V->getType()->isIntOrIntVectorTy()) &&
654 "ComputeNumSignBits requires a TargetData object to operate "
655 "on non-integer values!");
656 const Type *Ty = V->getType();
657 unsigned TyBits = TD ? TD->getTypeSizeInBits(V->getType()->getScalarType()) :
Ty->getScalarSizeInBits();
unsigned Tmp, Tmp2;
660 unsigned FirstAnswer = 1;
// Note that ConstantInt is handled by the general ComputeMaskedBits case
// below.

if (Depth == 6)
  return 1;  // Limit search depth.
668 Operator *U = dyn_cast<Operator>(V);
669 switch (Operator::getOpcode(V)) {
671 case Instruction::SExt:
672 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
673 return ComputeNumSignBits(U->getOperand(0), TD, Depth+1) + Tmp;
675 case Instruction::AShr:
676 Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
677 // ashr X, C -> adds C sign bits.
678 if (ConstantInt *C = dyn_cast<ConstantInt>(U->getOperand(1))) {
679 Tmp += C->getZExtValue();
if (Tmp > TyBits) Tmp = TyBits;
}
return Tmp;
683 case Instruction::Shl:
684 if (ConstantInt *C = dyn_cast<ConstantInt>(U->getOperand(1))) {
685 // shl destroys sign bits.
686 Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
687 if (C->getZExtValue() >= TyBits || // Bad shift.
688 C->getZExtValue() >= Tmp) break; // Shifted all sign bits out.
689 return Tmp - C->getZExtValue();
692 case Instruction::And:
693 case Instruction::Or:
694 case Instruction::Xor: // NOT is handled here.
695 // Logical binary ops preserve the number of sign bits at the worst.
696 Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
698 Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
699 FirstAnswer = std::min(Tmp, Tmp2);
700 // We computed what we know about the sign bits as our first
701 // answer. Now proceed to the generic code that uses
702 // ComputeMaskedBits, and pick whichever answer is better.
706 case Instruction::Select:
707 Tmp = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
708 if (Tmp == 1) return 1; // Early out.
709 Tmp2 = ComputeNumSignBits(U->getOperand(2), TD, Depth+1);
710 return std::min(Tmp, Tmp2);
712 case Instruction::Add:
713 // Add can have at most one carry bit. Thus we know that the output
714 // is, at worst, one more bit than the inputs.
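// For example, if both operands of an i32 add have at least four sign bits,
// the sum still has at least three.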
715 Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
716 if (Tmp == 1) return 1; // Early out.
718 // Special case decrementing a value (ADD X, -1):
719 if (ConstantInt *CRHS = dyn_cast<ConstantInt>(U->getOperand(1)))
720 if (CRHS->isAllOnesValue()) {
721 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
722 APInt Mask = APInt::getAllOnesValue(TyBits);
ComputeMaskedBits(U->getOperand(0), Mask, KnownZero, KnownOne, TD,
                  Depth+1);
// If the input is known to be 0 or 1, the output is 0/-1, which is all
// sign bits.
if ((KnownZero | APInt(TyBits, 1)) == Mask)
  return TyBits;
731 // If we are subtracting one from a positive number, there is no carry
732 // out of the result.
if (KnownZero.isNegative())
  return Tmp;
}
737 Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
738 if (Tmp2 == 1) return 1;
739 return std::min(Tmp, Tmp2)-1;
741 case Instruction::Sub:
742 Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
743 if (Tmp2 == 1) return 1;
746 if (ConstantInt *CLHS = dyn_cast<ConstantInt>(U->getOperand(0)))
747 if (CLHS->isNullValue()) {
748 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
749 APInt Mask = APInt::getAllOnesValue(TyBits);
ComputeMaskedBits(U->getOperand(1), Mask, KnownZero, KnownOne,
                  TD, Depth+1);
// If the input is known to be 0 or 1, the output is 0/-1, which is all
// sign bits.
if ((KnownZero | APInt(TyBits, 1)) == Mask)
  return TyBits;
757 // If the input is known to be positive (the sign bit is known clear),
758 // the output of the NEG has the same number of sign bits as the input.
if (KnownZero.isNegative())
  return Tmp2;

// Otherwise, we treat this like a SUB.
}
765 // Sub can have at most one carry bit. Thus we know that the output
766 // is, at worst, one more bit than the inputs.
767 Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
768 if (Tmp == 1) return 1; // Early out.
769 return std::min(Tmp, Tmp2)-1;
771 case Instruction::PHI: {
772 PHINode *PN = cast<PHINode>(U);
773 // Don't analyze large in-degree PHIs.
774 if (PN->getNumIncomingValues() > 4) break;
776 // Take the minimum of all incoming values. This can't infinitely loop
777 // because of our depth threshold.
778 Tmp = ComputeNumSignBits(PN->getIncomingValue(0), TD, Depth+1);
779 for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
780 if (Tmp == 1) return Tmp;
Tmp = std::min(Tmp,
               ComputeNumSignBits(PN->getIncomingValue(i), TD, Depth+1));
}
return Tmp;
}
787 case Instruction::Trunc:
788 // FIXME: it's tricky to do anything useful for this, but it is an important
789 // case for targets like X86.
793 // Finally, if we can prove that the top bits of the result are 0's or 1's,
794 // use this information.
795 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
796 APInt Mask = APInt::getAllOnesValue(TyBits);
797 ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD, Depth);
if (KnownZero.isNegative()) {        // sign bit is 0
  Mask = KnownZero;
} else if (KnownOne.isNegative()) {  // sign bit is 1
  Mask = KnownOne;
} else {
  // Nothing known.
  return FirstAnswer;
}
808 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
809 // the number of identical bits in the top of the input value.
811 Mask <<= Mask.getBitWidth()-TyBits;
812 // Return # leading zeros. We use 'min' here in case Val was zero before
813 // shifting. We don't want to return '64' as for an i32 "0".
return std::max(FirstAnswer, std::min(TyBits, Mask.countLeadingZeros()));
}
817 /// ComputeMultiple - This function computes the integer multiple of Base that
818 /// equals V. If successful, it returns true and returns the multiple in
819 /// Multiple. If unsuccessful, it returns false. It looks
820 /// through SExt instructions only if LookThroughSExt is true.
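///
/// For example (illustrative), for V = i32 12 and Base = 4 this returns true
/// with Multiple set to i32 3; for V = mul i32 %x, 3 and Base = 3 it returns
/// true with Multiple set to %x.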
821 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
822 bool LookThroughSExt, unsigned Depth) {
823 const unsigned MaxDepth = 6;
825 assert(V && "No Value?");
826 assert(Depth <= MaxDepth && "Limit Search Depth");
827 assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
829 const Type *T = V->getType();
831 ConstantInt *CI = dyn_cast<ConstantInt>(V);
841 ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
842 Constant *BaseVal = ConstantInt::get(T, Base);
843 if (CO && CO == BaseVal) {
845 Multiple = ConstantInt::get(T, 1);
849 if (CI && CI->getZExtValue() % Base == 0) {
850 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
854 if (Depth == MaxDepth) return false; // Limit search depth.
856 Operator *I = dyn_cast<Operator>(V);
857 if (!I) return false;
859 switch (I->getOpcode()) {
861 case Instruction::SExt:
862 if (!LookThroughSExt) return false;
863 // otherwise fall through to ZExt
864 case Instruction::ZExt:
865 return ComputeMultiple(I->getOperand(0), Base, Multiple,
866 LookThroughSExt, Depth+1);
867 case Instruction::Shl:
868 case Instruction::Mul: {
869 Value *Op0 = I->getOperand(0);
870 Value *Op1 = I->getOperand(1);
872 if (I->getOpcode() == Instruction::Shl) {
873 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
874 if (!Op1CI) return false;
875 // Turn Op0 << Op1 into Op0 * 2^Op1
876 APInt Op1Int = Op1CI->getValue();
877 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
878 Op1 = ConstantInt::get(V->getContext(),
879 APInt(Op1Int.getBitWidth(), 0).set(BitToSet));
Value *Mul0 = NULL;
Value *Mul1 = NULL;
bool M0 = ComputeMultiple(Op0, Base, Mul0,
885 LookThroughSExt, Depth+1);
886 bool M1 = ComputeMultiple(Op1, Base, Mul1,
887 LookThroughSExt, Depth+1);
890 if (isa<Constant>(Op1) && isa<Constant>(Mul0)) {
891 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
892 Multiple = ConstantExpr::getMul(cast<Constant>(Mul0),
893 cast<Constant>(Op1));
897 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
898 if (Mul0CI->getValue() == 1) {
899 // V == Base * Op1, so return Op1
906 if (isa<Constant>(Op0) && isa<Constant>(Mul1)) {
907 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
908 Multiple = ConstantExpr::getMul(cast<Constant>(Mul1),
909 cast<Constant>(Op0));
913 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
914 if (Mul1CI->getValue() == 1) {
915 // V == Base * Op0, so return Op0
923 // We could not determine if V is a multiple of Base.
927 /// CannotBeNegativeZero - Return true if we can prove that the specified FP
928 /// value is never equal to -0.0.
/// NOTE: this function will need to be revisited when we support non-default
/// rounding modes!
933 bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
934 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
935 return !CFP->getValueAPF().isNegZero();
if (Depth == 6)
  return 1; // Limit search depth.
940 const Operator *I = dyn_cast<Operator>(V);
941 if (I == 0) return false;
943 // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
944 if (I->getOpcode() == Instruction::FAdd &&
945 isa<ConstantFP>(I->getOperand(1)) &&
946 cast<ConstantFP>(I->getOperand(1))->isNullValue())
949 // sitofp and uitofp turn into +0.0 for zero.
950 if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
953 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
954 // sqrt(-0.0) = -0.0, no other negative results are possible.
955 if (II->getIntrinsicID() == Intrinsic::sqrt)
956 return CannotBeNegativeZero(II->getOperand(1), Depth+1);
958 if (const CallInst *CI = dyn_cast<CallInst>(I))
959 if (const Function *F = CI->getCalledFunction()) {
960 if (F->isDeclaration()) {
962 if (F->getName() == "abs") return true;
963 // fabs[lf](x) != -0.0
964 if (F->getName() == "fabs") return true;
965 if (F->getName() == "fabsf") return true;
966 if (F->getName() == "fabsl") return true;
967 if (F->getName() == "sqrt" || F->getName() == "sqrtf" ||
968 F->getName() == "sqrtl")
969 return CannotBeNegativeZero(CI->getOperand(1), Depth+1);
977 /// GetLinearExpression - Analyze the specified value as a linear expression:
978 /// "A*V + B", where A and B are constant integers. Return the scale and offset
979 /// values as APInts and return V as a Value*. The incoming Value is known to
980 /// have IntegerType. Note that this looks through extends, so the high bits
981 /// may not be represented in the result.
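///
/// For example (illustrative), for V = add (shl i32 %x, 2), 12 this returns
/// %x with Scale = 4 and Offset = 12, i.e. V = 4*%x + 12.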
982 static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
983 const TargetData *TD, unsigned Depth) {
984 assert(V->getType()->isIntegerTy() && "Not an integer value");
986 // Limit our recursion depth.
993 if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
994 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
995 switch (BOp->getOpcode()) {
997 case Instruction::Or:
// X|C == X+C if all the bits in C are unset in X.  Otherwise we can't
// analyze it.
if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), TD))
  break;
// FALL THROUGH.
1003 case Instruction::Add:
1004 V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, TD, Depth+1);
Offset += RHSC->getValue();
return V;
1007 case Instruction::Mul:
1008 V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, TD, Depth+1);
1009 Offset *= RHSC->getValue();
Scale *= RHSC->getValue();
return V;
1012 case Instruction::Shl:
1013 V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, TD, Depth+1);
1014 Offset <<= RHSC->getValue().getLimitedValue();
Scale <<= RHSC->getValue().getLimitedValue();
return V;
}
}
}
1021 // Since clients don't care about the high bits of the value, just scales and
1022 // offsets, we can look through extensions.
1023 if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
1024 Value *CastOp = cast<CastInst>(V)->getOperand(0);
1025 unsigned OldWidth = Scale.getBitWidth();
1026 unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
1027 Scale.trunc(SmallWidth);
1028 Offset.trunc(SmallWidth);
1029 Value *Result = GetLinearExpression(CastOp, Scale, Offset, TD, Depth+1);
1030 Scale.zext(OldWidth);
Offset.zext(OldWidth);
return Result;
}

// Base case: V itself, i.e. V = 1*V + 0.
Scale = 1;
Offset = 0;
return V;
}
1040 /// DecomposeGEPExpression - If V is a symbolic pointer expression, decompose it
/// into a base pointer with a constant offset and a number of scaled symbolic
/// offsets.
///
1044 /// The scaled symbolic offsets (represented by pairs of a Value* and a scale in
1045 /// the VarIndices vector) are Value*'s that are known to be scaled by the
1046 /// specified amount, but which may have other unrepresented high bits. As such,
1047 /// the gep cannot necessarily be reconstructed from its decomposed form.
1049 /// When TargetData is around, this function is capable of analyzing everything
1050 /// that Value::getUnderlyingObject() can look through. When not, it just looks
1051 /// through pointer casts.
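///
/// For example (illustrative), "getelementptr [10 x i32]* %A, i32 0, i32 %i"
/// decomposes into base %A, BaseOffs 0, and a single variable index (%i, 4),
/// since each array element is four bytes.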
1053 const Value *llvm::DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
1054 SmallVectorImpl<std::pair<const Value*, int64_t> > &VarIndices,
1055 const TargetData *TD) {
1056 // Limit recursion depth to limit compile time in crazy cases.
unsigned MaxLookup = 6;

do {
1061 // See if this is a bitcast or GEP.
const Operator *Op = dyn_cast<Operator>(V);
if (Op == 0) {
  // The only non-operator cases we can handle are GlobalAliases.
  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
    if (!GA->mayBeOverridden()) {
      V = GA->getAliasee();
      continue;
    }
  }
  return V;
}
if (Op->getOpcode() == Instruction::BitCast) {
  V = Op->getOperand(0);
  continue;
}

const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
if (GEPOp == 0)
  return V;
1083 // Don't attempt to analyze GEPs over unsized objects.
1084 if (!cast<PointerType>(GEPOp->getOperand(0)->getType())
->getElementType()->isSized())
  return V;
// If we are lacking TargetData information, we can't compute the offsets of
// elements computed by GEPs.  However, we can handle bitcast equivalent
// GEPs.
if (TD == 0) {
  if (!GEPOp->hasAllZeroIndices())
    return V;
  V = GEPOp->getOperand(0);
  continue;
}
1098 // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
1099 gep_type_iterator GTI = gep_type_begin(GEPOp);
1100 for (User::const_op_iterator I = GEPOp->op_begin()+1,
E = GEPOp->op_end(); I != E; ++I) {
Value *Index = *I;
1103 // Compute the (potentially symbolic) offset in bytes for this index.
1104 if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
1105 // For a struct, add the member offset.
1106 unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
1107 if (FieldNo == 0) continue;
BaseOffs += TD->getStructLayout(STy)->getElementOffset(FieldNo);
continue;
}
1113 // For an array/pointer, add the element offset, explicitly scaled.
1114 if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
1115 if (CIdx->isZero()) continue;
BaseOffs += TD->getTypeAllocSize(*GTI)*CIdx->getSExtValue();
continue;
}
1120 uint64_t Scale = TD->getTypeAllocSize(*GTI);
1122 // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
1123 unsigned Width = cast<IntegerType>(Index->getType())->getBitWidth();
1124 APInt IndexScale(Width, 0), IndexOffset(Width, 0);
1125 Index = GetLinearExpression(Index, IndexScale, IndexOffset, TD, 0);
1127 // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
1128 // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
1129 BaseOffs += IndexOffset.getZExtValue()*Scale;
1130 Scale *= IndexScale.getZExtValue();
// If we already had an occurrence of this index variable, merge this
1134 // scale into it. For example, we want to handle:
1135 // A[x][x] -> x*16 + x*4 -> x*20
1136 // This also ensures that 'x' only appears in the index list once.
1137 for (unsigned i = 0, e = VarIndices.size(); i != e; ++i) {
1138 if (VarIndices[i].first == Index) {
1139 Scale += VarIndices[i].second;
VarIndices.erase(VarIndices.begin()+i);
break;
}
}
// Make sure that we have a scale that makes sense for this target's
// pointer size.
if (unsigned ShiftBits = 64-TD->getPointerSizeInBits()) {
1148 Scale <<= ShiftBits;
1149 Scale >>= ShiftBits;
1153 VarIndices.push_back(std::make_pair(Index, Scale));
1156 // Analyze the base pointer next.
1157 V = GEPOp->getOperand(0);
1158 } while (--MaxLookup);
// If the chain of expressions is too deep, just return early.
return V;
}
1165 // This is the recursive version of BuildSubAggregate. It takes a few different
1166 // arguments. Idxs is the index within the nested struct From that we are
1167 // looking at now (which is of type IndexedType). IdxSkip is the number of
1168 // indices from Idxs that should be left out when inserting into the resulting
// struct. To is the result struct built so far, new insertvalue instructions
// are inserted before InsertBefore.
1171 static Value *BuildSubAggregate(Value *From, Value* To, const Type *IndexedType,
SmallVector<unsigned, 10> &Idxs,
unsigned IdxSkip,
1174 Instruction *InsertBefore) {
1175 const llvm::StructType *STy = llvm::dyn_cast<llvm::StructType>(IndexedType);
1177 // Save the original To argument so we can modify it
1179 // General case, the type indexed by Idxs is a struct
1180 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1181 // Process each struct element recursively
1184 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
1188 // Couldn't find any inserted value for this index? Cleanup
1189 while (PrevTo != OrigTo) {
1190 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
1191 PrevTo = Del->getAggregateOperand();
1192 Del->eraseFromParent();
1194 // Stop processing elements
// If we successfully found a value for each of our subaggregates
1202 // Base case, the type indexed by SourceIdxs is not a struct, or not all of
1203 // the struct's elements had a value that was inserted directly. In the latter
1204 // case, perhaps we can't determine each of the subelements individually, but
1205 // we might be able to find the complete struct somewhere.
1207 // Find the value that is at that particular spot
1208 Value *V = FindInsertedValue(From, Idxs.begin(), Idxs.end());
// Insert the value in the new (sub) aggregate
1214 return llvm::InsertValueInst::Create(To, V, Idxs.begin() + IdxSkip,
1215 Idxs.end(), "tmp", InsertBefore);
1218 // This helper takes a nested struct and extracts a part of it (which is again a
1219 // struct) into a new value. For example, given the struct:
1220 // { a, { b, { c, d }, e } }
1221 // and the indices "1, 1" this returns
1224 // It does this by inserting an insertvalue for each element in the resulting
1225 // struct, as opposed to just inserting a single struct. This will only work if
// each of the elements of the substruct is known (i.e., inserted into From by
// an insertvalue instruction somewhere).
1229 // All inserted insertvalue instructions are inserted before InsertBefore
1230 static Value *BuildSubAggregate(Value *From, const unsigned *idx_begin,
1231 const unsigned *idx_end,
1232 Instruction *InsertBefore) {
1233 assert(InsertBefore && "Must have someplace to insert!");
const Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
                                                            idx_begin,
                                                            idx_end);
1237 Value *To = UndefValue::get(IndexedType);
1238 SmallVector<unsigned, 10> Idxs(idx_begin, idx_end);
1239 unsigned IdxSkip = Idxs.size();
1241 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
/// FindInsertedValue - Given an aggregate and a sequence of indices, see if
/// the scalar value indexed is already around as a register, for example if it
/// were inserted directly into the aggregate.
1248 /// If InsertBefore is not null, this function will duplicate (modified)
1249 /// insertvalues when a part of a nested struct is extracted.
1250 Value *llvm::FindInsertedValue(Value *V, const unsigned *idx_begin,
1251 const unsigned *idx_end, Instruction *InsertBefore) {
// Nothing to index? Just return V then (this is useful at the end of our
// recursion).
if (idx_begin == idx_end)
  return V;
1256 // We have indices, so V should have an indexable type
1257 assert((V->getType()->isStructTy() || V->getType()->isArrayTy())
1258 && "Not looking at a struct or array?");
1259 assert(ExtractValueInst::getIndexedType(V->getType(), idx_begin, idx_end)
1260 && "Invalid indices for type?");
1261 const CompositeType *PTy = cast<CompositeType>(V->getType());
if (isa<UndefValue>(V))
  return UndefValue::get(ExtractValueInst::getIndexedType(PTy,
                                                           idx_begin,
                                                           idx_end));
else if (isa<ConstantAggregateZero>(V))
  return Constant::getNullValue(ExtractValueInst::getIndexedType(PTy,
                                                                  idx_begin,
                                                                  idx_end));
1271 else if (Constant *C = dyn_cast<Constant>(V)) {
1272 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C))
1273 // Recursively process this constant
1274 return FindInsertedValue(C->getOperand(*idx_begin), idx_begin + 1,
1275 idx_end, InsertBefore);
1276 } else if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
1277 // Loop the indices for the insertvalue instruction in parallel with the
1278 // requested indices
1279 const unsigned *req_idx = idx_begin;
1280 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
1281 i != e; ++i, ++req_idx) {
if (req_idx == idx_end) {
  if (InsertBefore)
1284 // The requested index identifies a part of a nested aggregate. Handle
1285 // this specially. For example,
1286 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
1287 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
1288 // %C = extractvalue {i32, { i32, i32 } } %B, 1
1289 // This can be changed into
1290 // %A = insertvalue {i32, i32 } undef, i32 10, 0
1291 // %C = insertvalue {i32, i32 } %A, i32 11, 1
// which allows the unused 0,0 element from the nested struct to be
// removed.
1294 return BuildSubAggregate(V, idx_begin, req_idx, InsertBefore);
else
  // We can't handle this without inserting insertvalues
  return 0;
}
// This insertvalue inserts something other than what we are looking for.
// See if the (aggregate) value inserted into has the value we are
// looking for, then.
if (*req_idx != *i)
  return FindInsertedValue(I->getAggregateOperand(), idx_begin, idx_end,
                           InsertBefore);
1307 // If we end up here, the indices of the insertvalue match with those
1308 // requested (though possibly only partially). Now we recursively look at
1309 // the inserted value, passing any remaining indices.
return FindInsertedValue(I->getInsertedValueOperand(), req_idx, idx_end,
                         InsertBefore);
1312 } else if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
// If we're extracting a value from an aggregate that was extracted from
1314 // something else, we can extract from that something else directly instead.
1315 // However, we will need to chain I's indices with the requested indices.
1317 // Calculate the number of indices required
1318 unsigned size = I->getNumIndices() + (idx_end - idx_begin);
1319 // Allocate some space to put the new indices in
1320 SmallVector<unsigned, 5> Idxs;
1322 // Add indices from the extract value instruction
for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
     i != e; ++i)
  Idxs.push_back(*i);
1327 // Add requested indices
for (const unsigned *i = idx_begin, *e = idx_end; i != e; ++i)
  Idxs.push_back(*i);
1331 assert(Idxs.size() == size
1332 && "Number of indices added not correct?");
return FindInsertedValue(I->getAggregateOperand(), Idxs.begin(), Idxs.end(),
                         InsertBefore);
}
// Otherwise, we don't know (such as extracting from a function return value
// or load instruction).
return 0;
}
1342 /// GetConstantStringInfo - This function computes the length of a
1343 /// null-terminated C string pointed to by V. If successful, it returns true
1344 /// and returns the string in Str. If unsuccessful, it returns false.
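///
/// For example (illustrative), for a global such as
///   @s = constant [4 x i8] c"hi\00\00"
/// this returns true with Str == "hi" when StopAtNul is true.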
bool llvm::GetConstantStringInfo(const Value *V, std::string &Str,
                                 uint64_t Offset,
                                 bool StopAtNul) {
1348 // If V is NULL then return false;
1349 if (V == NULL) return false;
1351 // Look through bitcast instructions.
1352 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(V))
1353 return GetConstantStringInfo(BCI->getOperand(0), Str, Offset, StopAtNul);
1355 // If the value is not a GEP instruction nor a constant expression with a
1356 // GEP instruction, then return false because ConstantArray can't occur
1358 const User *GEP = 0;
if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(V)) {
  GEP = GEPI;
1361 } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
1362 if (CE->getOpcode() == Instruction::BitCast)
1363 return GetConstantStringInfo(CE->getOperand(0), Str, Offset, StopAtNul);
if (CE->getOpcode() != Instruction::GetElementPtr)
  return false;
GEP = CE;
}

if (GEP) {
1370 // Make sure the GEP has exactly three arguments.
if (GEP->getNumOperands() != 3)
  return false;
1374 // Make sure the index-ee is a pointer to array of i8.
1375 const PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
1376 const ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
if (AT == 0 || !AT->getElementType()->isIntegerTy(8))
  return false;
1380 // Check to make sure that the first operand of the GEP is an integer and
1381 // has value 0 so that we are sure we're indexing into the initializer.
1382 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
if (FirstIdx == 0 || !FirstIdx->isZero())
  return false;
1386 // If the second index isn't a ConstantInt, then this is a variable index
// into the array.  If this occurs, we can't say anything meaningful about
// the string.
1389 uint64_t StartIdx = 0;
1390 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
StartIdx = CI->getZExtValue();
else
  return false;
return GetConstantStringInfo(GEP->getOperand(0), Str, StartIdx+Offset,
                             StopAtNul);
}
1398 // The GEP instruction, constant or instruction, must reference a global
1399 // variable that is a constant and is initialized. The referenced constant
1400 // initializer is the array that we'll use for optimization.
1401 const GlobalVariable* GV = dyn_cast<GlobalVariable>(V);
if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
  return false;
1404 const Constant *GlobalInit = GV->getInitializer();
1406 // Handle the ConstantAggregateZero case
1407 if (isa<ConstantAggregateZero>(GlobalInit)) {
1408 // This is a degenerate case. The initializer is constant zero so the
// length of the string must be zero.
Str.clear();
return true;
}
1414 // Must be a Constant Array
1415 const ConstantArray *Array = dyn_cast<ConstantArray>(GlobalInit);
if (Array == 0 || !Array->getType()->getElementType()->isIntegerTy(8))
  return false;
1419 // Get the number of elements in the array
1420 uint64_t NumElts = Array->getType()->getNumElements();
if (Offset > NumElts)
  return false;
// Traverse the constant array from 'Offset' which is the place the GEP refers
// to in the array.
1427 Str.reserve(NumElts-Offset);
1428 for (unsigned i = Offset; i != NumElts; ++i) {
1429 const Constant *Elt = Array->getOperand(i);
1430 const ConstantInt *CI = dyn_cast<ConstantInt>(Elt);
if (!CI) // This array isn't suitable, non-int initializer.
  return false;
1433 if (StopAtNul && CI->isZero())
1434 return true; // we found end of string, success!
1435 Str += (char)CI->getZExtValue();
}

// The array isn't null terminated, but maybe this is a memcpy, not a strcpy.
return true;
}
// These next two are very similar to the above, but also look through PHI
// nodes.
1444 // TODO: See if we can integrate these two together.
1446 /// GetStringLengthH - If we can compute the length of the string pointed to by
1447 /// the specified pointer, return 'len+1'. If we can't, return 0.
1448 static uint64_t GetStringLengthH(Value *V, SmallPtrSet<PHINode*, 32> &PHIs) {
1449 // Look through noop bitcast instructions.
1450 if (BitCastInst *BCI = dyn_cast<BitCastInst>(V))
1451 return GetStringLengthH(BCI->getOperand(0), PHIs);
// If this is a PHI node, there are two cases: either we have already seen it
// or we haven't.
1455 if (PHINode *PN = dyn_cast<PHINode>(V)) {
1456 if (!PHIs.insert(PN))
1457 return ~0ULL; // already in the set.
1459 // If it was new, see if all the input strings are the same length.
1460 uint64_t LenSoFar = ~0ULL;
1461 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1462 uint64_t Len = GetStringLengthH(PN->getIncomingValue(i), PHIs);
1463 if (Len == 0) return 0; // Unknown length -> unknown.
1465 if (Len == ~0ULL) continue;
1467 if (Len != LenSoFar && LenSoFar != ~0ULL)
return 0; // Disagree -> unknown.
LenSoFar = Len;
}

// Success, all agree.
return LenSoFar;
}
1476 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
1477 if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
1478 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs);
1479 if (Len1 == 0) return 0;
1480 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs);
1481 if (Len2 == 0) return 0;
1482 if (Len1 == ~0ULL) return Len2;
1483 if (Len2 == ~0ULL) return Len1;
if (Len1 != Len2) return 0;
return Len1;
}
1488 // If the value is not a GEP instruction nor a constant expression with a
1489 // GEP instruction, then return unknown.
User *GEP = 0;
if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(V)) {
  GEP = GEPI;
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
  if (CE->getOpcode() != Instruction::GetElementPtr)
    return 0;
  GEP = CE;
} else {
  return 0;
}
1501 // Make sure the GEP has exactly three arguments.
if (GEP->getNumOperands() != 3)
  return 0;
1505 // Check to make sure that the first operand of the GEP is an integer and
1506 // has value 0 so that we are sure we're indexing into the initializer.
if (ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(1))) {
  if (!Idx->isZero())
    return 0;
} else
  return 0;
1513 // If the second index isn't a ConstantInt, then this is a variable index
// into the array.  If this occurs, we can't say anything meaningful about
// the length.
1516 uint64_t StartIdx = 0;
1517 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
StartIdx = CI->getZExtValue();
else
  return 0;
1522 // The GEP instruction, constant or instruction, must reference a global
1523 // variable that is a constant and is initialized. The referenced constant
1524 // initializer is the array that we'll use for optimization.
1525 GlobalVariable* GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
1526 if (!GV || !GV->isConstant() || !GV->hasInitializer() ||
GV->mayBeOverridden())
  return 0;
1529 Constant *GlobalInit = GV->getInitializer();
1531 // Handle the ConstantAggregateZero case, which is a degenerate case. The
1532 // initializer is constant zero so the length of the string must be zero.
1533 if (isa<ConstantAggregateZero>(GlobalInit))
1534 return 1; // Len = 0 offset by 1.
1536 // Must be a Constant Array
1537 ConstantArray *Array = dyn_cast<ConstantArray>(GlobalInit);
if (!Array || !Array->getType()->getElementType()->isIntegerTy(8))
  return 0;
1541 // Get the number of elements in the array
1542 uint64_t NumElts = Array->getType()->getNumElements();
1544 // Traverse the constant array from StartIdx (derived above) which is
1545 // the place the GEP refers to in the array.
1546 for (unsigned i = StartIdx; i != NumElts; ++i) {
1547 Constant *Elt = Array->getOperand(i);
1548 ConstantInt *CI = dyn_cast<ConstantInt>(Elt);
if (!CI) // This array isn't suitable, non-int initializer.
  return 0;
if (CI->isZero())
return i-StartIdx+1; // We found end of string, success!
}
return 0; // The array isn't null terminated, conservatively return 'unknown'.
}
1558 /// GetStringLength - If we can compute the length of the string pointed to by
1559 /// the specified pointer, return 'len+1'. If we can't, return 0.
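///
/// For example (illustrative), for a pointer to the constant string "foo"
/// this returns 4 (three characters plus the terminating nul).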
1560 uint64_t llvm::GetStringLength(Value *V) {
1561 if (!V->getType()->isPointerTy()) return 0;
1563 SmallPtrSet<PHINode*, 32> PHIs;
1564 uint64_t Len = GetStringLengthH(V, PHIs);
1565 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
1566 // an empty string as a length.
return Len == ~0ULL ? 1 : Len;
}