#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
-#include "llvm/Constants.h"
-#include "llvm/DataLayout.h"
-#include "llvm/GlobalAlias.h"
-#include "llvm/GlobalVariable.h"
-#include "llvm/Instructions.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Metadata.h"
-#include "llvm/Operator.h"
-#include "llvm/Support/ConstantRange.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
+#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/PatternMatch.h"
#include <cstring>
using namespace llvm;
using namespace llvm::PatternMatch;
static unsigned getBitWidth(Type *Ty, const DataLayout *TD) {
if (unsigned BitWidth = Ty->getScalarSizeInBits())
return BitWidth;
- assert(isa<PointerType>(Ty) && "Expected a pointer type!");
- return TD ? TD->getPointerSizeInBits() : 0;
+
+ return TD ? TD->getPointerTypeSizeInBits(Ty) : 0;
}
static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
// NLZ can't be BitWidth with no sign bit
APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
llvm::ComputeMaskedBits(Op1, KnownZero2, KnownOne2, TD, Depth+1);
-
+
// If all of the MaskV bits are known to be zero, then we know the
// output top bits are zero, because we now know that the output is
// from [0-C].
unsigned LHSKnownZeroOut = LHSKnownZero.countTrailingOnes();
llvm::ComputeMaskedBits(Op1, KnownZero2, KnownOne2, TD, Depth+1);
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+ assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
unsigned RHSKnownZeroOut = KnownZero2.countTrailingOnes();
// Determine which operand has more trailing zeros, and use that
for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
Elt = CDS->getElementAsInteger(i);
KnownZero &= ~Elt;
- KnownOne &= Elt;
+ KnownOne &= Elt;
}
return;
}
-
+
// The address of an aligned GlobalValue has trailing zeros.
if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
unsigned Align = GV->getAlignment();
}
if (Align > 0)
KnownZero = APInt::getLowBitsSet(BitWidth,
- CountTrailingZeros_32(Align));
+ countTrailingZeros(Align));
else
KnownZero.clearAllBits();
KnownOne.clearAllBits();
}
return;
}
-
+
if (Argument *A = dyn_cast<Argument>(V)) {
unsigned Align = 0;
- if (A->hasByValAttr()) {
- // Get alignment information off byval arguments if specified in the IR.
+ if (A->hasByValOrInAllocaAttr()) {
+ // Get alignment information off byval/inalloca arguments if specified in
+ // the IR.
Align = A->getParamAlignment();
} else if (TD && A->hasStructRetAttr()) {
// An sret parameter has at least the ABI alignment of the return type.
}
if (Align)
- KnownZero = APInt::getLowBitsSet(BitWidth, CountTrailingZeros_32(Align));
+ KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
return;
}
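
// Standalone sketch (plain C++, not part of this patch): the rule used for
// aligned arguments and globals above is that an Align-byte-aligned address
// has countTrailingZeros(Align) low bits that are provably zero, which is
// exactly what gets recorded in KnownZero. __builtin_ctzll stands in for
// countTrailingZeros here; all names are illustrative only.
#include <cassert>
#include <cstdint>
int main() {
  uint64_t Align = 16;                           // assume a 16-byte-aligned pointer
  unsigned TrailZ = __builtin_ctzll(Align);      // 4 trailing zero bits
  uint64_t KnownZeroMask = (1ULL << TrailZ) - 1; // low 4 bits known to be zero
  uint64_t AlignedAddr = 0x1000 + 3 * Align;     // any multiple of Align
  assert((AlignedAddr & KnownZeroMask) == 0);
  return 0;
}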
// If either the LHS or the RHS are Zero, the result is zero.
ComputeMaskedBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
-
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+
// Output known-1 bits are only known if set in both the LHS & RHS.
KnownOne &= KnownOne2;
// Output known-0 are known to be clear if zero in either the LHS | RHS.
case Instruction::Or: {
ComputeMaskedBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
-
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+
// Output known-0 bits are only known if clear in both the LHS & RHS.
KnownZero &= KnownZero2;
// Output known-1 are known to be set if set in either the LHS | RHS.
case Instruction::Xor: {
ComputeMaskedBits(I->getOperand(1), KnownZero, KnownOne, TD, Depth+1);
ComputeMaskedBits(I->getOperand(0), KnownZero2, KnownOne2, TD, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
-
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+
// Output known-0 bits are known if clear or set in both the LHS & RHS.
APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
// Output known-1 are known to be set if set in only one of the LHS, RHS.
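
// Standalone sketch (plain C++, not using APInt): the And/Or/Xor rules above
// can be checked with ordinary masks, where KZ/KO are the bits known to be
// zero/one in each operand. The concrete values are arbitrary examples.
#include <cassert>
#include <cstdint>
int main() {
  uint8_t KZ1 = 0x0A, KO1 = 0x05;  // LHS: low nibble fully known (0b0101)
  uint8_t KZ2 = 0xC0, KO2 = 0x30;  // RHS: high nibble fully known (0b00110000)
  // And: known-1 only where both are known 1; known-0 where either is known 0.
  uint8_t AndKO = KO1 & KO2, AndKZ = KZ1 | KZ2;
  // Or: known-0 only where both are known 0; known-1 where either is known 1.
  uint8_t OrKZ = KZ1 & KZ2, OrKO = KO1 | KO2;
  // Xor: known-0 where the inputs are known to agree, known-1 where they are
  // known to disagree.
  uint8_t XorKZ = (KZ1 & KZ2) | (KO1 & KO2);
  uint8_t XorKO = (KZ1 & KO2) | (KO1 & KZ2);
  // No bit may be simultaneously known zero and known one.
  assert((AndKZ & AndKO) == 0 && (OrKZ & OrKO) == 0 && (XorKZ & XorKO) == 0);
  return 0;
}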
ComputeMaskedBits(I->getOperand(2), KnownZero, KnownOne, TD, Depth+1);
ComputeMaskedBits(I->getOperand(1), KnownZero2, KnownOne2, TD,
Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
// Only known if known in both the LHS and RHS.
KnownOne &= KnownOne2;
unsigned SrcBitWidth;
// Note that we handle pointer operands here because of inttoptr/ptrtoint
// which fall through here.
- SrcBitWidth = TD->getTypeSizeInBits(SrcTy->getScalarType());
+ if (TD) {
+ SrcBitWidth = TD->getTypeSizeInBits(SrcTy->getScalarType());
+ } else {
+ SrcBitWidth = SrcTy->getScalarSizeInBits();
+ if (!SrcBitWidth) return;
+ }
assert(SrcBitWidth && "SrcBitWidth can't be zero");
KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
case Instruction::SExt: {
// Compute the bits in the result that are not present in the input.
unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
-
+
KnownZero = KnownZero.trunc(SrcBitWidth);
KnownOne = KnownOne.trunc(SrcBitWidth);
ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
KnownZero = KnownZero.zext(BitWidth);
KnownOne = KnownOne.zext(BitWidth);
if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
KnownZero <<= ShiftAmt;
KnownOne <<= ShiftAmt;
KnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt); // low bits known 0
if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
// Compute the new bits that are at the top now.
uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
-
+
// Unsigned shift right.
ComputeMaskedBits(I->getOperand(0), KnownZero,KnownOne, TD, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
// high bits known zero.
if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
// Compute the new bits that are at the top now.
uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
-
+
// Signed shift right.
ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
KnownOne = APIntOps::lshr(KnownOne, ShiftAmt);
-
+
APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
if (KnownZero[BitWidth-ShiftAmt-1]) // New bits are known zero.
KnownZero |= HighBits;
if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
KnownOne |= ~LowBits;
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
}
}
unsigned Align = AI->getAlignment();
if (Align == 0 && TD)
Align = TD->getABITypeAlignment(AI->getType()->getElementType());
-
+
if (Align > 0)
- KnownZero = APInt::getLowBitsSet(BitWidth, CountTrailingZeros_32(Align));
+ KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
break;
}
case Instruction::GetElementPtr: {
Value *Index = I->getOperand(i);
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
// Handle struct member offset arithmetic.
- if (!TD) return;
- const StructLayout *SL = TD->getStructLayout(STy);
+ if (!TD)
+ return;
+
+ // Handle the case where the index is a vector zeroinitializer.
+ Constant *CIndex = cast<Constant>(Index);
+ if (CIndex->isZeroValue())
+ continue;
+
+ if (CIndex->getType()->isVectorTy())
+ Index = CIndex->getSplatValue();
+
unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
+ const StructLayout *SL = TD->getStructLayout(STy);
uint64_t Offset = SL->getElementOffset(Idx);
- TrailZ = std::min(TrailZ,
- CountTrailingZeros_64(Offset));
+ TrailZ = std::min<unsigned>(TrailZ,
+ countTrailingZeros(Offset));
} else {
// Handle array index arithmetic.
Type *IndexedTy = GTI.getIndexedType();
LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
ComputeMaskedBits(Index, LocalKnownZero, LocalKnownOne, TD, Depth+1);
TrailZ = std::min(TrailZ,
- unsigned(CountTrailingZeros_64(TypeSize) +
+ unsigned(countTrailingZeros(TypeSize) +
LocalKnownZero.countTrailingOnes()));
}
}
-
+
KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ);
break;
}
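
// Standalone sketch (plain C++): for an array index the GEP offset is
// Index * TypeSize, and the trailing zeros of a product are at least the sum
// of the trailing zeros of its factors, which is the bound the code above
// takes via countTrailingZeros(TypeSize) + LocalKnownZero.countTrailingOnes().
#include <cassert>
#include <cstdint>
static unsigned tz(uint64_t V) { return V ? __builtin_ctzll(V) : 64; }
int main() {
  uint64_t TypeSize = 12;  // a 12-byte element type: tz = 2
  uint64_t Index = 40;     // an index known to be a multiple of 8: tz = 3
  uint64_t Offset = Index * TypeSize;
  assert(tz(Offset) >= tz(TypeSize) + tz(Index));  // tz(480) = 5 >= 2 + 3
  return 0;
}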
KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
break;
}
- case Intrinsic::x86_sse42_crc32_64_8:
case Intrinsic::x86_sse42_crc32_64_64:
KnownZero = APInt::getHighBitsSet(64, 32);
break;
return false;
}
+ // Adding a power-of-two or zero to the same power-of-two or zero yields
+ // either the original power-of-two, a larger power-of-two or zero.
+ if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
+ OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
+ if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
+ if (match(X, m_And(m_Specific(Y), m_Value())) ||
+ match(X, m_And(m_Value(), m_Specific(Y))))
+ if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth))
+ return true;
+ if (match(Y, m_And(m_Specific(X), m_Value())) ||
+ match(Y, m_And(m_Value(), m_Specific(X))))
+ if (isKnownToBeAPowerOfTwo(X, OrZero, Depth))
+ return true;
+
+ unsigned BitWidth = V->getType()->getScalarSizeInBits();
+ APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0);
+ ComputeMaskedBits(X, LHSZeroBits, LHSOneBits, 0, Depth);
+
+ APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0);
+ ComputeMaskedBits(Y, RHSZeroBits, RHSOneBits, 0, Depth);
+ // If i8 V is a power of two or zero:
+ // ZeroBits: 1 1 1 0 1 1 1 1
+ // ~ZeroBits: 0 0 0 1 0 0 0 0
+ if ((~(LHSZeroBits & RHSZeroBits)).isPowerOf2())
+ // If OrZero isn't set, we cannot give back a zero result.
+ // Make sure either the LHS or RHS has a bit set.
+ if (OrZero || RHSOneBits.getBoolValue() || LHSOneBits.getBoolValue())
+ return true;
+ }
+ }
+
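// Standalone sketch (plain C++) of the i8 example in the comment above: if
// ZeroBits says only bit 4 may be set in either addend, each addend is 0 or
// 16, so any non-wrapping sum is 0, 16 or 32 -- a power of two or zero.
#include <cassert>
static bool isPowerOfTwoOrZero(unsigned V) { return (V & (V - 1)) == 0; }
int main() {
  const unsigned ZeroBits = 0xEF;  // 0b11101111: only bit 4 may be set
  for (unsigned X = 0; X <= 0xFF; ++X)
    for (unsigned Y = 0; Y <= 0xFF; ++Y) {
      if ((X & ZeroBits) || (Y & ZeroBits))
        continue;                  // skip values the known-zero masks rule out
      assert(isPowerOfTwoOrZero(X + Y));
    }
  return 0;
}
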
// An exact divide or right shift can only shift off zero bits, so the result
// is a power of two only if the first operand is a power of two and not
// copying a sign bit (sdiv int_min, 2).
// Check for pointer simplifications.
if (V->getType()->isPointerTy()) {
+ if (isKnownNonNull(V))
+ return true;
if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
if (isGEPKnownNonNull(GEP, TD, Depth))
return true;
const DataLayout *TD, unsigned Depth) {
APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
- assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
return (KnownZero & Mask) == Mask;
}
if (Depth == 6)
return 1; // Limit search depth.
-
+
Operator *U = dyn_cast<Operator>(V);
switch (Operator::getOpcode(V)) {
default: break;
case Instruction::SExt:
Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
return ComputeNumSignBits(U->getOperand(0), TD, Depth+1) + Tmp;
-
+
case Instruction::AShr: {
Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
// ashr X, C -> adds C sign bits. Vectors too.
if (Tmp == 1) return 1; // Early out.
Tmp2 = ComputeNumSignBits(U->getOperand(2), TD, Depth+1);
return std::min(Tmp, Tmp2);
-
+
case Instruction::Add:
// Add can have at most one carry bit. Thus we know that the output
// is, at worst, one more bit than the inputs.
Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
if (Tmp == 1) return 1; // Early out.
-
+
// Special case decrementing a value (ADD X, -1):
if (ConstantInt *CRHS = dyn_cast<ConstantInt>(U->getOperand(1)))
if (CRHS->isAllOnesValue()) {
APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
ComputeMaskedBits(U->getOperand(0), KnownZero, KnownOne, TD, Depth+1);
-
+
// If the input is known to be 0 or 1, the output is 0/-1, which is all
// sign bits set.
if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
return TyBits;
-
+
// If we are subtracting one from a positive number, there is no carry
// out of the result.
if (KnownZero.isNegative())
return Tmp;
}
-
+
Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
if (Tmp2 == 1) return 1;
return std::min(Tmp, Tmp2)-1;
-
+
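// Standalone sketch (plain C++, exhaustive over i8): the "at most one carry
// bit" argument above means the sum keeps at least
// min(signbits(a), signbits(b)) - 1 copies of the sign bit; e.g. 1 and 1 each
// have 7 sign bits, while 1 + 1 == 2 has 6.
#include <algorithm>
#include <cassert>
#include <cstdint>
// Leading bits of an i8 value equal to its sign bit, including the sign bit.
static unsigned signBits(int8_t V) {
  unsigned N = 1;
  for (int Bit = 6; Bit >= 0 && ((V >> Bit) & 1) == ((V >> 7) & 1); --Bit)
    ++N;
  return N;
}
int main() {
  for (int A = -128; A <= 127; ++A)
    for (int B = -128; B <= 127; ++B) {
      unsigned Lo = std::min(signBits(int8_t(A)), signBits(int8_t(B)));
      assert(signBits(int8_t(A + B)) + 1 >= Lo);
    }
  return 0;
}
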
case Instruction::Sub:
Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
if (Tmp2 == 1) return 1;
-
+
// Handle NEG.
if (ConstantInt *CLHS = dyn_cast<ConstantInt>(U->getOperand(0)))
if (CLHS->isNullValue()) {
// sign bits set.
if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
return TyBits;
-
+
// If the input is known to be positive (the sign bit is known clear),
// the output of the NEG has the same number of sign bits as the input.
if (KnownZero.isNegative())
return Tmp2;
-
+
// Otherwise, we treat this like a SUB.
}
-
+
// Sub can have at most one carry bit. Thus we know that the output
// is, at worst, one more bit than the inputs.
Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
if (Tmp == 1) return 1; // Early out.
return std::min(Tmp, Tmp2)-1;
-
+
case Instruction::PHI: {
PHINode *PN = cast<PHINode>(U);
// Don't analyze large in-degree PHIs.
if (PN->getNumIncomingValues() > 4) break;
-
+
// Take the minimum of all incoming values. This can't infinitely loop
// because of our depth threshold.
Tmp = ComputeNumSignBits(PN->getIncomingValue(0), TD, Depth+1);
// case for targets like X86.
break;
}
-
+
// Finally, if we can prove that the top bits of the result are 0's or 1's,
// use this information.
APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
APInt Mask;
ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
-
+
if (KnownZero.isNegative()) { // sign bit is 0
Mask = KnownZero;
} else if (KnownOne.isNegative()) { // sign bit is 1;
// Nothing known.
return FirstAnswer;
}
-
+
// Okay, we know that the sign bit in Mask is set. Use CLZ to determine
// the number of identical bits in the top of the input value.
Mask = ~Mask;
if (Base == 0)
return false;
-
+
if (Base == 1) {
Multiple = V;
return true;
if (CI && CI->getZExtValue() % Base == 0) {
Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
- return true;
+ return true;
}
-
+
if (Depth == MaxDepth) return false; // Limit search depth.
-
+
Operator *I = dyn_cast<Operator>(V);
if (!I) return false;
if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
if (Constant *Op1C = dyn_cast<Constant>(Op1))
if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
- if (Op1C->getType()->getPrimitiveSizeInBits() <
+ if (Op1C->getType()->getPrimitiveSizeInBits() <
MulC->getType()->getPrimitiveSizeInBits())
Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
- if (Op1C->getType()->getPrimitiveSizeInBits() >
+ if (Op1C->getType()->getPrimitiveSizeInBits() >
MulC->getType()->getPrimitiveSizeInBits())
MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
-
+
// V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
Multiple = ConstantExpr::getMul(MulC, Op1C);
return true;
if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
if (Constant *Op0C = dyn_cast<Constant>(Op0))
if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
- if (Op0C->getType()->getPrimitiveSizeInBits() <
+ if (Op0C->getType()->getPrimitiveSizeInBits() <
MulC->getType()->getPrimitiveSizeInBits())
Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
- if (Op0C->getType()->getPrimitiveSizeInBits() >
+ if (Op0C->getType()->getPrimitiveSizeInBits() >
MulC->getType()->getPrimitiveSizeInBits())
MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
-
+
// V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
Multiple = ConstantExpr::getMul(MulC, Op0C);
return true;
return false;
}
-/// CannotBeNegativeZero - Return true if we can prove that the specified FP
+/// CannotBeNegativeZero - Return true if we can prove that the specified FP
/// value is never equal to -0.0.
///
/// NOTE: this function will need to be revisited when we support non-default
bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
return !CFP->getValueAPF().isNegZero();
-
+
if (Depth == 6)
return 1; // Limit search depth.
return true;
// (add x, 0.0) is guaranteed to return +0.0, not -0.0.
- if (I->getOpcode() == Instruction::FAdd &&
- isa<ConstantFP>(I->getOperand(1)) &&
- cast<ConstantFP>(I->getOperand(1))->isNullValue())
- return true;
-
+ if (I->getOpcode() == Instruction::FAdd)
+ if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1)))
+ if (CFP->isNullValue())
+ return true;
+
// sitofp and uitofp turn into +0.0 for zero.
if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
return true;
-
+
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
// sqrt(-0.0) = -0.0, no other negative results are possible.
if (II->getIntrinsicID() == Intrinsic::sqrt)
return CannotBeNegativeZero(II->getArgOperand(0), Depth+1);
-
+
if (const CallInst *CI = dyn_cast<CallInst>(I))
if (const Function *F = CI->getCalledFunction()) {
if (F->isDeclaration()) {
return CannotBeNegativeZero(CI->getArgOperand(0), Depth+1);
}
}
-
+
return false;
}
if (Constant *C = dyn_cast<Constant>(V))
if (C->isNullValue())
return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
-
+
// Constant float and double values can be handled as integer values if the
- // corresponding integer value is "byteable". An important case is 0.0.
+ // corresponding integer value is "byteable". An important case is 0.0.
if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
if (CFP->getType()->isFloatTy())
V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
// Don't handle long double formats, which have strange constraints.
}
-
- // We can handle constant integers that are power of two in size and a
+
+ // We can handle constant integers that are power of two in size and a
// multiple of 8 bits.
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
unsigned Width = CI->getBitWidth();
Val2 = Val.lshr(NextWidth);
Val2 = Val2.trunc(Val.getBitWidth()/2);
Val = Val.trunc(Val.getBitWidth()/2);
-
+
// If the top/bottom halves aren't the same, reject it.
if (Val != Val2)
return 0;
return ConstantInt::get(V->getContext(), Val);
}
}
-
+
// A ConstantDataArray/Vector is splatable if all its members are equal and
// also splatable.
if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
Value *Val = isBytewiseValue(Elt);
if (!Val)
return 0;
-
+
for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
if (CA->getElementAsConstant(I) != Elt)
return 0;
-
+
return Val;
}
// struct. To is the result struct built so far, new insertvalue instructions
// build on that.
static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
- SmallVector<unsigned, 10> &Idxs,
+ SmallVectorImpl<unsigned> &Idxs,
unsigned IdxSkip,
Instruction *InsertBefore) {
- llvm::StructType *STy = llvm::dyn_cast<llvm::StructType>(IndexedType);
+ llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType);
if (STy) {
// Save the original To argument so we can modify it
Value *OrigTo = To;
// the struct's elements had a value that was inserted directly. In the latter
// case, perhaps we can't determine each of the subelements individually, but
// we might be able to find the complete struct somewhere.
-
+
// Find the value that is at that particular spot
Value *V = FindInsertedValue(From, Idxs);
if (C == 0) return 0;
return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
}
-
+
if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
// Loop the indices for the insertvalue instruction in parallel with the
// requested indices
return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
InsertBefore);
}
-
+
// This insert value inserts something else than what we are looking for.
// See if the (aggregate) value inserted into has the value we are
// looking for, then.
makeArrayRef(req_idx, idx_range.end()),
InsertBefore);
}
-
+
if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
// If we're extracting a value from an aggregate that was extracted from
// something else, we can extract from that something else directly instead.
// However, we will need to chain I's indices with the requested indices.
-
- // Calculate the number of indices required
+
+ // Calculate the number of indices required
unsigned size = I->getNumIndices() + idx_range.size();
// Allocate some space to put the new indices in
SmallVector<unsigned, 5> Idxs;
Idxs.reserve(size);
// Add indices from the extract value instruction
Idxs.append(I->idx_begin(), I->idx_end());
-
+
// Add requested indices
Idxs.append(idx_range.begin(), idx_range.end());
- assert(Idxs.size() == size
+ assert(Idxs.size() == size
&& "Number of indices added not correct?");
-
+
return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
}
// Otherwise, we don't know (such as, extracting from a function return value
/// it can be expressed as a base pointer plus a constant offset. Return the
/// base and offset to the caller.
Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
- const DataLayout &TD) {
- Operator *PtrOp = dyn_cast<Operator>(Ptr);
- if (PtrOp == 0 || Ptr->getType()->isVectorTy())
- return Ptr;
-
- // Just look through bitcasts.
- if (PtrOp->getOpcode() == Instruction::BitCast)
- return GetPointerBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);
-
- // If this is a GEP with constant indices, we can look through it.
- GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
- if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;
-
- gep_type_iterator GTI = gep_type_begin(GEP);
- for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
- ++I, ++GTI) {
- ConstantInt *OpC = cast<ConstantInt>(*I);
- if (OpC->isZero()) continue;
-
- // Handle a struct and array indices which add their offset to the pointer.
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
- Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
+ const DataLayout *DL) {
+ // Without DataLayout, conservatively assume 64-bit offsets, which is
+ // the widest we support.
+ unsigned BitWidth = DL ? DL->getPointerTypeSizeInBits(Ptr->getType()) : 64;
+ APInt ByteOffset(BitWidth, 0);
+ while (1) {
+ if (Ptr->getType()->isVectorTy())
+ break;
+
+ if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
+ if (DL) {
+ APInt GEPOffset(BitWidth, 0);
+ if (!GEP->accumulateConstantOffset(*DL, GEPOffset))
+ break;
+
+ ByteOffset += GEPOffset;
+ }
+
+ Ptr = GEP->getPointerOperand();
+ } else if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
+ Ptr = cast<Operator>(Ptr)->getOperand(0);
+ } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
+ if (GA->mayBeOverridden())
+ break;
+ Ptr = GA->getAliasee();
} else {
- uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
- Offset += OpC->getSExtValue()*Size;
+ break;
}
}
-
- // Re-sign extend from the pointer size if needed to get overflow edge cases
- // right.
- unsigned PtrSize = TD.getPointerSizeInBits();
- if (PtrSize < 64)
- Offset = SignExtend64(Offset, PtrSize);
-
- return GetPointerBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
+ Offset = ByteOffset.getSExtValue();
+ return Ptr;
}
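
// Standalone sketch (plain C++, toy types): the loop above peels constant
// offsets (GEPs), bitcasts and resolvable aliases off Ptr, accumulates the
// byte offset, and returns the underlying base. Here each Step stands in for
// one constant-offset GEP; the names are illustrative, not LLVM API.
#include <cassert>
#include <cstdint>
#include <vector>
struct Step { int64_t Bytes; };  // one constant byte adjustment toward the base
static const char *stripConstantOffsets(const char *Ptr,
                                        const std::vector<Step> &Chain,
                                        int64_t &Offset) {
  Offset = 0;
  for (const Step &S : Chain) {  // outermost adjustment first, as in the loop above
    Offset += S.Bytes;
    Ptr -= S.Bytes;
  }
  return Ptr;
}
int main() {
  char Buffer[64];
  const char *Ptr = Buffer + 12 - 4;  // built as Buffer + 12, then adjusted by -4
  int64_t Offset = 0;
  const char *Base = stripConstantOffsets(Ptr, {{-4}, {12}}, Offset);
  assert(Base == Buffer && Offset == 8);
  return 0;
}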
// Look through bitcast instructions and geps.
V = V->stripPointerCasts();
-
+
// If the value is a GEP instruction or constant expression, treat it as an
// offset.
if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
// Make sure the GEP has exactly three arguments.
if (GEP->getNumOperands() != 3)
return false;
-
+
// Make sure the index-ee is a pointer to array of i8.
PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
if (AT == 0 || !AT->getElementType()->isIntegerTy(8))
return false;
-
+
// Check to make sure that the first operand of the GEP is an integer and
// has value 0 so that we are sure we're indexing into the initializer.
const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
if (FirstIdx == 0 || !FirstIdx->isZero())
return false;
-
+
// If the second index isn't a ConstantInt, then this is a variable index
// into the array. If this occurs, we can't say anything meaningful about
// the string.
Str = "";
return true;
}
-
+
// Must be a Constant Array
const ConstantDataArray *Array =
dyn_cast<ConstantDataArray>(GV->getInitializer());
if (Array == 0 || !Array->isString())
return false;
-
+
// Get the number of elements in the array
uint64_t NumElts = Array->getType()->getArrayNumElements();
if (Offset > NumElts)
return false;
-
+
// Skip over 'offset' bytes.
Str = Str.substr(Offset);
-
+
if (TrimAtNul) {
// Trim off the \0 and anything after it. If the array is not nul
// terminated, we just return the whole end of string. The client may know
if (Len1 != Len2) return 0;
return Len1;
}
-
+
// Otherwise, see if we can read the string.
StringRef StrData;
if (!getConstantStringInfo(V, StrData))
/// are lifetime markers.
///
bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
- for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
- UI != UE; ++UI) {
- const IntrinsicInst *II = dyn_cast<IntrinsicInst>(*UI);
+ for (const User *U : V->users()) {
+ const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
if (!II) return false;
if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
}
case Instruction::Load: {
const LoadInst *LI = cast<LoadInst>(Inst);
- if (!LI->isUnordered())
+ if (!LI->isUnordered() ||
+ // Speculative load may create a race that did not exist in the source.
+ LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
return false;
return LI->getPointerOperand()->isDereferenceablePointer();
}
case Intrinsic::umul_with_overflow:
case Intrinsic::usub_with_overflow:
return true;
+ // Sqrt should be OK, since the llvm sqrt intrinsic isn't defined to set
+ // errno like libm sqrt would.
+ case Intrinsic::sqrt:
+ case Intrinsic::fma:
+ case Intrinsic::fmuladd:
+ return true;
// TODO: some fp intrinsics are marked as having the same error handling
// as libm. They're safe to speculate when they won't error.
// TODO: are convert_{from,to}_fp16 safe?
return false; // Misc instructions which have effects
}
}
+
+/// isKnownNonNull - Return true if we know that the specified value is never
+/// null.
+bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) {
+ // Alloca never returns null, malloc might.
+ if (isa<AllocaInst>(V)) return true;
+
+ // A byval or inalloca argument is never null.
+ if (const Argument *A = dyn_cast<Argument>(V))
+ return A->hasByValOrInAllocaAttr();
+
+ // Global values are not null unless extern weak.
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
+ return !GV->hasExternalWeakLinkage();
+
+ // operator new never returns null.
+ if (isOperatorNewLikeFn(V, TLI, /*LookThroughBitCast=*/true))
+ return true;
+
+ return false;
+}
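
// Hypothetical caller sketch: how a transform might use the new helper to
// fold a pointer comparison against null. simplifyNullCompare and its place
// in a surrounding pass are assumptions; isKnownNonNull itself is the
// function added above, and the other classes are standard LLVM IR types.
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
static llvm::Value *simplifyNullCompare(llvm::ICmpInst *Cmp,
                                        const llvm::TargetLibraryInfo *TLI) {
  using namespace llvm;
  if (!Cmp->isEquality() || !isa<ConstantPointerNull>(Cmp->getOperand(1)))
    return 0;
  if (!isKnownNonNull(Cmp->getOperand(0), TLI))
    return 0;
  // A provably non-null pointer: "p == null" folds to false, "p != null" to true.
  return ConstantInt::get(Cmp->getType(),
                          Cmp->getPredicate() == CmpInst::ICMP_NE);
}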