X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FAnalysis%2FConstantFolding.cpp;h=bc0dffc473626481a1adc664b2be86285ce62ced;hb=7338de37a802970857079b5a532c5dd50d0a6d5d;hp=91c745e9326072492727f9e43bf69f8f4637e660;hpb=76e2e4a2bc78a0ca2617e3b336081e29cb00d749;p=oota-llvm.git diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp index 91c745e9326..bc0dffc4736 100644 --- a/lib/Analysis/ConstantFolding.cpp +++ b/lib/Analysis/ConstantFolding.cpp @@ -1,4 +1,4 @@ -//===-- ConstantFolding.cpp - Analyze constant folding possibilities ------===// +//===-- ConstantFolding.cpp - Fold instructions into constants ------------===// // // The LLVM Compiler Infrastructure // @@ -7,22 +7,33 @@ // //===----------------------------------------------------------------------===// // -// This family of functions determines the possibility of performing constant -// folding. +// This file defines routines for folding instructions into constants. +// +// Also, to supplement the basic IR ConstantExpr simplifications, +// this file defines some additional folding routines that can make use of +// DataLayout information. These functions cannot go in IR due to library +// dependency issues. // //===----------------------------------------------------------------------===// #include "llvm/Analysis/ConstantFolding.h" -#include "llvm/Constants.h" -#include "llvm/DerivedTypes.h" -#include "llvm/Function.h" -#include "llvm/Instructions.h" -#include "llvm/Intrinsics.h" +#include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" -#include "llvm/Target/TargetData.h" +#include "llvm/Analysis/ValueTracking.h" +#include "llvm/IR/Constants.h" +#include "llvm/IR/DataLayout.h" +#include "llvm/IR/DerivedTypes.h" +#include "llvm/IR/Function.h" +#include "llvm/IR/GlobalVariable.h" +#include "llvm/IR/Instructions.h" +#include "llvm/IR/Intrinsics.h" +#include "llvm/IR/Operator.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/FEnv.h" #include "llvm/Support/GetElementPtrTypeIterator.h" #include "llvm/Support/MathExtras.h" +#include "llvm/Target/TargetLibraryInfo.h" #include #include using namespace llvm; @@ -31,284 +42,876 @@ using namespace llvm; // Constant Folding internal helper functions //===----------------------------------------------------------------------===// +/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with +/// DataLayout. This always returns a non-null constant, but it may be a +/// ConstantExpr if unfoldable. +static Constant *FoldBitCast(Constant *C, Type *DestTy, + const DataLayout &TD) { + // Catch the obvious splat cases. + if (C->isNullValue() && !DestTy->isX86_MMXTy()) + return Constant::getNullValue(DestTy); + if (C->isAllOnesValue() && !DestTy->isX86_MMXTy()) + return Constant::getAllOnesValue(DestTy); + + // Handle a vector->integer cast. + if (IntegerType *IT = dyn_cast(DestTy)) { + VectorType *VTy = dyn_cast(C->getType()); + if (VTy == 0) + return ConstantExpr::getBitCast(C, DestTy); + + unsigned NumSrcElts = VTy->getNumElements(); + Type *SrcEltTy = VTy->getElementType(); + + // If the vector is a vector of floating point, convert it to vector of int + // to simplify things. + if (SrcEltTy->isFloatingPointTy()) { + unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits(); + Type *SrcIVTy = + VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElts); + // Ask IR to do the conversion now that #elts line up. 
+ C = ConstantExpr::getBitCast(C, SrcIVTy); + } + + ConstantDataVector *CDV = dyn_cast(C); + if (CDV == 0) + return ConstantExpr::getBitCast(C, DestTy); + + // Now that we know that the input value is a vector of integers, just shift + // and insert them into our result. + unsigned BitShift = TD.getTypeAllocSizeInBits(SrcEltTy); + APInt Result(IT->getBitWidth(), 0); + for (unsigned i = 0; i != NumSrcElts; ++i) { + Result <<= BitShift; + if (TD.isLittleEndian()) + Result |= CDV->getElementAsInteger(NumSrcElts-i-1); + else + Result |= CDV->getElementAsInteger(i); + } + + return ConstantInt::get(IT, Result); + } + + // The code below only handles casts to vectors currently. + VectorType *DestVTy = dyn_cast(DestTy); + if (DestVTy == 0) + return ConstantExpr::getBitCast(C, DestTy); + + // If this is a scalar -> vector cast, convert the input into a <1 x scalar> + // vector so the code below can handle it uniformly. + if (isa(C) || isa(C)) { + Constant *Ops = C; // don't take the address of C! + return FoldBitCast(ConstantVector::get(Ops), DestTy, TD); + } + + // If this is a bitcast from constant vector -> vector, fold it. + if (!isa(C) && !isa(C)) + return ConstantExpr::getBitCast(C, DestTy); + + // If the element types match, IR can fold it. + unsigned NumDstElt = DestVTy->getNumElements(); + unsigned NumSrcElt = C->getType()->getVectorNumElements(); + if (NumDstElt == NumSrcElt) + return ConstantExpr::getBitCast(C, DestTy); + + Type *SrcEltTy = C->getType()->getVectorElementType(); + Type *DstEltTy = DestVTy->getElementType(); + + // Otherwise, we're changing the number of elements in a vector, which + // requires endianness information to do the right thing. For example, + // bitcast (<2 x i64> to <4 x i32>) + // folds to (little endian): + // <4 x i32> + // and to (big endian): + // <4 x i32> + + // First thing is first. We only want to think about integer here, so if + // we have something in FP form, recast it as integer. + if (DstEltTy->isFloatingPointTy()) { + // Fold to an vector of integers with same size as our FP type. + unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits(); + Type *DestIVTy = + VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt); + // Recursively handle this integer conversion, if possible. + C = FoldBitCast(C, DestIVTy, TD); + + // Finally, IR can handle this now that #elts line up. + return ConstantExpr::getBitCast(C, DestTy); + } + + // Okay, we know the destination is integer, if the input is FP, convert + // it to integer first. + if (SrcEltTy->isFloatingPointTy()) { + unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits(); + Type *SrcIVTy = + VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt); + // Ask IR to do the conversion now that #elts line up. + C = ConstantExpr::getBitCast(C, SrcIVTy); + // If IR wasn't able to fold it, bail out. + if (!isa(C) && // FIXME: Remove ConstantVector. + !isa(C)) + return C; + } + + // Now we know that the input and output vectors are both integer vectors + // of the same size, and that their #elements is not the same. Do the + // conversion here, which depends on whether the input or output has + // more elements. 
+ bool isLittleEndian = TD.isLittleEndian(); + + SmallVector Result; + if (NumDstElt < NumSrcElt) { + // Handle: bitcast (<4 x i32> to <2 x i64>) + Constant *Zero = Constant::getNullValue(DstEltTy); + unsigned Ratio = NumSrcElt/NumDstElt; + unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits(); + unsigned SrcElt = 0; + for (unsigned i = 0; i != NumDstElt; ++i) { + // Build each element of the result. + Constant *Elt = Zero; + unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1); + for (unsigned j = 0; j != Ratio; ++j) { + Constant *Src =dyn_cast(C->getAggregateElement(SrcElt++)); + if (!Src) // Reject constantexpr elements. + return ConstantExpr::getBitCast(C, DestTy); + + // Zero extend the element to the right size. + Src = ConstantExpr::getZExt(Src, Elt->getType()); + + // Shift it to the right place, depending on endianness. + Src = ConstantExpr::getShl(Src, + ConstantInt::get(Src->getType(), ShiftAmt)); + ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize; + + // Mix it in. + Elt = ConstantExpr::getOr(Elt, Src); + } + Result.push_back(Elt); + } + return ConstantVector::get(Result); + } + + // Handle: bitcast (<2 x i64> to <4 x i32>) + unsigned Ratio = NumDstElt/NumSrcElt; + unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits(); + + // Loop over each source value, expanding into multiple results. + for (unsigned i = 0; i != NumSrcElt; ++i) { + Constant *Src = dyn_cast(C->getAggregateElement(i)); + if (!Src) // Reject constantexpr elements. + return ConstantExpr::getBitCast(C, DestTy); + + unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1); + for (unsigned j = 0; j != Ratio; ++j) { + // Shift the piece of the value into the right place, depending on + // endianness. + Constant *Elt = ConstantExpr::getLShr(Src, + ConstantInt::get(Src->getType(), ShiftAmt)); + ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize; + + // Truncate and remember this piece. + Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy)); + } + } + + return ConstantVector::get(Result); +} + + /// IsConstantOffsetFromGlobal - If this constant is actually a constant offset /// from a global, return the global and the constant. Because of /// constantexprs, this function is recursive. static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, - int64_t &Offset, const TargetData &TD) { + APInt &Offset, const DataLayout &TD) { // Trivial case, constant is the global. if ((GV = dyn_cast(C))) { - Offset = 0; + Offset.clearAllBits(); return true; } - + // Otherwise, if this isn't a constant expr, bail out. ConstantExpr *CE = dyn_cast(C); if (!CE) return false; - + // Look through ptr->int and ptr->ptr casts. if (CE->getOpcode() == Instruction::PtrToInt || CE->getOpcode() == Instruction::BitCast) return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD); - - // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5) - if (CE->getOpcode() == Instruction::GetElementPtr) { - // Cannot compute this if the element type of the pointer is missing size - // info. - if (!cast(CE->getOperand(0)->getType()) - ->getElementType()->isSized()) - return false; - + + // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5) + if (GEPOperator *GEP = dyn_cast(CE)) { // If the base isn't a global+constant, we aren't either. if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD)) return false; - + // Otherwise, add any offset that our operands provide. 
- gep_type_iterator GTI = gep_type_begin(CE); - for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i, ++GTI) { - ConstantInt *CI = dyn_cast(CE->getOperand(i)); - if (!CI) return false; // Index isn't a simple constant? - if (CI->getZExtValue() == 0) continue; // Not adding anything. - - if (const StructType *ST = dyn_cast(*GTI)) { - // N = N + Offset - Offset += TD.getStructLayout(ST)->getElementOffset(CI->getZExtValue()); - } else { - const SequentialType *SQT = cast(*GTI); - Offset += TD.getABITypeSize(SQT->getElementType())*CI->getSExtValue(); - } + return GEP->accumulateConstantOffset(TD, Offset); + } + + return false; +} + +/// ReadDataFromGlobal - Recursive helper to read bits out of global. C is the +/// constant being copied out of. ByteOffset is an offset into C. CurPtr is the +/// pointer to copy results into and BytesLeft is the number of bytes left in +/// the CurPtr buffer. TD is the target data. +static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, + unsigned char *CurPtr, unsigned BytesLeft, + const DataLayout &TD) { + assert(ByteOffset <= TD.getTypeAllocSize(C->getType()) && + "Out of range access"); + + // If this element is zero or undefined, we can just return since *CurPtr is + // zero initialized. + if (isa(C) || isa(C)) + return true; + + if (ConstantInt *CI = dyn_cast(C)) { + if (CI->getBitWidth() > 64 || + (CI->getBitWidth() & 7) != 0) + return false; + + uint64_t Val = CI->getZExtValue(); + unsigned IntBytes = unsigned(CI->getBitWidth()/8); + + for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) { + int n = ByteOffset; + if (!TD.isLittleEndian()) + n = IntBytes - n - 1; + CurPtr[i] = (unsigned char)(Val >> (n * 8)); + ++ByteOffset; + } + return true; + } + + if (ConstantFP *CFP = dyn_cast(C)) { + if (CFP->getType()->isDoubleTy()) { + C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), TD); + return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD); + } + if (CFP->getType()->isFloatTy()){ + C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), TD); + return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD); + } + if (CFP->getType()->isHalfTy()){ + C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), TD); + return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD); + } + return false; + } + + if (ConstantStruct *CS = dyn_cast(C)) { + const StructLayout *SL = TD.getStructLayout(CS->getType()); + unsigned Index = SL->getElementContainingOffset(ByteOffset); + uint64_t CurEltOffset = SL->getElementOffset(Index); + ByteOffset -= CurEltOffset; + + while (1) { + // If the element access is to the element itself and not to tail padding, + // read the bytes from the element. + uint64_t EltSize = TD.getTypeAllocSize(CS->getOperand(Index)->getType()); + + if (ByteOffset < EltSize && + !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr, + BytesLeft, TD)) + return false; + + ++Index; + + // Check to see if we read from the last struct element, if so we're done. + if (Index == CS->getType()->getNumElements()) + return true; + + // If we read all of the bytes we needed from this element we're done. + uint64_t NextEltOffset = SL->getElementOffset(Index); + + if (BytesLeft <= NextEltOffset-CurEltOffset-ByteOffset) + return true; + + // Move to the next element of the struct. + CurPtr += NextEltOffset-CurEltOffset-ByteOffset; + BytesLeft -= NextEltOffset-CurEltOffset-ByteOffset; + ByteOffset = 0; + CurEltOffset = NextEltOffset; + } + // not reached. 
+ } + + if (isa(C) || isa(C) || + isa(C)) { + Type *EltTy = cast(C->getType())->getElementType(); + uint64_t EltSize = TD.getTypeAllocSize(EltTy); + uint64_t Index = ByteOffset / EltSize; + uint64_t Offset = ByteOffset - Index * EltSize; + uint64_t NumElts; + if (ArrayType *AT = dyn_cast(C->getType())) + NumElts = AT->getNumElements(); + else + NumElts = cast(C->getType())->getNumElements(); + + for (; Index != NumElts; ++Index) { + if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr, + BytesLeft, TD)) + return false; + + uint64_t BytesWritten = EltSize - Offset; + assert(BytesWritten <= EltSize && "Not indexing into this element?"); + if (BytesWritten >= BytesLeft) + return true; + + Offset = 0; + BytesLeft -= BytesWritten; + CurPtr += BytesWritten; } return true; } - + + if (ConstantExpr *CE = dyn_cast(C)) { + if (CE->getOpcode() == Instruction::IntToPtr && + CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getContext())) + return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr, + BytesLeft, TD); + } + + // Otherwise, unknown initializer type. return false; } +static Constant *FoldReinterpretLoadFromConstPtr(Constant *C, + const DataLayout &TD) { + Type *LoadTy = cast(C->getType())->getElementType(); + IntegerType *IntType = dyn_cast(LoadTy); + + // If this isn't an integer load we can't fold it directly. + if (!IntType) { + // If this is a float/double load, we can try folding it as an int32/64 load + // and then bitcast the result. This can be useful for union cases. Note + // that address spaces don't matter here since we're not going to result in + // an actual new load. + Type *MapTy; + if (LoadTy->isHalfTy()) + MapTy = Type::getInt16PtrTy(C->getContext()); + else if (LoadTy->isFloatTy()) + MapTy = Type::getInt32PtrTy(C->getContext()); + else if (LoadTy->isDoubleTy()) + MapTy = Type::getInt64PtrTy(C->getContext()); + else if (LoadTy->isVectorTy()) { + MapTy = IntegerType::get(C->getContext(), + TD.getTypeAllocSizeInBits(LoadTy)); + MapTy = PointerType::getUnqual(MapTy); + } else + return 0; + + C = FoldBitCast(C, MapTy, TD); + if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, TD)) + return FoldBitCast(Res, LoadTy, TD); + return 0; + } + + unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8; + if (BytesLoaded > 32 || BytesLoaded == 0) return 0; + + GlobalValue *GVal; + APInt Offset(TD.getPointerSizeInBits(), 0); + if (!IsConstantOffsetFromGlobal(C, GVal, Offset, TD)) + return 0; + + GlobalVariable *GV = dyn_cast(GVal); + if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() || + !GV->getInitializer()->getType()->isSized()) + return 0; + + // If we're loading off the beginning of the global, some bytes may be valid, + // but we don't try to handle this. + if (Offset.isNegative()) return 0; + + // If we're not accessing anything in this constant, the result is undefined. 
+ if (Offset.getZExtValue() >= + TD.getTypeAllocSize(GV->getInitializer()->getType())) + return UndefValue::get(IntType); + + unsigned char RawBytes[32] = {0}; + if (!ReadDataFromGlobal(GV->getInitializer(), Offset.getZExtValue(), RawBytes, + BytesLoaded, TD)) + return 0; + + APInt ResultVal = APInt(IntType->getBitWidth(), 0); + if (TD.isLittleEndian()) { + ResultVal = RawBytes[BytesLoaded - 1]; + for (unsigned i = 1; i != BytesLoaded; ++i) { + ResultVal <<= 8; + ResultVal |= RawBytes[BytesLoaded-1-i]; + } + } else { + ResultVal = RawBytes[0]; + for (unsigned i = 1; i != BytesLoaded; ++i) { + ResultVal <<= 8; + ResultVal |= RawBytes[i]; + } + } + + return ConstantInt::get(IntType->getContext(), ResultVal); +} + +/// ConstantFoldLoadFromConstPtr - Return the value that a load from C would +/// produce if it is constant and determinable. If this is not determinable, +/// return null. +Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, + const DataLayout *TD) { + // First, try the easy cases: + if (GlobalVariable *GV = dyn_cast(C)) + if (GV->isConstant() && GV->hasDefinitiveInitializer()) + return GV->getInitializer(); + + // If the loaded value isn't a constant expr, we can't handle it. + ConstantExpr *CE = dyn_cast(C); + if (!CE) return 0; + + if (CE->getOpcode() == Instruction::GetElementPtr) { + if (GlobalVariable *GV = dyn_cast(CE->getOperand(0))) + if (GV->isConstant() && GV->hasDefinitiveInitializer()) + if (Constant *V = + ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE)) + return V; + } + + // Instead of loading constant c string, use corresponding integer value + // directly if string length is small enough. + StringRef Str; + if (TD && getConstantStringInfo(CE, Str) && !Str.empty()) { + unsigned StrLen = Str.size(); + Type *Ty = cast(CE->getType())->getElementType(); + unsigned NumBits = Ty->getPrimitiveSizeInBits(); + // Replace load with immediate integer if the result is an integer or fp + // value. + if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 && + (isa(Ty) || Ty->isFloatingPointTy())) { + APInt StrVal(NumBits, 0); + APInt SingleChar(NumBits, 0); + if (TD->isLittleEndian()) { + for (signed i = StrLen-1; i >= 0; i--) { + SingleChar = (uint64_t) Str[i] & UCHAR_MAX; + StrVal = (StrVal << 8) | SingleChar; + } + } else { + for (unsigned i = 0; i < StrLen; i++) { + SingleChar = (uint64_t) Str[i] & UCHAR_MAX; + StrVal = (StrVal << 8) | SingleChar; + } + // Append NULL at the end. + SingleChar = 0; + StrVal = (StrVal << 8) | SingleChar; + } + + Constant *Res = ConstantInt::get(CE->getContext(), StrVal); + if (Ty->isFloatingPointTy()) + Res = ConstantExpr::getBitCast(Res, Ty); + return Res; + } + } + + // If this load comes from anywhere in a constant global, and if the global + // is all undef or zero, we know what it loads. + if (GlobalVariable *GV = + dyn_cast(GetUnderlyingObject(CE, TD))) { + if (GV->isConstant() && GV->hasDefinitiveInitializer()) { + Type *ResTy = cast(C->getType())->getElementType(); + if (GV->getInitializer()->isNullValue()) + return Constant::getNullValue(ResTy); + if (isa(GV->getInitializer())) + return UndefValue::get(ResTy); + } + } + + // Try hard to fold loads from bitcasted strange and non-type-safe things. 
+ if (TD) + return FoldReinterpretLoadFromConstPtr(CE, *TD); + return 0; +} + +static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){ + if (LI->isVolatile()) return 0; + + if (Constant *C = dyn_cast(LI->getOperand(0))) + return ConstantFoldLoadFromConstPtr(C, TD); + + return 0; +} /// SymbolicallyEvaluateBinop - One of Op0/Op1 is a constant expression. -/// Attempt to symbolically evaluate the result of a binary operator merging -/// these together. If target data info is available, it is provided as TD, -/// otherwise TD is null. +/// Attempt to symbolically evaluate the result of a binary operator merging +/// these together. If target data info is available, it is provided as DL, +/// otherwise DL is null. static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, - Constant *Op1, const TargetData *TD){ + Constant *Op1, const DataLayout *DL){ // SROA - + // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl. // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute // bits. - - + + + if (Opc == Instruction::And && DL) { + unsigned BitWidth = DL->getTypeSizeInBits(Op0->getType()->getScalarType()); + APInt KnownZero0(BitWidth, 0), KnownOne0(BitWidth, 0); + APInt KnownZero1(BitWidth, 0), KnownOne1(BitWidth, 0); + ComputeMaskedBits(Op0, KnownZero0, KnownOne0, DL); + ComputeMaskedBits(Op1, KnownZero1, KnownOne1, DL); + if ((KnownOne1 | KnownZero0).isAllOnesValue()) { + // All the bits of Op0 that the 'and' could be masking are already zero. + return Op0; + } + if ((KnownOne0 | KnownZero1).isAllOnesValue()) { + // All the bits of Op1 that the 'and' could be masking are already zero. + return Op1; + } + + APInt KnownZero = KnownZero0 | KnownZero1; + APInt KnownOne = KnownOne0 & KnownOne1; + if ((KnownZero | KnownOne).isAllOnesValue()) { + return ConstantInt::get(Op0->getType(), KnownOne); + } + } + // If the constant expr is something like &A[123] - &A[4].f, fold this into a // constant. This happens frequently when iterating over a global array. - if (Opc == Instruction::Sub && TD) { + if (Opc == Instruction::Sub && DL) { GlobalValue *GV1, *GV2; - int64_t Offs1, Offs2; - - if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, *TD)) - if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *TD) && + unsigned PtrSize = DL->getPointerSizeInBits(); + unsigned OpSize = DL->getTypeSizeInBits(Op0->getType()); + APInt Offs1(PtrSize, 0), Offs2(PtrSize, 0); + + if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, *DL)) + if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *DL) && GV1 == GV2) { // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow. - return ConstantInt::get(Op0->getType(), Offs1-Offs2); + // PtrToInt may change the bitwidth so we have convert to the right size + // first. + return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) - + Offs2.zextOrTrunc(OpSize)); } } - - // TODO: Fold icmp setne/seteq as well. + return 0; } +/// CastGEPIndices - If array indices are not pointer-sized integers, +/// explicitly cast them so that they aren't implicitly casted by the +/// getelementptr. 
+static Constant *CastGEPIndices(ArrayRef Ops, + Type *ResultTy, const DataLayout *TD, + const TargetLibraryInfo *TLI) { + if (!TD) return 0; + Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext()); + + bool Any = false; + SmallVector NewIdxs; + for (unsigned i = 1, e = Ops.size(); i != e; ++i) { + if ((i == 1 || + !isa(GetElementPtrInst::getIndexedType(Ops[0]->getType(), + Ops.slice(1, i-1)))) && + Ops[i]->getType() != IntPtrTy) { + Any = true; + NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i], + true, + IntPtrTy, + true), + Ops[i], IntPtrTy)); + } else + NewIdxs.push_back(Ops[i]); + } + if (!Any) return 0; + + Constant *C = + ConstantExpr::getGetElementPtr(Ops[0], NewIdxs); + if (ConstantExpr *CE = dyn_cast(C)) + if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI)) + C = Folded; + return C; +} + +/// Strip the pointer casts, but preserve the address space information. +static Constant* StripPtrCastKeepAS(Constant* Ptr) { + assert(Ptr->getType()->isPointerTy() && "Not a pointer type"); + PointerType *OldPtrTy = cast(Ptr->getType()); + Ptr = cast(Ptr->stripPointerCasts()); + PointerType *NewPtrTy = cast(Ptr->getType()); + + // Preserve the address space number of the pointer. + if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) { + NewPtrTy = NewPtrTy->getElementType()->getPointerTo( + OldPtrTy->getAddressSpace()); + Ptr = ConstantExpr::getBitCast(Ptr, NewPtrTy); + } + return Ptr; +} + /// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP /// constant expression, do so. -static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps, - const Type *ResultTy, - const TargetData *TD) { +static Constant *SymbolicallyEvaluateGEP(ArrayRef Ops, + Type *ResultTy, const DataLayout *TD, + const TargetLibraryInfo *TLI) { Constant *Ptr = Ops[0]; - if (!TD || !cast(Ptr->getType())->getElementType()->isSized()) + if (!TD || !cast(Ptr->getType())->getElementType()->isSized() || + !Ptr->getType()->isPointerTy()) return 0; - - uint64_t BasePtr = 0; - if (!Ptr->isNullValue()) { - // If this is a inttoptr from a constant int, we can fold this as the base, - // otherwise we can't. - if (ConstantExpr *CE = dyn_cast(Ptr)) - if (CE->getOpcode() == Instruction::IntToPtr) - if (ConstantInt *Base = dyn_cast(CE->getOperand(0))) - BasePtr = Base->getZExtValue(); - - if (BasePtr == 0) - return 0; - } + + Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext()); // If this is a constant expr gep that is effectively computing an // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12' - for (unsigned i = 1; i != NumOps; ++i) - if (!isa(Ops[i])) - return false; - - uint64_t Offset = TD->getIndexedOffset(Ptr->getType(), - (Value**)Ops+1, NumOps-1); - Constant *C = ConstantInt::get(TD->getIntPtrType(), Offset+BasePtr); - return ConstantExpr::getIntToPtr(C, ResultTy); -} + for (unsigned i = 1, e = Ops.size(); i != e; ++i) + if (!isa(Ops[i])) { -/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with -/// targetdata. Return 0 if unfoldable. -static Constant *FoldBitCast(Constant *C, const Type *DestTy, - const TargetData &TD) { - // If this is a bitcast from constant vector -> vector, fold it. - if (ConstantVector *CV = dyn_cast(C)) { - if (const VectorType *DestVTy = dyn_cast(DestTy)) { - // If the element types match, VMCore can fold it. 
- unsigned NumDstElt = DestVTy->getNumElements(); - unsigned NumSrcElt = CV->getNumOperands(); - if (NumDstElt == NumSrcElt) - return 0; - - const Type *SrcEltTy = CV->getType()->getElementType(); - const Type *DstEltTy = DestVTy->getElementType(); - - // Otherwise, we're changing the number of elements in a vector, which - // requires endianness information to do the right thing. For example, - // bitcast (<2 x i64> to <4 x i32>) - // folds to (little endian): - // <4 x i32> - // and to (big endian): - // <4 x i32> - - // First thing is first. We only want to think about integer here, so if - // we have something in FP form, recast it as integer. - if (DstEltTy->isFloatingPoint()) { - // Fold to an vector of integers with same size as our FP type. - unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits(); - const Type *DestIVTy = VectorType::get(IntegerType::get(FPWidth), - NumDstElt); - // Recursively handle this integer conversion, if possible. - C = FoldBitCast(C, DestIVTy, TD); - if (!C) return 0; - - // Finally, VMCore can handle this now that #elts line up. - return ConstantExpr::getBitCast(C, DestTy); - } - - // Okay, we know the destination is integer, if the input is FP, convert - // it to integer first. - if (SrcEltTy->isFloatingPoint()) { - unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits(); - const Type *SrcIVTy = VectorType::get(IntegerType::get(FPWidth), - NumSrcElt); - // Ask VMCore to do the conversion now that #elts line up. - C = ConstantExpr::getBitCast(C, SrcIVTy); - CV = dyn_cast(C); - if (!CV) return 0; // If VMCore wasn't able to fold it, bail out. - } - - // Now we know that the input and output vectors are both integer vectors - // of the same size, and that their #elements is not the same. Do the - // conversion here, which depends on whether the input or output has - // more elements. - bool isLittleEndian = TD.isLittleEndian(); - - SmallVector Result; - if (NumDstElt < NumSrcElt) { - // Handle: bitcast (<4 x i32> to <2 x i64>) - Constant *Zero = Constant::getNullValue(DstEltTy); - unsigned Ratio = NumSrcElt/NumDstElt; - unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits(); - unsigned SrcElt = 0; - for (unsigned i = 0; i != NumDstElt; ++i) { - // Build each element of the result. - Constant *Elt = Zero; - unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1); - for (unsigned j = 0; j != Ratio; ++j) { - Constant *Src = dyn_cast(CV->getOperand(SrcElt++)); - if (!Src) return 0; // Reject constantexpr elements. - - // Zero extend the element to the right size. - Src = ConstantExpr::getZExt(Src, Elt->getType()); - - // Shift it to the right place, depending on endianness. - Src = ConstantExpr::getShl(Src, - ConstantInt::get(Src->getType(), ShiftAmt)); - ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize; - - // Mix it in. - Elt = ConstantExpr::getOr(Elt, Src); - } - Result.push_back(Elt); - } - } else { - // Handle: bitcast (<2 x i64> to <4 x i32>) - unsigned Ratio = NumDstElt/NumSrcElt; - unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits(); - - // Loop over each source value, expanding into multiple results. - for (unsigned i = 0; i != NumSrcElt; ++i) { - Constant *Src = dyn_cast(CV->getOperand(i)); - if (!Src) return 0; // Reject constantexpr elements. - - unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1); - for (unsigned j = 0; j != Ratio; ++j) { - // Shift the piece of the value into the right place, depending on - // endianness. 
- Constant *Elt = ConstantExpr::getLShr(Src, - ConstantInt::get(Src->getType(), ShiftAmt)); - ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize; - - // Truncate and remember this piece. - Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy)); - } + // If this is "gep i8* Ptr, (sub 0, V)", fold this as: + // "inttoptr (sub (ptrtoint Ptr), V)" + if (Ops.size() == 2 && + cast(ResultTy)->getElementType()->isIntegerTy(8)) { + ConstantExpr *CE = dyn_cast(Ops[1]); + assert((CE == 0 || CE->getType() == IntPtrTy) && + "CastGEPIndices didn't canonicalize index types!"); + if (CE && CE->getOpcode() == Instruction::Sub && + CE->getOperand(0)->isNullValue()) { + Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType()); + Res = ConstantExpr::getSub(Res, CE->getOperand(1)); + Res = ConstantExpr::getIntToPtr(Res, ResultTy); + if (ConstantExpr *ResCE = dyn_cast(Res)) + Res = ConstantFoldConstantExpression(ResCE, TD, TLI); + return Res; } } - - return ConstantVector::get(&Result[0], Result.size()); + return 0; } + + unsigned BitWidth = TD->getTypeSizeInBits(IntPtrTy); + APInt Offset = + APInt(BitWidth, TD->getIndexedOffset(Ptr->getType(), + makeArrayRef((Value *const*) + Ops.data() + 1, + Ops.size() - 1))); + Ptr = StripPtrCastKeepAS(Ptr); + + // If this is a GEP of a GEP, fold it all into a single GEP. + while (GEPOperator *GEP = dyn_cast(Ptr)) { + SmallVector NestedOps(GEP->op_begin()+1, GEP->op_end()); + + // Do not try the incorporate the sub-GEP if some index is not a number. + bool AllConstantInt = true; + for (unsigned i = 0, e = NestedOps.size(); i != e; ++i) + if (!isa(NestedOps[i])) { + AllConstantInt = false; + break; + } + if (!AllConstantInt) + break; + + Ptr = cast(GEP->getOperand(0)); + Offset += APInt(BitWidth, + TD->getIndexedOffset(Ptr->getType(), NestedOps)); + Ptr = StripPtrCastKeepAS(Ptr); } - - return 0; + + // If the base value for this address is a literal integer value, fold the + // getelementptr to the resulting integer value casted to the pointer type. + APInt BasePtr(BitWidth, 0); + if (ConstantExpr *CE = dyn_cast(Ptr)) + if (CE->getOpcode() == Instruction::IntToPtr) + if (ConstantInt *Base = dyn_cast(CE->getOperand(0))) + BasePtr = Base->getValue().zextOrTrunc(BitWidth); + if (Ptr->isNullValue() || BasePtr != 0) { + Constant *C = ConstantInt::get(Ptr->getContext(), Offset+BasePtr); + return ConstantExpr::getIntToPtr(C, ResultTy); + } + + // Otherwise form a regular getelementptr. Recompute the indices so that + // we eliminate over-indexing of the notional static type array bounds. + // This makes it easy to determine if the getelementptr is "inbounds". + // Also, this helps GlobalOpt do SROA on GlobalVariables. + Type *Ty = Ptr->getType(); + assert(Ty->isPointerTy() && "Forming regular GEP of non-pointer type"); + SmallVector NewIdxs; + do { + if (SequentialType *ATy = dyn_cast(Ty)) { + if (ATy->isPointerTy()) { + // The only pointer indexing we'll do is on the first index of the GEP. + if (!NewIdxs.empty()) + break; + + // Only handle pointers to sized types, not pointers to functions. + if (!ATy->getElementType()->isSized()) + return 0; + } + + // Determine which element of the array the offset points into. + APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType())); + IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext()); + if (ElemSize == 0) + // The element size is 0. This may be [0 x Ty]*, so just use a zero + // index for this level and proceed to the next level to see if it can + // accommodate the offset. 
+ NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0)); + else { + // The element size is non-zero divide the offset by the element + // size (rounding down), to compute the index at this level. + APInt NewIdx = Offset.udiv(ElemSize); + Offset -= NewIdx * ElemSize; + NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx)); + } + Ty = ATy->getElementType(); + } else if (StructType *STy = dyn_cast(Ty)) { + // If we end up with an offset that isn't valid for this struct type, we + // can't re-form this GEP in a regular form, so bail out. The pointer + // operand likely went through casts that are necessary to make the GEP + // sensible. + const StructLayout &SL = *TD->getStructLayout(STy); + if (Offset.uge(SL.getSizeInBytes())) + break; + + // Determine which field of the struct the offset points into. The + // getZExtValue is fine as we've already ensured that the offset is + // within the range representable by the StructLayout API. + unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue()); + NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()), + ElIdx)); + Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx)); + Ty = STy->getTypeAtIndex(ElIdx); + } else { + // We've reached some non-indexable type. + break; + } + } while (Ty != cast(ResultTy)->getElementType()); + + // If we haven't used up the entire offset by descending the static + // type, then the offset is pointing into the middle of an indivisible + // member, so we can't simplify it. + if (Offset != 0) + return 0; + + // Create a GEP. + Constant *C = + ConstantExpr::getGetElementPtr(Ptr, NewIdxs); + assert(cast(C->getType())->getElementType() == Ty && + "Computed GetElementPtr has unexpected type!"); + + // If we ended up indexing a member with a type that doesn't match + // the type of what the original indices indexed, add a cast. + if (Ty != cast(ResultTy)->getElementType()) + C = FoldBitCast(C, ResultTy, *TD); + + return C; } + //===----------------------------------------------------------------------===// // Constant Folding public APIs //===----------------------------------------------------------------------===// - -/// ConstantFoldInstruction - Attempt to constant fold the specified -/// instruction. If successful, the constant result is returned, if not, null -/// is returned. Note that this function can only fail when attempting to fold -/// instructions like loads and stores, which have no constant expression form. -/// -Constant *llvm::ConstantFoldInstruction(Instruction *I, const TargetData *TD) { +/// ConstantFoldInstruction - Try to constant fold the specified instruction. +/// If successful, the constant result is returned, if not, null is returned. +/// Note that this fails if not all of the operands are constant. Otherwise, +/// this function can only fail when attempting to fold instructions like loads +/// and stores, which have no constant expression form. +Constant *llvm::ConstantFoldInstruction(Instruction *I, + const DataLayout *TD, + const TargetLibraryInfo *TLI) { + // Handle PHI nodes quickly here... if (PHINode *PN = dyn_cast(I)) { - if (PN->getNumIncomingValues() == 0) - return Constant::getNullValue(PN->getType()); + Constant *CommonValue = 0; - Constant *Result = dyn_cast(PN->getIncomingValue(0)); - if (Result == 0) return 0; + for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { + Value *Incoming = PN->getIncomingValue(i); + // If the incoming value is undef then skip it. 
Note that while we could + // skip the value if it is equal to the phi node itself we choose not to + // because that would break the rule that constant folding only applies if + // all operands are constants. + if (isa(Incoming)) + continue; + // If the incoming value is not a constant, then give up. + Constant *C = dyn_cast(Incoming); + if (!C) + return 0; + // Fold the PHI's operands. + if (ConstantExpr *NewC = dyn_cast(C)) + C = ConstantFoldConstantExpression(NewC, TD, TLI); + // If the incoming value is a different constant to + // the one we saw previously, then give up. + if (CommonValue && C != CommonValue) + return 0; + CommonValue = C; + } - // Handle PHI nodes specially here... - for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) - if (PN->getIncomingValue(i) != Result && PN->getIncomingValue(i) != PN) - return 0; // Not all the same incoming constants... - // If we reach here, all incoming values are the same constant. - return Result; + // If we reach here, all incoming values are the same constant or undef. + return CommonValue ? CommonValue : UndefValue::get(PN->getType()); } // Scan the operand list, checking to see if they are all constants, if so, // hand off to ConstantFoldInstOperands. SmallVector Ops; - for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) - if (Constant *Op = dyn_cast(I->getOperand(i))) - Ops.push_back(Op); - else + for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i) { + Constant *Op = dyn_cast(*i); + if (!Op) return 0; // All operands not constant! + // Fold the Instruction's operands. + if (ConstantExpr *NewCE = dyn_cast(Op)) + Op = ConstantFoldConstantExpression(NewCE, TD, TLI); + + Ops.push_back(Op); + } + if (const CmpInst *CI = dyn_cast(I)) - return ConstantFoldCompareInstOperands(CI->getPredicate(), - &Ops[0], Ops.size(), TD); - else - return ConstantFoldInstOperands(I->getOpcode(), I->getType(), - &Ops[0], Ops.size(), TD); + return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1], + TD, TLI); + + if (const LoadInst *LI = dyn_cast(I)) + return ConstantFoldLoadInst(LI, TD); + + if (InsertValueInst *IVI = dyn_cast(I)) + return ConstantExpr::getInsertValue( + cast(IVI->getAggregateOperand()), + cast(IVI->getInsertedValueOperand()), + IVI->getIndices()); + + if (ExtractValueInst *EVI = dyn_cast(I)) + return ConstantExpr::getExtractValue( + cast(EVI->getAggregateOperand()), + EVI->getIndices()); + + return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Ops, TD, TLI); +} + +static Constant * +ConstantFoldConstantExpressionImpl(const ConstantExpr *CE, const DataLayout *TD, + const TargetLibraryInfo *TLI, + SmallPtrSet &FoldedOps) { + SmallVector Ops; + for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end(); i != e; + ++i) { + Constant *NewC = cast(*i); + // Recursively fold the ConstantExpr's operands. If we have already folded + // a ConstantExpr, we don't have to process it again. + if (ConstantExpr *NewCE = dyn_cast(NewC)) { + if (FoldedOps.insert(NewCE)) + NewC = ConstantFoldConstantExpressionImpl(NewCE, TD, TLI, FoldedOps); + } + Ops.push_back(NewC); + } + + if (CE->isCompare()) + return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1], + TD, TLI); + return ConstantFoldInstOperands(CE->getOpcode(), CE->getType(), Ops, TD, TLI); +} + +/// ConstantFoldConstantExpression - Attempt to fold the constant expression +/// using the specified DataLayout. If successful, the constant result is +/// result is returned, if not, null is returned. 
+Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE, + const DataLayout *TD, + const TargetLibraryInfo *TLI) { + SmallPtrSet FoldedOps; + return ConstantFoldConstantExpressionImpl(CE, TD, TLI, FoldedOps); } /// ConstantFoldInstOperands - Attempt to constant fold an instruction with the @@ -317,45 +920,61 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I, const TargetData *TD) { /// attempting to fold instructions like loads and stores, which have no /// constant expression form. /// -Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy, - Constant* const* Ops, unsigned NumOps, - const TargetData *TD) { +/// TODO: This function neither utilizes nor preserves nsw/nuw/inbounds/etc +/// information, due to only being passed an opcode and operands. Constant +/// folding using this function strips this information. +/// +Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy, + ArrayRef Ops, + const DataLayout *TD, + const TargetLibraryInfo *TLI) { // Handle easy binops first. if (Instruction::isBinaryOp(Opcode)) { if (isa(Ops[0]) || isa(Ops[1])) if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD)) return C; - + return ConstantExpr::get(Opcode, Ops[0], Ops[1]); } - + switch (Opcode) { default: return 0; + case Instruction::ICmp: + case Instruction::FCmp: llvm_unreachable("Invalid for compares"); case Instruction::Call: - if (Function *F = dyn_cast(Ops[0])) + if (Function *F = dyn_cast(Ops.back())) if (canConstantFoldCallTo(F)) - return ConstantFoldCall(F, Ops+1, NumOps-1); + return ConstantFoldCall(F, Ops.slice(0, Ops.size() - 1), TLI); return 0; - case Instruction::ICmp: - case Instruction::FCmp: - assert(0 &&"This function is invalid for compares: no predicate specified"); case Instruction::PtrToInt: // If the input is a inttoptr, eliminate the pair. This requires knowing // the width of a pointer, so it can't be done in ConstantExpr::getCast. if (ConstantExpr *CE = dyn_cast(Ops[0])) { if (TD && CE->getOpcode() == Instruction::IntToPtr) { Constant *Input = CE->getOperand(0); - unsigned InWidth = Input->getType()->getPrimitiveSizeInBits(); - Constant *Mask = - ConstantInt::get(APInt::getLowBitsSet(InWidth, - TD->getPointerSizeInBits())); - Input = ConstantExpr::getAnd(Input, Mask); + unsigned InWidth = Input->getType()->getScalarSizeInBits(); + if (TD->getPointerSizeInBits() < InWidth) { + Constant *Mask = + ConstantInt::get(CE->getContext(), APInt::getLowBitsSet(InWidth, + TD->getPointerSizeInBits())); + Input = ConstantExpr::getAnd(Input, Mask); + } // Do a zext or trunc to get to the dest size. return ConstantExpr::getIntegerCast(Input, DestTy, false); } } return ConstantExpr::getCast(Opcode, Ops[0], DestTy); case Instruction::IntToPtr: + // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if + // the int size is >= the ptr size. This requires knowing the width of a + // pointer, so it can't be done in ConstantExpr::getCast. 
+ if (ConstantExpr *CE = dyn_cast(Ops[0])) + if (TD && + TD->getPointerSizeInBits() <= CE->getType()->getScalarSizeInBits() && + CE->getOpcode() == Instruction::PtrToInt) + return FoldBitCast(CE->getOperand(0), DestTy, *TD); + + return ConstantExpr::getCast(Opcode, Ops[0], DestTy); case Instruction::Trunc: case Instruction::ZExt: case Instruction::SExt: @@ -368,8 +987,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy, return ConstantExpr::getCast(Opcode, Ops[0], DestTy); case Instruction::BitCast: if (TD) - if (Constant *C = FoldBitCast(Ops[0], DestTy, *TD)) - return C; + return FoldBitCast(Ops[0], DestTy, *TD); return ConstantExpr::getBitCast(Ops[0], DestTy); case Instruction::Select: return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]); @@ -380,10 +998,12 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy, case Instruction::ShuffleVector: return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]); case Instruction::GetElementPtr: - if (Constant *C = SymbolicallyEvaluateGEP(Ops, NumOps, DestTy, TD)) + if (Constant *C = CastGEPIndices(Ops, DestTy, TD, TLI)) + return C; + if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, TD, TLI)) return C; - - return ConstantExpr::getGetElementPtr(Ops[0], Ops+1, NumOps-1); + + return ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1)); } } @@ -392,116 +1012,112 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy, /// returns a constant expression of the specified operands. /// Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate, - Constant*const * Ops, - unsigned NumOps, - const TargetData *TD) { + Constant *Ops0, Constant *Ops1, + const DataLayout *TD, + const TargetLibraryInfo *TLI) { // fold: icmp (inttoptr x), null -> icmp x, 0 // fold: icmp (ptrtoint x), 0 -> icmp x, null - // fold: icmp (inttoptr x), (inttoptr y) -> icmp x, y + // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y // // ConstantExpr::getCompare cannot do this, because it doesn't have TD // around to know if bit truncation is happening. - if (ConstantExpr *CE0 = dyn_cast(Ops[0])) { - if (TD && Ops[1]->isNullValue()) { - const Type *IntPtrTy = TD->getIntPtrType(); + if (ConstantExpr *CE0 = dyn_cast(Ops0)) { + if (TD && Ops1->isNullValue()) { + Type *IntPtrTy = TD->getIntPtrType(CE0->getContext()); if (CE0->getOpcode() == Instruction::IntToPtr) { // Convert the integer value to the right size to ensure we get the // proper extension or truncation. Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0), IntPtrTy, false); - Constant *NewOps[] = { C, Constant::getNullValue(C->getType()) }; - return ConstantFoldCompareInstOperands(Predicate, NewOps, 2, TD); + Constant *Null = Constant::getNullValue(C->getType()); + return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI); } - + // Only do this transformation if the int is intptrty in size, otherwise // there is a truncation or extension that we aren't modeling. - if (CE0->getOpcode() == Instruction::PtrToInt && + if (CE0->getOpcode() == Instruction::PtrToInt && CE0->getType() == IntPtrTy) { Constant *C = CE0->getOperand(0); - Constant *NewOps[] = { C, Constant::getNullValue(C->getType()) }; - // FIXME! 
- return ConstantFoldCompareInstOperands(Predicate, NewOps, 2, TD); + Constant *Null = Constant::getNullValue(C->getType()); + return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI); } } - - if (TD && isa(Ops[1]) && - cast(Ops[1])->getOpcode() == CE0->getOpcode()) { - const Type *IntPtrTy = TD->getIntPtrType(); - // Only do this transformation if the int is intptrty in size, otherwise - // there is a truncation or extension that we aren't modeling. - if ((CE0->getOpcode() == Instruction::IntToPtr && - CE0->getOperand(0)->getType() == IntPtrTy && - Ops[1]->getOperand(0)->getType() == IntPtrTy) || - (CE0->getOpcode() == Instruction::PtrToInt && - CE0->getType() == IntPtrTy && - CE0->getOperand(0)->getType() == Ops[1]->getOperand(0)->getType())) { - Constant *NewOps[] = { - CE0->getOperand(0), cast(Ops[1])->getOperand(0) - }; - return ConstantFoldCompareInstOperands(Predicate, NewOps, 2, TD); + + if (ConstantExpr *CE1 = dyn_cast(Ops1)) { + if (TD && CE0->getOpcode() == CE1->getOpcode()) { + Type *IntPtrTy = TD->getIntPtrType(CE0->getContext()); + + if (CE0->getOpcode() == Instruction::IntToPtr) { + // Convert the integer value to the right size to ensure we get the + // proper extension or truncation. + Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0), + IntPtrTy, false); + Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0), + IntPtrTy, false); + return ConstantFoldCompareInstOperands(Predicate, C0, C1, TD, TLI); + } + + // Only do this transformation if the int is intptrty in size, otherwise + // there is a truncation or extension that we aren't modeling. + if ((CE0->getOpcode() == Instruction::PtrToInt && + CE0->getType() == IntPtrTy && + CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType())) + return ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), + CE1->getOperand(0), TD, TLI); } } + + // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0) + // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0) + if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) && + CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) { + Constant *LHS = + ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), Ops1, + TD, TLI); + Constant *RHS = + ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(1), Ops1, + TD, TLI); + unsigned OpC = + Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or; + Constant *Ops[] = { LHS, RHS }; + return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, TD, TLI); + } } - return ConstantExpr::getCompare(Predicate, Ops[0], Ops[1]); + + return ConstantExpr::getCompare(Predicate, Ops0, Ops1); } /// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a /// getelementptr constantexpr, return the constant value being addressed by the /// constant expression, or null if something is funny and we can't decide. -Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C, +Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C, ConstantExpr *CE) { - if (CE->getOperand(1) != Constant::getNullValue(CE->getOperand(1)->getType())) + if (!CE->getOperand(1)->isNullValue()) return 0; // Do not allow stepping over the value! - + // Loop over all of the operands, tracking down which value we are - // addressing... 
- gep_type_iterator I = gep_type_begin(CE), E = gep_type_end(CE); - for (++I; I != E; ++I) - if (const StructType *STy = dyn_cast(*I)) { - ConstantInt *CU = cast(I.getOperand()); - assert(CU->getZExtValue() < STy->getNumElements() && - "Struct index out of range!"); - unsigned El = (unsigned)CU->getZExtValue(); - if (ConstantStruct *CS = dyn_cast(C)) { - C = CS->getOperand(El); - } else if (isa(C)) { - C = Constant::getNullValue(STy->getElementType(El)); - } else if (isa(C)) { - C = UndefValue::get(STy->getElementType(El)); - } else { - return 0; - } - } else if (ConstantInt *CI = dyn_cast(I.getOperand())) { - if (const ArrayType *ATy = dyn_cast(*I)) { - if (CI->getZExtValue() >= ATy->getNumElements()) - return 0; - if (ConstantArray *CA = dyn_cast(C)) - C = CA->getOperand(CI->getZExtValue()); - else if (isa(C)) - C = Constant::getNullValue(ATy->getElementType()); - else if (isa(C)) - C = UndefValue::get(ATy->getElementType()); - else - return 0; - } else if (const VectorType *PTy = dyn_cast(*I)) { - if (CI->getZExtValue() >= PTy->getNumElements()) - return 0; - if (ConstantVector *CP = dyn_cast(C)) - C = CP->getOperand(CI->getZExtValue()); - else if (isa(C)) - C = Constant::getNullValue(PTy->getElementType()); - else if (isa(C)) - C = UndefValue::get(PTy->getElementType()); - else - return 0; - } else { - return 0; - } - } else { - return 0; - } + // addressing. + for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) { + C = C->getAggregateElement(CE->getOperand(i)); + if (C == 0) return 0; + } + return C; +} + +/// ConstantFoldLoadThroughGEPIndices - Given a constant and getelementptr +/// indices (with an *implied* zero pointer index that is not in the list), +/// return the constant value being addressed by a virtual load, or null if +/// something is funny and we can't decide. +Constant *llvm::ConstantFoldLoadThroughGEPIndices(Constant *C, + ArrayRef Indices) { + // Loop over all of the operands, tracking down which value we are + // addressing. 
+ for (unsigned i = 0, e = Indices.size(); i != e; ++i) { + C = C->getAggregateElement(Indices[i]); + if (C == 0) return 0; + } return C; } @@ -515,170 +1131,258 @@ Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C, bool llvm::canConstantFoldCallTo(const Function *F) { switch (F->getIntrinsicID()) { + case Intrinsic::fabs: + case Intrinsic::log: + case Intrinsic::log2: + case Intrinsic::log10: + case Intrinsic::exp: + case Intrinsic::exp2: + case Intrinsic::floor: case Intrinsic::sqrt: + case Intrinsic::pow: case Intrinsic::powi: case Intrinsic::bswap: case Intrinsic::ctpop: case Intrinsic::ctlz: case Intrinsic::cttz: + case Intrinsic::sadd_with_overflow: + case Intrinsic::uadd_with_overflow: + case Intrinsic::ssub_with_overflow: + case Intrinsic::usub_with_overflow: + case Intrinsic::smul_with_overflow: + case Intrinsic::umul_with_overflow: + case Intrinsic::convert_from_fp16: + case Intrinsic::convert_to_fp16: + case Intrinsic::x86_sse_cvtss2si: + case Intrinsic::x86_sse_cvtss2si64: + case Intrinsic::x86_sse_cvttss2si: + case Intrinsic::x86_sse_cvttss2si64: + case Intrinsic::x86_sse2_cvtsd2si: + case Intrinsic::x86_sse2_cvtsd2si64: + case Intrinsic::x86_sse2_cvttsd2si: + case Intrinsic::x86_sse2_cvttsd2si64: return true; - default: break; + default: + return false; + case 0: break; } - const ValueName *NameVal = F->getValueName(); - if (NameVal == 0) return false; - const char *Str = NameVal->getKeyData(); - unsigned Len = NameVal->getKeyLength(); - + if (!F->hasName()) return false; + StringRef Name = F->getName(); + // In these cases, the check of the length is required. We don't want to // return true for a name like "cos\0blah" which strcmp would return equal to // "cos", but has length 8. - switch (Str[0]) { + switch (Name[0]) { default: return false; case 'a': - if (Len == 4) - return !strcmp(Str, "acos") || !strcmp(Str, "asin") || - !strcmp(Str, "atan"); - else if (Len == 5) - return !strcmp(Str, "atan2"); - return false; + return Name == "acos" || Name == "asin" || Name == "atan" || Name =="atan2"; case 'c': - if (Len == 3) - return !strcmp(Str, "cos"); - else if (Len == 4) - return !strcmp(Str, "ceil") || !strcmp(Str, "cosf") || - !strcmp(Str, "cosh"); - return false; + return Name == "cos" || Name == "ceil" || Name == "cosf" || Name == "cosh"; case 'e': - if (Len == 3) - return !strcmp(Str, "exp"); - return false; + return Name == "exp" || Name == "exp2"; case 'f': - if (Len == 4) - return !strcmp(Str, "fabs") || !strcmp(Str, "fmod"); - else if (Len == 5) - return !strcmp(Str, "floor"); - return false; - break; + return Name == "fabs" || Name == "fmod" || Name == "floor"; case 'l': - if (Len == 3 && !strcmp(Str, "log")) - return true; - if (Len == 5 && !strcmp(Str, "log10")) - return true; - return false; + return Name == "log" || Name == "log10"; case 'p': - if (Len == 3 && !strcmp(Str, "pow")) - return true; - return false; + return Name == "pow"; case 's': - if (Len == 3) - return !strcmp(Str, "sin"); - if (Len == 4) - return !strcmp(Str, "sinh") || !strcmp(Str, "sqrt") || - !strcmp(Str, "sinf"); - if (Len == 5) - return !strcmp(Str, "sqrtf"); - return false; + return Name == "sin" || Name == "sinh" || Name == "sqrt" || + Name == "sinf" || Name == "sqrtf"; case 't': - if (Len == 3 && !strcmp(Str, "tan")) - return true; - else if (Len == 4 && !strcmp(Str, "tanh")) - return true; - return false; + return Name == "tan" || Name == "tanh"; } } -static Constant *ConstantFoldFP(double (*NativeFP)(double), double V, - const Type *Ty) { - errno = 0; +static Constant 
*ConstantFoldFP(double (*NativeFP)(double), double V, + Type *Ty) { + sys::llvm_fenv_clearexcept(); V = NativeFP(V); - if (errno != 0) { - errno = 0; + if (sys::llvm_fenv_testexcept()) { + sys::llvm_fenv_clearexcept(); return 0; } - - if (Ty == Type::FloatTy) - return ConstantFP::get(APFloat((float)V)); - if (Ty == Type::DoubleTy) - return ConstantFP::get(APFloat(V)); - assert(0 && "Can only constant fold float/double"); + + if (Ty->isHalfTy()) { + APFloat APF(V); + bool unused; + APF.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &unused); + return ConstantFP::get(Ty->getContext(), APF); + } + if (Ty->isFloatTy()) + return ConstantFP::get(Ty->getContext(), APFloat((float)V)); + if (Ty->isDoubleTy()) + return ConstantFP::get(Ty->getContext(), APFloat(V)); + llvm_unreachable("Can only constant fold half/float/double"); } static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), - double V, double W, - const Type *Ty) { - errno = 0; + double V, double W, Type *Ty) { + sys::llvm_fenv_clearexcept(); V = NativeFP(V, W); - if (errno != 0) { - errno = 0; + if (sys::llvm_fenv_testexcept()) { + sys::llvm_fenv_clearexcept(); return 0; } - - if (Ty == Type::FloatTy) - return ConstantFP::get(APFloat((float)V)); - if (Ty == Type::DoubleTy) - return ConstantFP::get(APFloat(V)); - assert(0 && "Can only constant fold float/double"); + + if (Ty->isHalfTy()) { + APFloat APF(V); + bool unused; + APF.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &unused); + return ConstantFP::get(Ty->getContext(), APF); + } + if (Ty->isFloatTy()) + return ConstantFP::get(Ty->getContext(), APFloat((float)V)); + if (Ty->isDoubleTy()) + return ConstantFP::get(Ty->getContext(), APFloat(V)); + llvm_unreachable("Can only constant fold half/float/double"); +} + +/// ConstantFoldConvertToInt - Attempt to an SSE floating point to integer +/// conversion of a constant floating point. If roundTowardZero is false, the +/// default IEEE rounding is used (toward nearest, ties to even). This matches +/// the behavior of the non-truncating SSE instructions in the default rounding +/// mode. The desired integer type Ty is used to select how many bits are +/// available for the result. Returns null if the conversion cannot be +/// performed, otherwise returns the Constant value resulting from the +/// conversion. +static Constant *ConstantFoldConvertToInt(const APFloat &Val, + bool roundTowardZero, Type *Ty) { + // All of these conversion intrinsics form an integer of at most 64bits. + unsigned ResultWidth = cast(Ty)->getBitWidth(); + assert(ResultWidth <= 64 && + "Can only constant fold conversions to 64 and 32 bit ints"); + + uint64_t UIntVal; + bool isExact = false; + APFloat::roundingMode mode = roundTowardZero? APFloat::rmTowardZero + : APFloat::rmNearestTiesToEven; + APFloat::opStatus status = Val.convertToInteger(&UIntVal, ResultWidth, + /*isSigned=*/true, mode, + &isExact); + if (status != APFloat::opOK && status != APFloat::opInexact) + return 0; + return ConstantInt::get(Ty, UIntVal, /*isSigned=*/true); } /// ConstantFoldCall - Attempt to constant fold a call to the specified function /// with the specified arguments, returning null if unsuccessful. 
- Constant * -llvm::ConstantFoldCall(Function *F, - Constant* const* Operands, unsigned NumOperands) { - const ValueName *NameVal = F->getValueName(); - if (NameVal == 0) return 0; - const char *Str = NameVal->getKeyData(); - unsigned Len = NameVal->getKeyLength(); - - const Type *Ty = F->getReturnType(); - if (NumOperands == 1) { +llvm::ConstantFoldCall(Function *F, ArrayRef Operands, + const TargetLibraryInfo *TLI) { + if (!F->hasName()) return 0; + StringRef Name = F->getName(); + + Type *Ty = F->getReturnType(); + if (Operands.size() == 1) { if (ConstantFP *Op = dyn_cast(Operands[0])) { - if (Ty!=Type::FloatTy && Ty!=Type::DoubleTy) + if (F->getIntrinsicID() == Intrinsic::convert_to_fp16) { + APFloat Val(Op->getValueAPF()); + + bool lost = false; + Val.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &lost); + + return ConstantInt::get(F->getContext(), Val.bitcastToAPInt()); + } + if (!TLI) + return 0; + + if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy()) + return 0; + + /// We only fold functions with finite arguments. Folding NaN and inf is + /// likely to be aborted with an exception anyway, and some host libms + /// have known errors raising exceptions. + if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity()) return 0; + /// Currently APFloat versions of these functions do not exist, so we use /// the host native double versions. Float versions are not called /// directly but for all these it is true (float)(f((double)arg)) == /// f(arg). Long double not supported yet. - double V = Ty==Type::FloatTy ? (double)Op->getValueAPF().convertToFloat(): - Op->getValueAPF().convertToDouble(); - switch (Str[0]) { + double V; + if (Ty->isFloatTy()) + V = Op->getValueAPF().convertToFloat(); + else if (Ty->isDoubleTy()) + V = Op->getValueAPF().convertToDouble(); + else { + bool unused; + APFloat APF = Op->getValueAPF(); + APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &unused); + V = APF.convertToDouble(); + } + + switch (F->getIntrinsicID()) { + default: break; + case Intrinsic::fabs: + return ConstantFoldFP(fabs, V, Ty); +#if HAVE_LOG2 + case Intrinsic::log2: + return ConstantFoldFP(log2, V, Ty); +#endif +#if HAVE_LOG + case Intrinsic::log: + return ConstantFoldFP(log, V, Ty); +#endif +#if HAVE_LOG10 + case Intrinsic::log10: + return ConstantFoldFP(log10, V, Ty); +#endif +#if HAVE_EXP + case Intrinsic::exp: + return ConstantFoldFP(exp, V, Ty); +#endif +#if HAVE_EXP2 + case Intrinsic::exp2: + return ConstantFoldFP(exp2, V, Ty); +#endif + case Intrinsic::floor: + return ConstantFoldFP(floor, V, Ty); + } + + switch (Name[0]) { case 'a': - if (Len == 4 && !strcmp(Str, "acos")) + if (Name == "acos" && TLI->has(LibFunc::acos)) return ConstantFoldFP(acos, V, Ty); - else if (Len == 4 && !strcmp(Str, "asin")) + else if (Name == "asin" && TLI->has(LibFunc::asin)) return ConstantFoldFP(asin, V, Ty); - else if (Len == 4 && !strcmp(Str, "atan")) + else if (Name == "atan" && TLI->has(LibFunc::atan)) return ConstantFoldFP(atan, V, Ty); break; case 'c': - if (Len == 4 && !strcmp(Str, "ceil")) + if (Name == "ceil" && TLI->has(LibFunc::ceil)) return ConstantFoldFP(ceil, V, Ty); - else if (Len == 3 && !strcmp(Str, "cos")) + else if (Name == "cos" && TLI->has(LibFunc::cos)) return ConstantFoldFP(cos, V, Ty); - else if (Len == 4 && !strcmp(Str, "cosh")) + else if (Name == "cosh" && TLI->has(LibFunc::cosh)) return ConstantFoldFP(cosh, V, Ty); - else if (Len == 4 && !strcmp(Str, "cosf")) + else if (Name == "cosf" && TLI->has(LibFunc::cosf)) return ConstantFoldFP(cos, V, 
Ty); break; case 'e': - if (Len == 3 && !strcmp(Str, "exp")) + if (Name == "exp" && TLI->has(LibFunc::exp)) return ConstantFoldFP(exp, V, Ty); + + if (Name == "exp2" && TLI->has(LibFunc::exp2)) { + // Constant fold exp2(x) as pow(2,x) in case the host doesn't have a + // C99 library. + return ConstantFoldBinaryFP(pow, 2.0, V, Ty); + } break; case 'f': - if (Len == 4 && !strcmp(Str, "fabs")) + if (Name == "fabs" && TLI->has(LibFunc::fabs)) return ConstantFoldFP(fabs, V, Ty); - else if (Len == 5 && !strcmp(Str, "floor")) + else if (Name == "floor" && TLI->has(LibFunc::floor)) return ConstantFoldFP(floor, V, Ty); break; case 'l': - if (Len == 3 && !strcmp(Str, "log") && V > 0) + if (Name == "log" && V > 0 && TLI->has(LibFunc::log)) return ConstantFoldFP(log, V, Ty); - else if (Len == 5 && !strcmp(Str, "log10") && V > 0) + else if (Name == "log10" && V > 0 && TLI->has(LibFunc::log10)) return ConstantFoldFP(log10, V, Ty); - else if (!strcmp(Str, "llvm.sqrt.f32") || - !strcmp(Str, "llvm.sqrt.f64")) { + else if (F->getIntrinsicID() == Intrinsic::sqrt && + (Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())) { if (V >= -0.0) return ConstantFoldFP(sqrt, V, Ty); else // Undefined @@ -686,66 +1390,201 @@ llvm::ConstantFoldCall(Function *F, } break; case 's': - if (Len == 3 && !strcmp(Str, "sin")) + if (Name == "sin" && TLI->has(LibFunc::sin)) return ConstantFoldFP(sin, V, Ty); - else if (Len == 4 && !strcmp(Str, "sinh")) + else if (Name == "sinh" && TLI->has(LibFunc::sinh)) return ConstantFoldFP(sinh, V, Ty); - else if (Len == 4 && !strcmp(Str, "sqrt") && V >= 0) + else if (Name == "sqrt" && V >= 0 && TLI->has(LibFunc::sqrt)) return ConstantFoldFP(sqrt, V, Ty); - else if (Len == 5 && !strcmp(Str, "sqrtf") && V >= 0) + else if (Name == "sqrtf" && V >= 0 && TLI->has(LibFunc::sqrtf)) return ConstantFoldFP(sqrt, V, Ty); - else if (Len == 4 && !strcmp(Str, "sinf")) + else if (Name == "sinf" && TLI->has(LibFunc::sinf)) return ConstantFoldFP(sin, V, Ty); break; case 't': - if (Len == 3 && !strcmp(Str, "tan")) + if (Name == "tan" && TLI->has(LibFunc::tan)) return ConstantFoldFP(tan, V, Ty); - else if (Len == 4 && !strcmp(Str, "tanh")) + else if (Name == "tanh" && TLI->has(LibFunc::tanh)) return ConstantFoldFP(tanh, V, Ty); break; default: break; } - } else if (ConstantInt *Op = dyn_cast(Operands[0])) { - if (Len > 11 && !memcmp(Str, "llvm.bswap", 10)) - return ConstantInt::get(Op->getValue().byteSwap()); - else if (Len > 11 && !memcmp(Str, "llvm.ctpop", 10)) + return 0; + } + + if (ConstantInt *Op = dyn_cast(Operands[0])) { + switch (F->getIntrinsicID()) { + case Intrinsic::bswap: + return ConstantInt::get(F->getContext(), Op->getValue().byteSwap()); + case Intrinsic::ctpop: return ConstantInt::get(Ty, Op->getValue().countPopulation()); - else if (Len > 10 && !memcmp(Str, "llvm.cttz", 9)) - return ConstantInt::get(Ty, Op->getValue().countTrailingZeros()); - else if (Len > 10 && !memcmp(Str, "llvm.ctlz", 9)) - return ConstantInt::get(Ty, Op->getValue().countLeadingZeros()); + case Intrinsic::convert_from_fp16: { + APFloat Val(APFloat::IEEEhalf, Op->getValue()); + + bool lost = false; + APFloat::opStatus status = + Val.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &lost); + + // Conversion is always precise. + (void)status; + assert(status == APFloat::opOK && !lost && + "Precision lost during fp16 constfolding"); + + return ConstantFP::get(F->getContext(), Val); + } + default: + return 0; + } + } + + // Support ConstantVector in case we have an Undef in the top. 
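//===--------------------------------------------------------------------===//
// Illustrative standalone sketch, not part of the patch: the pure bit-level
// folds performed above for the bswap and ctpop intrinsics, written against a
// plain uint32_t instead of APInt.  The helper names byteSwap32 and
// popCount32 are assumptions made for this example only.
#include <cstdint>
#include <iostream>

static std::uint32_t byteSwap32(std::uint32_t V) {
  // Reverse the byte order of a 32-bit value, as the bswap fold does.
  return (V >> 24) | ((V >> 8) & 0x0000FF00u) |
         ((V << 8) & 0x00FF0000u) | (V << 24);
}

static unsigned popCount32(std::uint32_t V) {
  // Count set bits, as the ctpop fold does.
  unsigned N = 0;
  for (; V; V &= V - 1)   // clear the lowest set bit on every iteration
    ++N;
  return N;
}

int main() {
  std::cout << std::hex << byteSwap32(0x12345678u) << '\n';  // 78563412
  std::cout << std::dec << popCount32(0x12345678u) << '\n';  // 13
  return 0;
}
//===--------------------------------------------------------------------===//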
+ if (isa(Operands[0]) || + isa(Operands[0])) { + Constant *Op = cast(Operands[0]); + switch (F->getIntrinsicID()) { + default: break; + case Intrinsic::x86_sse_cvtss2si: + case Intrinsic::x86_sse_cvtss2si64: + case Intrinsic::x86_sse2_cvtsd2si: + case Intrinsic::x86_sse2_cvtsd2si64: + if (ConstantFP *FPOp = + dyn_cast_or_null(Op->getAggregateElement(0U))) + return ConstantFoldConvertToInt(FPOp->getValueAPF(), + /*roundTowardZero=*/false, Ty); + case Intrinsic::x86_sse_cvttss2si: + case Intrinsic::x86_sse_cvttss2si64: + case Intrinsic::x86_sse2_cvttsd2si: + case Intrinsic::x86_sse2_cvttsd2si64: + if (ConstantFP *FPOp = + dyn_cast_or_null(Op->getAggregateElement(0U))) + return ConstantFoldConvertToInt(FPOp->getValueAPF(), + /*roundTowardZero=*/true, Ty); + } + } + + if (isa(Operands[0])) { + if (F->getIntrinsicID() == Intrinsic::bswap) + return Operands[0]; + return 0; } - } else if (NumOperands == 2) { + + return 0; + } + + if (Operands.size() == 2) { if (ConstantFP *Op1 = dyn_cast(Operands[0])) { - if (Ty!=Type::FloatTy && Ty!=Type::DoubleTy) + if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy()) return 0; - double Op1V = Ty==Type::FloatTy ? - (double)Op1->getValueAPF().convertToFloat(): - Op1->getValueAPF().convertToDouble(); + double Op1V; + if (Ty->isFloatTy()) + Op1V = Op1->getValueAPF().convertToFloat(); + else if (Ty->isDoubleTy()) + Op1V = Op1->getValueAPF().convertToDouble(); + else { + bool unused; + APFloat APF = Op1->getValueAPF(); + APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &unused); + Op1V = APF.convertToDouble(); + } + if (ConstantFP *Op2 = dyn_cast(Operands[1])) { - double Op2V = Ty==Type::FloatTy ? - (double)Op2->getValueAPF().convertToFloat(): - Op2->getValueAPF().convertToDouble(); + if (Op2->getType() != Op1->getType()) + return 0; + + double Op2V; + if (Ty->isFloatTy()) + Op2V = Op2->getValueAPF().convertToFloat(); + else if (Ty->isDoubleTy()) + Op2V = Op2->getValueAPF().convertToDouble(); + else { + bool unused; + APFloat APF = Op2->getValueAPF(); + APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &unused); + Op2V = APF.convertToDouble(); + } - if (Len == 3 && !strcmp(Str, "pow")) { + if (F->getIntrinsicID() == Intrinsic::pow) { return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty); - } else if (Len == 4 && !strcmp(Str, "fmod")) { + } + if (!TLI) + return 0; + if (Name == "pow" && TLI->has(LibFunc::pow)) + return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty); + if (Name == "fmod" && TLI->has(LibFunc::fmod)) return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty); - } else if (Len == 5 && !strcmp(Str, "atan2")) { + if (Name == "atan2" && TLI->has(LibFunc::atan2)) return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty); - } } else if (ConstantInt *Op2C = dyn_cast(Operands[1])) { - if (!strcmp(Str, "llvm.powi.f32")) { - return ConstantFP::get(APFloat((float)std::pow((float)Op1V, + if (F->getIntrinsicID() == Intrinsic::powi && Ty->isHalfTy()) + return ConstantFP::get(F->getContext(), + APFloat((float)std::pow((float)Op1V, (int)Op2C->getZExtValue()))); - } else if (!strcmp(Str, "llvm.powi.f64")) { - return ConstantFP::get(APFloat((double)std::pow((double)Op1V, + if (F->getIntrinsicID() == Intrinsic::powi && Ty->isFloatTy()) + return ConstantFP::get(F->getContext(), + APFloat((float)std::pow((float)Op1V, (int)Op2C->getZExtValue()))); + if (F->getIntrinsicID() == Intrinsic::powi && Ty->isDoubleTy()) + return ConstantFP::get(F->getContext(), + APFloat((double)std::pow((double)Op1V, + (int)Op2C->getZExtValue()))); + } + return 0; + } + + if (ConstantInt 
*Op1 = dyn_cast(Operands[0])) { + if (ConstantInt *Op2 = dyn_cast(Operands[1])) { + switch (F->getIntrinsicID()) { + default: break; + case Intrinsic::sadd_with_overflow: + case Intrinsic::uadd_with_overflow: + case Intrinsic::ssub_with_overflow: + case Intrinsic::usub_with_overflow: + case Intrinsic::smul_with_overflow: + case Intrinsic::umul_with_overflow: { + APInt Res; + bool Overflow; + switch (F->getIntrinsicID()) { + default: llvm_unreachable("Invalid case"); + case Intrinsic::sadd_with_overflow: + Res = Op1->getValue().sadd_ov(Op2->getValue(), Overflow); + break; + case Intrinsic::uadd_with_overflow: + Res = Op1->getValue().uadd_ov(Op2->getValue(), Overflow); + break; + case Intrinsic::ssub_with_overflow: + Res = Op1->getValue().ssub_ov(Op2->getValue(), Overflow); + break; + case Intrinsic::usub_with_overflow: + Res = Op1->getValue().usub_ov(Op2->getValue(), Overflow); + break; + case Intrinsic::smul_with_overflow: + Res = Op1->getValue().smul_ov(Op2->getValue(), Overflow); + break; + case Intrinsic::umul_with_overflow: + Res = Op1->getValue().umul_ov(Op2->getValue(), Overflow); + break; + } + Constant *Ops[] = { + ConstantInt::get(F->getContext(), Res), + ConstantInt::get(Type::getInt1Ty(F->getContext()), Overflow) + }; + return ConstantStruct::get(cast(F->getReturnType()), Ops); + } + case Intrinsic::cttz: + if (Op2->isOne() && Op1->isZero()) // cttz(0, 1) is undef. + return UndefValue::get(Ty); + return ConstantInt::get(Ty, Op1->getValue().countTrailingZeros()); + case Intrinsic::ctlz: + if (Op2->isOne() && Op1->isZero()) // ctlz(0, 1) is undef. + return UndefValue::get(Ty); + return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros()); } } + + return 0; } + return 0; } return 0; } -
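//===--------------------------------------------------------------------===//
// Illustrative standalone sketch, not part of the patch: the fold performed
// above for llvm.sadd.with.overflow, which produces the wrapped result plus
// an overflow flag, the same pair the intrinsic's result struct carries.  It
// is written here for int32_t with a portable check instead of
// APInt::sadd_ov; the helper name foldSAddOverflow is an assumption made for
// this example only.
#include <cstdint>
#include <iostream>
#include <utility>

static std::pair<std::int32_t, bool> foldSAddOverflow(std::int32_t A,
                                                      std::int32_t B) {
  // Wrapping add via unsigned arithmetic (well defined), then detect signed
  // overflow: it occurs iff both operands share a sign and the result's sign
  // differs from it.  Converting the wrapped value back to int32_t assumes
  // the usual two's-complement behaviour.
  std::uint32_t UR =
      static_cast<std::uint32_t>(A) + static_cast<std::uint32_t>(B);
  std::int32_t R = static_cast<std::int32_t>(UR);
  bool Overflow = ((A >= 0) == (B >= 0)) && ((R >= 0) != (A >= 0));
  return {R, Overflow};
}

int main() {
  auto [R1, O1] = foldSAddOverflow(2147483640, 10);
  std::cout << R1 << " overflow=" << O1 << '\n';  // wrapped result, overflow=1
  auto [R2, O2] = foldSAddOverflow(40, 2);
  std::cout << R2 << " overflow=" << O2 << '\n';  // 42 overflow=0
  return 0;
}
//===--------------------------------------------------------------------===//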