//===-- ConstantFolding.cpp - Fold instructions into constants -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cerrno>
#include <cmath>
#ifdef HAVE_FENV_H
#include <fenv.h>
#endif

using namespace llvm;
//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//
/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
static Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  // Catch the obvious splat cases.
  if (C->isNullValue() && !DestTy->isX86_MMXTy())
    return Constant::getNullValue(DestTy);
  if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() &&
      !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
    return Constant::getAllOnesValue(DestTy);

  // Handle a vector->integer cast.
  if (IntegerType *IT = dyn_cast<IntegerType>(DestTy)) {
    VectorType *VTy = dyn_cast<VectorType>(C->getType());
    if (!VTy)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned NumSrcElts = VTy->getNumElements();
    Type *SrcEltTy = VTy->getElementType();

    // If the vector is a vector of floating point, convert it to a vector of
    // integers to simplify things.
    if (SrcEltTy->isFloatingPointTy()) {
      unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
      Type *SrcIVTy =
        VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
      // Ask IR to do the conversion now that #elts line up.
      C = ConstantExpr::getBitCast(C, SrcIVTy);
    }

    ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(C);
    if (!CDV)
      return ConstantExpr::getBitCast(C, DestTy);

    // Now that we know that the input value is a vector of integers, just shift
    // and insert them into our result.
    unsigned BitShift = DL.getTypeAllocSizeInBits(SrcEltTy);
    APInt Result(IT->getBitWidth(), 0);
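    // Each iteration shifts the accumulated value left and ORs in the next
    // element, so the element inserted first ends up in the most significant
    // bits; little-endian targets therefore walk the source vector backwards.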
    for (unsigned i = 0; i != NumSrcElts; ++i) {
      Result <<= BitShift;
      if (DL.isLittleEndian())
        Result |= CDV->getElementAsInteger(NumSrcElts-i-1);
      else
        Result |= CDV->getElementAsInteger(i);
    }

    return ConstantInt::get(IT, Result);
  }

  // The code below only handles casts to vectors currently.
  VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = DestVTy->getNumElements();
  unsigned NumSrcElt = C->getType()->getVectorNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = C->getType()->getVectorElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing. For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>
  //
  // First things first: we only want to think about integers here, so if
  // we have something in FP form, recast it as an integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    Type *DestIVTy =
      VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, DL);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer, if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    Type *SrcIVTy =
      VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    // If IR wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same. Do the
  // conversion here, which depends on whether the input or output has
  // the most elements.
  bool isLittleEndian = DL.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
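      // Each source element occupies SrcBitSize bits of the wider destination
      // element. On little-endian targets the first source element fills the
      // low bits (the shift starts at 0 and grows); on big-endian targets it
      // fills the high bits and the shift walks downward.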
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++));
        if (!Src)  // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantExpr::getZExt(Src, Elt->getType());

        // Shift it to the right place, depending on endianness.
        Src = ConstantExpr::getShl(Src,
                                   ConstantInt::get(Src->getType(), ShiftAmt));
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantExpr::getOr(Elt, Src);
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i));
    if (!Src)  // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      Constant *Elt = ConstantExpr::getLShr(Src,
                                  ConstantInt::get(Src->getType(), ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate the element to an integer with the same pointer size and
      // convert the element back to a pointer using an inttoptr.
      if (DstEltTy->isPointerTy()) {
        IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
        Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
        Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
        continue;
      }

      // Truncate and remember this piece.
      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
    }
  }

  return ConstantVector::get(Result);
}
/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                       APInt &Offset, const DataLayout &DL) {
  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getPointerTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast ||
      CE->getOpcode() == Instruction::AddrSpaceCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  GEPOperator *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getPointerTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL))
    return false;

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
}
/// Recursive helper to read bits out of global. C is the constant being copied
/// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
/// results into and BytesLeft is the number of bytes left in
/// the CurPtr buffer. DL is the DataLayout.
static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
                               unsigned char *CurPtr, unsigned BytesLeft,
                               const DataLayout &DL) {
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() > 64 ||
        (CI->getBitWidth() & 7) != 0)
      return false;

    uint64_t Val = CI->getZExtValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);
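    // Copy the requested bytes out one at a time; byte n of the value is
    // (Val >> (n * 8)), and big-endian targets mirror the byte order within
    // the integer.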
    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      int n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = (unsigned char)(Val >> (n * 8));
      ++ByteOffset;
    }
    return true;
  }

  if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()) {
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()) {
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }

  if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (1) {
      // If the element access is to the element itself and not to tail padding,
      // read the bytes from the element.
      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element, if so we're done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    Type *EltTy = C->getType()->getSequentialElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;
    uint64_t NumElts;
    if (ArrayType *AT = dyn_cast<ArrayType>(C->getType()))
      NumElts = AT->getNumElements();
    else
      NumElts = C->getType()->getVectorNumElements();

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}
static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
                                                 const DataLayout &DL) {
  PointerType *PTy = cast<PointerType>(C->getType());
  Type *LoadTy = PTy->getElementType();
  IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    unsigned AS = PTy->getAddressSpace();

    // If this is a float/double load, we can try folding it as an int32/64 load
    // and then bitcast the result. This can be useful for union cases. Note
    // that address spaces don't matter here since we're not going to result in
    // an actual new load.
    Type *MapTy;
    if (LoadTy->isHalfTy())
      MapTy = Type::getInt16PtrTy(C->getContext(), AS);
    else if (LoadTy->isFloatTy())
      MapTy = Type::getInt32PtrTy(C->getContext(), AS);
    else if (LoadTy->isDoubleTy())
      MapTy = Type::getInt64PtrTy(C->getContext(), AS);
    else if (LoadTy->isVectorTy()) {
      MapTy = PointerType::getIntNPtrTy(C->getContext(),
                                        DL.getTypeAllocSizeInBits(LoadTy), AS);
    } else
      return nullptr;

    C = FoldBitCast(C, MapTy, DL);
    if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, DL))
      return FoldBitCast(Res, LoadTy, DL);
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  GlobalValue *GVal;
  APInt Offset;
  if (!IsConstantOffsetFromGlobal(C, GVal, Offset, DL))
    return nullptr;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(GVal);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      !GV->getInitializer()->getType()->isSized())
    return nullptr;

  // If we're loading off the beginning of the global, some bytes may be valid,
  // but we don't try to handle this.
  if (Offset.isNegative())
    return nullptr;

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset.getZExtValue() >=
      DL.getTypeAllocSize(GV->getInitializer()->getType()))
    return UndefValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  if (!ReadDataFromGlobal(GV->getInitializer(), Offset.getZExtValue(), RawBytes,
                          BytesLoaded, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
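  // Reassemble the bytes into a single integer in the target's byte order;
  // the most significant byte of the result is folded in first.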
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}
static Constant *ConstantFoldLoadThroughBitcast(ConstantExpr *CE,
                                                const DataLayout &DL) {
  auto *DestPtrTy = dyn_cast<PointerType>(CE->getType());
  if (!DestPtrTy)
    return nullptr;
  Type *DestTy = DestPtrTy->getElementType();

  Constant *C = ConstantFoldLoadFromConstPtr(CE->getOperand(0), DL);
  if (!C)
    return nullptr;

  do {
    Type *SrcTy = C->getType();

    // If the type sizes are the same and a cast is legal, just directly
    // cast the constant.
    if (DL.getTypeSizeInBits(DestTy) == DL.getTypeSizeInBits(SrcTy)) {
      Instruction::CastOps Cast = Instruction::BitCast;
      // If we are going from a pointer to int or vice versa, we spell the cast
      // differently.
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;

      if (CastInst::castIsValid(Cast, C, DestTy))
        return ConstantExpr::getCast(Cast, C, DestTy);
    }

    // If this isn't an aggregate type, there is nothing we can do to drill down
    // and find a bitcastable constant.
    if (!SrcTy->isAggregateType())
      return nullptr;

    // We're simulating a load through a pointer that was bitcast to point to
    // a different type, so we can try to walk down through the initial
    // elements of an aggregate to see if some part of the aggregate is
    // castable to implement the "load" semantic model.
    C = C->getAggregateElement(0u);
  } while (C);

  return nullptr;
}
/// Return the value that a load from C would produce if it is constant and
/// determinable. If this is not determinable, return null.
Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
                                             const DataLayout &DL) {
  // First, try the easy cases:
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      return GV->getInitializer();

  // If the loaded value isn't a constant expr, we can't handle it.
  ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
  if (!CE)
    return nullptr;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
      if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
        if (Constant *V =
             ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
          return V;
      }
    }
  }

  if (CE->getOpcode() == Instruction::BitCast)
    if (Constant *LoadedC = ConstantFoldLoadThroughBitcast(CE, DL))
      return LoadedC;

  // Instead of loading a constant C string, use the corresponding integer
  // value directly if the string length is small enough.
  StringRef Str;
  if (getConstantStringInfo(CE, Str) && !Str.empty()) {
    unsigned StrLen = Str.size();
    Type *Ty = cast<PointerType>(CE->getType())->getElementType();
    unsigned NumBits = Ty->getPrimitiveSizeInBits();
    // Replace load with immediate integer if the result is an integer or fp
    // value.
    if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
        (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
      APInt StrVal(NumBits, 0);
      APInt SingleChar(NumBits, 0);
      if (DL.isLittleEndian()) {
        for (signed i = StrLen-1; i >= 0; i--) {
          SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
          StrVal = (StrVal << 8) | SingleChar;
        }
      } else {
        for (unsigned i = 0; i < StrLen; i++) {
          SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
          StrVal = (StrVal << 8) | SingleChar;
        }
        // Append NUL at the end.
        SingleChar = 0;
        StrVal = (StrVal << 8) | SingleChar;
      }

      Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
      if (Ty->isFloatingPointTy())
        Res = ConstantExpr::getBitCast(Res, Ty);
      return Res;
    }
  }

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
  if (GlobalVariable *GV =
          dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, DL))) {
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      Type *ResTy = cast<PointerType>(C->getType())->getElementType();
      if (GV->getInitializer()->isNullValue())
        return Constant::getNullValue(ResTy);
      if (isa<UndefValue>(GV->getInitializer()))
        return UndefValue::get(ResTy);
    }
  }

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  return FoldReinterpretLoadFromConstPtr(CE, DL);
}
static Constant *ConstantFoldLoadInst(const LoadInst *LI,
                                      const DataLayout &DL) {
  if (LI->isVolatile()) return nullptr;

  if (Constant *C = dyn_cast<Constant>(LI->getOperand(0)))
    return ConstantFoldLoadFromConstPtr(C, DL);

  return nullptr;
}
/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together. If target data info is available, it is provided as DL,
/// otherwise DL is null.
static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
                                           Constant *Op1,
                                           const DataLayout &DL) {
  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.

  if (Opc == Instruction::And) {
    unsigned BitWidth = DL.getTypeSizeInBits(Op0->getType()->getScalarType());
    APInt KnownZero0(BitWidth, 0), KnownOne0(BitWidth, 0);
    APInt KnownZero1(BitWidth, 0), KnownOne1(BitWidth, 0);
    computeKnownBits(Op0, KnownZero0, KnownOne0, DL);
    computeKnownBits(Op1, KnownZero1, KnownOne1, DL);
    if ((KnownOne1 | KnownZero0).isAllOnesValue()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((KnownOne0 | KnownZero1).isAllOnesValue()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }
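    // Otherwise combine what is known about both operands: a result bit is
    // known zero if it is zero in either operand, and known one only if it is
    // one in both. If that pins down every bit, the 'and' folds to a constant.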
    APInt KnownZero = KnownZero0 | KnownZero1;
    APInt KnownOne = KnownOne0 & KnownOne1;
    if ((KnownZero | KnownOne).isAllOnesValue()) {
      return ConstantInt::get(Op0->getType(), KnownOne);
    }
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant. This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub) {
    GlobalValue *GV1, *GV2;
    APInt Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());

        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth so we have to convert to the right
        // size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return nullptr;
}
/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly casted by the getelementptr.
static Constant *CastGEPIndices(ArrayRef<Constant *> Ops, Type *ResultTy,
                                const DataLayout &DL,
                                const TargetLibraryInfo *TLI) {
  Type *IntPtrTy = DL.getIntPtrType(ResultTy);

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             Ops[0]->getType()->getPointerElementType(),
             Ops.slice(1, i - 1)))) &&
        Ops[i]->getType() != IntPtrTy) {
      Any = true;
      NewIdxs.push_back(ConstantExpr::getCast(
          CastInst::getCastOpcode(Ops[i], true, IntPtrTy, true),
          Ops[i], IntPtrTy));
    } else
      NewIdxs.push_back(Ops[i]);
  }
  if (!Any)
    return nullptr;

  Constant *C = ConstantExpr::getGetElementPtr(Ops[0], NewIdxs);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
    if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
      C = Folded;
  }
  return C;
}
/// Strip the pointer casts, but preserve the address space information.
static Constant* StripPtrCastKeepAS(Constant* Ptr) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  PointerType *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = Ptr->stripPointerCasts();
  PointerType *NewPtrTy = cast<PointerType>(Ptr->getType());

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    NewPtrTy = NewPtrTy->getElementType()->getPointerTo(
      OldPtrTy->getAddressSpace());
    Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy);
  }
  return Ptr;
}
/// If we can symbolically evaluate the GEP constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
                                         Type *ResultTy, const DataLayout &DL,
                                         const TargetLibraryInfo *TLI) {
  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->getPointerElementType()->isSized() ||
      !Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntPtrTy = DL.getIntPtrType(Ptr->getType());
  Type *ResultElementTy = ResultTy->getPointerElementType();

  // If this is a constant expr gep that is effectively computing an
  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'.
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i])) {

      // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
      // "inttoptr (sub (ptrtoint Ptr), V)"
      if (Ops.size() == 2 && ResultElementTy->isIntegerTy(8)) {
        ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[1]);
        assert((!CE || CE->getType() == IntPtrTy) &&
               "CastGEPIndices didn't canonicalize index types!");
        if (CE && CE->getOpcode() == Instruction::Sub &&
            CE->getOperand(0)->isNullValue()) {
          Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
          Res = ConstantExpr::getSub(Res, CE->getOperand(1));
          Res = ConstantExpr::getIntToPtr(Res, ResultTy);
          if (ConstantExpr *ResCE = dyn_cast<ConstantExpr>(Res))
            Res = ConstantFoldConstantExpression(ResCE, DL, TLI);
          return Res;
        }
      }
      return nullptr;
    }

  unsigned BitWidth = DL.getTypeSizeInBits(IntPtrTy);
  APInt Offset =
      APInt(BitWidth,
            DL.getIndexedOffset(
                Ptr->getType(),
                makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
    SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (unsigned i = 0, e = NestedOps.size(); i != e; ++i)
      if (!isa<ConstantInt>(NestedOps[i])) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    Offset += APInt(BitWidth, DL.getIndexedOffset(Ptr->getType(), NestedOps));
    Ptr = StripPtrCastKeepAS(Ptr);
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (ConstantInt *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

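  // If the base pointer is null or folds to a literal integer, the whole GEP
  // collapses to an inttoptr of (base + offset).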
  if (Ptr->isNullValue() || BasePtr != 0) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResultTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.
  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Forming regular GEP of non-pointer type");
  SmallVector<Constant *, 32> NewIdxs;

  do {
    if (SequentialType *ATy = dyn_cast<SequentialType>(Ty)) {
      if (ATy->isPointerTy()) {
        // The only pointer indexing we'll do is on the first index of the GEP.
        if (!NewIdxs.empty())
          break;

        // Only handle pointers to sized types, not pointers to functions.
        if (!ATy->getElementType()->isSized())
          return nullptr;
      }

      // Determine which element of the array the offset points into.
      APInt ElemSize(BitWidth, DL.getTypeAllocSize(ATy->getElementType()));
      if (ElemSize == 0)
        // The element size is 0. This may be [0 x Ty]*, so just use a zero
        // index for this level and proceed to the next level to see if it can
        // accommodate the offset.
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
      else {
        // The element size is non-zero, so divide the offset by the element
        // size (rounding down), to compute the index at this level.
        APInt NewIdx = Offset.udiv(ElemSize);
        Offset -= NewIdx * ElemSize;
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
      }
      Ty = ATy->getElementType();
    } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
      // If we end up with an offset that isn't valid for this struct type, we
      // can't re-form this GEP in a regular form, so bail out. The pointer
      // operand likely went through casts that are necessary to make the GEP
      // sensible.
      const StructLayout &SL = *DL.getStructLayout(STy);
      if (Offset.uge(SL.getSizeInBytes()))
        break;

      // Determine which field of the struct the offset points into. The
      // getZExtValue is fine as we've already ensured that the offset is
      // within the range representable by the StructLayout API.
      unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
      NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                         ElIdx));
      Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
      Ty = STy->getTypeAtIndex(ElIdx);
    } else {
      // We've reached some non-indexable type.
      break;
    }
  } while (Ty != ResultElementTy);

  // If we haven't used up the entire offset by descending the static
  // type, then the offset is pointing into the middle of an indivisible
  // member, so we can't simplify it.
  if (Offset != 0)
    return nullptr;

  // Create a GEP.
  Constant *C = ConstantExpr::getGetElementPtr(Ptr, NewIdxs);
  assert(C->getType()->getPointerElementType() == Ty &&
         "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
  if (Ty != ResultElementTy)
    C = FoldBitCast(C, ResultTy, DL);

  return C;
}
//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//
/// Try to constant fold the specified instruction.
/// If successful, the constant result is returned, if not, null is returned.
/// Note that this fails if not all of the operands are constant. Otherwise,
/// this function can only fail when attempting to fold instructions like loads
/// and stores, which have no constant expression form.
Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *Incoming = PN->getIncomingValue(i);
      // If the incoming value is undef then skip it. Note that while we could
      // skip the value if it is equal to the phi node itself we choose not to
      // because that would break the rule that constant folding only applies
      // if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      Constant *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      if (ConstantExpr *NewC = dyn_cast<ConstantExpr>(C))
        C = ConstantFoldConstantExpression(NewC, DL, TLI);
      // If the incoming value is a different constant to
      // the one we saw previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants, if so,
  // hand off to ConstantFoldInstOperands.
  SmallVector<Constant*, 8> Ops;
  for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i) {
    Constant *Op = dyn_cast<Constant>(*i);
    if (!Op)
      return nullptr;  // All operands not constant!

    // Fold the Instruction's operands.
    if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(Op))
      Op = ConstantFoldConstantExpression(NewCE, DL, TLI);

    Ops.push_back(Op);
  }

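  // All operands are now constants; dispatch on the instruction kind.
  // Compares, loads, and insert/extractvalue need special handling; everything
  // else is funneled through ConstantFoldInstOperands.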
  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI);

  if (const LoadInst *LI = dyn_cast<LoadInst>(I))
    return ConstantFoldLoadInst(LI, DL);

  if (InsertValueInst *IVI = dyn_cast<InsertValueInst>(I)) {
    return ConstantExpr::getInsertValue(
                                cast<Constant>(IVI->getAggregateOperand()),
                                cast<Constant>(IVI->getInsertedValueOperand()),
                                IVI->getIndices());
  }

  if (ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I)) {
    return ConstantExpr::getExtractValue(
                                    cast<Constant>(EVI->getAggregateOperand()),
                                    EVI->getIndices());
  }

  return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Ops, DL, TLI);
}
static Constant *
ConstantFoldConstantExpressionImpl(const ConstantExpr *CE, const DataLayout &DL,
                                   const TargetLibraryInfo *TLI,
                                   SmallPtrSetImpl<ConstantExpr *> &FoldedOps) {
  SmallVector<Constant *, 8> Ops;
  for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end(); i != e;
       ++i) {
    Constant *NewC = cast<Constant>(*i);
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(NewC)) {
      if (FoldedOps.insert(NewCE).second)
        NewC = ConstantFoldConstantExpressionImpl(NewCE, DL, TLI, FoldedOps);
    }
    Ops.push_back(NewC);
  }

  if (CE->isCompare())
    return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI);
  return ConstantFoldInstOperands(CE->getOpcode(), CE->getType(), Ops, DL, TLI);
}

/// Attempt to fold the constant expression
/// using the specified DataLayout. If successful, the constant result is
/// returned, if not, null is returned.
Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
                                               const DataLayout &DL,
                                               const TargetLibraryInfo *TLI) {
  SmallPtrSet<ConstantExpr *, 4> FoldedOps;
  return ConstantFoldConstantExpressionImpl(CE, DL, TLI, FoldedOps);
}
/// Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned, if not, null is returned. Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
///
/// TODO: This function neither utilizes nor preserves nsw/nuw/inbounds/etc
/// information, due to only being passed an opcode and operands. Constant
/// folding using this function strips this information.
///
Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI) {
  // Handle easy binops first.
  if (Instruction::isBinaryOp(Opcode)) {
    if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1])) {
      if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], DL))
        return C;
    }

    return ConstantExpr::get(Opcode, Ops[0], Ops[1]);
  }

  switch (Opcode) {
  default: return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: llvm_unreachable("Invalid for compares");
  case Instruction::Call:
    if (Function *F = dyn_cast<Function>(Ops.back()))
      if (canConstantFoldCallTo(F))
        return ConstantFoldCall(F, Ops.slice(0, Ops.size() - 1), TLI);
    return nullptr;
  case Instruction::PtrToInt:
    // If the input is an inttoptr, eliminate the pair. This requires knowing
    // the width of a pointer, so it can't be done in ConstantExpr::getCast.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) {
      if (CE->getOpcode() == Instruction::IntToPtr) {
        Constant *Input = CE->getOperand(0);
        unsigned InWidth = Input->getType()->getScalarSizeInBits();
        unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType());
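        // If the pointer is narrower than the original integer, the inttoptr
        // implicitly truncated the value; apply the same masking here so the
        // folded result keeps only the bits a pointer can hold.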
        if (PtrWidth < InWidth) {
          Constant *Mask =
              ConstantInt::get(CE->getContext(),
                               APInt::getLowBitsSet(InWidth, PtrWidth));
          Input = ConstantExpr::getAnd(Input, Mask);
        }
        // Do a zext or trunc to get to the dest size.
        return ConstantExpr::getIntegerCast(Input, DestTy, false);
      }
    }
    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size and the address spaces are the same.
    // This requires knowing the width of a pointer, so it can't be done in
    // ConstantExpr::getCast.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
          if (SrcAS == DestTy->getPointerAddressSpace())
            return FoldBitCast(CE->getOperand(0), DestTy, DL);
        }
      }
    }

    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::BitCast:
    return FoldBitCast(Ops[0], DestTy, DL);
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
  case Instruction::GetElementPtr:
    if (Constant *C = CastGEPIndices(Ops, DestTy, DL, TLI))
      return C;
    if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1));
  }
}
/// Attempt to constant fold a compare
/// instruction (icmp/fcmp) with the specified operands. If it fails, it
/// returns a constant expression of the specified operands.
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                Constant *Ops0, Constant *Ops1,
                                                const DataLayout &DL,
                                                const TargetLibraryInfo *TLI) {
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // FIXME: The following comment is out of date and the DataLayout is here now.
  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
  // around to know if bit truncation is happening.
  if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (Ops1->isNullValue()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
      }

      // Only do this transformation if the int is intptrty in size, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
        if (CE0->getType() == IntPtrTy) {
          Constant *C = CE0->getOperand(0);
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }
    }

    if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
        }

        // Only do this transformation if the int is intptrty in size, otherwise
        // there is a truncation or extension that we aren't modeling.
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(0), Ops1, DL, TLI);
      Constant *RHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(1), Ops1, DL, TLI);
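      // (or x, y) == 0 requires both halves to be zero, while (or x, y) != 0
      // needs only one, hence And for EQ and Or for NE.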
      unsigned OpC =
          Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      Constant *Ops[] = { LHS, RHS };
      return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, DL, TLI);
    }
  }

  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}
/// Given a constant and a getelementptr constantexpr, return the constant value
/// being addressed by the constant expression, or null if something is funny
/// and we can't decide.
Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
                                                       ConstantExpr *CE) {
  if (!CE->getOperand(1)->isNullValue())
    return nullptr;  // Do not allow stepping over the value!

  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
    C = C->getAggregateElement(CE->getOperand(i));
    if (!C)
      return nullptr;
  }
  return C;
}

/// Given a constant and getelementptr indices (with an *implied* zero pointer
/// index that is not in the list), return the constant value being addressed by
/// a virtual load, or null if something is funny and we can't decide.
Constant *llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
                                                  ArrayRef<Constant*> Indices) {
  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
    C = C->getAggregateElement(Indices[i]);
    if (!C)
      return nullptr;
  }
  return C;
}
//===----------------------------------------------------------------------===//
//  Constant Folding for Calls
//===----------------------------------------------------------------------===//
/// Return true if it's even possible to fold a call to the specified function.
bool llvm::canConstantFoldCallTo(const Function *F) {
  switch (F->getIntrinsicID()) {
  case Intrinsic::fabs:
  case Intrinsic::minnum:
  case Intrinsic::maxnum:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::sqrt:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::fma:
  case Intrinsic::fmuladd:
  case Intrinsic::copysign:
  case Intrinsic::round:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
    return true;
  default:
    return false;
  case Intrinsic::not_intrinsic: break;
  }

  if (!F->hasName())
    return false;
  StringRef Name = F->getName();

  // In these cases, the check of the length is required. We don't want to
  // return true for a name like "cos\0blah" which strcmp would return equal to
  // "cos", but has length 8.
  switch (Name[0]) {
  default: return false;
  case 'a':
    return Name == "acos" || Name == "asin" || Name == "atan" || Name == "atan2";
  case 'c':
    return Name == "cos" || Name == "ceil" || Name == "cosf" || Name == "cosh";
  case 'e':
    return Name == "exp" || Name == "exp2";
  case 'f':
    return Name == "fabs" || Name == "fmod" || Name == "floor";
  case 'l':
    return Name == "log" || Name == "log10";
  case 'p':
    return Name == "pow";
  case 's':
    return Name == "sin" || Name == "sinh" || Name == "sqrt" ||
           Name == "sinf" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanh";
  }
}
static Constant *GetConstantFoldFPValue(double V, Type *Ty) {
  if (Ty->isHalfTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isFloatTy())
    return ConstantFP::get(Ty->getContext(), APFloat((float)V));
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold half/float/double");
}

/// Clear the floating-point exception state.
static inline void llvm_fenv_clearexcept() {
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
  feclearexcept(FE_ALL_EXCEPT);
#endif
  errno = 0;
}

/// Test if a floating-point exception was raised.
static inline bool llvm_fenv_testexcept() {
  int errno_val = errno;
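  // Some libm implementations report math errors through errno rather than
  // floating-point exception flags, so check errno first.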
  if (errno_val == ERANGE || errno_val == EDOM)
    return true;
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
  if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
    return true;
#endif
  return false;
}

static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
                                Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}

static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
                                      double V, double W, Type *Ty) {
  llvm_fenv_clearexcept();
  V = NativeFP(V, W);
  if (llvm_fenv_testexcept()) {
    llvm_fenv_clearexcept();
    return nullptr;
  }

  return GetConstantFoldFPValue(V, Ty);
}
/// Attempt to fold an SSE floating point to integer conversion of a constant
/// floating point. If roundTowardZero is false, the default IEEE rounding is
/// used (toward nearest, ties to even). This matches the behavior of the
/// non-truncating SSE instructions in the default rounding mode. The desired
/// integer type Ty is used to select how many bits are available for the
/// result. Returns null if the conversion cannot be performed, otherwise
/// returns the Constant value resulting from the conversion.
static Constant *ConstantFoldConvertToInt(const APFloat &Val,
                                          bool roundTowardZero, Type *Ty) {
  // All of these conversion intrinsics form an integer of at most 64 bits.
  unsigned ResultWidth = Ty->getIntegerBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  uint64_t UIntVal;
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero? APFloat::rmTowardZero
                                              : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status = Val.convertToInteger(&UIntVal, ResultWidth,
                                                  /*isSigned=*/true, mode,
                                                  &isExact);
  if (status != APFloat::opOK && status != APFloat::opInexact)
    return nullptr;
  return ConstantInt::get(Ty, UIntVal, /*isSigned=*/true);
}

static double getValueAsDouble(ConstantFP *Op) {
  Type *Ty = Op->getType();

  if (Ty->isFloatTy())
    return Op->getValueAPF().convertToFloat();

  if (Ty->isDoubleTy())
    return Op->getValueAPF().convertToDouble();

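  // Otherwise (e.g. half) widen the value to double via APFloat; for half
  // this conversion is exact.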
  bool unused;
  APFloat APF = Op->getValueAPF();
  APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &unused);
  return APF.convertToDouble();
}
static Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID,
                                        Type *Ty, ArrayRef<Constant *> Operands,
                                        const TargetLibraryInfo *TLI) {
  if (Operands.size() == 1) {
    if (ConstantFP *Op = dyn_cast<ConstantFP>(Operands[0])) {
      if (IntrinsicID == Intrinsic::convert_to_fp16) {
        APFloat Val(Op->getValueAPF());

        bool lost = false;
        Val.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &lost);

        return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
      }

      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
        return nullptr;

      if (IntrinsicID == Intrinsic::round) {
        APFloat V = Op->getValueAPF();
        V.roundToIntegral(APFloat::rmNearestTiesToAway);
        return ConstantFP::get(Ty->getContext(), V);
      }

      /// We only fold functions with finite arguments. Folding NaN and inf is
      /// likely to be aborted with an exception anyway, and some host libms
      /// have known errors raising exceptions.
      if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
        return nullptr;

      /// Currently APFloat versions of these functions do not exist, so we use
      /// the host native double versions. Float versions are not called
      /// directly but for all these it is true (float)(f((double)arg)) ==
      /// f(arg). Long double not supported yet.
      double V = getValueAsDouble(Op);

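      // Try the target-independent intrinsics first; the libcall names below
      // are only folded when TargetLibraryInfo confirms the function is
      // available.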
      switch (IntrinsicID) {
        default: break;
        case Intrinsic::fabs:
          return ConstantFoldFP(fabs, V, Ty);
        case Intrinsic::log2:
          return ConstantFoldFP(log2, V, Ty);
        case Intrinsic::log:
          return ConstantFoldFP(log, V, Ty);
        case Intrinsic::log10:
          return ConstantFoldFP(log10, V, Ty);
        case Intrinsic::exp:
          return ConstantFoldFP(exp, V, Ty);
        case Intrinsic::exp2:
          return ConstantFoldFP(exp2, V, Ty);
        case Intrinsic::floor:
          return ConstantFoldFP(floor, V, Ty);
        case Intrinsic::ceil:
          return ConstantFoldFP(ceil, V, Ty);
      }

      if (!TLI)
        return nullptr;

      switch (Name[0]) {
      case 'a':
        if (Name == "acos" && TLI->has(LibFunc::acos))
          return ConstantFoldFP(acos, V, Ty);
        else if (Name == "asin" && TLI->has(LibFunc::asin))
          return ConstantFoldFP(asin, V, Ty);
        else if (Name == "atan" && TLI->has(LibFunc::atan))
          return ConstantFoldFP(atan, V, Ty);
        break;
      case 'c':
        if (Name == "ceil" && TLI->has(LibFunc::ceil))
          return ConstantFoldFP(ceil, V, Ty);
        else if (Name == "cos" && TLI->has(LibFunc::cos))
          return ConstantFoldFP(cos, V, Ty);
        else if (Name == "cosh" && TLI->has(LibFunc::cosh))
          return ConstantFoldFP(cosh, V, Ty);
        else if (Name == "cosf" && TLI->has(LibFunc::cosf))
          return ConstantFoldFP(cos, V, Ty);
        break;
      case 'e':
        if (Name == "exp" && TLI->has(LibFunc::exp))
          return ConstantFoldFP(exp, V, Ty);

        if (Name == "exp2" && TLI->has(LibFunc::exp2)) {
          // Constant fold exp2(x) as pow(2,x) in case the host doesn't have a
          // C99 library.
          return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
        }
        break;
      case 'f':
        if (Name == "fabs" && TLI->has(LibFunc::fabs))
          return ConstantFoldFP(fabs, V, Ty);
        else if (Name == "floor" && TLI->has(LibFunc::floor))
          return ConstantFoldFP(floor, V, Ty);
        break;
      case 'l':
        if (Name == "log" && V > 0 && TLI->has(LibFunc::log))
          return ConstantFoldFP(log, V, Ty);
        else if (Name == "log10" && V > 0 && TLI->has(LibFunc::log10))
          return ConstantFoldFP(log10, V, Ty);
        else if (IntrinsicID == Intrinsic::sqrt &&
                 (Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())) {
          if (V >= -0.0)
            return ConstantFoldFP(sqrt, V, Ty);
          else {
            // Unlike the sqrt definitions in C/C++, POSIX, and IEEE-754 - which
            // all guarantee or favor returning NaN - the square root of a
            // negative number is not defined for the LLVM sqrt intrinsic.
            // This is because the intrinsic should only be emitted in place of
            // libm's sqrt function when using "no-nans-fp-math".
            return UndefValue::get(Ty);
          }
        }
        break;
      case 's':
        if (Name == "sin" && TLI->has(LibFunc::sin))
          return ConstantFoldFP(sin, V, Ty);
        else if (Name == "sinh" && TLI->has(LibFunc::sinh))
          return ConstantFoldFP(sinh, V, Ty);
        else if (Name == "sqrt" && V >= 0 && TLI->has(LibFunc::sqrt))
          return ConstantFoldFP(sqrt, V, Ty);
        else if (Name == "sqrtf" && V >= 0 && TLI->has(LibFunc::sqrtf))
          return ConstantFoldFP(sqrt, V, Ty);
        else if (Name == "sinf" && TLI->has(LibFunc::sinf))
          return ConstantFoldFP(sin, V, Ty);
        break;
      case 't':
        if (Name == "tan" && TLI->has(LibFunc::tan))
          return ConstantFoldFP(tan, V, Ty);
        else if (Name == "tanh" && TLI->has(LibFunc::tanh))
          return ConstantFoldFP(tanh, V, Ty);
        break;
      default:
        break;
      }
      return nullptr;
    }
    if (ConstantInt *Op = dyn_cast<ConstantInt>(Operands[0])) {
      switch (IntrinsicID) {
      case Intrinsic::bswap:
        return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
      case Intrinsic::ctpop:
        return ConstantInt::get(Ty, Op->getValue().countPopulation());
      case Intrinsic::convert_from_fp16: {
        APFloat Val(APFloat::IEEEhalf, Op->getValue());

        bool lost = false;
        APFloat::opStatus status =
          Val.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &lost);

        // Conversion is always precise.
        (void)status;
        assert(status == APFloat::opOK && !lost &&
               "Precision lost during fp16 constfolding");

        return ConstantFP::get(Ty->getContext(), Val);
      }
      default:
        return nullptr;
      }
    }

    // Support ConstantVector in case we have an Undef in the top.
    if (isa<ConstantVector>(Operands[0]) ||
        isa<ConstantDataVector>(Operands[0])) {
      Constant *Op = cast<Constant>(Operands[0]);
      switch (IntrinsicID) {
      default: break;
      case Intrinsic::x86_sse_cvtss2si:
      case Intrinsic::x86_sse_cvtss2si64:
      case Intrinsic::x86_sse2_cvtsd2si:
      case Intrinsic::x86_sse2_cvtsd2si64:
        if (ConstantFP *FPOp =
                dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldConvertToInt(FPOp->getValueAPF(),
                                          /*roundTowardZero=*/false, Ty);
      case Intrinsic::x86_sse_cvttss2si:
      case Intrinsic::x86_sse_cvttss2si64:
      case Intrinsic::x86_sse2_cvttsd2si:
      case Intrinsic::x86_sse2_cvttsd2si64:
        if (ConstantFP *FPOp =
                dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldConvertToInt(FPOp->getValueAPF(),
                                          /*roundTowardZero=*/true, Ty);
      }
    }

    if (isa<UndefValue>(Operands[0])) {
      if (IntrinsicID == Intrinsic::bswap)
        return Operands[0];
      return nullptr;
    }

    return nullptr;
  }
  if (Operands.size() == 2) {
    if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
        return nullptr;
      double Op1V = getValueAsDouble(Op1);

      if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
        if (Op2->getType() != Op1->getType())
          return nullptr;

        double Op2V = getValueAsDouble(Op2);
        if (IntrinsicID == Intrinsic::pow) {
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        }
        if (IntrinsicID == Intrinsic::copysign) {
          APFloat V1 = Op1->getValueAPF();
          APFloat V2 = Op2->getValueAPF();
          V1.copySign(V2);
          return ConstantFP::get(Ty->getContext(), V1);
        }

        if (IntrinsicID == Intrinsic::minnum) {
          const APFloat &C1 = Op1->getValueAPF();
          const APFloat &C2 = Op2->getValueAPF();
          return ConstantFP::get(Ty->getContext(), minnum(C1, C2));
        }

        if (IntrinsicID == Intrinsic::maxnum) {
          const APFloat &C1 = Op1->getValueAPF();
          const APFloat &C2 = Op2->getValueAPF();
          return ConstantFP::get(Ty->getContext(), maxnum(C1, C2));
        }

        if (!TLI)
          return nullptr;
        if (Name == "pow" && TLI->has(LibFunc::pow))
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        if (Name == "fmod" && TLI->has(LibFunc::fmod))
          return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty);
        if (Name == "atan2" && TLI->has(LibFunc::atan2))
          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
      } else if (ConstantInt *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
        if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
          return ConstantFP::get(Ty->getContext(),
                                 APFloat((float)std::pow((float)Op1V,
                                                 (int)Op2C->getZExtValue())));
        if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
          return ConstantFP::get(Ty->getContext(),
                                 APFloat((float)std::pow((float)Op1V,
                                                 (int)Op2C->getZExtValue())));
        if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
          return ConstantFP::get(Ty->getContext(),
                                 APFloat((double)std::pow((double)Op1V,
                                                  (int)Op2C->getZExtValue())));
      }
      return nullptr;
    }
    if (ConstantInt *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
      if (ConstantInt *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::sadd_with_overflow:
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::ssub_with_overflow:
        case Intrinsic::usub_with_overflow:
        case Intrinsic::smul_with_overflow:
        case Intrinsic::umul_with_overflow: {
          APInt Res;
          bool Overflow;
          switch (IntrinsicID) {
          default: llvm_unreachable("Invalid case");
          case Intrinsic::sadd_with_overflow:
            Res = Op1->getValue().sadd_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::uadd_with_overflow:
            Res = Op1->getValue().uadd_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::ssub_with_overflow:
            Res = Op1->getValue().ssub_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::usub_with_overflow:
            Res = Op1->getValue().usub_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::smul_with_overflow:
            Res = Op1->getValue().smul_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::umul_with_overflow:
            Res = Op1->getValue().umul_ov(Op2->getValue(), Overflow);
            break;
          }
          Constant *Ops[] = {
            ConstantInt::get(Ty->getContext(), Res),
            ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
          };
          return ConstantStruct::get(cast<StructType>(Ty), Ops);
        }
        case Intrinsic::cttz:
          if (Op2->isOne() && Op1->isZero()) // cttz(0, 1) is undef.
            return UndefValue::get(Ty);
          return ConstantInt::get(Ty, Op1->getValue().countTrailingZeros());
        case Intrinsic::ctlz:
          if (Op2->isOne() && Op1->isZero()) // ctlz(0, 1) is undef.
            return UndefValue::get(Ty);
          return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
        }
      }

      return nullptr;
    }
    return nullptr;
  }
  if (Operands.size() != 3)
    return nullptr;

  if (const ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
    if (const ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
      if (const ConstantFP *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
        switch (IntrinsicID) {
        default: break;
        case Intrinsic::fma:
        case Intrinsic::fmuladd: {
          APFloat V = Op1->getValueAPF();
          APFloat::opStatus s = V.fusedMultiplyAdd(Op2->getValueAPF(),
                                                   Op3->getValueAPF(),
                                                   APFloat::rmNearestTiesToEven);
          if (s != APFloat::opInvalidOp)
            return ConstantFP::get(Ty->getContext(), V);
          break;
        }
        }
      }
    }
  }

  return nullptr;
}
static Constant *ConstantFoldVectorCall(StringRef Name, unsigned IntrinsicID,
                                        VectorType *VTy,
                                        ArrayRef<Constant *> Operands,
                                        const TargetLibraryInfo *TLI) {
  SmallVector<Constant *, 4> Result(VTy->getNumElements());
  SmallVector<Constant *, 4> Lane(Operands.size());
  Type *Ty = VTy->getElementType();

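  // Fold the call lane by lane: for every result element, gather the matching
  // element from each operand and fold it as a scalar call.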
  for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
    // Gather a column of constants.
    for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
      Constant *Agg = Operands[J]->getAggregateElement(I);
      if (!Agg)
        return nullptr;

      Lane[J] = Agg;
    }

    // Use the regular scalar folding to simplify this column.
    Constant *Folded = ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI);
    if (!Folded)
      return nullptr;
    Result[I] = Folded;
  }

  return ConstantVector::get(Result);
}
/// Attempt to constant fold a call to the specified function
/// with the specified arguments, returning null if unsuccessful.
Constant *
llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
                       const TargetLibraryInfo *TLI) {
  if (!F->hasName())
    return nullptr;
  StringRef Name = F->getName();

  Type *Ty = F->getReturnType();

  if (VectorType *VTy = dyn_cast<VectorType>(Ty))
    return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands, TLI);

  return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI);
}