//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//
#include "InstCombine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"
STATISTIC(NumSimplified, "Number of library calls simplified");
/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
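/// For example, i1, i8, and i16 all promote to i32, while i32, i64, and
/// non-integer types are returned unchanged.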
static Type *getPromotedType(Type *Ty) {
  if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}
/// reduceToSingleValueType - Given an aggregate type which ultimately holds a
/// single scalar element, like {{{type}}} or [1 x type], return type.
static Type *reduceToSingleValueType(Type *T) {
  while (!T->isSingleValueType()) {
    if (StructType *STy = dyn_cast<StructType>(T)) {
      if (STy->getNumElements() == 1)
        T = STy->getElementType(0);
      else
        break;
    } else if (ArrayType *ATy = dyn_cast<ArrayType>(T)) {
      if (ATy->getNumElements() == 1)
        T = ATy->getElementType();
      else
        break;
    } else
      break;
  }

  return T;
}
Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL);
  unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL);
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }
  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // a load/store pair.
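  // For example (illustrative IR; value names are hypothetical), a 4-byte
  // copy with known 4-byte alignment:
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 4, i32 4, i1 false)
  // becomes, after bitcasting both pointers to i32*:
  //   %v = load i32* %src.cast, align 4
  //   store i32 %v, i32* %dst.cast, align 4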
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (!MemOpLength) return nullptr;

  // Source and destination pointer types are always "i8*" for intrinsic.  See
  // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
  uint64_t Size = MemOpLength->getLimitedValue();
  assert(Size && "0-sized memory transferring should be removed already.");

  if (Size > 8 || (Size&(Size-1)))
    return nullptr;  // If not 1/2/4/8 bytes, exit.
  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);
  // Memcpy forces the use of i8* for the source and destination.  That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*.  We'd much rather use a double load+store here,
  // because this improves the odds that the source or dest address will be
  // promotable.  See if we can find a better type than the integer datatype.
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  MDNode *CopyMD = nullptr;
  if (StrippedDest != MI->getArgOperand(0)) {
    Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                                    ->getElementType();
    if (DL && SrcETy->isSized() && DL->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double].  Rip
      // down through these levels if so.
      SrcETy = reduceToSingleValueType(SrcETy);

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);

        // If the memcpy has metadata describing the members, see if we can
        // get the TBAA tag describing our copy.
        if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
          if (M->getNumOperands() == 3 &&
              M->getOperand(0) &&
              isa<ConstantInt>(M->getOperand(0)) &&
              cast<ConstantInt>(M->getOperand(0))->isNullValue() &&
              M->getOperand(1) &&
              isa<ConstantInt>(M->getOperand(1)) &&
              cast<ConstantInt>(M->getOperand(1))->getValue() == Size &&
              M->getOperand(2) &&
              isa<MDNode>(M->getOperand(2)))
            CopyMD = cast<MDNode>(M->getOperand(2));
        }
      }
    }
  }
  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile());
  L->setAlignment(SrcAlign);
  if (CopyMD)
    L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  StoreInst *S = Builder->CreateStore(L, Dest, MI->isVolatile());
  S->setAlignment(DstAlign);
  if (CopyMD)
    S->setMetadata(LLVMContext::MD_tbaa, CopyMD);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = getKnownAlignment(MI->getDest(), DL);
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return nullptr;
  uint64_t Len = LenC->getLimitedValue();
  Alignment = MI->getAlignment();
  assert(Len && "0-sized memory setting should be removed already.");

  // memset(s,c,n) -> store s, c  (for n=1,2,4,8)
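  // For example (illustrative IR), memset(%p, 0xAB, 4) with 4-byte alignment
  // becomes:
  //   %cast = bitcast i8* %p to i32*
  //   store i32 -1414812757, i32* %cast, align 4    ; 0xABABABAB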
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder->CreateBitCast(Dest, NewDstPtrTy);

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
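    // The multiply above splats the fill byte across all eight byte lanes of
    // a u64; ConstantInt::get below truncates that pattern to the store
    // width, so e.g. a fill byte of 0xAB yields the i32 pattern 0xABABABAB.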
    StoreInst *S = Builder->CreateStore(ConstantInt::get(ITy, Fill), Dest,
                                        MI->isVolatile());
    S->setAlignment(Alignment);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return nullptr;
}
/// visitCallInst - CallInst simplification.  This mostly only handles folding
/// of intrinsic instructions.  For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI, TLI))
    return visitFree(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);
  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations.  We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // No other transformations apply to volatile transfers.
    if (MI->isVolatile())
      return nullptr;
    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                           CI.getArgOperand(1)->getType(),
                           CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }
  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    uint64_t Size;
    if (getObjectSize(II->getArgOperand(0), Size, DL, TLI))
      return ReplaceInstUsesWith(CI, ConstantInt::get(CI.getType(), Size));
    return nullptr;
  }
  case Intrinsic::bswap: {
    Value *IIOperand = II->getArgOperand(0);
    Value *X = nullptr;

    // bswap(bswap(x)) -> x
    if (match(IIOperand, m_BSwap(m_Value(X))))
      return ReplaceInstUsesWith(CI, X);

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
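    // For example, with X of type i64 truncated to i32, the inner bswap moves
    // the four interesting bytes to the top of X, so the pair of bswaps is
    // equivalent to trunc(lshr(X, 32)).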
    if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
      unsigned C = X->getType()->getPrimitiveSizeInBits() -
        IIOperand->getType()->getPrimitiveSizeInBits();
      Value *CV = ConstantInt::get(X->getType(), C);
      Value *V = Builder->CreateLShr(X, CV);
      return new TruncInst(V, IIOperand->getType());
    }
    break;
  }
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
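    // For example, an operand known to be an odd multiple of 8 has bit 3
    // known one and bits 0-2 known zero, so cttz folds to the constant 3.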
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT) break;
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    computeKnownBits(II->getArgOperand(0), KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                 APInt(BitWidth, TrailingZeros)));
    break;
  }
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT) break;
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    computeKnownBits(II->getArgOperand(0), KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                 APInt(BitWidth, LeadingZeros)));
    break;
  }
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    computeKnownBits(LHS, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      computeKnownBits(RHS, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
        // Create a simple add instruction, and insert it into the struct.
        Value *Add = Builder->CreateAdd(LHS, RHS);
        Add->takeName(&CI);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getTrue(II->getContext())
        };
        StructType *ST = cast<StructType>(II->getType());
        Constant *Struct = ConstantStruct::get(ST, V);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Value *Add = Builder->CreateNUWAdd(LHS, RHS);
        Add->takeName(&CI);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        StructType *ST = cast<StructType>(II->getType());
        Constant *Struct = ConstantStruct::get(ST, V);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  }
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    // We can strength reduce this signed add into a regular add if we
    // can prove that it will never overflow.
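    // For example, if both operands are sign-extended from a narrower type,
    // their sum always fits in the wider type, so the overflow bit is false.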
    if (II->getIntrinsicID() == Intrinsic::sadd_with_overflow) {
      Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
      if (WillNotOverflowSignedAdd(LHS, RHS)) {
        Value *Add = Builder->CreateNSWAdd(LHS, RHS);
        Add->takeName(&CI);
        Constant *V[] = {UndefValue::get(Add->getType()), Builder->getFalse()};
        StructType *ST = cast<StructType>(II->getType());
        Constant *Struct = ConstantStruct::get(ST, V);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }

    break;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getArgOperand(0)) ||
        isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::umul_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    unsigned BitWidth = cast<IntegerType>(LHS->getType())->getBitWidth();

    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    computeKnownBits(LHS, LHSKnownZero, LHSKnownOne);
    APInt RHSKnownZero(BitWidth, 0);
    APInt RHSKnownOne(BitWidth, 0);
    computeKnownBits(RHS, RHSKnownZero, RHSKnownOne);

    // Get the largest possible values for each operand.
    APInt LHSMax = ~LHSKnownZero;
    APInt RHSMax = ~RHSKnownZero;

    // If multiplying the maximum values does not overflow then we can turn
    // this into a plain NUW mul.
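    // For example, if both i32 operands have their upper 16 bits known zero,
    // each maximum is at most 2^16 - 1, so the product stays below 2^32 and
    // the multiply can never wrap.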
    bool Overflow;
    LHSMax.umul_ov(RHSMax, Overflow);
    if (!Overflow) {
      Value *Mul = Builder->CreateNUWMul(LHS, RHS, "umul_with_overflow");
      Constant *V[] = {
        UndefValue::get(LHS->getType()),
        Builder->getFalse()
      };
      Constant *Struct = ConstantStruct::get(cast<StructType>(II->getType()),V);
      return InsertValueInst::Create(Struct, Mul, 0);
    }
    break;
  }
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X * 0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
    // Turn PPC lvx -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL) >= 16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL) >= 16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
      return new StoreInst(II->getArgOperand(1), Ptr);
    }
    break;
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64: {
    // These intrinsics only demand the 0th element of their input vectors. If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                              DemandedElts, UndefElts)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }
  // Constant fold <A x Bi> << Ci.
  // FIXME: We don't handle _dq because it's a shift of an i128, but is
  // represented in the IR as <2 x i64>. A per element shift is wrong.
  case Intrinsic::x86_sse2_psll_d:
  case Intrinsic::x86_sse2_psll_q:
  case Intrinsic::x86_sse2_psll_w:
  case Intrinsic::x86_sse2_pslli_d:
  case Intrinsic::x86_sse2_pslli_q:
  case Intrinsic::x86_sse2_pslli_w:
  case Intrinsic::x86_avx2_psll_d:
  case Intrinsic::x86_avx2_psll_q:
  case Intrinsic::x86_avx2_psll_w:
  case Intrinsic::x86_avx2_pslli_d:
  case Intrinsic::x86_avx2_pslli_q:
  case Intrinsic::x86_avx2_pslli_w:
  case Intrinsic::x86_sse2_psrl_d:
  case Intrinsic::x86_sse2_psrl_q:
  case Intrinsic::x86_sse2_psrl_w:
  case Intrinsic::x86_sse2_psrli_d:
  case Intrinsic::x86_sse2_psrli_q:
  case Intrinsic::x86_sse2_psrli_w:
  case Intrinsic::x86_avx2_psrl_d:
  case Intrinsic::x86_avx2_psrl_q:
  case Intrinsic::x86_avx2_psrl_w:
  case Intrinsic::x86_avx2_psrli_d:
  case Intrinsic::x86_avx2_psrli_q:
  case Intrinsic::x86_avx2_psrli_w: {
    // Simplify if count is constant. To 0 if >= BitWidth,
    // otherwise to shl/lshr.
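    // For example, pslli.d(%v, 3) on <4 x i32> becomes
    //   shl <4 x i32> %v, <i32 3, i32 3, i32 3, i32 3>
    // while any count of 32 or more folds the result to zeroinitializer.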
    auto CDV = dyn_cast<ConstantDataVector>(II->getArgOperand(1));
    auto CInt = dyn_cast<ConstantInt>(II->getArgOperand(1));
    if (!CDV && !CInt)
      break;
    ConstantInt *Count;
    if (CDV)
      Count = cast<ConstantInt>(CDV->getElementAsConstant(0));
    else
      Count = CInt;

    auto Vec = II->getArgOperand(0);
    auto VT = cast<VectorType>(Vec->getType());
    if (Count->getZExtValue() >
        VT->getElementType()->getPrimitiveSizeInBits() - 1)
      return ReplaceInstUsesWith(
          CI, ConstantAggregateZero::get(Vec->getType()));
    bool isPackedShiftLeft = true;
    switch (II->getIntrinsicID()) {
    default : break;
    case Intrinsic::x86_sse2_psrl_d:
    case Intrinsic::x86_sse2_psrl_q:
    case Intrinsic::x86_sse2_psrl_w:
    case Intrinsic::x86_sse2_psrli_d:
    case Intrinsic::x86_sse2_psrli_q:
    case Intrinsic::x86_sse2_psrli_w:
    case Intrinsic::x86_avx2_psrl_d:
    case Intrinsic::x86_avx2_psrl_q:
    case Intrinsic::x86_avx2_psrl_w:
    case Intrinsic::x86_avx2_psrli_d:
    case Intrinsic::x86_avx2_psrli_q:
    case Intrinsic::x86_avx2_psrli_w: isPackedShiftLeft = false; break;
    }
    unsigned VWidth = VT->getNumElements();
    // Get a constant vector of the same type as the first operand.
    auto VTCI = ConstantInt::get(VT->getElementType(), Count->getZExtValue());
    if (isPackedShiftLeft)
      return BinaryOperator::CreateShl(Vec,
          Builder->CreateVectorSplat(VWidth, VTCI));

    return BinaryOperator::CreateLShr(Vec,
        Builder->CreateVectorSplat(VWidth, VTCI));
  }
  case Intrinsic::x86_sse41_pmovsxbw:
  case Intrinsic::x86_sse41_pmovsxwd:
  case Intrinsic::x86_sse41_pmovsxdq:
  case Intrinsic::x86_sse41_pmovzxbw:
  case Intrinsic::x86_sse41_pmovzxwd:
  case Intrinsic::x86_sse41_pmovzxdq: {
    // pmov{s|z}x intrinsics ignore the upper half of their input vectors.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    unsigned LowHalfElts = VWidth / 2;
    APInt InputDemandedElts(APInt::getBitsSet(VWidth, 0, LowHalfElts));
    APInt UndefElts(VWidth, 0);
    if (Value *TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                                 InputDemandedElts,
                                                 UndefElts)) {
      II->setArgOperand(0, TmpV);
      return II;
    }
    break;
  }
  case Intrinsic::x86_sse4a_insertqi: {
    // insertqi x, y, 64, 0 can just copy y's lower bits and leave the top
    // ones undef.
    // TODO: eventually we should lower this intrinsic to IR
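    // Concretely, the shuffle built below is
    //   shufflevector <2 x i64> %y, <2 x i64> undef, <2 x i32> <i32 0, i32 2>
    // i.e. y's low quadword in element 0 and an undef element 1.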
    if (auto CIWidth = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
      if (auto CIStart = dyn_cast<ConstantInt>(II->getArgOperand(3))) {
        if (CIWidth->equalsInt(64) && CIStart->isZero()) {
          Value *Vec = II->getArgOperand(1);
          Value *Undef = UndefValue::get(Vec->getType());
          const uint32_t Mask[] = { 0, 2 };
          return ReplaceInstUsesWith(
              CI,
              Builder->CreateShuffleVector(
                  Vec, Undef, ConstantDataVector::get(
                                  II->getContext(), ArrayRef<uint32_t>(Mask))));

        } else if (auto Source =
                       dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
          if (Source->hasOneUse() &&
              Source->getArgOperand(1) == II->getArgOperand(1)) {
            // If the source of the insert has only one use and it's another
            // insert (and they're both inserting from the same vector), try to
            // bundle both together.
            auto CISourceWidth =
                dyn_cast<ConstantInt>(Source->getArgOperand(2));
            auto CISourceStart =
                dyn_cast<ConstantInt>(Source->getArgOperand(3));
            if (CISourceStart && CISourceWidth) {
              unsigned Start = CIStart->getZExtValue();
              unsigned Width = CIWidth->getZExtValue();
              unsigned End = Start + Width;
              unsigned SourceStart = CISourceStart->getZExtValue();
              unsigned SourceWidth = CISourceWidth->getZExtValue();
              unsigned SourceEnd = SourceStart + SourceWidth;
              unsigned NewStart, NewWidth;
              bool ShouldReplace = false;
              if (Start <= SourceStart && SourceStart <= End) {
                NewStart = Start;
                NewWidth = std::max(End, SourceEnd) - NewStart;
                ShouldReplace = true;
              } else if (SourceStart <= Start && Start <= SourceEnd) {
                NewStart = SourceStart;
                NewWidth = std::max(SourceEnd, End) - NewStart;
                ShouldReplace = true;
              }

              if (ShouldReplace) {
                Constant *ConstantWidth = ConstantInt::get(
                    II->getArgOperand(2)->getType(), NewWidth, false);
                Constant *ConstantStart = ConstantInt::get(
                    II->getArgOperand(3)->getType(), NewStart, false);
                Value *Args[4] = { Source->getArgOperand(0),
                                   II->getArgOperand(1), ConstantWidth,
                                   ConstantStart };
                Module *M = CI.getParent()->getParent()->getParent();
                Value *F =
                    Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_insertqi);
                return ReplaceInstUsesWith(CI, Builder->CreateCall(F, Args));
              }
            }
          }
        }
      }
    }
    break;
  }
  case Intrinsic::x86_sse41_pblendvb:
  case Intrinsic::x86_sse41_blendvps:
  case Intrinsic::x86_sse41_blendvpd:
  case Intrinsic::x86_avx_blendv_ps_256:
  case Intrinsic::x86_avx_blendv_pd_256:
  case Intrinsic::x86_avx2_pblendvb: {
    // Convert blendv* to vector selects if the mask is constant.
    // This optimization is convoluted because the intrinsic is defined as
    // getting a vector of floats or doubles for the ps and pd versions.
    // FIXME: That should be changed.
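    // Only the sign bit of each mask element matters: a lane whose mask
    // element has its top bit set takes operand 1, otherwise operand 0.
    // E.g. a pblendvb mask starting <0x80, 0x00, 0x80, ...> yields the
    // select condition <i1 true, i1 false, i1 true, ...>.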
    Value *Mask = II->getArgOperand(2);
    if (auto C = dyn_cast<ConstantDataVector>(Mask)) {
      auto Tyi1 = Builder->getInt1Ty();
      auto SelectorType = cast<VectorType>(Mask->getType());
      auto EltTy = SelectorType->getElementType();
      unsigned Size = SelectorType->getNumElements();
      unsigned BitWidth =
          EltTy->isFloatTy()
              ? 32
              : (EltTy->isDoubleTy() ? 64 : EltTy->getIntegerBitWidth());
      assert((BitWidth == 64 || BitWidth == 32 || BitWidth == 8) &&
             "Wrong arguments for variable blend intrinsic");
      SmallVector<Constant *, 32> Selectors;
      for (unsigned I = 0; I < Size; ++I) {
        // The intrinsics only read the top bit
        uint64_t Selector;
        if (BitWidth == 8)
          Selector = C->getElementAsInteger(I);
        else
          Selector = C->getElementAsAPFloat(I).bitcastToAPInt().getZExtValue();
        Selectors.push_back(ConstantInt::get(Tyi1, Selector >> (BitWidth - 1)));
      }
      auto NewSelector = ConstantVector::get(Selectors);
      return SelectInst::Create(NewSelector, II->getArgOperand(1),
                                II->getArgOperand(0), "blendv");
    }

    break;
  }
  case Intrinsic::x86_avx_vpermilvar_ps:
  case Intrinsic::x86_avx_vpermilvar_ps_256:
  case Intrinsic::x86_avx_vpermilvar_pd:
  case Intrinsic::x86_avx_vpermilvar_pd_256: {
    // Convert vpermil* to shufflevector if the mask is constant.
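    // For example (illustrative), vpermilvar.ps(%v, <i32 2, i32 3, i32 0,
    // i32 1>) becomes
    //   shufflevector <4 x float> %v, <4 x float> undef,
    //                 <4 x i32> <i32 2, i32 3, i32 0, i32 1>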
    Value *V = II->getArgOperand(1);
    unsigned Size = cast<VectorType>(V->getType())->getNumElements();
    assert(Size == 8 || Size == 4 || Size == 2);
    uint32_t Indexes[8];
    if (auto C = dyn_cast<ConstantDataVector>(V)) {
      // The intrinsics only read one or two bits, clear the rest.
      for (unsigned I = 0; I < Size; ++I) {
        uint32_t Index = C->getElementAsInteger(I) & 0x3;
        if (II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd ||
            II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd_256)
          Index >>= 1;
        Indexes[I] = Index;
      }
    } else if (isa<ConstantAggregateZero>(V)) {
      for (unsigned I = 0; I < Size; ++I)
        Indexes[I] = 0;
    } else {
      break;
    }
    // The _256 variants are a bit trickier since the mask bits always index
    // into the corresponding 128-bit half. In order to convert to a generic
    // shuffle, we have to make that explicit.
    if (II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_ps_256 ||
        II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd_256) {
      for (unsigned I = Size / 2; I < Size; ++I)
        Indexes[I] += Size / 2;
    }
    auto NewC =
        ConstantDataVector::get(V->getContext(), makeArrayRef(Indexes, Size));
    auto V1 = II->getArgOperand(0);
    auto V2 = UndefValue::get(V1->getType());
    auto Shuffle = Builder->CreateShuffleVector(V1, V2, NewC);
    return ReplaceInstUsesWith(CI, Shuffle);
  }
  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    // Note that ppc_altivec_vperm has a big-endian bias, so when creating
    // a vectorshuffle for little endian, we must undo the transformation
    // performed on vec_perm in altivec.h.  That is, we must complement
    // the permutation mask with respect to 31 and reverse the order of
    // V1 and V2.
    if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
      assert(Mask->getType()->getVectorNumElements() == 16 &&
             "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        Constant *Elt = Mask->getAggregateElement(i);
        if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
          AllEltsOk = false;
          break;
        }
      }
      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getAggregateElement(i)))
            continue;
          unsigned Idx =
            cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.
          if (DL && DL->isLittleEndian())
            Idx = 31 - Idx;

          if (!ExtractedElts[Idx]) {
            Value *Op0ToUse = (DL && DL->isLittleEndian()) ? Op1 : Op0;
            Value *Op1ToUse = (DL && DL->isLittleEndian()) ? Op0 : Op1;
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
                                            Builder->getInt32(Idx&15));
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                                                Builder->getInt32(i));
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), DL);
    unsigned AlignArg = II->getNumArgOperands() - 1;
    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
      II->setArgOperand(AlignArg,
                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                         MemAlign, false));
      return II;
    }
    break;
  }
  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu:
  case Intrinsic::aarch64_neon_smull:
  case Intrinsic::aarch64_neon_umull: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // Handle mul by zero first:
    if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
      return ReplaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
    }

    // Check for constant LHS & RHS - in this case we just simplify.
    bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu ||
                 II->getIntrinsicID() == Intrinsic::aarch64_neon_umull);
    VectorType *NewVT = cast<VectorType>(II->getType());
    if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
      if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
        CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
        CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);

        return ReplaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
      }

      // Couldn't simplify - canonicalize constant to the RHS.
      std::swap(Arg0, Arg1);
    }

    // Handle mul by one:
    if (Constant *CV1 = dyn_cast<Constant>(Arg1))
      if (ConstantInt *Splat =
            dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
        if (Splat->isOne())
          return CastInst::CreateIntegerCast(Arg0, II->getType(),
                                             /*isSigned=*/!Zext);

    break;
  }
  case Intrinsic::AMDGPU_rcp: {
    if (const ConstantFP *C = dyn_cast<ConstantFP>(II->getArgOperand(0))) {
      const APFloat &ArgVal = C->getValueAPF();
      APFloat Val(ArgVal.getSemantics(), 1.0);
      APFloat::opStatus Status = Val.divide(ArgVal,
                                            APFloat::rmNearestTiesToEven);
      // Only do this if it was exact and therefore not dependent on the
      // rounding mode.
      if (Status == APFloat::opOK)
        return ReplaceInstUsesWith(CI, ConstantFP::get(II->getContext(), Val));
    }

    break;
  }
  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore.  This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return, resume, or unwind block and if there
    // are no allocas or calls between the restore and the return, nuke the
    // restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  case Intrinsic::assume: {
    // Canonicalize assume(a && b) -> assume(a); assume(b);
    Value *IIOperand = II->getArgOperand(0), *A, *B,
          *AssumeIntrinsic = II->getCalledValue();
    if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
      Builder->CreateCall(AssumeIntrinsic, A, II->getName());
      Builder->CreateCall(AssumeIntrinsic, B, II->getName());
      return EraseInstFromFunction(*II);
    }
    // assume(!(a || b)) -> assume(!a); assume(!b);
    if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
      Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(A), II->getName());
      Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(B), II->getName());
      return EraseInstFromFunction(*II);
    }
    break;
  }
  }

  return visitCallSite(II);
}
// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}
/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
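/// For example, a lossless bitcast of an i32* argument to i8* passed through
/// the varargs area can be dropped in favor of the original pointer, whereas
/// a byval argument whose pointee size would change cannot.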
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const DataLayout * const DL,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal or InAlloca arguments is derived from the type, so we
  // can't change to a type with a different size.  If the size were
  // passed explicitly we could avoid this check.
  if (!CS.isByValOrInAllocaArgument(ix))
    return true;

  Type* SrcTy =
            cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!DL || DL->getTypeAllocSize(SrcTy) != DL->getTypeAllocSize(DstTy))
    return false;
  return true;
}
// Try to fold some different types of calls here.
// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const DataLayout *DL) {
  if (!CI->getCalledFunction()) return nullptr;

  if (Value *With = Simplifier->optimizeCall(CI)) {
    ++NumSimplified;
    return CI->use_empty() ? CI : ReplaceInstUsesWith(*CI, With);
  }

  return nullptr;
}
static IntrinsicInst *FindInitTrampolineFromAlloca(Value *TrampMem) {
  // Strip off at most one level of pointer casts, looking for an alloca.  This
  // is good enough in practice and simpler than handling any number of casts.
  Value *Underlying = TrampMem->stripPointerCasts();
  if (Underlying != TrampMem &&
      (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
    return nullptr;
  if (!isa<AllocaInst>(Underlying))
    return nullptr;

  IntrinsicInst *InitTrampoline = nullptr;
  for (User *U : TrampMem->users()) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    if (!II)
      return nullptr;
    if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
      if (InitTrampoline)
        // More than one init_trampoline writes to this value.  Give up.
        return nullptr;
      InitTrampoline = II;
      continue;
    }
    if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
      // Allow any number of calls to adjust.trampoline.
      continue;
    return nullptr;
  }

  // No call to init.trampoline found.
  if (!InitTrampoline)
    return nullptr;

  // Check that the alloca is being used in the expected way.
  if (InitTrampoline->getOperand(0) != TrampMem)
    return nullptr;

  return InitTrampoline;
}
static IntrinsicInst *FindInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
                                               Value *TrampMem) {
  // Visit all the previous instructions in the basic block, and try to find an
  // init.trampoline which has a direct path to the adjust.trampoline.
  for (BasicBlock::iterator I = AdjustTramp,
       E = AdjustTramp->getParent()->begin(); I != E;) {
    Instruction *Inst = --I;
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
      if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
          II->getOperand(0) == TrampMem)
        return II;
    if (Inst->mayWriteToMemory())
      return nullptr;
  }
  return nullptr;
}
// Given a call to llvm.adjust.trampoline, find and return the corresponding
// call to llvm.init.trampoline if the call to the trampoline can be optimized
// to a direct call to a function.  Otherwise return NULL.
//
static IntrinsicInst *FindInitTrampoline(Value *Callee) {
  Callee = Callee->stripPointerCasts();
  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
  if (!AdjustTramp ||
      AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
    return nullptr;

  Value *TrampMem = AdjustTramp->getOperand(0);

  if (IntrinsicInst *IT = FindInitTrampolineFromAlloca(TrampMem))
    return IT;
  if (IntrinsicInst *IT = FindInitTrampolineFromBB(AdjustTramp, TrampMem))
    return IT;
  return nullptr;
}
// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  if (isAllocLikeFn(CS.getInstruction(), TLI))
    return visitAllocSite(*CS.getInstruction());

  bool Changed = false;

  // If the callee is a pointer to a function, attempt to move any casts to the
  // arguments of the call/invoke.
  Value *Callee = CS.getCalledValue();
  if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
    return nullptr;
  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body.  A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                                  OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        ReplaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG, just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setCalledFunction(
                                    Constant::getNullValue(CalleeF->getType()));
      return nullptr;
    }
  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      ReplaceInstUsesWith(*CS.getInstruction(),
                          UndefValue::get(CS.getInstruction()->getType()));

    if (isa<InvokeInst>(CS.getInstruction())) {
      // Can't remove an invoke because we cannot change the CFG.
      return nullptr;
    }

    // This instruction is not reachable, just remove it.  We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                  UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    return EraseInstFromFunction(*CS.getInstruction());
  }
  if (IntrinsicInst *II = FindInitTrampoline(Callee))
    return transformCallThroughTrampoline(CS, II);

  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams();
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin() + FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, DL, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }
  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible, we require DataLayout for most of
  // this.  None of these calls are seen as possibly dead so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, DL);
    // If we changed something, return the result; otherwise fall through
    // to the final check below.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : nullptr;
}
// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  Function *Callee =
    dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
  if (!Callee)
    return false;
  Instruction *Caller = CS.getInstruction();
  const AttributeSet &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type.  Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments casted to the appropriate types.
  //
  FunctionType *FT = Callee->getFunctionType();
  Type *OldRetTy = Caller->getType();
  Type *NewRetTy = FT->getReturnType();
  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {

    if (NewRetTy->isStructTy())
      return false; // TODO: Handle multiple return values.

    if (!CastInst::isBitCastable(NewRetTy, OldRetTy)) {
      if (Callee->isDeclaration())
        return false;   // Cannot transform this return value.

      if (!Caller->use_empty() &&
          // void -> non-void is handled specially
          !NewRetTy->isVoidTy())
        return false;   // Cannot transform this return value.
    }

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);
      if (RAttrs.
          hasAttributes(AttributeFuncs::
                        typeIncompatible(NewRetTy, AttributeSet::ReturnIndex),
                        AttributeSet::ReturnIndex))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used by
    // a PHI node in a successor, we cannot change the return type of the call
    // because there is no place to put the cast instruction (without breaking
    // the critical edge).  Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (User *U : II->users())
          if (PHINode *PN = dyn_cast<PHINode>(U))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }
  unsigned NumActualArgs = CS.arg_size();
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    Type *ActTy = (*AI)->getType();

    if (!CastInst::isBitCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    if (AttrBuilder(CallerPAL.getParamAttributes(i + 1), i + 1).
          hasAttributes(AttributeFuncs::
                        typeIncompatible(ParamTy, i + 1), i + 1))
      return false;   // Attribute not compatible with transformed value.

    if (CS.isInAllocaArgument(i))
      return false;   // Cannot transform to and from inalloca.

    // If the parameter is passed as a byval argument, then we have to have a
    // sized type and the sized type has to have the same size as the old type.
    if (ParamTy != ActTy &&
        CallerPAL.getParamAttributes(i + 1).hasAttribute(i + 1,
                                                         Attribute::ByVal)) {
      PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
      if (!ParamPTy || !ParamPTy->getElementType()->isSized() || !DL)
        return false;

      Type *CurElTy = ActTy->getPointerElementType();
      if (DL->getTypeAllocSize(CurElTy) !=
          DL->getTypeAllocSize(ParamPTy->getElementType()))
        return false;
    }
  }
  if (Callee->isDeclaration()) {
    // Do not delete arguments unless we have a function body.
    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
      return false;

    // If the callee is just a declaration, don't change the varargsness of the
    // call.  We don't want to introduce a varargs call where one doesn't
    // already exist.
    PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
    if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
      return false;

    // If both the callee and the cast type are varargs, we still have to make
    // sure the number of fixed parameters are the same or we have the same
    // ABI issues as if we introduce a varargs call.
    if (FT->isVarArg() &&
        cast<FunctionType>(APTy->getElementType())->isVarArg() &&
        FT->getNumParams() !=
        cast<FunctionType>(APTy->getElementType())->getNumParams())
      return false;
  }
  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them.  Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      unsigned Index = CallerPAL.getSlotIndex(i - 1);
      if (Index <= FT->getNumParams())
        break;

      // Check if it has an attribute that's incompatible with varargs.
      AttributeSet PAttrs = CallerPAL.getSlotAttributes(i - 1);
      if (PAttrs.hasAttribute(Index, Attribute::StructRet))
        return false;
    }
  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary.
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeSet, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes.  Wipe out any problematic attributes.
  RAttrs.
    removeAttributes(AttributeFuncs::
                     typeIncompatible(NewRetTy, AttributeSet::ReturnIndex),
                     AttributeSet::ReturnIndex);

  // Add the new return attributes.
  if (RAttrs.hasAttributes())
    attrVec.push_back(AttributeSet::get(Caller->getContext(),
                                        AttributeSet::ReturnIndex, RAttrs));
  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);

    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Args.push_back(Builder->CreateBitCast(*AI, ParamTy));
    }

    // Add any parameter attributes.
    AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
    if (PAttrs.hasAttributes())
      attrVec.push_back(AttributeSet::get(Caller->getContext(), i + 1,
                                          PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));
  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
    if (FT->isVarArg()) {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
        if (PAttrs.hasAttributes())
          attrVec.push_back(AttributeSet::get(FT->getContext(), i + 1,
                                              PAttrs));
      }
    }
  }
  AttributeSet FnAttrs = CallerPAL.getFnAttributes();
  if (CallerPAL.hasAttributes(AttributeSet::FunctionIndex))
    attrVec.push_back(AttributeSet::get(Callee->getContext(), FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttributeSet &NewCallerPAL = AttributeSet::get(Callee->getContext(),
                                                       attrVec);

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = Builder->CreateInvoke(Callee, II->getNormalDest(),
                               II->getUnwindDest(), Args);
    NC->takeName(II);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    CallInst *CI = cast<CallInst>(Caller);
    NC = Builder->CreateCall(Callee, Args);
    NC->takeName(CI);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }
  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      NV = NC = CastInst::Create(CastInst::BitCast, NC, OldRetTy);
      NC->setDebugLoc(Caller->getDebugLoc());

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call, just insert cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    ReplaceInstUsesWith(*Caller, NV);
  else if (Caller->hasValueHandle())
    ValueHandleBase::ValueIsRAUWd(Caller, NV);

  EraseInstFromFunction(*Caller);
  return true;
}
// transformCallThroughTrampoline - Turn a call to a function created by
// init_trampoline / adjust_trampoline intrinsic pair into a direct call to the
// underlying function.
//
Instruction *
InstCombiner::transformCallThroughTrampoline(CallSite CS,
                                             IntrinsicInst *Tramp) {
  Value *Callee = CS.getCalledValue();
  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttributeSet &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return nullptr;

  assert(Tramp &&
         "transformCallThroughTrampoline called with incorrect CallSite.");
  Function *NestF = cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttributeSet &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    Type *NestTy = nullptr;
    AttributeSet NestAttr;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.hasAttribute(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }
    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(CS.arg_size() + 1);

      SmallVector<AttributeSet, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it.  Likewise for attributes.

      // Add any result attributes.
      if (Attrs.hasAttributes(AttributeSet::ReturnIndex))
        NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
                                             Attrs.getRetAttributes()));
      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = Builder->CreateBitCast(NestVal, NestTy, "nest");
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
                                                 NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          AttributeSet Attr = Attrs.getParamAttributes(Idx);
          if (Attr.hasAttributes(Idx)) {
            AttrBuilder B(Attr, Idx);
            NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
                                                 Idx + (Idx >= NestIdx), B));
          }

          ++Idx;
          ++I;
        } while (1);
      }
      // Add any function attributes.
      if (Attrs.hasAttributes(AttributeSet::FunctionIndex))
        NewAttrs.push_back(AttributeSet::get(FTy->getContext(),
                                             Attrs.getFnAttributes()));
      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx;
          ++I;
        } while (1);
      }
      // Replace the trampoline call with a direct call.  Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttributeSet &NewPAL =
          AttributeSet::get(FTy->getContext(), NewAttrs);
      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }

      return NewCaller;
    }
  }
  // Replace the trampoline call with a direct call.  Since there is no 'nest'
  // parameter, there is no need to adjust the argument list.  Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}