//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//
14 #include "InstCombine.h"
15 #include "llvm/ADT/Statistic.h"
16 #include "llvm/Analysis/MemoryBuiltins.h"
17 #include "llvm/IR/CallSite.h"
18 #include "llvm/IR/DataLayout.h"
19 #include "llvm/IR/Dominators.h"
20 #include "llvm/IR/PatternMatch.h"
21 #include "llvm/IR/Statepoint.h"
22 #include "llvm/Transforms/Utils/BuildLibCalls.h"
23 #include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumSimplified, "Number of library calls simplified");

/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static Type *getPromotedType(Type *Ty) {
  if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}
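
// Illustrative note (not from the original source): this matches C's default
// argument promotions, e.g. an i8 or i16 value passed through a '...'
// parameter list is widened to i32 before it travels through the va_arg area.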

/// reduceToSingleValueType - Given an aggregate type which ultimately holds a
/// single scalar element, like {{{type}}} or [1 x type], return type.
static Type *reduceToSingleValueType(Type *T) {
  while (!T->isSingleValueType()) {
    if (StructType *STy = dyn_cast<StructType>(T)) {
      if (STy->getNumElements() == 1)
        T = STy->getElementType(0);
      else
        break;
    } else if (ArrayType *ATy = dyn_cast<ArrayType>(T)) {
      if (ATy->getNumElements() == 1)
        T = ATy->getElementType();
      else
        break;
    } else
      break;
  }

  return T;
}
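
// Illustrative example (hypothetical types): for { { { double } } } the loop
// peels one single-element level per iteration and returns 'double'; for
// [1 x float] it returns 'float'. A type like { double, double } is returned
// unchanged, because the loop breaks at the first non-singleton level.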

Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL, AT, MI, DT);
  unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL, AT, MI, DT);
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If the MemCpyInst length is 1/2/4/8 bytes then replace the memcpy with a
  // single load/store pair.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (!MemOpLength) return nullptr;

  // Source and destination pointer types are always "i8*" for the intrinsic.
  // See if the size is something we can handle with a single primitive
  // load/store. A single load+store correctly handles overlapping memory in
  // the memmove case.
  uint64_t Size = MemOpLength->getLimitedValue();
  assert(Size && "0-sized memory transferring should be removed already.");

  if (Size > 8 || (Size&(Size-1)))
    return nullptr;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // Memcpy forces the use of i8* for the source and destination. That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*. We'd much rather use a double load+store than an i64
  // load+store here, because this improves the odds that the source or dest
  // address will be promotable. See if we can find a better type than the
  // integer datatype.
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  MDNode *CopyMD = nullptr;
  if (StrippedDest != MI->getArgOperand(0)) {
    Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                                    ->getElementType();
    if (DL && SrcETy->isSized() && DL->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
      // down through these levels if so.
      SrcETy = reduceToSingleValueType(SrcETy);

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);

        // If the memcpy has metadata describing the members, see if we can
        // get the TBAA tag describing our copy.
        if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
          if (M->getNumOperands() == 3 && M->getOperand(0) &&
              mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
              mdconst::extract<ConstantInt>(M->getOperand(0))->isNullValue() &&
              M->getOperand(1) &&
              mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
              mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
                  Size &&
              M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
            CopyMD = cast<MDNode>(M->getOperand(2));
        }
      }
    }
  }

  // If the memcpy/memmove provides better alignment info than we can infer,
  // use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile());
  L->setAlignment(SrcAlign);
  if (CopyMD)
    L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  StoreInst *S = Builder->CreateStore(L, Dest, MI->isVolatile());
  S->setAlignment(DstAlign);
  if (CopyMD)
    S->setMetadata(LLVMContext::MD_tbaa, CopyMD);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}
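
// Illustrative IR sketch (hypothetical value names, not from the original
// source): an 8-byte aligned copy such as
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i32 8, i1 false)
// is rewritten to roughly
//   %sc = bitcast i8* %s to i64*
//   %dc = bitcast i8* %d to i64*
//   %v  = load i64* %sc, align 8
//   store i64 %v, i64* %dc, align 8
// with the memcpy's length then zeroed so the dead intrinsic gets erased.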

Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = getKnownAlignment(MI->getDest(), DL, AT, MI, DT);
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return nullptr;
  uint64_t Len = LenC->getLimitedValue();
  Alignment = MI->getAlignment();
  assert(Len && "0-sized memory setting should be removed already.");

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder->CreateBitCast(Dest, NewDstPtrTy);

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    StoreInst *S = Builder->CreateStore(ConstantInt::get(ITy, Fill), Dest,
                                        MI->isVolatile());
    S->setAlignment(Alignment);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return nullptr;
}
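
// Illustrative sketch (hypothetical names): a four-byte memset with constant
// fill value 0xAB becomes a single i32 store of 0xABABABAB through the
// destination pointer bitcast to i32*; the multiply by 0x0101010101010101
// above is what replicates the fill byte across the wider word.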

/// visitCallInst - CallInst simplification. This mostly only handles folding
/// of intrinsic instructions. For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI, TLI))
    return visitFree(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations. We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // No other transformations apply to volatile transfers.
    if (MI->isVolatile())
      return nullptr;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                           CI.getArgOperand(1)->getType(),
                           CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    uint64_t Size;
    if (getObjectSize(II->getArgOperand(0), Size, DL, TLI))
      return ReplaceInstUsesWith(CI, ConstantInt::get(CI.getType(), Size));
    return nullptr;
  }
  case Intrinsic::bswap: {
    Value *IIOperand = II->getArgOperand(0);
    Value *X = nullptr;

    // bswap(bswap(x)) -> x
    if (match(IIOperand, m_BSwap(m_Value(X))))
      return ReplaceInstUsesWith(CI, X);

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
      unsigned C = X->getType()->getPrimitiveSizeInBits() -
        IIOperand->getType()->getPrimitiveSizeInBits();
      Value *CV = ConstantInt::get(X->getType(), C);
      Value *V = Builder->CreateLShr(X, CV);
      return new TruncInst(V, IIOperand->getType());
    }
    break;
  }
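
  // Illustrative arithmetic for the fold above (hypothetical widths): for an
  // i32 %x truncated to i16, bswap16(trunc(bswap32(%x))) yields the top two
  // bytes of %x in their original order, which is exactly
  // trunc(lshr(%x, 32 - 16)).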

  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT) break;

    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    computeKnownBits(II->getArgOperand(0), KnownZero, KnownOne, 0, II);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));
    break;
  }
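
  // Worked instance (hypothetical known bits): if the operand is known to
  // match the pattern xx...x100 - bit 2 known one, bits 1 and 0 known zero -
  // then cttz is the constant 2 regardless of the unknown high bits.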

  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT) break;

    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    computeKnownBits(II->getArgOperand(0), KnownZero, KnownOne, 0, II);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));
    break;
  }

  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, 0, II);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, 0, II);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
        // Create a simple add instruction, and insert it into the struct.
        return CreateOverflowTuple(II, Builder->CreateAdd(LHS, RHS), true,
                                   /*ReUseName*/true);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        return CreateOverflowTuple(II, Builder->CreateNUWAdd(LHS, RHS), false);
      }
    }
  }
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        return CreateOverflowTuple(II, II->getArgOperand(0), false,
                                   /*ReUseName*/false);
      }
    }

    // We can strength-reduce this signed add into a regular add if we can
    // prove that it will never overflow.
    if (II->getIntrinsicID() == Intrinsic::sadd_with_overflow) {
      Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
      if (WillNotOverflowSignedAdd(LHS, RHS, II)) {
        return CreateOverflowTuple(II, Builder->CreateNSWAdd(LHS, RHS), false);
      }
    }
    break;
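
  // Sketch of the strength reduction above (hypothetical i8 operands): when
  // known bits prove neither %a nor %b can make the signed add wrap, the
  // intrinsic's {i8, i1} result becomes a plain 'add nsw i8 %a, %b' paired
  // with a constant-false overflow flag by CreateOverflowTuple.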

  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(RHS)) {
      // X - 0 -> {X, false}
      if (ConstRHS->isZero()) {
        return CreateOverflowTuple(II, LHS, false, /*ReUseName*/false);
      }
    }
    if (II->getIntrinsicID() == Intrinsic::ssub_with_overflow) {
      if (WillNotOverflowSignedSub(LHS, RHS, II)) {
        return CreateOverflowTuple(II, Builder->CreateNSWSub(LHS, RHS), false);
      }
    } else {
      if (WillNotOverflowUnsignedSub(LHS, RHS, II)) {
        return CreateOverflowTuple(II, Builder->CreateNUWSub(LHS, RHS), false);
      }
    }
    break;
  }
  case Intrinsic::umul_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    OverflowResult OR = computeOverflowForUnsignedMul(LHS, RHS, II);
    if (OR == OverflowResult::NeverOverflows) {
      return CreateOverflowTuple(II, Builder->CreateNUWMul(LHS, RHS), false);
    }
  } // FALL THROUGH
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X * 0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        return CreateOverflowTuple(II, II->getArgOperand(0), false,
                                   /*ReUseName*/false);
      }
    }
    if (II->getIntrinsicID() == Intrinsic::smul_with_overflow) {
      Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
      if (WillNotOverflowSignedMul(LHS, RHS, II)) {
        return CreateOverflowTuple(II, Builder->CreateNSWMul(LHS, RHS), false);
      }
    }
    break;
  case Intrinsic::minnum:
  case Intrinsic::maxnum: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // fmin(x, x) -> x
    if (Arg0 == Arg1)
      return ReplaceInstUsesWith(CI, Arg0);

    const ConstantFP *C0 = dyn_cast<ConstantFP>(Arg0);
    const ConstantFP *C1 = dyn_cast<ConstantFP>(Arg1);

    // Canonicalize constants into the RHS.
    if (C0 && !C1) {
      II->setArgOperand(0, Arg1);
      II->setArgOperand(1, Arg0);
      return II;
    }

    // fmin(x, nan) -> x
    if (C1 && C1->isNaN())
      return ReplaceInstUsesWith(CI, Arg0);

    // We can fold undef to the other value: if undef were NaN, we would
    // return the other value anyway, and we cannot return a NaN unless both
    // operands are NaN.
    //
    // fmin(undef, x) -> x
    if (isa<UndefValue>(Arg0))
      return ReplaceInstUsesWith(CI, Arg1);

    // fmin(x, undef) -> x
    if (isa<UndefValue>(Arg1))
      return ReplaceInstUsesWith(CI, Arg0);

    Value *X = nullptr;
    Value *Y = nullptr;
    if (II->getIntrinsicID() == Intrinsic::minnum) {
      // fmin(x, fmin(x, y)) -> fmin(x, y)
      // fmin(y, fmin(x, y)) -> fmin(x, y)
      if (match(Arg1, m_FMin(m_Value(X), m_Value(Y)))) {
        if (Arg0 == X || Arg0 == Y)
          return ReplaceInstUsesWith(CI, Arg1);
      }

      // fmin(fmin(x, y), x) -> fmin(x, y)
      // fmin(fmin(x, y), y) -> fmin(x, y)
      if (match(Arg0, m_FMin(m_Value(X), m_Value(Y)))) {
        if (Arg1 == X || Arg1 == Y)
          return ReplaceInstUsesWith(CI, Arg0);
      }

      // TODO: fmin(nnan x, inf) -> x
      // TODO: fmin(nnan ninf x, flt_max) -> x
      if (C1 && C1->isInfinity()) {
        // fmin(x, -inf) -> -inf
        if (C1->isNegative())
          return ReplaceInstUsesWith(CI, Arg1);
      }
    } else {
      assert(II->getIntrinsicID() == Intrinsic::maxnum);
      // fmax(x, fmax(x, y)) -> fmax(x, y)
      // fmax(y, fmax(x, y)) -> fmax(x, y)
      if (match(Arg1, m_FMax(m_Value(X), m_Value(Y)))) {
        if (Arg0 == X || Arg0 == Y)
          return ReplaceInstUsesWith(CI, Arg1);
      }

      // fmax(fmax(x, y), x) -> fmax(x, y)
      // fmax(fmax(x, y), y) -> fmax(x, y)
      if (match(Arg0, m_FMax(m_Value(X), m_Value(Y)))) {
        if (Arg1 == X || Arg1 == Y)
          return ReplaceInstUsesWith(CI, Arg0);
      }

      // TODO: fmax(nnan x, -inf) -> x
      // TODO: fmax(nnan ninf x, -flt_max) -> x
      if (C1 && C1->isInfinity()) {
        // fmax(x, inf) -> inf
        if (!C1->isNegative())
          return ReplaceInstUsesWith(CI, Arg1);
      }
    }
    break;
  }
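
  // Illustrative instances of the folds above (hypothetical operands):
  //   minnum(%x, minnum(%x, %y)) --> minnum(%x, %y)
  //   maxnum(%x, +inf)           --> +inf
  //   minnum(%x, nan)            --> %x   (minnum returns the non-NaN operand)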
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
    // Turn PPC lvx -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16,
                                   DL, AT, II, DT) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x: {
    // Turn PPC VSX loads into normal loads.
    Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                        PointerType::getUnqual(II->getType()));
    return new LoadInst(Ptr, Twine(""), false, 1);
  }
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16,
                                   DL, AT, II, DT) >= 16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x: {
    // Turn PPC VSX stores into normal stores.
    Type *OpPtrTy = PointerType::getUnqual(II->getArgOperand(0)->getType());
    Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
    return new StoreInst(II->getArgOperand(0), Ptr, false, 1);
  }
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16,
                                   DL, AT, II, DT) >= 16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
      return new StoreInst(II->getArgOperand(1), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64: {
    // These intrinsics only demand the 0th element of their input vectors. If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                              DemandedElts, UndefElts)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }

  // Constant fold <A x Bi> << Ci.
  // FIXME: We don't handle _dq because it's a shift of an i128, but is
  // represented in the IR as <2 x i64>. A per-element shift is wrong.
  case Intrinsic::x86_sse2_psll_d:
  case Intrinsic::x86_sse2_psll_q:
  case Intrinsic::x86_sse2_psll_w:
  case Intrinsic::x86_sse2_pslli_d:
  case Intrinsic::x86_sse2_pslli_q:
  case Intrinsic::x86_sse2_pslli_w:
  case Intrinsic::x86_avx2_psll_d:
  case Intrinsic::x86_avx2_psll_q:
  case Intrinsic::x86_avx2_psll_w:
  case Intrinsic::x86_avx2_pslli_d:
  case Intrinsic::x86_avx2_pslli_q:
  case Intrinsic::x86_avx2_pslli_w:
  case Intrinsic::x86_sse2_psrl_d:
  case Intrinsic::x86_sse2_psrl_q:
  case Intrinsic::x86_sse2_psrl_w:
  case Intrinsic::x86_sse2_psrli_d:
  case Intrinsic::x86_sse2_psrli_q:
  case Intrinsic::x86_sse2_psrli_w:
  case Intrinsic::x86_avx2_psrl_d:
  case Intrinsic::x86_avx2_psrl_q:
  case Intrinsic::x86_avx2_psrl_w:
  case Intrinsic::x86_avx2_psrli_d:
  case Intrinsic::x86_avx2_psrli_q:
  case Intrinsic::x86_avx2_psrli_w: {
    // Simplify if the shift count is constant: to 0 if the count is
    // >= BitWidth, otherwise to shl/lshr.
    auto CDV = dyn_cast<ConstantDataVector>(II->getArgOperand(1));
    auto CInt = dyn_cast<ConstantInt>(II->getArgOperand(1));
    if (!CDV && !CInt)
      break;
    ConstantInt *Count;
    if (CDV)
      Count = cast<ConstantInt>(CDV->getElementAsConstant(0));
    else
      Count = CInt;

    auto Vec = II->getArgOperand(0);
    auto VT = cast<VectorType>(Vec->getType());
    if (Count->getZExtValue() >
        VT->getElementType()->getPrimitiveSizeInBits() - 1)
      return ReplaceInstUsesWith(
          CI, ConstantAggregateZero::get(Vec->getType()));

    bool isPackedShiftLeft = true;
    switch (II->getIntrinsicID()) {
    default : break;
    case Intrinsic::x86_sse2_psrl_d:
    case Intrinsic::x86_sse2_psrl_q:
    case Intrinsic::x86_sse2_psrl_w:
    case Intrinsic::x86_sse2_psrli_d:
    case Intrinsic::x86_sse2_psrli_q:
    case Intrinsic::x86_sse2_psrli_w:
    case Intrinsic::x86_avx2_psrl_d:
    case Intrinsic::x86_avx2_psrl_q:
    case Intrinsic::x86_avx2_psrl_w:
    case Intrinsic::x86_avx2_psrli_d:
    case Intrinsic::x86_avx2_psrli_q:
    case Intrinsic::x86_avx2_psrli_w: isPackedShiftLeft = false; break;
    }

    unsigned VWidth = VT->getNumElements();
    // Get a constant vector of the same type as the first operand.
    auto VTCI = ConstantInt::get(VT->getElementType(), Count->getZExtValue());
    if (isPackedShiftLeft)
      return BinaryOperator::CreateShl(Vec,
          Builder->CreateVectorSplat(VWidth, VTCI));

    return BinaryOperator::CreateLShr(Vec,
        Builder->CreateVectorSplat(VWidth, VTCI));
  }
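
  // Illustrative sketch (hypothetical operands): with a constant count of 3,
  //   @llvm.x86.sse2.psrli.d(<4 x i32> %v, i32 3)
  // becomes
  //   lshr <4 x i32> %v, <i32 3, i32 3, i32 3, i32 3>
  // and any count of 32 or more folds the result to zeroinitializer.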

  case Intrinsic::x86_sse41_pmovsxbw:
  case Intrinsic::x86_sse41_pmovsxwd:
  case Intrinsic::x86_sse41_pmovsxdq:
  case Intrinsic::x86_sse41_pmovzxbw:
  case Intrinsic::x86_sse41_pmovzxwd:
  case Intrinsic::x86_sse41_pmovzxdq: {
    // pmov{s|z}x intrinsics ignore the upper half of their input vectors.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    unsigned LowHalfElts = VWidth / 2;
    APInt InputDemandedElts(APInt::getBitsSet(VWidth, 0, LowHalfElts));
    APInt UndefElts(VWidth, 0);
    if (Value *TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                                 InputDemandedElts,
                                                 UndefElts)) {
      II->setArgOperand(0, TmpV);
      return II;
    }
    break;
  }

  case Intrinsic::x86_sse4a_insertqi: {
    // insertqi x, y, 64, 0 can just copy y's lower bits and leave the top
    // ones undef.
    // TODO: eventually we should lower this intrinsic to IR
    if (auto CIWidth = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
      if (auto CIStart = dyn_cast<ConstantInt>(II->getArgOperand(3))) {
        unsigned Index = CIStart->getZExtValue();
        // From AMD documentation: "a value of zero in the field length is
        // defined as length of 64".
        unsigned Length = CIWidth->equalsInt(0) ? 64 : CIWidth->getZExtValue();

        // From AMD documentation: "If the sum of the bit index + length field
        // is greater than 64, the results are undefined".

        // Note that both field index and field length are 8-bit quantities.
        // Since variables 'Index' and 'Length' are unsigned values
        // obtained from zero-extending field index and field length
        // respectively, their sum should never wrap around.
        if ((Index + Length) > 64)
          return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

        if (Length == 64 && Index == 0) {
          Value *Vec = II->getArgOperand(1);
          Value *Undef = UndefValue::get(Vec->getType());
          const uint32_t Mask[] = { 0, 2 };
          return ReplaceInstUsesWith(
              CI,
              Builder->CreateShuffleVector(
                  Vec, Undef, ConstantDataVector::get(
                                  II->getContext(), makeArrayRef(Mask))));

        } else if (auto Source =
                       dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
          if (Source->hasOneUse() &&
              Source->getArgOperand(1) == II->getArgOperand(1)) {
            // If the source of the insert has only one use and it's another
            // insert (and they're both inserting from the same vector), try to
            // bundle both together.
            auto CISourceWidth =
                dyn_cast<ConstantInt>(Source->getArgOperand(2));
            auto CISourceStart =
                dyn_cast<ConstantInt>(Source->getArgOperand(3));
            if (CISourceStart && CISourceWidth) {
              unsigned Start = CIStart->getZExtValue();
              unsigned Width = CIWidth->getZExtValue();
              unsigned End = Start + Width;
              unsigned SourceStart = CISourceStart->getZExtValue();
              unsigned SourceWidth = CISourceWidth->getZExtValue();
              unsigned SourceEnd = SourceStart + SourceWidth;
              unsigned NewStart, NewWidth;
              bool ShouldReplace = false;
              if (Start <= SourceStart && SourceStart <= End) {
                NewStart = Start;
                NewWidth = std::max(End, SourceEnd) - NewStart;
                ShouldReplace = true;
              } else if (SourceStart <= Start && Start <= SourceEnd) {
                NewStart = SourceStart;
                NewWidth = std::max(SourceEnd, End) - NewStart;
                ShouldReplace = true;
              }

              if (ShouldReplace) {
                Constant *ConstantWidth = ConstantInt::get(
                    II->getArgOperand(2)->getType(), NewWidth, false);
                Constant *ConstantStart = ConstantInt::get(
                    II->getArgOperand(3)->getType(), NewStart, false);
                Value *Args[4] = { Source->getArgOperand(0),
                                   II->getArgOperand(1), ConstantWidth,
                                   ConstantStart };
                Module *M = CI.getParent()->getParent()->getParent();
                Value *F =
                    Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_insertqi);
                return ReplaceInstUsesWith(CI, Builder->CreateCall(F, Args));
              }
            }
          }
        }
      }
    }

    break;
  }
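
  // Worked instance of the bundling above (hypothetical ranges): an insertqi
  // writing bits [16, 48) whose source operand is another insertqi writing
  // bits [0, 32) from the same vector overlaps it, so both collapse into a
  // single insertqi covering bits [0, 48), i.e. NewStart = 0, NewWidth = 48.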
  case Intrinsic::x86_sse41_pblendvb:
  case Intrinsic::x86_sse41_blendvps:
  case Intrinsic::x86_sse41_blendvpd:
  case Intrinsic::x86_avx_blendv_ps_256:
  case Intrinsic::x86_avx_blendv_pd_256:
  case Intrinsic::x86_avx2_pblendvb: {
    // Convert blendv* to vector selects if the mask is constant.
    // This optimization is convoluted because the intrinsic is defined as
    // getting a vector of floats or doubles for the ps and pd versions.
    // FIXME: That should be changed.
    Value *Mask = II->getArgOperand(2);
    if (auto C = dyn_cast<ConstantDataVector>(Mask)) {
      auto Tyi1 = Builder->getInt1Ty();
      auto SelectorType = cast<VectorType>(Mask->getType());
      auto EltTy = SelectorType->getElementType();
      unsigned Size = SelectorType->getNumElements();
      unsigned BitWidth =
          EltTy->isFloatTy()
              ? 32
              : (EltTy->isDoubleTy() ? 64 : EltTy->getIntegerBitWidth());
      assert((BitWidth == 64 || BitWidth == 32 || BitWidth == 8) &&
             "Wrong arguments for variable blend intrinsic");
      SmallVector<Constant *, 32> Selectors;
      for (unsigned I = 0; I < Size; ++I) {
        // The intrinsics only read the top bit.
        uint64_t Selector;
        if (BitWidth == 8)
          Selector = C->getElementAsInteger(I);
        else
          Selector = C->getElementAsAPFloat(I).bitcastToAPInt().getZExtValue();
        Selectors.push_back(ConstantInt::get(Tyi1, Selector >> (BitWidth - 1)));
      }
      auto NewSelector = ConstantVector::get(Selectors);
      return SelectInst::Create(NewSelector, II->getArgOperand(1),
                                II->getArgOperand(0), "blendv");
    }

    break;
  }

  case Intrinsic::x86_avx_vpermilvar_ps:
  case Intrinsic::x86_avx_vpermilvar_ps_256:
  case Intrinsic::x86_avx_vpermilvar_pd:
  case Intrinsic::x86_avx_vpermilvar_pd_256: {
    // Convert vpermil* to shufflevector if the mask is constant.
    Value *V = II->getArgOperand(1);
    unsigned Size = cast<VectorType>(V->getType())->getNumElements();
    assert(Size == 8 || Size == 4 || Size == 2);
    uint32_t Indexes[8];
    if (auto C = dyn_cast<ConstantDataVector>(V)) {
      // The intrinsics only read one or two bits, clear the rest.
      for (unsigned I = 0; I < Size; ++I) {
        uint32_t Index = C->getElementAsInteger(I) & 0x3;
        if (II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd ||
            II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd_256)
          Index >>= 1;
        Indexes[I] = Index;
      }
    } else if (isa<ConstantAggregateZero>(V)) {
      for (unsigned I = 0; I < Size; ++I)
        Indexes[I] = 0;
    } else {
      break;
    }
    // The _256 variants are a bit trickier since the mask bits always index
    // into the corresponding 128-bit half. In order to convert to a generic
    // shuffle, we have to make that explicit.
    if (II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_ps_256 ||
        II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd_256) {
      for (unsigned I = Size / 2; I < Size; ++I)
        Indexes[I] += Size / 2;
    }
    auto NewC =
        ConstantDataVector::get(V->getContext(), makeArrayRef(Indexes, Size));
    auto V1 = II->getArgOperand(0);
    auto V2 = UndefValue::get(V1->getType());
    auto Shuffle = Builder->CreateShuffleVector(V1, V2, NewC);
    return ReplaceInstUsesWith(CI, Shuffle);
  }
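
  // Illustrative sketch (hypothetical mask): vpermilvar.ps(%v, <3, 2, 1, 0>)
  // becomes
  //   shufflevector <4 x float> %v, <4 x float> undef,
  //                 <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  // while the pd variants first drop the mask's low bit, and the 256-bit
  // forms offset the upper-half indices as shown above.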

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    // Note that ppc_altivec_vperm has a big-endian bias, so when creating
    // a vector shuffle for little endian, we must undo the transformation
    // performed on vec_perm in altivec.h. That is, we must complement
    // the permutation mask with respect to 31 and reverse the order of
    // V1 and V2.
    if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
      assert(Mask->getType()->getVectorNumElements() == 16 &&
             "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        Constant *Elt = Mask->getAggregateElement(i);
        if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getAggregateElement(i)))
            continue;
          unsigned Idx =
            cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.
          if (DL && DL->isLittleEndian())
            Idx = 31 - Idx;

          if (!ExtractedElts[Idx]) {
            Value *Op0ToUse = (DL && DL->isLittleEndian()) ? Op1 : Op0;
            Value *Op1ToUse = (DL && DL->isLittleEndian()) ? Op0 : Op1;
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
                                            Builder->getInt32(Idx&15));
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                                                Builder->getInt32(i));
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;

  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), DL, AT, II, DT);
    unsigned AlignArg = II->getNumArgOperands() - 1;
    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
      II->setArgOperand(AlignArg,
                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                         MemAlign, false));
      return II;
    }
    break;
  }

  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu:
  case Intrinsic::aarch64_neon_smull:
  case Intrinsic::aarch64_neon_umull: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // Handle mul by zero first:
    if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
      return ReplaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
    }

    // Check for constant LHS & RHS - in this case we just simplify.
    bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu ||
                 II->getIntrinsicID() == Intrinsic::aarch64_neon_umull);
    VectorType *NewVT = cast<VectorType>(II->getType());
    if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
      if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
        CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
        CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);

        return ReplaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
      }

      // Couldn't simplify - canonicalize constant to the RHS.
      std::swap(Arg0, Arg1);
    }

    // Handle mul by one:
    if (Constant *CV1 = dyn_cast<Constant>(Arg1))
      if (ConstantInt *Splat =
            dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
        if (Splat->isOne())
          return CastInst::CreateIntegerCast(Arg0, II->getType(),
                                             /*isSigned=*/!Zext);

    break;
  }
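
  // Illustrative sketch (hypothetical operands): a multiply by a splat of 1
  // reduces to the widening cast alone, e.g.
  //   smull(<4 x i16> %x, <i16 1, i16 1, i16 1, i16 1>)
  // is just 'sext <4 x i16> %x to <4 x i32>'.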

  case Intrinsic::AMDGPU_rcp: {
    if (const ConstantFP *C = dyn_cast<ConstantFP>(II->getArgOperand(0))) {
      const APFloat &ArgVal = C->getValueAPF();
      APFloat Val(ArgVal.getSemantics(), 1.0);
      APFloat::opStatus Status = Val.divide(ArgVal,
                                            APFloat::rmNearestTiesToEven);
      // Only do this if it was exact and therefore not dependent on the
      // rounding mode.
      if (Status == APFloat::opOK)
        return ReplaceInstUsesWith(CI, ConstantFP::get(II->getContext(), Val));
    }

    break;
  }
  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore. This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return, resume, or unwind block and if there
    // are no allocas or calls between the restore and the return, nuke the
    // restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
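
  // Illustrative sketch: a stacksave feeding an immediately following
  // stackrestore, as left behind once variable-length allocas are DCE'd,
  //   %sp = call i8* @llvm.stacksave()
  //   call void @llvm.stackrestore(i8* %sp)
  // is simply deleted, as is a restore with no intervening alloca or call
  // before the block's return.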
  case Intrinsic::assume: {
    // Canonicalize assume(a && b) -> assume(a); assume(b);
    // Note: New assumption intrinsics created here are registered by
    // the InstCombineIRInserter object.
    Value *IIOperand = II->getArgOperand(0), *A, *B,
          *AssumeIntrinsic = II->getCalledValue();
    if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
      Builder->CreateCall(AssumeIntrinsic, A, II->getName());
      Builder->CreateCall(AssumeIntrinsic, B, II->getName());
      return EraseInstFromFunction(*II);
    }
    // assume(!(a || b)) -> assume(!a); assume(!b);
    if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
      Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(A),
                          II->getName());
      Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(B),
                          II->getName());
      return EraseInstFromFunction(*II);
    }

    // assume( (load addr) != null ) -> add 'nonnull' metadata to load
    // (if assume is valid at the load)
    if (ICmpInst* ICmp = dyn_cast<ICmpInst>(IIOperand)) {
      Value *LHS = ICmp->getOperand(0);
      Value *RHS = ICmp->getOperand(1);
      if (ICmpInst::ICMP_NE == ICmp->getPredicate() &&
          isa<LoadInst>(LHS) &&
          isa<Constant>(RHS) &&
          RHS->getType()->isPointerTy() &&
          cast<Constant>(RHS)->isNullValue()) {
        LoadInst* LI = cast<LoadInst>(LHS);
        if (isValidAssumeForContext(II, LI, DL, DT)) {
          MDNode *MD = MDNode::get(II->getContext(), None);
          LI->setMetadata(LLVMContext::MD_nonnull, MD);
          return EraseInstFromFunction(*II);
        }
      }
      // TODO: apply nonnull return attributes to calls and invokes
      // TODO: apply range metadata for range check patterns?
    }

    // If there is a dominating assume with the same condition as this one,
    // then this one is redundant, and should be removed.
    APInt KnownZero(1, 0), KnownOne(1, 0);
    computeKnownBits(IIOperand, KnownZero, KnownOne, 0, II);
    if (KnownOne.isAllOnesValue())
      return EraseInstFromFunction(*II);

    break;
  }
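
  // Illustrative IR for the first canonicalization above (hypothetical
  // values):
  //   %c = and i1 %a, %b
  //   call void @llvm.assume(i1 %c)
  // becomes
  //   call void @llvm.assume(i1 %a)
  //   call void @llvm.assume(i1 %b)
  // so each conjunct is individually visible to known-bits queries.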
  case Intrinsic::experimental_gc_relocate: {
    // Translate facts known about a pointer before relocating into
    // facts about the relocate value, while being careful to
    // preserve relocation semantics.
    GCRelocateOperands Operands(II);
    Value *DerivedPtr = Operands.derivedPtr();

    // Remove the relocation if unused; note that this check is required
    // to prevent the cases below from looping forever.
    if (II->use_empty())
      return EraseInstFromFunction(*II);

    // Undef is undef, even after relocation.
    // TODO: provide a hook for this in GCStrategy. This is clearly legal for
    // most practical collectors, but there was discussion in the review thread
    // about whether it was legal for all possible collectors.
    if (isa<UndefValue>(DerivedPtr))
      return ReplaceInstUsesWith(*II, DerivedPtr);

    // The relocation of null will be null for most any collector.
    // TODO: provide a hook for this in GCStrategy. There might be some weird
    // collector this property does not hold for.
    if (isa<ConstantPointerNull>(DerivedPtr))
      return ReplaceInstUsesWith(*II, DerivedPtr);

    // isKnownNonNull -> nonnull attribute
    if (isKnownNonNull(DerivedPtr))
      II->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);

    // TODO: dereferenceable -> deref attribute

    // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
    // Canonicalize on the type from the uses to the defs

    // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const DataLayout * const DL,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // If this is a GC intrinsic, avoid munging types. We need types for
  // statepoint reconstruction in SelectionDAG.
  // TODO: This is probably something which should be expanded to all
  // intrinsics since the entire point of intrinsics is that
  // they are understandable by the optimizer.
  if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS))
    return false;

  // The size of ByVal or InAlloca arguments is derived from the type, so we
  // can't change to a type with a different size. If the size were
  // passed explicitly we could avoid this check.
  if (!CS.isByValOrInAllocaArgument(ix))
    return true;

  Type* SrcTy =
            cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!DL || DL->getTypeAllocSize(SrcTy) != DL->getTypeAllocSize(DstTy))
    return false;
  return true;
}

// Try to fold some different types of calls here.
// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const DataLayout *DL) {
  if (!CI->getCalledFunction()) return nullptr;

  if (Value *With = Simplifier->optimizeCall(CI)) {
    ++NumSimplified;
    return CI->use_empty() ? CI : ReplaceInstUsesWith(*CI, With);
  }

  return nullptr;
}

static IntrinsicInst *FindInitTrampolineFromAlloca(Value *TrampMem) {
  // Strip off at most one level of pointer casts, looking for an alloca. This
  // is good enough in practice and simpler than handling any number of casts.
  Value *Underlying = TrampMem->stripPointerCasts();
  if (Underlying != TrampMem &&
      (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
    return nullptr;
  if (!isa<AllocaInst>(Underlying))
    return nullptr;

  IntrinsicInst *InitTrampoline = nullptr;
  for (User *U : TrampMem->users()) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    if (!II)
      return nullptr;
    if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
      if (InitTrampoline)
        // More than one init_trampoline writes to this value. Give up.
        return nullptr;
      InitTrampoline = II;
      continue;
    }
    if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
      // Allow any number of calls to adjust.trampoline.
      continue;

    return nullptr;
  }

  // No call to init.trampoline found.
  if (!InitTrampoline)
    return nullptr;

  // Check that the alloca is being used in the expected way.
  if (InitTrampoline->getOperand(0) != TrampMem)
    return nullptr;

  return InitTrampoline;
}

static IntrinsicInst *FindInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
                                               Value *TrampMem) {
  // Visit all the previous instructions in the basic block, and try to find
  // an init.trampoline which has a direct path to the adjust.trampoline.
  for (BasicBlock::iterator I = AdjustTramp,
       E = AdjustTramp->getParent()->begin(); I != E; ) {
    Instruction *Inst = --I;
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
      if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
          II->getOperand(0) == TrampMem)
        return II;
    if (Inst->mayWriteToMemory())
      return nullptr;
  }
  return nullptr;
}

// Given a call to llvm.adjust.trampoline, find and return the corresponding
// call to llvm.init.trampoline if the call to the trampoline can be optimized
// to a direct call to a function. Otherwise return NULL.
//
static IntrinsicInst *FindInitTrampoline(Value *Callee) {
  Callee = Callee->stripPointerCasts();
  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
  if (!AdjustTramp ||
      AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
    return nullptr;

  Value *TrampMem = AdjustTramp->getOperand(0);

  if (IntrinsicInst *IT = FindInitTrampolineFromAlloca(TrampMem))
    return IT;
  if (IntrinsicInst *IT = FindInitTrampolineFromBB(AdjustTramp, TrampMem))
    return IT;
  return nullptr;
}

// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  if (isAllocLikeFn(CS.getInstruction(), TLI))
    return visitAllocSite(*CS.getInstruction());

  bool Changed = false;

  // If the callee is a pointer to a function, attempt to move any casts to the
  // arguments of the call/invoke.
  Value *Callee = CS.getCalledValue();
  if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
    return nullptr;

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body. A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust itself.
      if (!OldCall->getType()->isVoidTy())
        ReplaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG; just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setCalledFunction(
                                    Constant::getNullValue(CalleeF->getType()));
      return nullptr;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust itself.
    if (!CS.getInstruction()->getType()->isVoidTy())
      ReplaceInstUsesWith(*CS.getInstruction(),
                          UndefValue::get(CS.getInstruction()->getType()));

    if (isa<InvokeInst>(CS.getInstruction())) {
      // Can't remove an invoke because we cannot change the CFG.
      return nullptr;
    }

    // This instruction is not reachable, just remove it. We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                  UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (IntrinsicInst *II = FindInitTrampoline(Callee))
    return transformCallThroughTrampoline(CS, II);

  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams();
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin() + FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, DL, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible; we require DataLayout for most of
  // this. None of these calls are seen as possibly dead so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, DL);
    // If we changed something, return the result; otherwise fall through to
    // the final check below.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : nullptr;
}

// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  Function *Callee =
    dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
  if (!Callee)
    return false;
  Instruction *Caller = CS.getInstruction();
  const AttributeSet &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type. Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments casted to the appropriate types.
  //
  FunctionType *FT = Callee->getFunctionType();
  Type *OldRetTy = Caller->getType();
  Type *NewRetTy = FT->getReturnType();

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {

    if (NewRetTy->isStructTy())
      return false; // TODO: Handle multiple return values.

    if (!CastInst::isBitCastable(NewRetTy, OldRetTy)) {
      if (Callee->isDeclaration())
        return false;   // Cannot transform this return value.

      if (!Caller->use_empty() &&
          // void -> non-void is handled specially
          !NewRetTy->isVoidTy())
        return false;   // Cannot transform this return value.
    }

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);
      if (RAttrs.
          hasAttributes(AttributeFuncs::
                        typeIncompatible(NewRetTy, AttributeSet::ReturnIndex),
                        AttributeSet::ReturnIndex))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used by
    // a PHI node in a successor, we cannot change the return type of the call
    // because there is no place to put the cast instruction (without breaking
    // the critical edge). Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (User *U : II->users())
          if (PHINode *PN = dyn_cast<PHINode>(U))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = CS.arg_size();
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    Type *ActTy = (*AI)->getType();

    if (!CastInst::isBitCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    if (AttrBuilder(CallerPAL.getParamAttributes(i + 1), i + 1).
          hasAttributes(AttributeFuncs::
                        typeIncompatible(ParamTy, i + 1), i + 1))
      return false;   // Attribute not compatible with transformed value.

    if (CS.isInAllocaArgument(i))
      return false;   // Cannot transform to and from inalloca.

    // If the parameter is passed as a byval argument, then we have to have a
    // sized type and the sized type has to have the same size as the old type.
    if (ParamTy != ActTy &&
        CallerPAL.getParamAttributes(i + 1).hasAttribute(i + 1,
                                                         Attribute::ByVal)) {
      PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
      if (!ParamPTy || !ParamPTy->getElementType()->isSized() || !DL)
        return false;

      Type *CurElTy = ActTy->getPointerElementType();
      if (DL->getTypeAllocSize(CurElTy) !=
          DL->getTypeAllocSize(ParamPTy->getElementType()))
        return false;
    }
  }

  if (Callee->isDeclaration()) {
    // Do not delete arguments unless we have a function body.
    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
      return false;

    // If the callee is just a declaration, don't change the varargsness of the
    // call. We don't want to introduce a varargs call where one doesn't
    // already exist.
    PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
    if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
      return false;

    // If both the callee and the cast type are varargs, we still have to make
    // sure the number of fixed parameters is the same or we have the same
    // ABI issues as if we introduce a varargs call.
    if (FT->isVarArg() &&
        cast<FunctionType>(APTy->getElementType())->isVarArg() &&
        FT->getNumParams() !=
        cast<FunctionType>(APTy->getElementType())->getNumParams())
      return false;
  }

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them. Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      unsigned Index = CallerPAL.getSlotIndex(i - 1);
      if (Index <= FT->getNumParams())
        break;

      // Check if it has an attribute that's incompatible with varargs.
      AttributeSet PAttrs = CallerPAL.getSlotAttributes(i - 1);
      if (PAttrs.hasAttribute(Index, Attribute::StructRet))
        return false;
    }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary.
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeSet, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes. Wipe out any problematic attributes.
  RAttrs.
    removeAttributes(AttributeFuncs::
                     typeIncompatible(NewRetTy, AttributeSet::ReturnIndex),
                     AttributeSet::ReturnIndex);

  // Add the new return attributes.
  if (RAttrs.hasAttributes())
    attrVec.push_back(AttributeSet::get(Caller->getContext(),
                                        AttributeSet::ReturnIndex, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);

    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Args.push_back(Builder->CreateBitCast(*AI, ParamTy));
    }

    // Add any parameter attributes.
    AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
    if (PAttrs.hasAttributes())
      attrVec.push_back(AttributeSet::get(Caller->getContext(), i + 1,
                                          PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
    if (FT->isVarArg()) {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
        if (PAttrs.hasAttributes())
          attrVec.push_back(AttributeSet::get(FT->getContext(), i + 1,
                                              PAttrs));
      }
    }
  }

  AttributeSet FnAttrs = CallerPAL.getFnAttributes();
  if (CallerPAL.hasAttributes(AttributeSet::FunctionIndex))
    attrVec.push_back(AttributeSet::get(Callee->getContext(), FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttributeSet &NewCallerPAL = AttributeSet::get(Callee->getContext(),
                                                       attrVec);

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = Builder->CreateInvoke(Callee, II->getNormalDest(),
                               II->getUnwindDest(), Args);
    NC->takeName(II);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    CallInst *CI = cast<CallInst>(Caller);
    NC = Builder->CreateCall(Callee, Args);
    NC->takeName(CI);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      NV = NC = CastInst::Create(CastInst::BitCast, NC, OldRetTy);
      NC->setDebugLoc(Caller->getDebugLoc());

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call, just insert cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    ReplaceInstUsesWith(*Caller, NV);
  else if (Caller->hasValueHandle()) {
    if (OldRetTy == NV->getType())
      ValueHandleBase::ValueIsRAUWd(Caller, NV);
    else
      // We cannot call ValueIsRAUWd with a different type, and the
      // actual tracked value will disappear.
      ValueHandleBase::ValueIsDeleted(Caller);
  }

  EraseInstFromFunction(*Caller);
  return true;
}
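
// Illustrative sketch of the transformation (hypothetical function @f): a
// call through a constexpr-cast callee such as
//   %r = call i32 bitcast (float (float)* @f to i32 (i32)*)(i32 %x)
// becomes a direct call with bitcast arguments and result,
//   %x.c = bitcast i32 %x to float
//   %f.r = call float @f(float %x.c)
//   %r   = bitcast float %f.r to i32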

// transformCallThroughTrampoline - Turn a call to a function created by the
// init_trampoline / adjust_trampoline intrinsic pair into a direct call to the
// underlying function.
//
Instruction *
InstCombiner::transformCallThroughTrampoline(CallSite CS,
                                             IntrinsicInst *Tramp) {
  Value *Callee = CS.getCalledValue();
  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttributeSet &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return nullptr;

  assert(Tramp &&
         "transformCallThroughTrampoline called with incorrect CallSite.");

  Function *NestF = cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttributeSet &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    Type *NestTy = nullptr;
    AttributeSet NestAttr;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.hasAttribute(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(CS.arg_size() + 1);

      SmallVector<AttributeSet, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it. Likewise for attributes.

      // Add any result attributes.
      if (Attrs.hasAttributes(AttributeSet::ReturnIndex))
        NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
                                             Attrs.getRetAttributes()));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = Builder->CreateBitCast(NestVal, NestTy, "nest");
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
                                                 NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          AttributeSet Attr = Attrs.getParamAttributes(Idx);
          if (Attr.hasAttributes(Idx)) {
            AttrBuilder B(Attr, Idx);
            NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
                                                 Idx + (Idx >= NestIdx), B));
          }

          ++Idx;
          ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attrs.hasAttributes(AttributeSet::FunctionIndex))
        NewAttrs.push_back(AttributeSet::get(FTy->getContext(),
                                             Attrs.getFnAttributes()));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx;
          ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call. Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttributeSet &NewPAL =
          AttributeSet::get(FTy->getContext(), NewAttrs);

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }

      return NewCaller;
    }
  }

  // Replace the trampoline call with a direct call. Since there is no 'nest'
  // parameter, there is no need to adjust the argument list. Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}