//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
using namespace llvm;
/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static const Type *getPromotedType(const Type *Ty) {
  if (const IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}
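
// For example, an i8 or i16 argument is promoted to i32 before being passed
// through the varargs area; i32 and wider types are returned unchanged.
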
/// EnforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign. This isn't
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
///
static unsigned EnforceKnownAlignment(Value *V,
                                      unsigned Align, unsigned PrefAlign) {
  User *U = dyn_cast<User>(V);
  if (!U) return Align;

  switch (Operator::getOpcode(U)) {
  default: break;
  case Instruction::BitCast:
    return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
  case Instruction::GetElementPtr: {
    // If all indexes are zero, it is just the alignment of the base pointer.
    bool AllZeroOperands = true;
    for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
      if (!isa<Constant>(*i) ||
          !cast<Constant>(*i)->isNullValue()) {
        AllZeroOperands = false;
        break;
      }

    if (AllZeroOperands) {
      // Treat this like a bitcast.
      return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
    }
    break;
  }
  }

  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // If there is a large requested alignment and we can, bump up the
    // alignment of the global.
    if (!GV->isDeclaration()) {
      if (GV->getAlignment() >= PrefAlign)
        Align = GV->getAlignment();
      else {
        GV->setAlignment(PrefAlign);
        Align = PrefAlign;
      }
    }
  } else if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    // If there is a requested alignment and if this is an alloca, round up.
    if (AI->getAlignment() >= PrefAlign)
      Align = AI->getAlignment();
    else {
      AI->setAlignment(PrefAlign);
      Align = PrefAlign;
    }
  }

  return Align;
}

/// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0. If PrefAlign is specified,
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
                                                  unsigned PrefAlign) {
  unsigned BitWidth = TD ? TD->getTypeSizeInBits(V->getType()) :
                           sizeof(PrefAlign) * CHAR_BIT;
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
  unsigned TrailZ = KnownZero.countTrailingOnes();
  unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);

  if (PrefAlign > Align)
    Align = EnforceKnownAlignment(V, Align, PrefAlign);

  // We don't need to make any adjustment.
  return Align;
}
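
// For example (illustrative): if ComputeMaskedBits proves that the low four
// bits of a pointer are zero, TrailZ is 4 and the pointer is known to be at
// least 1u << 4 == 16-byte aligned.
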
Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(1));
  unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(2));
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getOperand(3));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for intrinsic.  See
  // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  const Type *NewPtrTy =
    PointerType::getUnqual(IntegerType::get(MI->getContext(), Size<<3));

  // Memcpy forces the use of i8* for the source and destination.  That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*.  We'd much rather use a double load+store here than
  // an i64 load+store, because this improves the odds that the source or
  // dest address will be promotable.  See if we can find a better type than
  // the integer datatype.
  Value *StrippedDest = MI->getOperand(1)->stripPointerCasts();
  if (StrippedDest != MI->getOperand(1)) {
    const Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                           ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double].  Rip
      // down through these levels if so.
      while (!SrcETy->isSingleValueType()) {
        if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType())
        NewPtrTy = PointerType::getUnqual(SrcETy);
    }
  }

  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getOperand(2), NewPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getOperand(1), NewPtrTy);
  Instruction *L = new LoadInst(Src, "tmp", false, SrcAlign);
  InsertNewInstBefore(L, *MI);
  InsertNewInstBefore(new StoreInst(L, Dest, false, DstAlign), *MI);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setOperand(3, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}
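
// For example (illustrative IR, with hypothetical value names), an 8-byte
// copy whose destination is a cast from double*:
//   call void @llvm.memcpy.i64(i8* %dst, i8* %src, i64 8, i32 8)
// becomes a single load+store through the better-typed pointers:
//   %tmp = load double* %src.d, align 8
//   store double %tmp, double* %dst.d, align 8
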
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op.
  if (Len == 0) return MI; // memset(d,c,0,a) -> noop

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    const Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    Dest = Builder->CreateBitCast(Dest, PointerType::getUnqual(ITy));

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

    // Splat the fill byte across the store width and store it.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
                                      Dest, false, Alignment), *MI);

    // Set the length of the memset to 0, it will be deleted on the next
    // iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}
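
// For example (illustrative IR), a constant four-byte fill:
//   call void @llvm.memset.i64(i8* %p, i8 1, i64 4, i32 4)
// becomes a single store of the splatted fill byte:
//   store i32 16843009, i32* %p.cast, align 4    ; 0x01010101
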
/// visitCallInst - CallInst simplification.  This mostly only handles folding
/// of intrinsic instructions.  For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI))
    return visitFree(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if
  // the callee isn't.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue()) return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations.  We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          const Type *Tys[1];
          Tys[0] = CI.getOperand(3)->getType();
          CI.setOperand(0,
                        Intrinsic::getDeclaration(M, MemCpyID, Tys, 1));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    // We need target data for just about everything so depend on it.
    if (!TD) break;

    const Type *ReturnTy = CI.getType();
    bool Min = (cast<ConstantInt>(II->getOperand(2))->getZExtValue() == 1);

    // Get to the real allocated thing and offset as fast as possible.
    Value *Op1 = II->getOperand(1)->stripPointerCasts();

    // If we've stripped down to a single global variable that we
    // can know the size of then just return that.
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
      if (GV->hasDefinitiveInitializer()) {
        Constant *C = GV->getInitializer();
        uint64_t GlobalSize = TD->getTypeAllocSize(C->getType());
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, GlobalSize));
      } else {
        // Can't determine size of the GV.
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
      // Get alloca size.
      if (AI->getAllocatedType()->isSized()) {
        uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
        if (AI->isArrayAllocation()) {
          const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
          if (!C) break;
          AllocaSize *= C->getZExtValue();
        }
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, AllocaSize));
      }
    } else if (CallInst *MI = extractMallocCall(Op1)) {
      const Type *MallocType = getMallocAllocatedType(MI);
      // Get malloc size.
      if (MallocType && MallocType->isSized()) {
        if (Value *NElems = getMallocArraySize(MI, TD, true)) {
          if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
            return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy,
                (NElements->getZExtValue() * TD->getTypeAllocSize(MallocType))));
        }
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op1)) {
      // Only handle constant GEPs here.
      if (CE->getOpcode() != Instruction::GetElementPtr) break;
      GEPOperator *GEP = cast<GEPOperator>(CE);

      // Make sure we're not a constant offset from an external
      // global.
      Value *Operand = GEP->getPointerOperand();
      Operand = Operand->stripPointerCasts();
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Operand))
        if (!GV->hasDefinitiveInitializer()) break;

      // Get what we're pointing to and its size.
      const PointerType *BaseType =
        cast<PointerType>(Operand->getType());
      uint64_t Size = TD->getTypeAllocSize(BaseType->getElementType());

      // Get the current byte offset into the thing. Use the original
      // operand in case we're looking through a bitcast.
      SmallVector<Value*, 8> Ops(CE->op_begin()+1, CE->op_end());
      const PointerType *OffsetType =
        cast<PointerType>(GEP->getPointerOperand()->getType());
      uint64_t Offset = TD->getIndexedOffset(OffsetType, &Ops[0], Ops.size());

      if (Size < Offset) {
        // Out of bound reference? Negative index normalized to large
        // index? Just return "I don't know".
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }

      Constant *RetVal = ConstantInt::get(ReturnTy, Size-Offset);
      return ReplaceInstUsesWith(CI, RetVal);
    }

    // Do not return "I don't know" here. Later optimization passes could
    // make it possible to evaluate objectsize to a constant.
    break;
  }
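
  // For example (illustrative IR): with a global "@g = global [8 x i8] ..."
  // whose initializer is known,
  //   %s = call i32 @llvm.objectsize.i32(i8* %g.i8, i1 false)
  // folds to "i32 8", and the same call on a constant GEP two bytes into @g
  // folds to "i32 6".
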
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getOperand(1)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getOperand(1));

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getOperand(1))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getOperand(1), CV);
          return new TruncInst(V, TI->getType());
        }
    }
    break;
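
  // For the bswap pair above (illustrative): with %x of type i32 truncated
  // to i16, bswap(trunc(bswap(%x))) becomes trunc(lshr(%x, 16)), i.e. the
  // high half of %x with its bytes back in their original order.
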
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getOperand(1));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getOperand(1));
    }
    break;

  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));
    break;
  }
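
  // For the cttz case above (illustrative): an operand known to have the
  // form "xxx1000" (bit 3 known one, bits 0-2 known zero) folds to 3; the
  // ctlz case below is the mirror image at the high end of the value.
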
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));
    break;
  }
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
    const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt Mask = APInt::getSignBit(BitWidth);
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getTrue(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  }
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getOperand(1)) &&
        !isa<Constant>(II->getOperand(2))) {
      Value *LHS = II->getOperand(1);
      II->setOperand(1, II->getOperand(2));
      II->setOperand(2, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getOperand(2)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getOperand(1)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getOperand(1), 0);
      }
    }
    break;
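
  // For example (illustrative IR):
  //   %r = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %x, i32 0)
  // folds to the pair {%x, false}, built by inserting %x into an
  // {undef, false} constant struct.
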
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getOperand(1)) ||
        isa<UndefValue>(II->getOperand(2)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getOperand(1)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getOperand(1), 0);
      }
    }
    break;

  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getOperand(1)) &&
        !isa<Constant>(II->getOperand(2))) {
      Value *LHS = II->getOperand(1);
      II->setOperand(1, II->getOperand(2));
      II->setOperand(2, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getOperand(2)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // X * 0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getOperand(1)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getOperand(1), 0);
      }
    }
    break;

  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::x86_sse_loadu_ps:
  case Intrinsic::x86_sse2_loadu_pd:
  case Intrinsic::x86_sse2_loadu_dq:
    // Turn PPC lvx     -> load if the pointer is known aligned.
    // Turn X86 loadups -> load if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getOperand(1),
                                          PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;

  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getOperand(2), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getOperand(2), OpPtrTy);
      return new StoreInst(II->getOperand(1), Ptr);
    }
    break;

  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getOperand(2)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getOperand(1), OpPtrTy);
      return new StoreInst(II->getOperand(2), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvttss2si: {
    // These intrinsics only demand the 0th element of their input vector. If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getOperand(1)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
                                              UndefElts)) {
      II->setOperand(1, V);
      return II;
    }
    break;
  }

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getOperand(3))) {
      assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        if (!isa<ConstantInt>(Mask->getOperand(i)) &&
            !isa<UndefValue>(Mask->getOperand(i))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getOperand(1), Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getOperand(2), Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getOperand(i)))
            continue;
          unsigned Idx=cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.

          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                 Idx&15, false), "tmp");
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                     ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                      i, false), "tmp");
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;
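
  // For example (illustrative): a constant mask element of 17 selects
  // 17 & 31 == 17 which, being >= 16, names byte 17 & 15 == 1 of the second
  // input vector.
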
  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore.  This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getOperand(1))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return/unwind block and if there are no
    // allocas or calls between the restore and the return, nuke the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const unsigned ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size. If the size were
  // passed explicitly we could avoid this check.
  if (!CS.paramHasAttr(ix, Attribute::ByVal))
    return true;

  const Type *SrcTy =
    cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  const Type *DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}
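
// For example, a lossless "bitcast i32* %p to i8*" passed in the varargs
// area of a call can be dropped in favor of passing %p directly, provided
// the argument is not marked byval.
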
namespace {
class InstCombineFortifiedLibCalls : public SimplifyFortifiedLibCalls {
  InstCombiner *IC;
protected:
  void replaceCall(Value *With) {
    NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
  }
  bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
    if (ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getOperand(SizeCIOp))) {
      if (SizeCI->isAllOnesValue())
        return true;
      if (isString)
        return SizeCI->getZExtValue() >=
               GetStringLength(CI->getOperand(SizeArgOp));
      if (ConstantInt *Arg = dyn_cast<ConstantInt>(CI->getOperand(SizeArgOp)))
        return SizeCI->getZExtValue() <= Arg->getZExtValue();
    }
    return false;
  }
public:
  InstCombineFortifiedLibCalls(InstCombiner *IC) : IC(IC), NewInstruction(0) { }
  Instruction *NewInstruction;
};
} // end anonymous namespace

// Try to fold some different types of calls here.
// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
  if (CI->getCalledFunction() == 0) return 0;

  InstCombineFortifiedLibCalls Simplifier(this);
  Simplifier.fold(CI, TD);
  return Simplifier.NewInstruction;
}
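
// For example (illustrative), a fortified checking call whose object-size
// argument is known to be large enough:
//   call i8* @__memcpy_chk(i8* %d, i8* %s, i64 4, i64 16)
// can never fail its check, so it can be simplified to the plain,
// unchecked memcpy equivalent.
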
// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a constexpr cast of a function, attempt to move the cast
  // to the arguments of the call/invoke.
  if (transformConstExprCastCall(CS)) return 0;

  Value *Callee = CS.getCalledValue();

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body.  A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG, just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setOperand(0,
                                    Constant::getNullValue(CalleeF->getType()));
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it.  We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      CS.getInstruction()->
        replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
    if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
      if (In->getIntrinsicID() == Intrinsic::init_trampoline)
        return transformCallThroughTrampoline(CS);

  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible; we require TargetData for most of
  // this.  None of these calls are seen as possibly dead so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, TD);
    // If we changed something, return the result; otherwise fall through to
    // the final check below.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : 0;
}

// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
  ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
  if (CE->getOpcode() != Instruction::BitCast ||
      !isa<Function>(CE->getOperand(0)))
    return false;
  Function *Callee = cast<Function>(CE->getOperand(0));
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type.  Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments cast to the appropriate types.
  //
  const FunctionType *FT = Callee->getFunctionType();
  const Type *OldRetTy = Caller->getType();
  const Type *NewRetTy = FT->getReturnType();

  if (NewRetTy->isStructTy())
    return false; // TODO: Handle multiple return values.

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or
        // from a pointer to an integer of the same size.
        !((OldRetTy->isPointerTy() || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (NewRetTy->isPointerTy() || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;   // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;   // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used
    // by a PHI node in a successor, we cannot change the return type of the
    // call because there is no place to put the cast instruction (without
    // breaking the critical edge).  Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    const Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    if (CallerPAL.getParamAttributes(i + 1)
        & Attribute::typeIncompatible(ParamTy))
      return false;   // Attribute not compatible with transformed value.

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((ParamTy->isPointerTy() ||
               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (ActTy->isPointerTy() ||
               ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }

  if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
      Callee->isDeclaration())
    return false;   // Do not delete arguments unless we have a function body.

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them.  Check that these extra arguments have
    // attributes that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary...
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes.  Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was passing, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        const Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }

  if (Attributes FnAttrs = CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
                            Args.begin(), Args.end(),
                            Caller->getName(), Caller);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    NC = CallInst::Create(Callee, Args.begin(), Args.end(),
                          Caller->getName(), Caller);
    CallInst *CI = cast<CallInst>(Caller);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
                                                            OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call: just insert the cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    Caller->replaceAllUsesWith(NV);

  EraseInstFromFunction(*Caller);
  return true;
}
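
// For example (illustrative IR), a call through a constexpr bitcast:
//   %r = call i32 bitcast (i8 (i8)* @f to i32 (i32)*)(i32 %x)
// becomes a direct call with the argument and return casts made explicit:
//   %a = trunc i32 %x to i8
//   %c = call i8 @f(i8 %a)
//   %r = zext i8 %c to i32
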
// transformCallThroughTrampoline - Turn a call to a function created by the
// init_trampoline intrinsic into a direct call to the underlying function.
//
Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
  Value *Callee = CS.getCalledValue();
  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  IntrinsicInst *Tramp =
    cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));

  Function *NestF = cast<Function>(Tramp->getOperand(2)->stripPointerCasts());
  const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    const Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it.  Likewise for attributes.

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getOperand(3);
            if (NestVal->getType() != NestTy)
              NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back
              (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<const Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call.  Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs.begin(), NewArgs.end(),
                                       Caller->getName(), Caller);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
                                     Caller->getName(), Caller);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }
      if (!Caller->getType()->isVoidTy())
        Caller->replaceAllUsesWith(NewCaller);
      Caller->eraseFromParent();
      Worklist.Remove(Caller);
      return 0;
    }
  }

  // Replace the trampoline call with a direct call.  Since there is no 'nest'
  // parameter, there is no need to adjust the argument list.  Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}