//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
using namespace llvm;

/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
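/// For example, i8 and i16 promote to i32 (matching C's default argument
/// promotions); i32 and wider integers, and all non-integer types, are
/// returned unchanged.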
static const Type *getPromotedType(const Type *Ty) {
  if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

/// EnforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign. This isn't
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
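///
/// For example (illustrative): given "%x = alloca i32, align 4" and a
/// PrefAlign of 16, the alloca is rewritten in place to "align 16". Globals
/// are only bumped when they have a definition and no explicit section.
///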
static unsigned EnforceKnownAlignment(Value *V,
                                      unsigned Align, unsigned PrefAlign) {

  User *U = dyn_cast<User>(V);
  if (!U) return Align;

  switch (Operator::getOpcode(U)) {
  default: break;
  case Instruction::BitCast:
    return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
  case Instruction::GetElementPtr: {
    // If all indexes are zero, it is just the alignment of the base pointer.
    bool AllZeroOperands = true;
    for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
      if (!isa<Constant>(*i) ||
          !cast<Constant>(*i)->isNullValue()) {
        AllZeroOperands = false;
        break;
      }

    if (AllZeroOperands) {
      // Treat this like a bitcast.
      return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
    }
    break;
  }
  case Instruction::Alloca: {
    AllocaInst *AI = cast<AllocaInst>(V);
    // If there is a requested alignment and if this is an alloca, round up.
    if (AI->getAlignment() >= PrefAlign)
      return AI->getAlignment();
    AI->setAlignment(PrefAlign);
    return PrefAlign;
  }
  }

  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // If there is a large requested alignment and we can, bump up the
    // alignment of the global.
    if (GV->isDeclaration()) return Align;

    if (GV->getAlignment() >= PrefAlign)
      return GV->getAlignment();
    // We can only increase the alignment of the global if it has no alignment
    // specified or if it is not assigned a section.  If it is assigned a
    // section, the global could be densely packed with other objects in the
    // section, and increasing the alignment could cause padding issues.
    if (!GV->hasSection() || GV->getAlignment() == 0)
      GV->setAlignment(PrefAlign);
    return GV->getAlignment();
  }

  return Align;
}

/// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0.  If PrefAlign is
/// specified, and it is more than the alignment of the ultimate object, see
/// if we can increase the alignment of the ultimate object, making this check
/// succeed.
unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
                                                  unsigned PrefAlign) {
  unsigned BitWidth = TD ? TD->getTypeSizeInBits(V->getType()) :
                           sizeof(PrefAlign) * CHAR_BIT;
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
  unsigned TrailZ = KnownZero.countTrailingOnes();
  unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);

  if (PrefAlign > Align)
    Align = EnforceKnownAlignment(V, Align, PrefAlign);

  // We don't need to make any adjustment.
  return Align;
}

Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getArgOperand(0));
  unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getArgOperand(1));
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for the intrinsic.
  // See if the size is something we can handle with a single primitive
  // load/store.  A single load+store correctly handles overlapping memory in
  // the memmove case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.
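
  // For example (illustrative IR, intrinsic arguments abridged):
  //   call void @llvm.memcpy(i8* %d, i8* %s, i64 8, ...)
  // becomes a single integer load+store after the pointer casts below:
  //   %v = load i64* %s.cast, align 8
  //   store i64 %v, i64* %d.cast, align 8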
  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  const IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // Memcpy forces the use of i8* for the source and destination.  That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*.  We'd much rather use a double load+store than an
  // i64 load+store here, because this improves the odds that the source or
  // dest address will be promotable.  See if we can find a better type than
  // the integer datatype.
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  if (StrippedDest != MI->getArgOperand(0)) {
    const Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                           ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double].  Rip
      // down through these levels if so.
      while (!SrcETy->isSingleValueType()) {
        if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
      }
    }
  }

  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  Instruction *L = new LoadInst(Src, "tmp", MI->isVolatile(), SrcAlign);
  InsertNewInstBefore(L, *MI);
  InsertNewInstBefore(new StoreInst(L, Dest, MI->isVolatile(), DstAlign),
                      *MI);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op.
  if (Len == 0) return MI;  // memset(d,c,0,a) -> noop

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    const Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    Dest = Builder->CreateBitCast(Dest, PointerType::getUnqual(ITy));

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
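    // Multiplying the i8 fill value by 0x0101010101010101 replicates it into
    // every byte: e.g. 0xAB becomes 0xABABABABABABABAB, which ConstantInt::get
    // then truncates to the store width (0xABABABAB for a 4-byte memset).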
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
                                      Dest, false, Alignment), *MI);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}

/// visitCallInst - CallInst simplification.  This mostly only handles folding
/// of intrinsic instructions.  For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI))
    return visitFree(CI);
  if (isMalloc(&CI))
    return visitMalloc(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if
  // the callee isn't.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations.  We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
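    // For example (illustrative IR): a memmove whose source is
    //   @str = private constant [4 x i8] c"abc\00"
    // cannot overlap its destination, so the call is rewritten in place to
    // the corresponding llvm.memcpy with the same arguments.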
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          const Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                                 CI.getArgOperand(1)->getType(),
                                 CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys, 3));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    // We need target data for just about everything so depend on it.
    if (!TD) break;

    const Type *ReturnTy = CI.getType();
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);

    // Get to the real allocated thing and offset as fast as possible.
    Value *Op1 = II->getArgOperand(0)->stripPointerCasts();

    // If we've stripped down to a single global variable that we
    // can know the size of then just return that.
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
      if (GV->hasDefinitiveInitializer()) {
        Constant *C = GV->getInitializer();
        uint64_t GlobalSize = TD->getTypeAllocSize(C->getType());
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, GlobalSize));
      } else {
        // Can't determine size of the GV.
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
      // Get the alloca size.
      if (AI->getAllocatedType()->isSized()) {
        uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
        if (AI->isArrayAllocation()) {
          const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
          if (!C) break;
          AllocaSize *= C->getZExtValue();
        }
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, AllocaSize));
      }
    } else if (CallInst *MI = extractMallocCall(Op1)) {
      // Get the malloc'd size.
      const Type* MallocType = getMallocAllocatedType(MI);
      if (MallocType && MallocType->isSized()) {
        if (Value *NElems = getMallocArraySize(MI, TD, true)) {
          if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
            return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy,
               (NElements->getZExtValue() * TD->getTypeAllocSize(MallocType))));
        }
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op1)) {
      // Only handle constant GEPs here.
      if (CE->getOpcode() != Instruction::GetElementPtr) break;
      GEPOperator *GEP = cast<GEPOperator>(CE);

      // Make sure we're not a constant offset from an external
      // global.
      Value *Operand = GEP->getPointerOperand();
      Operand = Operand->stripPointerCasts();
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Operand))
        if (!GV->hasDefinitiveInitializer()) break;

      // Get what we're pointing to and its size.
      const PointerType *BaseType =
        cast<PointerType>(Operand->getType());
      uint64_t Size = TD->getTypeAllocSize(BaseType->getElementType());

      // Get the current byte offset into the thing.  Use the original
      // operand in case we're looking through a bitcast.
      SmallVector<Value*, 8> Ops(CE->op_begin()+1, CE->op_end());
      const PointerType *OffsetType =
        cast<PointerType>(GEP->getPointerOperand()->getType());
      uint64_t Offset = TD->getIndexedOffset(OffsetType, &Ops[0], Ops.size());

      if (Size < Offset) {
        // Out of bound reference? Negative index normalized to large
        // index? Just return "I don't know".
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }

      Constant *RetVal = ConstantInt::get(ReturnTy, Size-Offset);
      return ReplaceInstUsesWith(CI, RetVal);
    }

    // Do not return "I don't know" here.  Later optimization passes could
    // make it possible to evaluate objectsize to a constant.
    break;
  }
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getArgOperand(0)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getArgOperand(0));

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
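    // where c is the difference of the two bit widths.  For example, for
    // x : i32 truncated to i16, c is 16: both forms produce the top two
    // bytes of x in their original order.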
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getArgOperand(0))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getArgOperand(0), CV);
          return new TruncInst(V, TI->getType());
        }
    }
    break;
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
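    // For example, if the low three bits of x are known to be 0b100, then
    // cttz(x) is known to be 2 regardless of the remaining bits.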
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));
    break;
  }
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));
    break;
  }
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt Mask = APInt::getSignBit(BitWidth);
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
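        // (Each operand is at least 2^(BitWidth-1), so the sum is at least
        // 2^BitWidth and cannot fit in BitWidth bits.)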
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getTrue(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  }
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getArgOperand(0)) ||
        isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X * 0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::x86_sse_loadu_ps:
  case Intrinsic::x86_sse2_loadu_pd:
  case Intrinsic::x86_sse2_loadu_dq:
    // Turn PPC lvx     -> load if the pointer is known aligned.
    // Turn X86 loadups -> load if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(1), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
      return new StoreInst(II->getArgOperand(1), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvttss2si: {
    // This intrinsic only demands the 0th element of its input vector.  If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                              DemandedElts, UndefElts)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
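    // vperm selects each of the 16 result bytes out of the 32-byte
    // concatenation of V1 and V2, using the low five bits of the
    // corresponding mask byte as the index; with a constant mask this
    // reduces to a chain of extractelement/insertelement operations.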
    if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getArgOperand(2))) {
      assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        if (!isa<ConstantInt>(Mask->getOperand(i)) &&
            !isa<UndefValue>(Mask->getOperand(i))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getOperand(i)))
            continue;
          unsigned Idx = cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.

          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                  ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                   Idx&15, false), "tmp");
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                         ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                          i, false), "tmp");
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;

  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore.  This can
    // happen when variable allocas are DCE'd.
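    // For example (illustrative IR), in
    //   %ss = call i8* @llvm.stacksave()
    //   call void @llvm.stackrestore(i8* %ss)
    // the restore has no effect and can be removed.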
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return/unwind block and if there are no
    // allocas or calls between the restore and the return, nuke the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
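/// For example (illustrative), an i32* argument bitcast to i8* and passed
/// through the varargs area carries the same pointer value either way, so the
/// cast is dead.  ByVal arguments are the exception handled below, since their
/// size is derived from the pointee type.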
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const unsigned ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size.  If the size were
  // passed explicitly we could avoid this check.
  if (!CS.paramHasAttr(ix, Attribute::ByVal))
    return true;

  const Type* SrcTy =
    cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}

namespace {
class InstCombineFortifiedLibCalls : public SimplifyFortifiedLibCalls {
  InstCombiner *IC;
protected:
  void replaceCall(Value *With) {
    NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
  }
  bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
    if (ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getOperand(SizeCIOp))) {
      if (SizeCI->isAllOnesValue())
        return true;
      if (isString)
        return SizeCI->getZExtValue() >=
               GetStringLength(CI->getOperand(SizeArgOp));
      if (ConstantInt *Arg = dyn_cast<ConstantInt>(CI->getOperand(SizeArgOp)))
        return SizeCI->getZExtValue() >= Arg->getZExtValue();
    }
    return false;
  }
public:
  InstCombineFortifiedLibCalls(InstCombiner *IC) : IC(IC), NewInstruction(0) { }
  Instruction *NewInstruction;
};
} // end anonymous namespace

// Try to fold some different types of calls here.
// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
  if (CI->getCalledFunction() == 0) return 0;

  InstCombineFortifiedLibCalls Simplifier(this);
  Simplifier.fold(CI, TD);
  return Simplifier.NewInstruction;
}

// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a constexpr cast of a function, attempt to move the cast
  // to the arguments of the call/invoke.
  if (transformConstExprCastCall(CS)) return 0;

  Value *Callee = CS.getCalledValue();

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body.  A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG, just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setCalledFunction(
                                   Constant::getNullValue(CalleeF->getType()));
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it.  We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                  UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      CS.getInstruction()->
        replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
    if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
      if (In->getIntrinsicID() == Intrinsic::init_trampoline)
        return transformCallThroughTrampoline(CS);

  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible; we require TargetData for most of
  // this.  None of these calls are seen as possibly dead so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, TD);
    // If we changed something, return the result; otherwise let the
    // fallthrough check run.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : 0;
}

// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
  ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
  if (CE->getOpcode() != Instruction::BitCast ||
      !isa<Function>(CE->getOperand(0)))
    return false;
  Function *Callee = cast<Function>(CE->getOperand(0));
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type.  Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments cast to the appropriate types.
  //
  const FunctionType *FT = Callee->getFunctionType();
  const Type *OldRetTy = Caller->getType();
  const Type *NewRetTy = FT->getReturnType();

  if (NewRetTy->isStructTy())
    return false; // TODO: Handle multiple return values.

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or
        // from a pointer to an integer of the same size.
        !((OldRetTy->isPointerTy() || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (NewRetTy->isPointerTy() || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;   // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;   // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used
    // by a PHI node in a successor, we cannot change the return type of the
    // call because there is no place to put the cast instruction (without
    // breaking the critical edge).  Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    const Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    if (CallerPAL.getParamAttributes(i + 1)
        & Attribute::typeIncompatible(ParamTy))
      return false;   // Attribute not compatible with transformed value.

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((ParamTy->isPointerTy() ||
               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (ActTy->isPointerTy() ||
               ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }

  if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
      Callee->isDeclaration())
    return false;   // Do not delete arguments unless we have a function body.

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them.  Check that these extra arguments have
    // attributes that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary...
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes.  Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        const Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }

  if (Attributes FnAttrs = CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
                            Args.begin(), Args.end(),
                            Caller->getName(), Caller);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    NC = CallInst::Create(Callee, Args.begin(), Args.end(),
                          Caller->getName(), Caller);
    CallInst *CI = cast<CallInst>(Caller);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
                                                            OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call: just insert the cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    Caller->replaceAllUsesWith(NV);

  EraseInstFromFunction(*Caller);
  return true;
}

// transformCallThroughTrampoline - Turn a call to a function created by the
// init_trampoline intrinsic into a direct call to the underlying function.
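//
// A trampoline binds a hidden 'nest' (static chain) pointer to a function;
// calling through the trampoline is equivalent to calling the underlying
// function directly with the chain value inserted at the parameter marked
// 'nest'.
//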
Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
  Value *Callee = CS.getCalledValue();
  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  IntrinsicInst *Tramp =
    cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));

  Function *NestF = cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    const Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it.  Likewise for attributes.

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back
              (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<const Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call.  Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs.begin(), NewArgs.end(),
                                       Caller->getName(), Caller);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
                                     Caller->getName(), Caller);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }
      if (!Caller->getType()->isVoidTy())
        Caller->replaceAllUsesWith(NewCaller);
      // Remove the caller from the worklist before erasing it, so the
      // worklist never holds a dangling pointer.
      Worklist.Remove(Caller);
      Caller->eraseFromParent();
      return 0;
    }
  }

  // Replace the trampoline call with a direct call.  Since there is no 'nest'
  // parameter, there is no need to adjust the argument list.  Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}