//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//
#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
using namespace llvm;
/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static const Type *getPromotedType(const Type *Ty) {
  if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}
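// For example, an i8 or i16 argument passed through "..." is widened to i32,
// matching the usual C vararg promotion rules:
//   getPromotedType(i8)  --> i32
//   getPromotedType(i16) --> i32
//   getPromotedType(i64) --> i64   (unchanged; only sub-i32 integers promote)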
/// EnforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign. This isn't
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
///
static unsigned EnforceKnownAlignment(Value *V,
                                      unsigned Align, unsigned PrefAlign) {

  User *U = dyn_cast<User>(V);
  if (!U) return Align;

  switch (Operator::getOpcode(U)) {
  default: break;
  case Instruction::BitCast:
    return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
  case Instruction::GetElementPtr: {
    // If all indexes are zero, it is just the alignment of the base pointer.
    bool AllZeroOperands = true;
    for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
      if (!isa<Constant>(*i) ||
          !cast<Constant>(*i)->isNullValue()) {
        AllZeroOperands = false;
        break;
      }

    if (AllZeroOperands) {
      // Treat this like a bitcast.
      return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
    }
    break;
  }
  case Instruction::Alloca: {
    AllocaInst *AI = cast<AllocaInst>(V);
    // If there is a requested alignment and if this is an alloca, round up.
    if (AI->getAlignment() >= PrefAlign)
      return AI->getAlignment();
    AI->setAlignment(PrefAlign);
    return PrefAlign;
  }
  }

  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // If there is a large requested alignment and we can, bump up the
    // alignment of the global.
    if (GV->isDeclaration()) return Align;

    if (GV->getAlignment() >= PrefAlign)
      return GV->getAlignment();
    // We can only increase the alignment of the global if it has no alignment
    // specified or if it is not assigned a section. If it is assigned a
    // section, the global could be densely packed with other objects in the
    // section, and increasing the alignment could cause padding issues.
    if (!GV->hasSection() || GV->getAlignment() == 0)
      GV->setAlignment(PrefAlign);
    return GV->getAlignment();
  }

  return Align;
}
/// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0. If PrefAlign is specified,
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
                                                  unsigned PrefAlign) {
  assert(V->getType()->isPointerTy() &&
         "GetOrEnforceKnownAlignment expects a pointer!");
  unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 64;
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
  unsigned TrailZ = KnownZero.countTrailingOnes();

  // Avoid trouble with ridiculously large TrailZ values, such as
  // those computed from a null pointer.
  TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT - 1));

  unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);

  // LLVM doesn't support alignments larger than this currently.
  Align = std::min(Align, +Value::MaximumAlignment);

  if (PrefAlign > Align)
    Align = EnforceKnownAlignment(V, Align, PrefAlign);

  // We don't need to make any adjustment.
  return Align;
}
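// The known-bits trick above: if the low N bits of a pointer are known to be
// zero, the pointer is at least (1 << N)-byte aligned. For example, a pointer
// formed as "getelementptr i8* %base, i64 8" from a 16-byte-aligned %base has
// its low 3 bits known zero, so an alignment of 8 can be reported.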
Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getArgOperand(0));
  unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getArgOperand(1));
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for intrinsic.  See
  // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  const IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // Memcpy forces the use of i8* for the source and destination.  That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*.  We'd much rather use a double load+store than an
  // i64 load+store here, because this improves the odds that the source or
  // dest address will be promotable.  See if we can find a better type than
  // the integer datatype.
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  if (StrippedDest != MI->getArgOperand(0)) {
    const Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                                                        ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double].  Rip
      // down through these levels if so.
      while (!SrcETy->isSingleValueType()) {
        if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
      }
    }
  }

  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  Instruction *L = new LoadInst(Src, "tmp", MI->isVolatile(), SrcAlign);
  InsertNewInstBefore(L, *MI);
  InsertNewInstBefore(new StoreInst(L, Dest, MI->isVolatile(), DstAlign),
                      *MI);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}
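// Illustrative IR for the transform above (a sketch, not from this file):
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i32 8, i1 false)
// becomes, when the pointers are really double*:
//   %v = load double* %s, align 8
//   store double %v, double* %d, align 8
// and the memcpy's length is zeroed so a later iteration erases it.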
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op.
  if (Len == 0) return MI;  // memset(d,c,0,a) -> noop

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    const Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder->CreateBitCast(Dest, NewDstPtrTy);

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
                                      Dest, false, Alignment), *MI);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}
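// Illustrative IR (a sketch): a memset of 4 bytes with constant fill 1 becomes
//   store i32 16843009, i32* %p, align 1    ; 16843009 == 0x01010101
// i.e. the byte fill value replicated by the 0x0101010101010101 multiply.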
/// visitCallInst - CallInst simplification.  This mostly only handles folding
/// of intrinsic instructions.  For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI))
    return visitFree(CI);
  if (isMalloc(&CI))
    return visitMalloc(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations.  We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // No other transformations apply to volatile transfers.
    if (MI->isVolatile())
      return 0;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          const Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                                 CI.getArgOperand(1)->getType(),
                                 CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys, 3));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }
  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    // We need target data for just about everything so depend on it.
    if (!TD) break;

    const Type *ReturnTy = CI.getType();
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);

    // Get to the real allocated thing and offset as fast as possible.
    Value *Op1 = II->getArgOperand(0)->stripPointerCasts();

    // If we've stripped down to a single global variable that we
    // can know the size of then just return that.
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
      if (GV->hasDefinitiveInitializer()) {
        Constant *C = GV->getInitializer();
        uint64_t GlobalSize = TD->getTypeAllocSize(C->getType());
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, GlobalSize));
      } else {
        // Can't determine size of the GV.
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
      // Get alloca size.
      if (AI->getAllocatedType()->isSized()) {
        uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
        if (AI->isArrayAllocation()) {
          const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
          if (!C) break;
          AllocaSize *= C->getZExtValue();
        }
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, AllocaSize));
      }
    } else if (CallInst *MI = extractMallocCall(Op1)) {
      const Type* MallocType = getMallocAllocatedType(MI);
      // Get the malloc'd object size, if known.
      if (MallocType && MallocType->isSized()) {
        if (Value *NElems = getMallocArraySize(MI, TD, true)) {
          if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
            return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy,
              (NElements->getZExtValue() * TD->getTypeAllocSize(MallocType))));
        }
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op1)) {
      // Only handle constant GEPs here.
      if (CE->getOpcode() != Instruction::GetElementPtr) break;
      GEPOperator *GEP = cast<GEPOperator>(CE);

      // Make sure we're not a constant offset from an external
      // global.
      Value *Operand = GEP->getPointerOperand();
      Operand = Operand->stripPointerCasts();
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Operand))
        if (!GV->hasDefinitiveInitializer()) break;

      // Get what we're pointing to and its size.
      const PointerType *BaseType =
        cast<PointerType>(Operand->getType());
      uint64_t Size = TD->getTypeAllocSize(BaseType->getElementType());

      // Get the current byte offset into the thing.  Use the original
      // operand in case we're looking through a bitcast.
      SmallVector<Value*, 8> Ops(CE->op_begin()+1, CE->op_end());
      const PointerType *OffsetType =
        cast<PointerType>(GEP->getPointerOperand()->getType());
      uint64_t Offset = TD->getIndexedOffset(OffsetType, &Ops[0], Ops.size());

      if (Size < Offset) {
        // Out of bound reference? Negative index normalized to large
        // index? Just return "I don't know".
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }

      Constant *RetVal = ConstantInt::get(ReturnTy, Size-Offset);
      return ReplaceInstUsesWith(CI, RetVal);
    }

    // Do not return "I don't know" here.  Later optimization passes could
    // make it possible to evaluate objectsize to a constant.
    break;
  }
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getArgOperand(0)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getArgOperand(0));

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getArgOperand(0))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getArgOperand(0), CV);
          return new TruncInst(V, TI->getType());
        }
    }
    break;
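  // Worked example for the second fold (a sketch): with y : i32,
  // trunc(bswap(y)) to i16 holds the high two bytes of y in reversed order,
  // so the outer bswap puts them back in order:
  //   bswap(trunc(bswap(y))) == trunc(lshr(y, 16))
  // where 16 = 32 - 16 is the bit-width difference computed as C above.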
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));
    break;
  }
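  // For instance, if %x is known to be of the form (y << 4) | 8, bits 0-2
  // are known zero and bit 3 is known one, so the mask test above folds
  // cttz(%x) to the constant 3.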
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));
    break;
  }
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt Mask = APInt::getSignBit(BitWidth);
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),ConstantInt::getTrue(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  }
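  // These folds build on the {iN, i1} aggregate the intrinsic returns.
  // For example (a sketch):
  //   %r = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  // with both sign bits known set becomes a plain add inserted into field 0
  // of the constant struct {i32 undef, i1 true}.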
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getArgOperand(0)) ||
        isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X * 0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::x86_sse_loadu_ps:
  case Intrinsic::x86_sse2_loadu_pd:
  case Intrinsic::x86_sse2_loadu_dq:
    // Turn PPC lvx -> load if the pointer is known aligned.
    // Turn X86 loadups -> load if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                         PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(1), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
      return new StoreInst(II->getArgOperand(1), Ptr);
    }
    break;
  case Intrinsic::x86_sse_cvttss2si: {
    // This intrinsic only demands the 0th element of its input vector.  If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                              DemandedElts, UndefElts)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }
  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getArgOperand(2))) {
      assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        if (!isa<ConstantInt>(Mask->getOperand(i)) &&
            !isa<UndefValue>(Mask->getOperand(i))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getOperand(i)))
            continue;
          unsigned Idx=cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.

          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                  ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                   Idx&15, false), "tmp");
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                         ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                          i, false), "tmp");
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;
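  // The vperm mask selects bytes: indices 0-15 come from the first source
  // vector and 16-31 from the second.  With a constant identity mask such as
  // <16 x i8> <0, 1, 2, ..., 15>, the loop above simply re-materializes the
  // first operand out of extractelement/insertelement pairs, which later
  // passes can fold further.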
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    unsigned MemAlign = GetOrEnforceKnownAlignment(II->getArgOperand(0));
    unsigned AlignArg = II->getNumArgOperands() - 1;
    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
      II->setArgOperand(AlignArg,
                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                         MemAlign, false));
      return II;
    }
    break;
  }
  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore.  This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return/unwind block and if there are no
    // allocas or calls between the restore and the return, nuke the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}
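// Example of the stackrestore cleanup (a sketch): a back-to-back pair
//   %sp = call i8* @llvm.stacksave()
//   call void @llvm.stackrestore(i8* %sp)
// restores a stack pointer that nothing has modified, so the restore is
// erased; the same reasoning applies when only intrinsics sit between them.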
// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}
/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size.  If the size were
  // passed explicitly we could avoid this check.
  if (!CS.paramHasAttr(ix, Attribute::ByVal))
    return true;

  const Type* SrcTy =
    cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}
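// For instance, passing "(i8*)p" (where p is an i32*) through "..." is a
// lossless pointer-to-pointer cast, so the callee sees the same bits and the
// bitcast can be stripped.  A byval struct argument, whose copy size depends
// on the pointee type, only qualifies when both pointee types have the same
// allocation size.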
namespace {
class InstCombineFortifiedLibCalls : public SimplifyFortifiedLibCalls {
  InstCombiner *IC;
protected:
  void replaceCall(Value *With) {
    NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
  }
  bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
    if (ConstantInt *SizeCI =
                           dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp))) {
      if (SizeCI->isAllOnesValue())
        return true;
      if (isString)
        return SizeCI->getZExtValue() >=
               GetStringLength(CI->getArgOperand(SizeArgOp));
      if (ConstantInt *Arg = dyn_cast<ConstantInt>(
                                                  CI->getArgOperand(SizeArgOp)))
        return SizeCI->getZExtValue() >= Arg->getZExtValue();
    }
    return false;
  }
public:
  InstCombineFortifiedLibCalls(InstCombiner *IC) : IC(IC), NewInstruction(0) { }
  Instruction *NewInstruction;
};
} // end anonymous namespace
// Try to fold some different type of calls here.
// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
  if (CI->getCalledFunction() == 0) return 0;

  InstCombineFortifiedLibCalls Simplifier(this);
  Simplifier.fold(CI, TD);
  return Simplifier.NewInstruction;
}
// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a constexpr cast of a function, attempt to move the cast
  // to the arguments of the call/invoke.
  if (transformConstExprCastCall(CS)) return 0;

  Value *Callee = CS.getCalledValue();

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body.  A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                                  OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG, just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setCalledFunction(
                                    Constant::getNullValue(CalleeF->getType()));
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it.  We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
               UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      CS.getInstruction()->
        replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
    if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
      if (In->getIntrinsicID() == Intrinsic::init_trampoline)
        return transformCallThroughTrampoline(CS);

  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible, we require TargetData for most of
  // this.  None of these calls are seen as possibly dead so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, TD);
    // If we changed something, return the result; otherwise let the
    // fallthrough checks below run.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : 0;
}
// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
  ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
  if (CE->getOpcode() != Instruction::BitCast ||
      !isa<Function>(CE->getOperand(0)))
    return false;
  Function *Callee = cast<Function>(CE->getOperand(0));
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type.  Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments casted to the appropriate types.
  //
  const FunctionType *FT = Callee->getFunctionType();
  const Type *OldRetTy = Caller->getType();
  const Type *NewRetTy = FT->getReturnType();

  if (NewRetTy->isStructTy())
    return false; // TODO: Handle multiple return values.

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or
        // from a pointer to an integer of the same size.
        !((OldRetTy->isPointerTy() || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (NewRetTy->isPointerTy() || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;   // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;   // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used
    // by a PHI node in a successor, we cannot change the return type of the
    // call because there is no place to put the cast instruction (without
    // breaking the critical edge).  Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    const Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    if (CallerPAL.getParamAttributes(i + 1)
        & Attribute::typeIncompatible(ParamTy))
      return false;   // Attribute not compatible with transformed value.

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((ParamTy->isPointerTy() ||
               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (ActTy->isPointerTy() ||
               ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }

  if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
      Callee->isDeclaration())
    return false;   // Do not delete arguments unless we have a function body.

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them.  Check that these extra arguments have
    // attributes that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary...
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes.  Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        const Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }

  // Add any function attributes.
  if (Attributes FnAttrs = CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
                            Args.begin(), Args.end(),
                            Caller->getName(), Caller);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    NC = CallInst::Create(Callee, Args.begin(), Args.end(),
                          Caller->getName(), Caller);
    CallInst *CI = cast<CallInst>(Caller);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
                                                            OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call; just insert the cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    Caller->replaceAllUsesWith(NV);

  EraseInstFromFunction(*Caller);
  return true;
}
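// A minimal example of the transform (a sketch; the exact cast opcodes depend
// on TargetData):
//   declare i8* @f(i32)
//   %r = call i32 bitcast (i8* (i32)* @f to i32 (i8)*)(i8 %x)
// becomes a direct call with the casts moved onto the operands:
//   %a = zext i8 %x to i32          ; cast the argument to the real param type
//   %c = call i8* @f(i32 %a)
//   %r = ptrtoint i8* %c to i32     ; cast the result back to the old ret type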
// transformCallThroughTrampoline - Turn a call to a function created by the
// init_trampoline intrinsic into a direct call to the underlying function.
//
Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
  Value *Callee = CS.getCalledValue();
  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  IntrinsicInst *Tramp =
    cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));

  Function *NestF =cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    const Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it.  Likewise for attributes.

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back
              (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<const Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call.  Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs.begin(), NewArgs.end(),
                                       Caller->getName(), Caller);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
                                     Caller->getName(), Caller);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }

      if (!Caller->getType()->isVoidTy())
        Caller->replaceAllUsesWith(NewCaller);
      Caller->eraseFromParent();
      Worklist.Remove(Caller);
      return 0;
    }
  }

  // Replace the trampoline call with a direct call.  Since there is no 'nest'
  // parameter, there is no need to adjust the argument list.  Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}