//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//
#include "InstCombine.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static Type *getPromotedType(Type *Ty) {
  if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}
Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), TD);
  unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), TD);
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for the intrinsic.
  // See if the size is something we can handle with a single primitive
  // load/store. A single load+store correctly handles overlapping memory in
  // the memmove case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI; // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0; // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);
  // Memcpy forces the use of i8* for the source and destination. That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*. We'd much rather use a double load+store than an
  // i64 load+store here, because this improves the odds that the source or
  // dest address will be promotable. See if we can find a better type than
  // the integer datatype.
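  // For example (a sketch; value names illustrative):
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i32 8, i1 false)
  // where %d and %s were bitcast from double* becomes
  //   %v = load double* %sd, align 8
  //   store double %v, double* %dd, align 8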
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  if (StrippedDest != MI->getArgOperand(0)) {
    Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                                    ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
      // down through these levels if so.
      while (!SrcETy->isSingleValueType()) {
        if (StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
      }
    }
  }
  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile());
  L->setAlignment(SrcAlign);
  StoreInst *S = Builder->CreateStore(L, Dest, MI->isVolatile());
  S->setAlignment(DstAlign);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = getKnownAlignment(MI->getDest(), TD);
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op.
  if (Len == 0) return MI; // memset(d,c,0,a) -> noop

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
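  // For example (a sketch; the constant assumes a fill byte of 0x07):
  //   call void @llvm.memset.p0i8.i64(i8* %p, i8 7, i64 4, i32 4, i1 false)
  // becomes
  //   store i32 117901063, i32* %p, align 4   ; 117901063 == 0x07070707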
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len*8); // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder->CreateBitCast(Dest, NewDstPtrTy);

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    StoreInst *S = Builder->CreateStore(ConstantInt::get(ITy, Fill), Dest,
                                        MI->isVolatile());
    S->setAlignment(Alignment);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}

/// computeAllocSize - Compute the object size allocated by an allocation
/// site. Returns 0 if the size is not constant (in SizeValue), 1 if the size
/// is constant (in Size), and 2 if the size could not be determined within the
/// given maximum Penalty that the computation would incur at run-time.
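///
/// For example (illustrative, not compiler output): "alloca [10 x i8]" yields
/// 1 with Size == 10; "alloca i8, i32 %n" has no constant size, so (Penalty
/// permitting) it yields 0 and emits a run-time multiply into SizeValue; an
/// external global with no definitive initializer yields 2.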
static int computeAllocSize(Value *Alloc, uint64_t &Size, Value* &SizeValue,
                            uint64_t Penalty, TargetData *TD,
                            InstCombiner::BuilderTy *Builder) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Alloc)) {
    if (GV->hasDefinitiveInitializer()) {
      Constant *C = GV->getInitializer();
      Size = TD->getTypeAllocSize(C->getType());
      return 1;
    }
    // Can't determine size of the GV.
    return 2;

  } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Alloc)) {
    if (!AI->getAllocatedType()->isSized())
      return 2;

    Size = TD->getTypeAllocSize(AI->getAllocatedType());
    if (!AI->isArrayAllocation())
      return 1; // we are done

    Value *ArraySize = AI->getArraySize();
    if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
      Size *= C->getZExtValue();
      return 1;
    }

    // Computing the size at run-time costs a multiply; respect the Penalty.
    if (Penalty < 2)
      return 2;

    SizeValue = ConstantInt::get(ArraySize->getType(), Size);
    SizeValue = Builder->CreateMul(SizeValue, ArraySize);
    return 0;

  } else if (CallInst *MI = extractMallocCall(Alloc)) {
    SizeValue = MI->getArgOperand(0);
    if (ConstantInt *CI = dyn_cast<ConstantInt>(SizeValue)) {
      Size = CI->getZExtValue();
      return 1;
    }
    return 0;

  } else if (CallInst *MI = extractCallocCall(Alloc)) {
    Value *Arg1 = MI->getArgOperand(0);
    Value *Arg2 = MI->getArgOperand(1);
    if (ConstantInt *CI1 = dyn_cast<ConstantInt>(Arg1)) {
      if (ConstantInt *CI2 = dyn_cast<ConstantInt>(Arg2)) {
        Size = (CI1->getValue() * CI2->getValue()).getZExtValue();
        return 1;
      }
    }

    // Computing the size at run-time costs a multiply; respect the Penalty.
    if (Penalty < 2)
      return 2;

    SizeValue = Builder->CreateMul(Arg1, Arg2);
    return 0;
  }
  DEBUG(errs() << "computeAllocSize failed:\n");
  DEBUG(Alloc->dump());
  return 2;
}

/// visitCallInst - CallInst simplification. This mostly only handles folding
/// of intrinsic instructions. For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI))
    return visitFree(CI);
  if (extractMallocCall(&CI) || extractCallocCall(&CI))
    return visitMalloc(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // call itself is not.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);
  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations. We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // No other transformations apply to volatile transfers.
    if (MI->isVolatile())
      return 0;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
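    // For example (a sketch): "memmove(%dst, @str, %n)" where @str is a
    // constant global can be rewritten as the equivalent memcpy, since the
    // bytes of a constant global can never also be written through %dst.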
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                           CI.getArgOperand(1)->getType(),
                           CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }
  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
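    // For example (a sketch; names and offsets illustrative):
    //   %a = alloca [10 x i8]
    //   %p = getelementptr inbounds [10 x i8]* %a, i32 0, i32 3
    //   %n = call i32 @llvm.objectsize.i32(i8* %p, ...)
    // folds to the constant i32 7 (Size 10 minus constant GEP offset 3).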
    // We need target data for just about everything so depend on it.
    if (!TD) return 0;

    Type *ReturnTy = CI.getType();
    uint64_t Penalty = cast<ConstantInt>(II->getArgOperand(2))->getZExtValue();

    // Get to the real allocated thing and offset as fast as possible.
    Value *Op1 = II->getArgOperand(0)->stripPointerCasts();

    GEPOperator *GEP = 0;
    if ((GEP = dyn_cast<GEPOperator>(Op1))) {
      // Check if we will be able to get the offset.
      if (!GEP->hasAllConstantIndices() && Penalty < 2)
        return 0;
      Op1 = GEP->getPointerOperand()->stripPointerCasts();
    }

    uint64_t Size;
    Value *SizeValue = 0;
    int ConstAlloc = computeAllocSize(Op1, Size, SizeValue, Penalty, TD,
                                      Builder);

    // Do not return "I don't know" here. Later optimization passes could
    // make it possible to evaluate objectsize to a constant.
    if (ConstAlloc == 2)
      return 0;

    uint64_t Offset = 0;
    Value *OffsetValue = 0;

    if (GEP) {
      if (GEP->hasAllConstantIndices()) {
        SmallVector<Value*, 8> Ops(GEP->idx_begin(), GEP->idx_end());
        assert(GEP->getPointerOperandType()->isPointerTy());
        Offset = TD->getIndexedOffset(GEP->getPointerOperandType(), Ops);
      } else
        OffsetValue = EmitGEPOffset(GEP, true /*NoNUW*/);
    }

    if (!OffsetValue && ConstAlloc) {
      if (Size < Offset) {
        // Out of bounds.
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, 0));
      }
      return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, Size-Offset));
    }

    if (!OffsetValue)
      OffsetValue = ConstantInt::get(ReturnTy, Offset);
    if (ConstAlloc)
      SizeValue = ConstantInt::get(ReturnTy, Size);

    Value *Val = Builder->CreateSub(SizeValue, OffsetValue);
    // Return 0 if there's an overflow.
    Value *Cmp = Builder->CreateICmpULT(SizeValue, OffsetValue);
    Val = Builder->CreateSelect(Cmp, ConstantInt::get(ReturnTy, 0), Val);
    return ReplaceInstUsesWith(CI, Val);
  }

  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getArgOperand(0)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getArgOperand(0));

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
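    // e.g. with x: i32 truncated to i16, c is 16:
    //   bswap.i16(trunc(bswap.i32(x))) == trunc(lshr(x, 16))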
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getArgOperand(0))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getArgOperand(0), CV);
          return new TruncInst(V, TI->getType());
        }
    }
    break;

  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;

  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
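    // e.g. if bit 4 of the operand is known one and bits 0-3 are known zero,
    // cttz folds to the constant 4.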
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT) break;

    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));
    }
    break;

  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT) break;

    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));
    }
    break;

  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
        // Create a simple add instruction, and insert it into the struct.
        Value *Add = Builder->CreateAdd(LHS, RHS);
        Add->takeName(&CI);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getTrue(II->getContext())
        };
        StructType *ST = cast<StructType>(II->getType());
        Constant *Struct = ConstantStruct::get(ST, V);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
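        // For example (a sketch; value names illustrative):
        //   %r = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
        // with both sign bits known clear becomes
        //   %sum = add nuw i32 %a, %b
        //   %r = insertvalue {i32, i1} {i32 undef, i1 false}, i32 %sum, 0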
        Value *Add = Builder->CreateNUWAdd(LHS, RHS);
        Add->takeName(&CI);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        StructType *ST = cast<StructType>(II->getType());
        Constant *Struct = ConstantStruct::get(ST, V);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  }
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;

  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getArgOperand(0)) ||
        isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;

  case Intrinsic::umul_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    unsigned BitWidth = cast<IntegerType>(LHS->getType())->getBitWidth();

    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, LHSKnownZero, LHSKnownOne);
    APInt RHSKnownZero(BitWidth, 0);
    APInt RHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(RHS, RHSKnownZero, RHSKnownOne);

    // Get the largest possible values for each operand.
    APInt LHSMax = ~LHSKnownZero;
    APInt RHSMax = ~RHSKnownZero;

    // If multiplying the maximum values does not overflow then we can turn
    // this into a plain NUW mul.
    bool Overflow;
    LHSMax.umul_ov(RHSMax, Overflow);
    if (!Overflow) {
      Value *Mul = Builder->CreateNUWMul(LHS, RHS, "umul_with_overflow");
      Constant *V[] = {
        UndefValue::get(LHS->getType()),
        Builder->getFalse()
      };
      Constant *Struct = ConstantStruct::get(cast<StructType>(II->getType()),V);
      return InsertValueInst::Create(Struct, Mul, 0);
    }
  } // FALL THROUGH
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X*0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;

  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
    // Turn PPC lvx -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, TD) >= 16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
      return new StoreInst(II->getArgOperand(1), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64: {
    // These intrinsics only demand the 0th element of their input vectors. If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                              DemandedElts, UndefElts)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }

  case Intrinsic::x86_sse41_pmovsxbw:
  case Intrinsic::x86_sse41_pmovsxwd:
  case Intrinsic::x86_sse41_pmovsxdq:
  case Intrinsic::x86_sse41_pmovzxbw:
  case Intrinsic::x86_sse41_pmovzxwd:
  case Intrinsic::x86_sse41_pmovzxdq: {
    // pmov{s|z}x ignores the upper half of its input vector.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    unsigned LowHalfElts = VWidth / 2;
    APInt InputDemandedElts(APInt::getBitsSet(VWidth, 0, LowHalfElts));
    APInt UndefElts(VWidth, 0);
    if (Value *TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                                 InputDemandedElts,
                                                 UndefElts)) {
      II->setArgOperand(0, TmpV);
      return II;
    }
    break;
  }

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
      assert(Mask->getType()->getVectorNumElements() == 16 &&
             "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        Constant *Elt = Mask->getAggregateElement(i);
        if (Elt == 0 ||
            !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getAggregateElement(i)))
            continue;
          unsigned Idx =
            cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
          Idx &= 31; // Match the hardware behavior.
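          // e.g. (a sketch) a mask byte of 17 selects byte 1 of Op1
          // (17 >= 16 picks Op1, 17 & 15 == 1), while a mask byte of 3
          // selects byte 3 of Op0.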
          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                                            Builder->getInt32(Idx&15));
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                                                Builder->getInt32(i));
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;

  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), TD);
    unsigned AlignArg = II->getNumArgOperands() - 1;
    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
      II->setArgOperand(AlignArg,
                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                         MemAlign, false));
      return II;
    }
    break;
  }

  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // Handle mul by zero first:
    if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
      return ReplaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
    }

    // Check for constant LHS & RHS - in this case we just simplify.
    bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu);
    VectorType *NewVT = cast<VectorType>(II->getType());
    unsigned NewWidth = NewVT->getElementType()->getIntegerBitWidth();
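    // For example (a sketch): vmulls on two <8 x i8> operands yields
    // <8 x i16>; each element pair is sign-extended to i16 and multiplied
    // (zero-extended for vmullu).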
    if (ConstantDataVector *CV0 = dyn_cast<ConstantDataVector>(Arg0)) {
      if (ConstantDataVector *CV1 = dyn_cast<ConstantDataVector>(Arg1)) {
        VectorType* VT = cast<VectorType>(CV0->getType());
        SmallVector<Constant*, 4> NewElems;
        for (unsigned i = 0; i < VT->getNumElements(); ++i) {
          APInt CV0E =
            (cast<ConstantInt>(CV0->getAggregateElement(i)))->getValue();
          CV0E = Zext ? CV0E.zext(NewWidth) : CV0E.sext(NewWidth);
          APInt CV1E =
            (cast<ConstantInt>(CV1->getAggregateElement(i)))->getValue();
          CV1E = Zext ? CV1E.zext(NewWidth) : CV1E.sext(NewWidth);
          NewElems.push_back(
            ConstantInt::get(NewVT->getElementType(), CV0E * CV1E));
        }
        return ReplaceInstUsesWith(CI, ConstantVector::get(NewElems));
      }

      // Couldn't simplify - canonicalize constant to the RHS.
      std::swap(Arg0, Arg1);
    }

    // Handle mul by one:
    if (ConstantDataVector *CV1 = dyn_cast<ConstantDataVector>(Arg1)) {
      if (ConstantInt *Splat =
            dyn_cast_or_null<ConstantInt>(CV1->getSplatValue())) {
        if (Splat->isOne()) {
          if (Zext)
            return CastInst::CreateZExtOrBitCast(Arg0, II->getType());
          // else
          return CastInst::CreateSExtOrBitCast(Arg0, II->getType());
        }
      }
    }
    break;
  }

  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore. This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return, resume, or unwind block and if there
    // are no allocas or calls between the restore and the return, nuke the
    // restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const unsigned ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size. If the size were
  // passed explicitly we could avoid this check.
  if (!CS.isByValArgument(ix))
    return true;

  Type* SrcTy =
    cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}

namespace {
class InstCombineFortifiedLibCalls : public SimplifyFortifiedLibCalls {
  InstCombiner *IC;
protected:
  void replaceCall(Value *With) {
    NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
  }
  bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
    if (CI->getArgOperand(SizeCIOp) == CI->getArgOperand(SizeArgOp))
      return true;
    if (ConstantInt *SizeCI =
                           dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp))) {
      if (SizeCI->isAllOnesValue())
        return true;
      if (isString) {
        uint64_t Len = GetStringLength(CI->getArgOperand(SizeArgOp));
        // If the length is 0 we don't know how long it is and so we can't
        // remove the check.
        if (Len == 0) return false;
        return SizeCI->getZExtValue() >= Len;
      }
      if (ConstantInt *Arg = dyn_cast<ConstantInt>(
                                                  CI->getArgOperand(SizeArgOp)))
        return SizeCI->getZExtValue() >= Arg->getZExtValue();
    }
    return false;
  }
public:
  InstCombineFortifiedLibCalls(InstCombiner *IC) : IC(IC), NewInstruction(0) { }
  Instruction *NewInstruction;
};
} // end anonymous namespace

// Try to fold some different kinds of calls here.
// Currently we're only working with the checking functions: memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
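//
// For example (a sketch): a call "__memcpy_chk(dst, src, 4, 8)" whose known
// object size (8) is >= the copy length (4) can be simplified to a plain
// memcpy, dropping the run-time check.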
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
  if (CI->getCalledFunction() == 0) return 0;

  InstCombineFortifiedLibCalls Simplifier(this);
  Simplifier.fold(CI, TD);
  return Simplifier.NewInstruction;
}

static IntrinsicInst *FindInitTrampolineFromAlloca(Value *TrampMem) {
  // Strip off at most one level of pointer casts, looking for an alloca. This
  // is good enough in practice and simpler than handling any number of casts.
  Value *Underlying = TrampMem->stripPointerCasts();
  if (Underlying != TrampMem &&
      (!Underlying->hasOneUse() || *Underlying->use_begin() != TrampMem))
    return 0;
  if (!isa<AllocaInst>(Underlying))
    return 0;

  IntrinsicInst *InitTrampoline = 0;
  for (Value::use_iterator I = TrampMem->use_begin(), E = TrampMem->use_end();
       I != E; I++) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(*I);
    if (!II)
      return 0;
    if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
      if (InitTrampoline)
        // More than one init_trampoline writes to this value. Give up.
        return 0;
      InitTrampoline = II;
      continue;
    }
    if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
      // Allow any number of calls to adjust.trampoline.
      continue;
    return 0;
  }

  // No call to init.trampoline found.
  if (!InitTrampoline)
    return 0;

  // Check that the alloca is being used in the expected way.
  if (InitTrampoline->getOperand(0) != TrampMem)
    return 0;

  return InitTrampoline;
}

static IntrinsicInst *FindInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
                                               Value *TrampMem) {
  // Visit all the previous instructions in the basic block, and try to find an
  // init.trampoline which has a direct path to the adjust.trampoline.
  for (BasicBlock::iterator I = AdjustTramp,
       E = AdjustTramp->getParent()->begin(); I != E; ) {
    Instruction *Inst = --I;
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
      if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
          II->getOperand(0) == TrampMem)
        return II;
    if (Inst->mayWriteToMemory())
      return 0;
  }
  return 0;
}

// Given a call to llvm.adjust.trampoline, find and return the corresponding
// call to llvm.init.trampoline if the call to the trampoline can be optimized
// to a direct call to a function. Otherwise return NULL.
//
static IntrinsicInst *FindInitTrampoline(Value *Callee) {
  Callee = Callee->stripPointerCasts();
  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
  if (!AdjustTramp ||
      AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
    return 0;

  Value *TrampMem = AdjustTramp->getOperand(0);

  if (IntrinsicInst *IT = FindInitTrampolineFromAlloca(TrampMem))
    return IT;
  if (IntrinsicInst *IT = FindInitTrampolineFromBB(AdjustTramp, TrampMem))
    return IT;
  return 0;
}

// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a pointer to a function, attempt to move any casts to the
  // arguments of the call/invoke.
  Value *Callee = CS.getCalledValue();
  if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
    return 0;

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body. A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        ReplaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG, just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setCalledFunction(
                                    Constant::getNullValue(CalleeF->getType()));
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it. We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                  UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      ReplaceInstUsesWith(*CS.getInstruction(),
                          UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (IntrinsicInst *II = FindInitTrampoline(Callee))
    return transformCallThroughTrampoline(CS, II);

  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams();
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible; we require TargetData for most of
  // this. None of these calls are seen as possibly dead so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, TD);
    // If we changed something, return the result; otherwise fall through to
    // the common exit below.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : 0;
}

// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
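//
// For example (a sketch; value names illustrative):
//   %r = call i32 bitcast (i32 (i8*)* @f to i32 (i32*)*)(i32* %p)
// can become
//   %c = bitcast i32* %p to i8*
//   %r = call i32 @f(i8* %c)
//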
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  Function *Callee =
    dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
  if (Callee == 0)
    return false;
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type. Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments casted to the appropriate types.
  //
  FunctionType *FT = Callee->getFunctionType();
  Type *OldRetTy = Caller->getType();
  Type *NewRetTy = FT->getReturnType();

  if (NewRetTy->isStructTy())
    return false; // TODO: Handle multiple return values.
  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or from
        // a pointer to an integer of the same size.
        !((OldRetTy->isPointerTy() || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (NewRetTy->isPointerTy() || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;   // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;   // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used by
    // a PHI node in a successor, we cannot change the return type of the call
    // because there is no place to put the cast instruction (without breaking
    // the critical edge). Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }
  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    Attributes Attrs = CallerPAL.getParamAttributes(i + 1);
    if (Attrs & Attribute::typeIncompatible(ParamTy))
      return false;   // Attribute not compatible with transformed value.

    // If the parameter is passed as a byval argument, then we have to have a
    // sized type and the sized type has to have the same size as the old type.
    if (ParamTy != ActTy && (Attrs & Attribute::ByVal)) {
      PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
      if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || TD == 0)
        return false;

      Type *CurElTy = cast<PointerType>(ActTy)->getElementType();
      if (TD->getTypeAllocSize(CurElTy) !=
          TD->getTypeAllocSize(ParamPTy->getElementType()))
        return false;
    }

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((ParamTy->isPointerTy() ||
               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (ActTy->isPointerTy() ||
               ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }
  if (Callee->isDeclaration()) {
    // Do not delete arguments unless we have a function body.
    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
      return false;

    // If the callee is just a declaration, don't change the varargsness of the
    // call. We don't want to introduce a varargs call where one doesn't
    // already exist.
    PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
    if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
      return false;

    // If both the callee and the cast type are varargs, we still have to make
    // sure the number of fixed parameters are the same or we have the same
    // ABI issues as if we introduce a varargs call.
    if (FT->isVarArg() &&
        cast<FunctionType>(APTy->getElementType())->isVarArg() &&
        FT->getNumParams() !=
        cast<FunctionType>(APTy->getElementType())->getNumParams())
      return false;
  }

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them. Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }
  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary.
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes. Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }

  if (Attributes FnAttrs = CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = Builder->CreateInvoke(Callee, II->getNormalDest(),
                               II->getUnwindDest(), Args);
    NC->takeName(II);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    CallInst *CI = cast<CallInst>(Caller);
    NC = Builder->CreateCall(Callee, Args);
    NC->takeName(CI);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode =
        CastInst::getCastOpcode(NC, false, OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy);
      NC->setDebugLoc(Caller->getDebugLoc());

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call, just insert cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    ReplaceInstUsesWith(*Caller, NV);

  EraseInstFromFunction(*Caller);
  return true;
}

// transformCallThroughTrampoline - Turn a call to a function created by
// init_trampoline / adjust_trampoline intrinsic pair into a direct call to the
// underlying function.
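//
// For example (a sketch, with @f taking its chain in an i8* 'nest' parameter):
//   call void @llvm.init.trampoline(i8* %tramp,
//                i8* bitcast (void (i8*, i32)* @f to i8*), i8* %nval)
//   %p = call i8* @llvm.adjust.trampoline(i8* %tramp)
//   %fp = bitcast i8* %p to void (i32)*
//   call void %fp(i32 7)
// becomes the direct call "call void @f(i8* %nval, i32 7)".
//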
Instruction *
InstCombiner::transformCallThroughTrampoline(CallSite CS,
                                             IntrinsicInst *Tramp) {
  Value *Callee = CS.getCalledValue();
  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  assert(Tramp &&
         "transformCallThroughTrampoline called with incorrect CallSite.");

  Function *NestF =
    cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it. Likewise for attributes.

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = Builder->CreateBitCast(NestVal, NestTy, "nest");
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back
              (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call. Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }

      return NewCaller;
    }
  }

  // Replace the trampoline call with a direct call. Since there is no 'nest'
  // parameter, there is no need to adjust the argument list. Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}