1 //===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the auto-upgrade helper functions
12 //===----------------------------------------------------------------------===//
14 #include "llvm/AutoUpgrade.h"
15 #include "llvm/Constants.h"
16 #include "llvm/Function.h"
17 #include "llvm/LLVMContext.h"
18 #include "llvm/Module.h"
19 #include "llvm/IntrinsicInst.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/Support/CallSite.h"
22 #include "llvm/Support/ErrorHandling.h"
23 #include "llvm/Support/IRBuilder.h"
// UpgradeIntrinsicFunction1 - Detect declarations of old-style LLVM
// intrinsics and upgrade them: pure renames are done in place via
// F->setName(...), while type-changing upgrades create a replacement
// declaration in NewFn for UpgradeIntrinsicCall to retarget.  Returns true
// when an upgrade happened (the return statements themselves fall inside
// the elided ranges noted below).
//
// NOTE(review): this is a numbered excerpt with gaps.  Wherever the
// embedded line numbers jump (e.g. 39 -> 42, 53 -> 57, 111 -> 114),
// statements appear to have been elided -- presumably closing braces,
// "return false;"/"return true;" statements, and the
// F->setName("bad." + Name) uniquing calls the surrounding comments refer
// to.  Confirm against the full file before acting on this copy.
28 static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
29 assert(F && "Illegal to upgrade a non-existent Function.");
31 // Get the Function's name.
32 const std::string& Name = F->getName();
35 const FunctionType *FTy = F->getFunctionType();
37 // Quickly eliminate it, if it's not a candidate.
38 if (Name.length() <= 8 || Name[0] != 'l' || Name[1] != 'l' ||
39 Name[2] != 'v' || Name[3] != 'm' || Name[4] != '.')
// (gap 39->42: presumably the early "return false;" for names that do not
// start with "llvm.")
42 Module *M = F->getParent();
46 // This upgrades the llvm.atomic.lcs, llvm.atomic.las, llvm.atomic.lss,
47 // and atomics with default address spaces to their new names to their new
48 // function name (e.g. llvm.atomic.add.i32 => llvm.atomic.add.i32.p0i32)
49 if (Name.compare(5,7,"atomic.",7) == 0) {
50 if (Name.compare(12,3,"lcs",3) == 0) {
// "lcs" (load-compare-swap) becomes the overloaded cmp.swap name with an
// explicit address-space-0 pointer suffix.
51 std::string::size_type delim = Name.find('.',12);
52 F->setName("llvm.atomic.cmp.swap" + Name.substr(delim) +
53 ".p0" + Name.substr(delim+1));
57 else if (Name.compare(12,3,"las",3) == 0) {
58 std::string::size_type delim = Name.find('.',12);
59 F->setName("llvm.atomic.load.add"+Name.substr(delim)
60 + ".p0" + Name.substr(delim+1));
64 else if (Name.compare(12,3,"lss",3) == 0) {
65 std::string::size_type delim = Name.find('.',12);
66 F->setName("llvm.atomic.load.sub"+Name.substr(delim)
67 + ".p0" + Name.substr(delim+1));
71 else if (Name.rfind(".p") == std::string::npos) {
72 // We don't have an address space qualifier so this has to be upgraded
73 // to the new name. Copy the type name at the end of the intrinsic
75 std::string::size_type delim = Name.find_last_of('.');
76 assert(delim != std::string::npos && "can not find type");
77 F->setName(Name + ".p0" + Name.substr(delim+1));
84 // This upgrades the name of the llvm.bswap intrinsic function to only use
85 // a single type name for overloading. We only care about the old format
86 // 'llvm.bswap.i*.i*', so check for 'bswap.' and then for there being
87 // a '.' after 'bswap.'
88 if (Name.compare(5,6,"bswap.",6) == 0) {
89 std::string::size_type delim = Name.find('.',11);
91 if (delim != std::string::npos) {
92 // Construct the new name as 'llvm.bswap' + '.i*'
93 F->setName(Name.substr(0,10)+Name.substr(delim));
101 // We only want to fix the 'llvm.ct*' intrinsics which do not have the
102 // correct return type, so we check for the name, and then check if the
103 // return type does not match the parameter type.
104 if ( (Name.compare(5,5,"ctpop",5) == 0 ||
105 Name.compare(5,4,"ctlz",4) == 0 ||
106 Name.compare(5,4,"cttz",4) == 0) &&
107 FTy->getReturnType() != FTy->getParamType(0)) {
108 // We first need to change the name of the old (bad) intrinsic, because
109 // its type is incorrect, but we cannot overload that name. We
110 // arbitrarily unique it here allowing us to construct a correctly named
111 // and typed function below.
// (gap 111->114: the actual renaming call the comment above describes is
// elided here)
114 // Now construct the new intrinsic with the correct name and type. We
115 // leave the old function around in order to query its type, whatever it
116 // may be, and correctly convert up to the new type.
117 NewFn = cast<Function>(M->getOrInsertFunction(Name,
118 FTy->getParamType(0),
119 FTy->getParamType(0),
126 // The old llvm.eh.selector.i32 is equivalent to the new llvm.eh.selector.
127 if (Name.compare("llvm.eh.selector.i32") == 0) {
128 F->setName("llvm.eh.selector");
132 // The old llvm.eh.typeid.for.i32 is equivalent to llvm.eh.typeid.for.
133 if (Name.compare("llvm.eh.typeid.for.i32") == 0) {
134 F->setName("llvm.eh.typeid.for");
138 // Convert the old llvm.eh.selector.i64 to a call to llvm.eh.selector.
139 if (Name.compare("llvm.eh.selector.i64") == 0) {
140 NewFn = Intrinsic::getDeclaration(M, Intrinsic::eh_selector);
143 // Convert the old llvm.eh.typeid.for.i64 to a call to llvm.eh.typeid.for.
144 if (Name.compare("llvm.eh.typeid.for.i64") == 0) {
145 NewFn = Intrinsic::getDeclaration(M, Intrinsic::eh_typeid_for);
151 // This upgrades the llvm.memcpy, llvm.memmove, and llvm.memset to the
152 // new format that allows overloading the pointer for different address
153 // space (e.g., llvm.memcpy.i16 => llvm.memcpy.p0i8.p0i8.i16)
154 const char* NewFnName = NULL;
155 if (Name.compare(5,8,"memcpy.i",8) == 0) {
// (gap 155->157: presumably the "if (Name.compare(13,1,\"8\") == 0)" guard
// matching the ".i8" suffix, by analogy with the branches below)
157 NewFnName = "llvm.memcpy.p0i8.p0i8.i8";
158 else if (Name.compare(13,2,"16") == 0)
159 NewFnName = "llvm.memcpy.p0i8.p0i8.i16";
160 else if (Name.compare(13,2,"32") == 0)
161 NewFnName = "llvm.memcpy.p0i8.p0i8.i32";
162 else if (Name.compare(13,2,"64") == 0)
163 NewFnName = "llvm.memcpy.p0i8.p0i8.i64";
164 } else if (Name.compare(5,9,"memmove.i",9) == 0) {
166 NewFnName = "llvm.memmove.p0i8.p0i8.i8";
167 else if (Name.compare(14,2,"16") == 0)
168 NewFnName = "llvm.memmove.p0i8.p0i8.i16";
169 else if (Name.compare(14,2,"32") == 0)
170 NewFnName = "llvm.memmove.p0i8.p0i8.i32";
171 else if (Name.compare(14,2,"64") == 0)
172 NewFnName = "llvm.memmove.p0i8.p0i8.i64";
174 else if (Name.compare(5,8,"memset.i",8) == 0) {
176 NewFnName = "llvm.memset.p0i8.i8";
177 else if (Name.compare(13,2,"16") == 0)
178 NewFnName = "llvm.memset.p0i8.i16";
179 else if (Name.compare(13,2,"32") == 0)
180 NewFnName = "llvm.memset.p0i8.i32";
181 else if (Name.compare(13,2,"64") == 0)
182 NewFnName = "llvm.memset.p0i8.i64";
// If any of the suffixes above matched, declare the new overloaded
// intrinsic (note the extra trailing i1 "isvolatile" parameter) and hand
// it back via NewFn.
185 const FunctionType *FTy = F->getFunctionType();
186 NewFn = cast<Function>(M->getOrInsertFunction(NewFnName,
187 FTy->getReturnType(),
188 FTy->getParamType(0),
189 FTy->getParamType(1),
190 FTy->getParamType(2),
191 FTy->getParamType(3),
192 Type::getInt1Ty(F->getContext()),
199 // This upgrades the llvm.part.select overloaded intrinsic names to only
200 // use one type specifier in the name. We only care about the old format
201 // 'llvm.part.select.i*.i*', and solve as above with bswap.
202 if (Name.compare(5,12,"part.select.",12) == 0) {
203 std::string::size_type delim = Name.find('.',17);
205 if (delim != std::string::npos) {
206 // Construct a new name as 'llvm.part.select' + '.i*'
207 F->setName(Name.substr(0,16)+Name.substr(delim));
214 // This upgrades the llvm.part.set intrinsics similarly as above, however
215 // we care about 'llvm.part.set.i*.i*.i*', but only the first two types
216 // must match. There is an additional type specifier after these two
217 // matching types that we must retain when upgrading. Thus, we require
218 // finding 2 periods, not just one, after the intrinsic name.
219 if (Name.compare(5,9,"part.set.",9) == 0) {
220 std::string::size_type delim = Name.find('.',14);
222 if (delim != std::string::npos &&
223 Name.find('.',delim+1) != std::string::npos) {
224 // Construct a new name as 'llvm.part.set' + '.i*.i*'
225 F->setName(Name.substr(0,13)+Name.substr(delim));
234 // This fixes all MMX shift intrinsic instructions to take a
235 // v1i64 instead of a v2i32 as the second parameter.
236 if (Name.compare(5,10,"x86.mmx.ps",10) == 0 &&
237 (Name.compare(13,4,"psll", 4) == 0 ||
238 Name.compare(13,4,"psra", 4) == 0 ||
239 Name.compare(13,4,"psrl", 4) == 0) && Name[17] != 'i') {
241 const llvm::Type *VT =
242 VectorType::get(IntegerType::get(FTy->getContext(), 64), 1);
244 // We don't have to do anything if the parameter already has
246 if (FTy->getParamType(1) == VT)
// (gap 246->249: presumably "return false;" when the parameter type is
// already correct)
249 // We first need to change the name of the old (bad) intrinsic, because
250 // its type is incorrect, but we cannot overload that name. We
251 // arbitrarily unique it here allowing us to construct a correctly named
252 // and typed function below.
255 assert(FTy->getNumParams() == 2 && "MMX shift intrinsics take 2 args!");
257 // Now construct the new intrinsic with the correct name and type. We
258 // leave the old function around in order to query its type, whatever it
259 // may be, and correctly convert up to the new type.
260 NewFn = cast<Function>(M->getOrInsertFunction(Name,
261 FTy->getReturnType(),
262 FTy->getParamType(0),
266 } else if (Name.compare(5,17,"x86.sse2.loadh.pd",17) == 0 ||
267 Name.compare(5,17,"x86.sse2.loadl.pd",17) == 0 ||
268 Name.compare(5,16,"x86.sse2.movl.dq",16) == 0 ||
269 Name.compare(5,15,"x86.sse2.movs.d",15) == 0 ||
270 Name.compare(5,16,"x86.sse2.shuf.pd",16) == 0 ||
271 Name.compare(5,18,"x86.sse2.unpckh.pd",18) == 0 ||
272 Name.compare(5,18,"x86.sse2.unpckl.pd",18) == 0 ||
273 Name.compare(5,20,"x86.sse2.punpckh.qdq",20) == 0 ||
274 Name.compare(5,20,"x86.sse2.punpckl.qdq",20) == 0) {
275 // Calls to these intrinsics are transformed into ShuffleVector's.
278 } else if (Name.compare(5, 16, "x86.sse41.pmulld", 16) == 0) {
279 // Calls to these intrinsics are transformed into vector multiplies.
282 } else if (Name.compare(5, 18, "x86.ssse3.palign.r", 18) == 0 ||
283 Name.compare(5, 22, "x86.ssse3.palign.r.128", 22) == 0) {
284 // Calls to these intrinsics are transformed into vector shuffles, shifts,
293 // This may not belong here. This function is effectively being overloaded
294 // to both detect an intrinsic which needs upgrading, and to provide the
295 // upgraded form of the intrinsic. We should perhaps have two separate
296 // functions for this.
// UpgradeIntrinsicFunction - Public entry point: run the upgrade detection
// above, then (re)apply the canonical attributes for whatever intrinsic F
// now is.  NOTE(review): numbered excerpt with gaps -- the trailing
// "return Upgraded;" and closing brace fall in the elided range after
// line 308.
300 bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
302 bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
304 // Upgrade intrinsic attributes. This does not change the function.
307 if (unsigned id = F->getIntrinsicID())
308 F->setAttributes(Intrinsic::getAttributes((Intrinsic::ID)id));
312 // UpgradeIntrinsicCall - Upgrade a call to an old intrinsic to be a call to the
313 // upgraded intrinsic. All argument and return casting must be provided in
314 // order to seamlessly integrate with existing context.
//
// NOTE(review): this is a numbered excerpt with gaps.  Wherever the
// embedded line numbers jump, statements appear to have been elided --
// e.g. the "isLoadH = true;" style assignments after each name compare
// (328, 330, ...), closing braces, "break;" statements, and local
// declarations such as "Value *Operands[2];" before line 561 and
// "Value *Rep;" in the palignr branches.  Confirm against the full file.
315 void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
316 Function *F = CI->getCalledFunction();
317 LLVMContext &C = CI->getContext();
318 ImmutableCallSite CS(CI);
320 assert(F && "CallInst has no function associated with it.");
// Classify the SSE2 intrinsics that are lowered to shufflevector below.
323 bool isLoadH = false, isLoadL = false, isMovL = false;
324 bool isMovSD = false, isShufPD = false;
325 bool isUnpckhPD = false, isUnpcklPD = false;
326 bool isPunpckhQPD = false, isPunpcklQPD = false;
327 if (F->getName() == "llvm.x86.sse2.loadh.pd")
329 else if (F->getName() == "llvm.x86.sse2.loadl.pd")
331 else if (F->getName() == "llvm.x86.sse2.movl.dq")
333 else if (F->getName() == "llvm.x86.sse2.movs.d")
335 else if (F->getName() == "llvm.x86.sse2.shuf.pd")
337 else if (F->getName() == "llvm.x86.sse2.unpckh.pd")
339 else if (F->getName() == "llvm.x86.sse2.unpckl.pd")
341 else if (F->getName() == "llvm.x86.sse2.punpckh.qdq")
343 else if (F->getName() == "llvm.x86.sse2.punpckl.qdq")
346 if (isLoadH || isLoadL || isMovL || isMovSD || isShufPD ||
347 isUnpckhPD || isUnpcklPD || isPunpckhQPD || isPunpcklQPD) {
348 std::vector<Constant*> Idxs;
349 Value *Op0 = CI->getArgOperand(0);
350 ShuffleVectorInst *SI = NULL;
351 if (isLoadH || isLoadL) {
// loadh/loadl: load one double from memory, insert it into an undef
// vector, then shuffle it into the high (loadh) or low (loadl) lane.
352 Value *Op1 = UndefValue::get(Op0->getType());
353 Value *Addr = new BitCastInst(CI->getArgOperand(1),
354 Type::getDoublePtrTy(C),
356 Value *Load = new LoadInst(Addr, "upgraded.", false, 8, CI);
357 Value *Idx = ConstantInt::get(Type::getInt32Ty(C), 0);
358 Op1 = InsertElementInst::Create(Op1, Load, Idx, "upgraded.", CI);
// Mask {0,2} keeps Op0's low lane and takes the loaded value for the
// high lane; mask {2,1} does the converse for loadl.
361 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
362 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
364 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
365 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
367 Value *Mask = ConstantVector::get(Idxs);
368 SI = new ShuffleVectorInst(Op0, Op1, Mask, "upgraded.", CI);
// movl.dq: shuffle against an all-zero vector (mask {4,5,2,3}) to zero
// the high half while keeping the low elements of Op0.
370 Constant *Zero = ConstantInt::get(Type::getInt32Ty(C), 0);
371 Idxs.push_back(Zero);
372 Idxs.push_back(Zero);
373 Idxs.push_back(Zero);
374 Idxs.push_back(Zero);
375 Value *ZeroV = ConstantVector::get(Idxs);
378 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 4));
379 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 5));
380 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
381 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 3));
382 Value *Mask = ConstantVector::get(Idxs);
383 SI = new ShuffleVectorInst(ZeroV, Op0, Mask, "upgraded.", CI);
384 } else if (isMovSD ||
385 isUnpckhPD || isUnpcklPD || isPunpckhQPD || isPunpcklQPD) {
// Two-operand forms: pick the lane pair appropriate to each op.
386 Value *Op1 = CI->getArgOperand(1);
388 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
389 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
390 } else if (isUnpckhPD || isPunpckhQPD) {
391 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
392 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 3));
394 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
395 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
397 Value *Mask = ConstantVector::get(Idxs);
398 SI = new ShuffleVectorInst(Op0, Op1, Mask, "upgraded.", CI);
399 } else if (isShufPD) {
// shuf.pd: decode the 2-bit immediate into per-lane selectors.
400 Value *Op1 = CI->getArgOperand(1);
401 unsigned MaskVal = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
402 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), MaskVal & 1));
403 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C),
404 ((MaskVal >> 1) & 1)+2));
405 Value *Mask = ConstantVector::get(Idxs);
406 SI = new ShuffleVectorInst(Op0, Op1, Mask, "upgraded.", CI);
409 assert(SI && "Unexpected!");
411 // Handle any uses of the old CallInst.
412 if (!CI->use_empty())
413 // Replace all uses of the old call with the new cast which has the
415 CI->replaceAllUsesWith(SI);
417 // Clean up the old call now that it has been completely upgraded.
418 CI->eraseFromParent();
419 } else if (F->getName() == "llvm.x86.sse41.pmulld") {
420 // Upgrade this set of intrinsics into vector multiplies.
421 Instruction *Mul = BinaryOperator::CreateMul(CI->getArgOperand(0),
422 CI->getArgOperand(1),
425 // Fix up all the uses with our new multiply.
426 if (!CI->use_empty())
427 CI->replaceAllUsesWith(Mul);
429 // Remove upgraded multiply.
430 CI->eraseFromParent();
431 } else if (F->getName() == "llvm.x86.ssse3.palign.r") {
// MMX (64-bit) palignr: lower the byte-shift immediate into a shuffle,
// a logical right shift, or a zero, depending on its magnitude.
432 Value *Op1 = CI->getArgOperand(0);
433 Value *Op2 = CI->getArgOperand(1);
434 Value *Op3 = CI->getArgOperand(2);
435 unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
437 IRBuilder<> Builder(C);
438 Builder.SetInsertPoint(CI->getParent(), CI);
440 // If palignr is shifting the pair of input vectors less than 9 bytes,
441 // emit a shuffle instruction.
443 const Type *IntTy = Type::getInt32Ty(C);
444 const Type *EltTy = Type::getInt8Ty(C);
445 const Type *VecTy = VectorType::get(EltTy, 8);
447 Op2 = Builder.CreateBitCast(Op2, VecTy);
448 Op1 = Builder.CreateBitCast(Op1, VecTy);
450 llvm::SmallVector<llvm::Constant*, 8> Indices;
451 for (unsigned i = 0; i != 8; ++i)
452 Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
454 Value *SV = ConstantVector::get(Indices.begin(), Indices.size());
455 Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
456 Rep = Builder.CreateBitCast(Rep, F->getReturnType());
459 // If palignr is shifting the pair of input vectors more than 8 but less
460 // than 16 bytes, emit a logical right shift of the destination.
461 else if (shiftVal < 16) {
462 // MMX has these as 1 x i64 vectors for some odd optimization reasons.
463 const Type *EltTy = Type::getInt64Ty(C);
464 const Type *VecTy = VectorType::get(EltTy, 1);
466 Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
467 Op2 = ConstantInt::get(VecTy, (shiftVal-8) * 8);
469 // create i32 constant
471 Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_mmx_psrl_q);
472 Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");
475 // If palignr is shifting the pair of vectors 16 or more bytes, emit zero.
477 Rep = Constant::getNullValue(F->getReturnType());
480 // Replace any uses with our new instruction.
481 if (!CI->use_empty())
482 CI->replaceAllUsesWith(Rep);
484 // Remove upgraded instruction.
485 CI->eraseFromParent();
487 } else if (F->getName() == "llvm.x86.ssse3.palign.r.128") {
// SSE (128-bit) palignr: same scheme as the MMX branch above with the
// thresholds doubled.
488 Value *Op1 = CI->getArgOperand(0);
489 Value *Op2 = CI->getArgOperand(1);
490 Value *Op3 = CI->getArgOperand(2);
491 unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
493 IRBuilder<> Builder(C);
494 Builder.SetInsertPoint(CI->getParent(), CI);
496 // If palignr is shifting the pair of input vectors less than 17 bytes,
497 // emit a shuffle instruction.
498 if (shiftVal <= 16) {
499 const Type *IntTy = Type::getInt32Ty(C);
500 const Type *EltTy = Type::getInt8Ty(C);
501 const Type *VecTy = VectorType::get(EltTy, 16);
503 Op2 = Builder.CreateBitCast(Op2, VecTy);
504 Op1 = Builder.CreateBitCast(Op1, VecTy);
506 llvm::SmallVector<llvm::Constant*, 16> Indices;
507 for (unsigned i = 0; i != 16; ++i)
508 Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
510 Value *SV = ConstantVector::get(Indices.begin(), Indices.size());
511 Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
512 Rep = Builder.CreateBitCast(Rep, F->getReturnType());
515 // If palignr is shifting the pair of input vectors more than 16 but less
516 // than 32 bytes, emit a logical right shift of the destination.
517 else if (shiftVal < 32) {
518 const Type *EltTy = Type::getInt64Ty(C);
519 const Type *VecTy = VectorType::get(EltTy, 2);
520 const Type *IntTy = Type::getInt32Ty(C);
522 Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
523 Op2 = ConstantInt::get(IntTy, (shiftVal-16) * 8);
525 // create i32 constant
527 Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_sse2_psrl_dq);
528 Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");
531 // If palignr is shifting the pair of vectors 32 or more bytes, emit zero.
533 Rep = Constant::getNullValue(F->getReturnType());
536 // Replace any uses with our new instruction.
537 if (!CI->use_empty())
538 CI->replaceAllUsesWith(Rep);
540 // Remove upgraded instruction.
541 CI->eraseFromParent();
544 llvm_unreachable("Unknown function for CallInst upgrade.");
// (gap 544->549: presumably the close of an "if (!NewFn)" guard and a
// "return;" -- everything below retargets CI at the NewFn produced by
// UpgradeIntrinsicFunction1)
549 switch (NewFn->getIntrinsicID()) {
550 default: llvm_unreachable("Unknown function for CallInst upgrade.");
551 case Intrinsic::x86_mmx_psll_d:
552 case Intrinsic::x86_mmx_psll_q:
553 case Intrinsic::x86_mmx_psll_w:
554 case Intrinsic::x86_mmx_psra_d:
555 case Intrinsic::x86_mmx_psra_w:
556 case Intrinsic::x86_mmx_psrl_d:
557 case Intrinsic::x86_mmx_psrl_q:
558 case Intrinsic::x86_mmx_psrl_w: {
// (gap 558->561: presumably "Value *Operands[2];")
561 Operands[0] = CI->getArgOperand(0);
563 // Cast the second parameter to the correct type.
564 BitCastInst *BC = new BitCastInst(CI->getArgOperand(1),
565 NewFn->getFunctionType()->getParamType(1),
569 // Construct a new CallInst
570 CallInst *NewCI = CallInst::Create(NewFn, Operands, Operands+2,
571 "upgraded."+CI->getName(), CI);
572 NewCI->setTailCall(CI->isTailCall());
573 NewCI->setCallingConv(CI->getCallingConv());
575 // Handle any uses of the old CallInst.
576 if (!CI->use_empty())
577 // Replace all uses of the old call with the new cast which has the
579 CI->replaceAllUsesWith(NewCI);
581 // Clean up the old call now that it has been completely upgraded.
582 CI->eraseFromParent();
585 case Intrinsic::ctlz:
586 case Intrinsic::ctpop:
587 case Intrinsic::cttz: {
588 // Build a small vector of the original arguments.
589 SmallVector<Value*, 8> Operands(CS.arg_begin(), CS.arg_end());
591 // Construct a new CallInst
592 CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
593 "upgraded."+CI->getName(), CI);
594 NewCI->setTailCall(CI->isTailCall());
595 NewCI->setCallingConv(CI->getCallingConv());
597 // Handle any uses of the old CallInst.
598 if (!CI->use_empty()) {
599 // Check for sign extend parameter attributes on the return values.
600 bool SrcSExt = NewFn->getAttributes().paramHasAttr(0, Attribute::SExt);
601 bool DestSExt = F->getAttributes().paramHasAttr(0, Attribute::SExt);
603 // Construct an appropriate cast from the new return type to the old.
604 CastInst *RetCast = CastInst::Create(
605 CastInst::getCastOpcode(NewCI, SrcSExt,
608 NewCI, F->getReturnType(),
609 NewCI->getName(), CI);
610 NewCI->moveBefore(RetCast);
612 // Replace all uses of the old call with the new cast which has the
614 CI->replaceAllUsesWith(RetCast);
617 // Clean up the old call now that it has been completely upgraded.
618 CI->eraseFromParent();
621 case Intrinsic::eh_selector:
622 case Intrinsic::eh_typeid_for: {
623 // Only the return type changed.
624 SmallVector<Value*, 8> Operands(CS.arg_begin(), CS.arg_end());
625 CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
626 "upgraded." + CI->getName(), CI);
627 NewCI->setTailCall(CI->isTailCall());
628 NewCI->setCallingConv(CI->getCallingConv());
630 // Handle any uses of the old CallInst.
631 if (!CI->use_empty()) {
632 // Construct an appropriate cast from the new return type to the old.
634 CastInst::Create(CastInst::getCastOpcode(NewCI, true,
635 F->getReturnType(), true),
636 NewCI, F->getReturnType(), NewCI->getName(), CI);
637 CI->replaceAllUsesWith(RetCast);
639 CI->eraseFromParent();
642 case Intrinsic::memcpy:
643 case Intrinsic::memmove:
644 case Intrinsic::memset: {
// Old mem* intrinsics took 4 arguments; the new overloads add a trailing
// i1 "isvolatile" flag, passed here as false (0).
646 const llvm::Type *I1Ty = llvm::Type::getInt1Ty(CI->getContext());
647 Value *Operands[5] = { CI->getArgOperand(0), CI->getArgOperand(1),
648 CI->getArgOperand(2), CI->getArgOperand(3),
649 llvm::ConstantInt::get(I1Ty, 0) };
650 CallInst *NewCI = CallInst::Create(NewFn, Operands, Operands+5,
652 NewCI->setTailCall(CI->isTailCall());
653 NewCI->setCallingConv(CI->getCallingConv());
654 // Handle any uses of the old CallInst.
655 if (!CI->use_empty())
656 // Replace all uses of the old call with the new cast which has the
658 CI->replaceAllUsesWith(NewCI);
660 // Clean up the old call now that it has been completely upgraded.
661 CI->eraseFromParent();
667 // This tests each Function to determine if it needs upgrading. When we find
668 // one we are interested in, we then upgrade all calls to reflect the new
// function (line 669 of the original comment is elided in this excerpt).
//
// NOTE(review): numbered excerpt with gaps -- the "Function *NewFn;"
// declaration (presumably at line 674), the loop's "UI != UE;" bound, and
// the closing braces fall in the elided ranges.
670 void llvm::UpgradeCallsToIntrinsic(Function* F) {
671 assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");
673 // Upgrade the function and check if it is a totally new function.
675 if (UpgradeIntrinsicFunction(F, NewFn)) {
677 // Replace all uses of the old function with the new one if necessary.
678 for (Value::use_iterator UI = F->use_begin(), UE = F->use_end();
680 if (CallInst* CI = dyn_cast<CallInst>(*UI++))
681 UpgradeIntrinsicCall(CI, NewFn);
683 // Remove old function, no longer used, from the module.
684 F->eraseFromParent();
689 /// This function strips all debug info intrinsics, except for llvm.dbg.declare.
690 /// If an llvm.dbg.declare intrinsic is invalid, then this function simply
692 void llvm::CheckDebugInfoIntrinsics(Module *M) {
695 if (Function *FuncStart = M->getFunction("llvm.dbg.func.start")) {
696 while (!FuncStart->use_empty()) {
697 CallInst *CI = cast<CallInst>(FuncStart->use_back());
698 CI->eraseFromParent();
700 FuncStart->eraseFromParent();
703 if (Function *StopPoint = M->getFunction("llvm.dbg.stoppoint")) {
704 while (!StopPoint->use_empty()) {
705 CallInst *CI = cast<CallInst>(StopPoint->use_back());
706 CI->eraseFromParent();
708 StopPoint->eraseFromParent();
711 if (Function *RegionStart = M->getFunction("llvm.dbg.region.start")) {
712 while (!RegionStart->use_empty()) {
713 CallInst *CI = cast<CallInst>(RegionStart->use_back());
714 CI->eraseFromParent();
716 RegionStart->eraseFromParent();
719 if (Function *RegionEnd = M->getFunction("llvm.dbg.region.end")) {
720 while (!RegionEnd->use_empty()) {
721 CallInst *CI = cast<CallInst>(RegionEnd->use_back());
722 CI->eraseFromParent();
724 RegionEnd->eraseFromParent();
727 if (Function *Declare = M->getFunction("llvm.dbg.declare")) {
728 if (!Declare->use_empty()) {
729 DbgDeclareInst *DDI = cast<DbgDeclareInst>(Declare->use_back());
730 if (!isa<MDNode>(DDI->getArgOperand(0)) ||
731 !isa<MDNode>(DDI->getArgOperand(1))) {
732 while (!Declare->use_empty()) {
733 CallInst *CI = cast<CallInst>(Declare->use_back());
734 CI->eraseFromParent();
736 Declare->eraseFromParent();