1 //===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the auto-upgrade helper functions
12 //===----------------------------------------------------------------------===//
14 #include "llvm/AutoUpgrade.h"
15 #include "llvm/Constants.h"
16 #include "llvm/Function.h"
17 #include "llvm/LLVMContext.h"
18 #include "llvm/Module.h"
19 #include "llvm/IntrinsicInst.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/Support/ErrorHandling.h"
22 #include "llvm/Support/IRBuilder.h"
// UpgradeIntrinsicFunction1 - Examine an intrinsic declaration and upgrade
// obsolete names/signatures.  Pure renames are applied directly with
// F->setName(); when the *type* of the intrinsic changed, NewFn is instead
// set to a freshly inserted declaration with the correct type so that
// UpgradeIntrinsicCall can rewrite each call site.
// NOTE(review): this excerpt is missing several lines (returns, closing
// braces); comments below describe only the visible logic.
static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
  assert(F && "Illegal to upgrade a non-existent Function.");

  // Get the Function's name.
  const std::string& Name = F->getName();

  const FunctionType *FTy = F->getFunctionType();

  // Quickly eliminate it, if it's not a candidate: every upgradable
  // intrinsic starts with the "llvm." prefix.
  if (Name.length() <= 8 || Name[0] != 'l' || Name[1] != 'l' ||
      Name[2] != 'v' || Name[3] != 'm' || Name[4] != '.')

  Module *M = F->getParent();

  // This upgrades the llvm.atomic.lcs, llvm.atomic.las, llvm.atomic.lss,
  // and atomics with default address spaces to their new names to their new
  // function name (e.g. llvm.atomic.add.i32 => llvm.atomic.add.i32.p0i32)
  if (Name.compare(5,7,"atomic.",7) == 0) {
    // "lcs" (load-compare-swap) became "cmp.swap", plus a p0 address-space
    // qualifier derived from the trailing type suffix.
    if (Name.compare(12,3,"lcs",3) == 0) {
      std::string::size_type delim = Name.find('.',12);
      F->setName("llvm.atomic.cmp.swap" + Name.substr(delim) +
                 ".p0" + Name.substr(delim+1));
    // "las" (load-add-store) became "load.add".
    else if (Name.compare(12,3,"las",3) == 0) {
      std::string::size_type delim = Name.find('.',12);
      F->setName("llvm.atomic.load.add"+Name.substr(delim)
                 + ".p0" + Name.substr(delim+1));
    // "lss" (load-sub-store) became "load.sub".
    else if (Name.compare(12,3,"lss",3) == 0) {
      std::string::size_type delim = Name.find('.',12);
      F->setName("llvm.atomic.load.sub"+Name.substr(delim)
                 + ".p0" + Name.substr(delim+1));
    else if (Name.rfind(".p") == std::string::npos) {
      // We don't have an address space qualifier so this has to be upgraded
      // to the new name. Copy the type name at the end of the intrinsic
      std::string::size_type delim = Name.find_last_of('.');
      assert(delim != std::string::npos && "can not find type");
      F->setName(Name + ".p0" + Name.substr(delim+1));

  // This upgrades the name of the llvm.bswap intrinsic function to only use
  // a single type name for overloading. We only care about the old format
  // 'llvm.bswap.i*.i*', so check for 'bswap.' and then for there being
  // a '.' after 'bswap.'
  if (Name.compare(5,6,"bswap.",6) == 0) {
    std::string::size_type delim = Name.find('.',11);

    if (delim != std::string::npos) {
      // Construct the new name as 'llvm.bswap' + '.i*'
      F->setName(Name.substr(0,10)+Name.substr(delim));

  // We only want to fix the 'llvm.ct*' intrinsics which do not have the
  // correct return type, so we check for the name, and then check if the
  // return type does not match the parameter type.
  if ( (Name.compare(5,5,"ctpop",5) == 0 ||
      Name.compare(5,4,"ctlz",4) == 0 ||
      Name.compare(5,4,"cttz",4) == 0) &&
      FTy->getReturnType() != FTy->getParamType(0)) {
    // We first need to change the name of the old (bad) intrinsic, because
    // its type is incorrect, but we cannot overload that name. We
    // arbitrarily unique it here allowing us to construct a correctly named
    // and typed function below.

    // Now construct the new intrinsic with the correct name and type. We
    // leave the old function around in order to query its type, whatever it
    // may be, and correctly convert up to the new type.
    NewFn = cast<Function>(M->getOrInsertFunction(Name,
                                                  FTy->getParamType(0),
                                                  FTy->getParamType(0),

  // The old llvm.eh.selector.i32 is equivalent to the new llvm.eh.selector.
  if (Name.compare("llvm.eh.selector.i32") == 0) {
    F->setName("llvm.eh.selector");

  // The old llvm.eh.typeid.for.i32 is equivalent to llvm.eh.typeid.for.
  if (Name.compare("llvm.eh.typeid.for.i32") == 0) {
    F->setName("llvm.eh.typeid.for");

  // Convert the old llvm.eh.selector.i64 to a call to llvm.eh.selector.
  if (Name.compare("llvm.eh.selector.i64") == 0) {
    NewFn = Intrinsic::getDeclaration(M, Intrinsic::eh_selector);

  // Convert the old llvm.eh.typeid.for.i64 to a call to llvm.eh.typeid.for.
  if (Name.compare("llvm.eh.typeid.for.i64") == 0) {
    NewFn = Intrinsic::getDeclaration(M, Intrinsic::eh_typeid_for);

  // This upgrades the llvm.memcpy, llvm.memmove, and llvm.memset to the
  // new format that allows overloading the pointer for different address
  // space (e.g., llvm.memcpy.i16 => llvm.memcpy.p0i8.p0i8.i16)
  const char* NewFnName = NULL;
  if (Name.compare(5,8,"memcpy.i",8) == 0) {
      NewFnName = "llvm.memcpy.p0i8.p0i8.i8";
    else if (Name.compare(13,2,"16") == 0)
      NewFnName = "llvm.memcpy.p0i8.p0i8.i16";
    else if (Name.compare(13,2,"32") == 0)
      NewFnName = "llvm.memcpy.p0i8.p0i8.i32";
    else if (Name.compare(13,2,"64") == 0)
      NewFnName = "llvm.memcpy.p0i8.p0i8.i64";
  } else if (Name.compare(5,9,"memmove.i",9) == 0) {
      NewFnName = "llvm.memmove.p0i8.p0i8.i8";
    else if (Name.compare(14,2,"16") == 0)
      NewFnName = "llvm.memmove.p0i8.p0i8.i16";
    else if (Name.compare(14,2,"32") == 0)
      NewFnName = "llvm.memmove.p0i8.p0i8.i32";
    else if (Name.compare(14,2,"64") == 0)
      NewFnName = "llvm.memmove.p0i8.p0i8.i64";
  else if (Name.compare(5,8,"memset.i",8) == 0) {
      NewFnName = "llvm.memset.p0i8.i8";
    else if (Name.compare(13,2,"16") == 0)
      NewFnName = "llvm.memset.p0i8.i16";
    else if (Name.compare(13,2,"32") == 0)
      NewFnName = "llvm.memset.p0i8.i32";
    else if (Name.compare(13,2,"64") == 0)
      NewFnName = "llvm.memset.p0i8.i64";
  // Build the replacement declaration: same signature as before plus a
  // trailing i1 (the new "isvolatile" flag position in the upgraded form).
  const FunctionType *FTy = F->getFunctionType();
  NewFn = cast<Function>(M->getOrInsertFunction(NewFnName,
                                                FTy->getReturnType(),
                                                FTy->getParamType(0),
                                                FTy->getParamType(1),
                                                FTy->getParamType(2),
                                                FTy->getParamType(3),
                                                Type::getInt1Ty(F->getContext()),

  // This upgrades the llvm.part.select overloaded intrinsic names to only
  // use one type specifier in the name. We only care about the old format
  // 'llvm.part.select.i*.i*', and solve as above with bswap.
  if (Name.compare(5,12,"part.select.",12) == 0) {
    std::string::size_type delim = Name.find('.',17);

    if (delim != std::string::npos) {
      // Construct a new name as 'llvm.part.select' + '.i*'
      F->setName(Name.substr(0,16)+Name.substr(delim));

  // This upgrades the llvm.part.set intrinsics similarly as above, however
  // we care about 'llvm.part.set.i*.i*.i*', but only the first two types
  // must match. There is an additional type specifier after these two
  // matching types that we must retain when upgrading. Thus, we require
  // finding 2 periods, not just one, after the intrinsic name.
  if (Name.compare(5,9,"part.set.",9) == 0) {
    std::string::size_type delim = Name.find('.',14);

    if (delim != std::string::npos &&
        Name.find('.',delim+1) != std::string::npos) {
      // Construct a new name as 'llvm.part.set' + '.i*.i*'
      F->setName(Name.substr(0,13)+Name.substr(delim));

  // This fixes all MMX shift intrinsic instructions to take a
  // v1i64 instead of a v2i32 as the second parameter.
  if (Name.compare(5,10,"x86.mmx.ps",10) == 0 &&
      (Name.compare(13,4,"psll", 4) == 0 ||
       Name.compare(13,4,"psra", 4) == 0 ||
       Name.compare(13,4,"psrl", 4) == 0) && Name[17] != 'i') {

    const llvm::Type *VT =
          VectorType::get(IntegerType::get(FTy->getContext(), 64), 1);

    // We don't have to do anything if the parameter already has
    // the correct <1 x i64> type.
    if (FTy->getParamType(1) == VT)

    // We first need to change the name of the old (bad) intrinsic, because
    // its type is incorrect, but we cannot overload that name. We
    // arbitrarily unique it here allowing us to construct a correctly named
    // and typed function below.

    assert(FTy->getNumParams() == 2 && "MMX shift intrinsics take 2 args!");

    // Now construct the new intrinsic with the correct name and type. We
    // leave the old function around in order to query its type, whatever it
    // may be, and correctly convert up to the new type.
    NewFn = cast<Function>(M->getOrInsertFunction(Name,
                                                  FTy->getReturnType(),
                                                  FTy->getParamType(0),
  } else if (Name.compare(5,17,"x86.sse2.loadh.pd",17) == 0 ||
             Name.compare(5,17,"x86.sse2.loadl.pd",17) == 0 ||
             Name.compare(5,16,"x86.sse2.movl.dq",16) == 0 ||
             Name.compare(5,15,"x86.sse2.movs.d",15) == 0 ||
             Name.compare(5,16,"x86.sse2.shuf.pd",16) == 0 ||
             Name.compare(5,18,"x86.sse2.unpckh.pd",18) == 0 ||
             Name.compare(5,18,"x86.sse2.unpckl.pd",18) == 0 ||
             Name.compare(5,20,"x86.sse2.punpckh.qdq",20) == 0 ||
             Name.compare(5,20,"x86.sse2.punpckl.qdq",20) == 0) {
    // Calls to these intrinsics are transformed into ShuffleVector's.
  } else if (Name.compare(5, 16, "x86.sse41.pmulld", 16) == 0) {
    // Calls to these intrinsics are transformed into vector multiplies.
  } else if (Name.compare(5, 18, "x86.ssse3.palign.r", 18) == 0 ||
             Name.compare(5, 22, "x86.ssse3.palign.r.128", 22) == 0) {
    // Calls to these intrinsics are transformed into vector shuffles, shifts,
292 // This may not belong here. This function is effectively being overloaded
293 // to both detect an intrinsic which needs upgrading, and to provide the
294 // upgraded form of the intrinsic. We should perhaps have two separate
295 // functions for this.
// UpgradeIntrinsicFunction - Public wrapper: run the name/signature upgrade,
// then refresh the declaration's attributes from the intrinsic table.
// Returns the result of UpgradeIntrinsicFunction1; NewFn is set when call
// sites must be rewritten via UpgradeIntrinsicCall.
bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
  bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);

  // Upgrade intrinsic attributes. This does not change the function.
  // (getIntrinsicID() is non-zero only for recognized intrinsics.)
  if (unsigned id = F->getIntrinsicID())
    F->setAttributes(Intrinsic::getAttributes((Intrinsic::ID)id));
311 // UpgradeIntrinsicCall - Upgrade a call to an old intrinsic to be a call the
312 // upgraded intrinsic. All argument and return casting must be provided in
313 // order to seamlessly integrate with existing context.
// UpgradeIntrinsicCall - Upgrade a call to an old intrinsic to be a call to
// the upgraded intrinsic. All argument and return casting must be provided
// in order to seamlessly integrate with existing context.  The old CallInst
// is replaced and erased; when NewFn is null the call is lowered directly
// to ordinary IR (shuffles, multiplies, shifts).
// NOTE(review): several lines of this routine are missing from this
// excerpt (assignments to the is* flags, closing braces, etc.); comments
// describe only the visible logic.
void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
  Function *F = CI->getCalledFunction();
  LLVMContext &C = CI->getContext();

  assert(F && "CallInst has no function associated with it.");

  // Identify which of the shuffle-lowered SSE2 intrinsics this call is.
  bool isLoadH = false, isLoadL = false, isMovL = false;
  bool isMovSD = false, isShufPD = false;
  bool isUnpckhPD = false, isUnpcklPD = false;
  bool isPunpckhQPD = false, isPunpcklQPD = false;
  if (F->getName() == "llvm.x86.sse2.loadh.pd")
  else if (F->getName() == "llvm.x86.sse2.loadl.pd")
  else if (F->getName() == "llvm.x86.sse2.movl.dq")
  else if (F->getName() == "llvm.x86.sse2.movs.d")
  else if (F->getName() == "llvm.x86.sse2.shuf.pd")
  else if (F->getName() == "llvm.x86.sse2.unpckh.pd")
  else if (F->getName() == "llvm.x86.sse2.unpckl.pd")
  else if (F->getName() == "llvm.x86.sse2.punpckh.qdq")
  else if (F->getName() == "llvm.x86.sse2.punpckl.qdq")

  if (isLoadH || isLoadL || isMovL || isMovSD || isShufPD ||
      isUnpckhPD || isUnpcklPD || isPunpckhQPD || isPunpcklQPD) {
    // All of these lower to a ShuffleVectorInst built from the call's
    // operands; Idxs accumulates the shuffle mask elements.
    std::vector<Constant*> Idxs;
    Value *Op0 = CI->getOperand(1);
    ShuffleVectorInst *SI = NULL;
    if (isLoadH || isLoadL) {
      // loadh/loadl: load one double from memory, insert it into an undef
      // vector, then blend it with Op0 via the shuffle mask below.
      Value *Op1 = UndefValue::get(Op0->getType());
      Value *Addr = new BitCastInst(CI->getOperand(2),
                                    Type::getDoublePtrTy(C),
      Value *Load = new LoadInst(Addr, "upgraded.", false, 8, CI);
      Value *Idx = ConstantInt::get(Type::getInt32Ty(C), 0);
      Op1 = InsertElementInst::Create(Op1, Load, Idx, "upgraded.", CI);

        // loadh mask: keep element 0 of Op0, take the loaded value (elt 2).
        Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
        Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
        // loadl mask: take the loaded value, keep element 1 of Op0.
        Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
        Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));

      Value *Mask = ConstantVector::get(Idxs);
      SI = new ShuffleVectorInst(Op0, Op1, Mask, "upgraded.", CI);
      // movl.dq: build an all-zero vector and shuffle the low half of Op0
      // into it.
      Constant *Zero = ConstantInt::get(Type::getInt32Ty(C), 0);
      Idxs.push_back(Zero);
      Idxs.push_back(Zero);
      Idxs.push_back(Zero);
      Idxs.push_back(Zero);
      Value *ZeroV = ConstantVector::get(Idxs);

      Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 4));
      Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 5));
      Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
      Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 3));
      Value *Mask = ConstantVector::get(Idxs);
      SI = new ShuffleVectorInst(ZeroV, Op0, Mask, "upgraded.", CI);
    } else if (isMovSD ||
               isUnpckhPD || isUnpcklPD || isPunpckhQPD || isPunpcklQPD) {
      // Two-operand shuffles: mask depends on which intrinsic this is.
      Value *Op1 = CI->getOperand(2);
        // movsd mask: low element from Op1, high element from Op0.
        Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
        Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
      } else if (isUnpckhPD || isPunpckhQPD) {
        // unpack-high mask: high elements of both operands.
        Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
        Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 3));
        // unpack-low mask: low elements of both operands.
        Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
        Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));

      Value *Mask = ConstantVector::get(Idxs);
      SI = new ShuffleVectorInst(Op0, Op1, Mask, "upgraded.", CI);
    } else if (isShufPD) {
      // shufpd: decode the 2-bit immediate into per-element source indices.
      Value *Op1 = CI->getOperand(2);
      unsigned MaskVal = cast<ConstantInt>(CI->getOperand(3))->getZExtValue();
      Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), MaskVal & 1));
      Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C),
                                      ((MaskVal >> 1) & 1)+2));
      Value *Mask = ConstantVector::get(Idxs);
      SI = new ShuffleVectorInst(Op0, Op1, Mask, "upgraded.", CI);

    assert(SI && "Unexpected!");

    // Handle any uses of the old CallInst.
    if (!CI->use_empty())
      // Replace all uses of the old call with the new cast which has the
      // correct type.
      CI->replaceAllUsesWith(SI);

    // Clean up the old call now that it has been completely upgraded.
    CI->eraseFromParent();
  } else if (F->getName() == "llvm.x86.sse41.pmulld") {
    // Upgrade this set of intrinsics into vector multiplies.
    Instruction *Mul = BinaryOperator::CreateMul(CI->getOperand(1),

    // Fix up all the uses with our new multiply.
    if (!CI->use_empty())
      CI->replaceAllUsesWith(Mul);

    // Remove upgraded multiply.
    CI->eraseFromParent();
  } else if (F->getName() == "llvm.x86.ssse3.palign.r") {
    // MMX (64-bit) palignr: Op3 is the constant byte-shift amount.
    Value *Op1 = CI->getOperand(1);
    Value *Op2 = CI->getOperand(2);
    Value *Op3 = CI->getOperand(3);
    unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();

    IRBuilder<> Builder(C);
    Builder.SetInsertPoint(CI->getParent(), CI);

    // If palignr is shifting the pair of input vectors less than 9 bytes,
    // emit a shuffle instruction.
      const Type *IntTy = Type::getInt32Ty(C);
      const Type *EltTy = Type::getInt8Ty(C);
      const Type *VecTy = VectorType::get(EltTy, 8);

      Op2 = Builder.CreateBitCast(Op2, VecTy);
      Op1 = Builder.CreateBitCast(Op1, VecTy);

      llvm::SmallVector<llvm::Constant*, 8> Indices;
      for (unsigned i = 0; i != 8; ++i)
        Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));

      Value *SV = ConstantVector::get(Indices.begin(), Indices.size());
      Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
      Rep = Builder.CreateBitCast(Rep, F->getReturnType());

    // If palignr is shifting the pair of input vectors more than 8 but less
    // than 16 bytes, emit a logical right shift of the destination.
    else if (shiftVal < 16) {
      // MMX has these as 1 x i64 vectors for some odd optimization reasons.
      const Type *EltTy = Type::getInt64Ty(C);
      const Type *VecTy = VectorType::get(EltTy, 1);

      Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
      Op2 = ConstantInt::get(VecTy, (shiftVal-8) * 8);

      // create i32 constant
        Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_mmx_psrl_q);
      Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");

    // If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
      Rep = Constant::getNullValue(F->getReturnType());

    // Replace any uses with our new instruction.
    if (!CI->use_empty())
      CI->replaceAllUsesWith(Rep);

    // Remove upgraded instruction.
    CI->eraseFromParent();

  } else if (F->getName() == "llvm.x86.ssse3.palign.r.128") {
    // SSE (128-bit) palignr: same scheme as the MMX variant, doubled widths.
    Value *Op1 = CI->getOperand(1);
    Value *Op2 = CI->getOperand(2);
    Value *Op3 = CI->getOperand(3);
    unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();

    IRBuilder<> Builder(C);
    Builder.SetInsertPoint(CI->getParent(), CI);

    // If palignr is shifting the pair of input vectors less than 17 bytes,
    // emit a shuffle instruction.
    if (shiftVal <= 16) {
      const Type *IntTy = Type::getInt32Ty(C);
      const Type *EltTy = Type::getInt8Ty(C);
      const Type *VecTy = VectorType::get(EltTy, 16);

      Op2 = Builder.CreateBitCast(Op2, VecTy);
      Op1 = Builder.CreateBitCast(Op1, VecTy);

      llvm::SmallVector<llvm::Constant*, 16> Indices;
      for (unsigned i = 0; i != 16; ++i)
        Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));

      Value *SV = ConstantVector::get(Indices.begin(), Indices.size());
      Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
      Rep = Builder.CreateBitCast(Rep, F->getReturnType());

    // If palignr is shifting the pair of input vectors more than 16 but less
    // than 32 bytes, emit a logical right shift of the destination.
    else if (shiftVal < 32) {
      const Type *EltTy = Type::getInt64Ty(C);
      const Type *VecTy = VectorType::get(EltTy, 2);
      const Type *IntTy = Type::getInt32Ty(C);

      Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
      Op2 = ConstantInt::get(IntTy, (shiftVal-16) * 8);

      // create i32 constant
        Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_sse2_psrl_dq);
      Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");

    // If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
      Rep = Constant::getNullValue(F->getReturnType());

    // Replace any uses with our new instruction.
    if (!CI->use_empty())
      CI->replaceAllUsesWith(Rep);

    // Remove upgraded instruction.
    CI->eraseFromParent();

    llvm_unreachable("Unknown function for CallInst upgrade.");

  // NewFn is set: dispatch on the replacement intrinsic's ID to rebuild the
  // call with the new signature.
  switch (NewFn->getIntrinsicID()) {
  default: llvm_unreachable("Unknown function for CallInst upgrade.");
  case Intrinsic::x86_mmx_psll_d:
  case Intrinsic::x86_mmx_psll_q:
  case Intrinsic::x86_mmx_psll_w:
  case Intrinsic::x86_mmx_psra_d:
  case Intrinsic::x86_mmx_psra_w:
  case Intrinsic::x86_mmx_psrl_d:
  case Intrinsic::x86_mmx_psrl_q:
  case Intrinsic::x86_mmx_psrl_w: {
    Operands[0] = CI->getOperand(1);

    // Cast the second parameter to the correct type (the upgraded MMX shift
    // intrinsics take v1i64 instead of v2i32 here).
    BitCastInst *BC = new BitCastInst(CI->getOperand(2),
                                      NewFn->getFunctionType()->getParamType(1),

    // Construct a new CallInst
    CallInst *NewCI = CallInst::Create(NewFn, Operands, Operands+2,
                                       "upgraded."+CI->getName(), CI);
    NewCI->setTailCall(CI->isTailCall());
    NewCI->setCallingConv(CI->getCallingConv());

    // Handle any uses of the old CallInst.
    if (!CI->use_empty())
      // Replace all uses of the old call with the new cast which has the
      // correct type.
      CI->replaceAllUsesWith(NewCI);

    // Clean up the old call now that it has been completely upgraded.
    CI->eraseFromParent();
  case Intrinsic::ctlz:
  case Intrinsic::ctpop:
  case Intrinsic::cttz: {
    // Build a small vector of the 1..(N-1) operands, which are the
    // actual call arguments (operand 0 is the callee).
    SmallVector<Value*, 8> Operands(CI->op_begin()+1, CI->op_end());

    // Construct a new CallInst
    CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
                                       "upgraded."+CI->getName(), CI);
    NewCI->setTailCall(CI->isTailCall());
    NewCI->setCallingConv(CI->getCallingConv());

    // Handle any uses of the old CallInst.
    if (!CI->use_empty()) {
      // Check for sign extend parameter attributes on the return values.
      bool SrcSExt = NewFn->getAttributes().paramHasAttr(0, Attribute::SExt);
      bool DestSExt = F->getAttributes().paramHasAttr(0, Attribute::SExt);

      // Construct an appropriate cast from the new return type to the old.
      CastInst *RetCast = CastInst::Create(
                            CastInst::getCastOpcode(NewCI, SrcSExt,
                            NewCI, F->getReturnType(),
                            NewCI->getName(), CI);
      NewCI->moveBefore(RetCast);

      // Replace all uses of the old call with the new cast which has the
      // correct type.
      CI->replaceAllUsesWith(RetCast);

    // Clean up the old call now that it has been completely upgraded.
    CI->eraseFromParent();
  case Intrinsic::eh_selector:
  case Intrinsic::eh_typeid_for: {
    // Only the return type changed.
    SmallVector<Value*, 8> Operands(CI->op_begin() + 1, CI->op_end());
    CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
                                       "upgraded." + CI->getName(), CI);
    NewCI->setTailCall(CI->isTailCall());
    NewCI->setCallingConv(CI->getCallingConv());

    // Handle any uses of the old CallInst.
    if (!CI->use_empty()) {
      // Construct an appropriate cast from the new return type to the old.
        CastInst::Create(CastInst::getCastOpcode(NewCI, true,
                                                 F->getReturnType(), true),
                         NewCI, F->getReturnType(), NewCI->getName(), CI);
      CI->replaceAllUsesWith(RetCast);
    CI->eraseFromParent();
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset: {
    // Add an i1 false "isvolatile" argument to complete the new signature.
    const llvm::Type *I1Ty = llvm::Type::getInt1Ty(CI->getContext());
    Value *Operands[5] = { CI->getOperand(1), CI->getOperand(2),
                           CI->getOperand(3), CI->getOperand(4),
                           llvm::ConstantInt::get(I1Ty, 0) };
    CallInst *NewCI = CallInst::Create(NewFn, Operands, Operands+5,
    NewCI->setTailCall(CI->isTailCall());
    NewCI->setCallingConv(CI->getCallingConv());
    // Handle any uses of the old CallInst.
    if (!CI->use_empty())
      // Replace all uses of the old call with the new cast which has the
      // correct type.
      CI->replaceAllUsesWith(NewCI);

    // Clean up the old call now that it has been completely upgraded.
    CI->eraseFromParent();
666 // This tests each Function to determine if it needs upgrading. When we find
667 // one we are interested in, we then upgrade all calls to reflect the new
// UpgradeCallsToIntrinsic - This tests one Function to determine if it needs
// upgrading. When we find one we are interested in, we upgrade all calls to
// reflect the new declaration and then erase the obsolete declaration.
void llvm::UpgradeCallsToIntrinsic(Function* F) {
  assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");

  // Upgrade the function and check if it is a totally new function.
  if (UpgradeIntrinsicFunction(F, NewFn)) {
      // Replace all uses to the old function with the new one if necessary.
      // (Iterator is advanced before the upgrade erases the call.)
      for (Value::use_iterator UI = F->use_begin(), UE = F->use_end();
        if (CallInst* CI = dyn_cast<CallInst>(*UI++))
          UpgradeIntrinsicCall(CI, NewFn);
      // Remove old function, no longer used, from the module.
      F->eraseFromParent();
688 /// This function strips all debug info intrinsics, except for llvm.dbg.declare.
689 /// If an llvm.dbg.declare intrinsic is invalid, then this function simply
691 void llvm::CheckDebugInfoIntrinsics(Module *M) {
694 if (Function *FuncStart = M->getFunction("llvm.dbg.func.start")) {
695 while (!FuncStart->use_empty()) {
696 CallInst *CI = cast<CallInst>(FuncStart->use_back());
697 CI->eraseFromParent();
699 FuncStart->eraseFromParent();
702 if (Function *StopPoint = M->getFunction("llvm.dbg.stoppoint")) {
703 while (!StopPoint->use_empty()) {
704 CallInst *CI = cast<CallInst>(StopPoint->use_back());
705 CI->eraseFromParent();
707 StopPoint->eraseFromParent();
710 if (Function *RegionStart = M->getFunction("llvm.dbg.region.start")) {
711 while (!RegionStart->use_empty()) {
712 CallInst *CI = cast<CallInst>(RegionStart->use_back());
713 CI->eraseFromParent();
715 RegionStart->eraseFromParent();
718 if (Function *RegionEnd = M->getFunction("llvm.dbg.region.end")) {
719 while (!RegionEnd->use_empty()) {
720 CallInst *CI = cast<CallInst>(RegionEnd->use_back());
721 CI->eraseFromParent();
723 RegionEnd->eraseFromParent();
726 if (Function *Declare = M->getFunction("llvm.dbg.declare")) {
727 if (!Declare->use_empty()) {
728 DbgDeclareInst *DDI = cast<DbgDeclareInst>(Declare->use_back());
729 if (!isa<MDNode>(DDI->getOperand(1)) ||!isa<MDNode>(DDI->getOperand(2))) {
730 while (!Declare->use_empty()) {
731 CallInst *CI = cast<CallInst>(Declare->use_back());
732 CI->eraseFromParent();
734 Declare->eraseFromParent();