1 //===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the auto-upgrade helper functions
12 //===----------------------------------------------------------------------===//
14 #include "llvm/AutoUpgrade.h"
15 #include "llvm/Constants.h"
16 #include "llvm/Function.h"
17 #include "llvm/LLVMContext.h"
18 #include "llvm/Module.h"
19 #include "llvm/IntrinsicInst.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/Support/CallSite.h"
22 #include "llvm/Support/ErrorHandling.h"
23 #include "llvm/Support/IRBuilder.h"
/// UpgradeIntrinsicFunction1 - Detect intrinsic declarations whose name or
/// type signature predates the current scheme and upgrade them.  Simple
/// cases are handled by renaming F in place; when the type signature also
/// changed, a correctly-typed replacement declaration is created and handed
/// back through NewFn so the caller can rewrite every call site (see
/// UpgradeIntrinsicCall).
static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
  assert(F && "Illegal to upgrade a non-existent Function.");

  // Get the Function's name.
  const std::string& Name = F->getName();

  const FunctionType *FTy = F->getFunctionType();

  // Quickly eliminate it, if it's not a candidate.
  // Every upgradable intrinsic name starts with the "llvm." prefix.
  if (Name.length() <= 8 || Name[0] != 'l' || Name[1] != 'l' ||
      Name[2] != 'v' || Name[3] != 'm' || Name[4] != '.')

  Module *M = F->getParent();
  // This upgrades the llvm.atomic.lcs, llvm.atomic.las, llvm.atomic.lss,
  // and atomics with default address spaces to their new names to their new
  // function name (e.g. llvm.atomic.add.i32 => llvm.atomic.add.i32.p0i32)
  if (Name.compare(5,7,"atomic.",7) == 0) {
    // "lcs" was renamed to "cmp.swap": keep the old type suffix and splice
    // in a default-address-space (".p0<ty>") qualifier.
    if (Name.compare(12,3,"lcs",3) == 0) {
      std::string::size_type delim = Name.find('.',12);
      F->setName("llvm.atomic.cmp.swap" + Name.substr(delim) +
                 ".p0" + Name.substr(delim+1));
    // "las" was renamed to "load.add".
    else if (Name.compare(12,3,"las",3) == 0) {
      std::string::size_type delim = Name.find('.',12);
      F->setName("llvm.atomic.load.add"+Name.substr(delim)
                 + ".p0" + Name.substr(delim+1));
    // "lss" was renamed to "load.sub".
    else if (Name.compare(12,3,"lss",3) == 0) {
      std::string::size_type delim = Name.find('.',12);
      F->setName("llvm.atomic.load.sub"+Name.substr(delim)
                 + ".p0" + Name.substr(delim+1));
    else if (Name.rfind(".p") == std::string::npos) {
      // We don't have an address space qualifier so this has to be upgraded
      // to the new name. Copy the type name at the end of the intrinsic
      // and append it after a default ".p0" address-space qualifier.
      std::string::size_type delim = Name.find_last_of('.');
      assert(delim != std::string::npos && "can not find type");
      F->setName(Name + ".p0" + Name.substr(delim+1));
  } else if (Name.compare(5, 9, "arm.neon.", 9) == 0) {
    // NEON "long"/"wide" arithmetic (signed "s." / unsigned "u." variants),
    // vaba, and vmovn no longer exist as intrinsics; their call sites are
    // lowered to plain IR by UpgradeIntrinsicCall, so no replacement
    // declaration is needed here.
    if (((Name.compare(14, 5, "vmovl", 5) == 0 ||
          Name.compare(14, 5, "vaddl", 5) == 0 ||
          Name.compare(14, 5, "vsubl", 5) == 0 ||
          Name.compare(14, 5, "vaddw", 5) == 0 ||
          Name.compare(14, 5, "vsubw", 5) == 0 ||
          Name.compare(14, 5, "vmull", 5) == 0 ||
          Name.compare(14, 5, "vmlal", 5) == 0 ||
          Name.compare(14, 5, "vmlsl", 5) == 0 ||
          Name.compare(14, 5, "vabdl", 5) == 0 ||
          Name.compare(14, 5, "vabal", 5) == 0) &&
         (Name.compare(19, 2, "s.", 2) == 0 ||
          Name.compare(19, 2, "u.", 2) == 0)) ||
        (Name.compare(14, 4, "vaba", 4) == 0 &&
         (Name.compare(18, 2, "s.", 2) == 0 ||
          Name.compare(18, 2, "u.", 2) == 0)) ||
        (Name.compare(14, 6, "vmovn.", 6) == 0)) {
      // Calls to these are transformed into IR without intrinsics.

    // Old versions of NEON ld/st intrinsics are missing alignment arguments.
    bool isVLd = (Name.compare(14, 3, "vld", 3) == 0);
    bool isVSt = (Name.compare(14, 3, "vst", 3) == 0);
    if (isVLd || isVSt) {
      // The digit after "vld"/"vst" is the number of vectors loaded/stored.
      unsigned NumVecs = Name.at(17) - '0';
      if (NumVecs == 0 || NumVecs > 4)
      bool isLaneOp = (Name.compare(18, 5, "lane.", 5) == 0);
      if (!isLaneOp && Name.at(18) != '.')
      unsigned ExpectedArgs = 2; // for the address and alignment
      if (isVSt || isLaneOp)
        ExpectedArgs += NumVecs;
      ExpectedArgs += 1; // for the lane number
      unsigned NumP = FTy->getNumParams();
      if (NumP != ExpectedArgs - 1)

      // Change the name of the old (bad) intrinsic, because
      // its type is incorrect, but we cannot overload that name.

      // One argument is missing: add the alignment argument.
      std::vector<const Type*> NewParams;
      for (unsigned p = 0; p < NumP; ++p)
        NewParams.push_back(FTy->getParamType(p));
      NewParams.push_back(Type::getInt32Ty(F->getContext()));
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(),
      NewFn = cast<Function>(M->getOrInsertFunction(Name, NewFTy));

  // This upgrades the name of the llvm.bswap intrinsic function to only use
  // a single type name for overloading. We only care about the old format
  // 'llvm.bswap.i*.i*', so check for 'bswap.' and then for there being
  // a '.' after 'bswap.'
  if (Name.compare(5,6,"bswap.",6) == 0) {
    std::string::size_type delim = Name.find('.',11);

    if (delim != std::string::npos) {
      // Construct the new name as 'llvm.bswap' + '.i*'
      F->setName(Name.substr(0,10)+Name.substr(delim));

  // We only want to fix the 'llvm.ct*' intrinsics which do not have the
  // correct return type, so we check for the name, and then check if the
  // return type does not match the parameter type.
  if ( (Name.compare(5,5,"ctpop",5) == 0 ||
        Name.compare(5,4,"ctlz",4) == 0 ||
        Name.compare(5,4,"cttz",4) == 0) &&
      FTy->getReturnType() != FTy->getParamType(0)) {
    // We first need to change the name of the old (bad) intrinsic, because
    // its type is incorrect, but we cannot overload that name. We
    // arbitrarily unique it here allowing us to construct a correctly named
    // and typed function below.

    // Now construct the new intrinsic with the correct name and type. We
    // leave the old function around in order to query its type, whatever it
    // may be, and correctly convert up to the new type.
    NewFn = cast<Function>(M->getOrInsertFunction(Name,
                                                  FTy->getParamType(0),
                                                  FTy->getParamType(0),

  // The old llvm.eh.selector.i32 is equivalent to the new llvm.eh.selector.
  if (Name.compare("llvm.eh.selector.i32") == 0) {
    F->setName("llvm.eh.selector");

  // The old llvm.eh.typeid.for.i32 is equivalent to llvm.eh.typeid.for.
  if (Name.compare("llvm.eh.typeid.for.i32") == 0) {
    F->setName("llvm.eh.typeid.for");

  // Convert the old llvm.eh.selector.i64 to a call to llvm.eh.selector.
  if (Name.compare("llvm.eh.selector.i64") == 0) {
    NewFn = Intrinsic::getDeclaration(M, Intrinsic::eh_selector);

  // Convert the old llvm.eh.typeid.for.i64 to a call to llvm.eh.typeid.for.
  if (Name.compare("llvm.eh.typeid.for.i64") == 0) {
    NewFn = Intrinsic::getDeclaration(M, Intrinsic::eh_typeid_for);

  // This upgrades the llvm.memcpy, llvm.memmove, and llvm.memset to the
  // new format that allows overloading the pointer for different address
  // space (e.g., llvm.memcpy.i16 => llvm.memcpy.p0i8.p0i8.i16)
  const char* NewFnName = NULL;
  if (Name.compare(5,8,"memcpy.i",8) == 0) {
      NewFnName = "llvm.memcpy.p0i8.p0i8.i8";
    else if (Name.compare(13,2,"16") == 0)
      NewFnName = "llvm.memcpy.p0i8.p0i8.i16";
    else if (Name.compare(13,2,"32") == 0)
      NewFnName = "llvm.memcpy.p0i8.p0i8.i32";
    else if (Name.compare(13,2,"64") == 0)
      NewFnName = "llvm.memcpy.p0i8.p0i8.i64";
  } else if (Name.compare(5,9,"memmove.i",9) == 0) {
      NewFnName = "llvm.memmove.p0i8.p0i8.i8";
    else if (Name.compare(14,2,"16") == 0)
      NewFnName = "llvm.memmove.p0i8.p0i8.i16";
    else if (Name.compare(14,2,"32") == 0)
      NewFnName = "llvm.memmove.p0i8.p0i8.i32";
    else if (Name.compare(14,2,"64") == 0)
      NewFnName = "llvm.memmove.p0i8.p0i8.i64";
  else if (Name.compare(5,8,"memset.i",8) == 0) {
      NewFnName = "llvm.memset.p0i8.i8";
    else if (Name.compare(13,2,"16") == 0)
      NewFnName = "llvm.memset.p0i8.i16";
    else if (Name.compare(13,2,"32") == 0)
      NewFnName = "llvm.memset.p0i8.i32";
    else if (Name.compare(13,2,"64") == 0)
      NewFnName = "llvm.memset.p0i8.i64";
    // The replacement declaration appends a trailing i1 parameter to the
    // old signature.
    NewFn = cast<Function>(M->getOrInsertFunction(NewFnName,
                                                  FTy->getReturnType(),
                                                  FTy->getParamType(0),
                                                  FTy->getParamType(1),
                                                  FTy->getParamType(2),
                                                  FTy->getParamType(3),
                                                  Type::getInt1Ty(F->getContext()),

  // This upgrades the llvm.part.select overloaded intrinsic names to only
  // use one type specifier in the name. We only care about the old format
  // 'llvm.part.select.i*.i*', and solve as above with bswap.
  if (Name.compare(5,12,"part.select.",12) == 0) {
    std::string::size_type delim = Name.find('.',17);

    if (delim != std::string::npos) {
      // Construct a new name as 'llvm.part.select' + '.i*'
      F->setName(Name.substr(0,16)+Name.substr(delim));

  // This upgrades the llvm.part.set intrinsics similarly as above, however
  // we care about 'llvm.part.set.i*.i*.i*', but only the first two types
  // must match. There is an additional type specifier after these two
  // matching types that we must retain when upgrading. Thus, we require
  // finding 2 periods, not just one, after the intrinsic name.
  if (Name.compare(5,9,"part.set.",9) == 0) {
    std::string::size_type delim = Name.find('.',14);

    if (delim != std::string::npos &&
        Name.find('.',delim+1) != std::string::npos) {
      // Construct a new name as 'llvm.part.set' + '.i*.i*'
      F->setName(Name.substr(0,13)+Name.substr(delim));

  // This fixes all MMX shift intrinsic instructions to take a
  // x86_mmx instead of a v1i64, v2i32, v4i16, or v8i8.
  if (Name.compare(5, 8, "x86.mmx.", 8) == 0) {
    const Type *X86_MMXTy = VectorType::getX86_MMXTy(FTy->getContext());

    if (Name.compare(13, 4, "padd", 4) == 0 ||
        Name.compare(13, 4, "psub", 4) == 0 ||
        Name.compare(13, 4, "pmul", 4) == 0 ||
        Name.compare(13, 5, "pmadd", 5) == 0 ||
        Name.compare(13, 4, "pand", 4) == 0 ||
        Name.compare(13, 3, "por", 3) == 0 ||
        Name.compare(13, 4, "pxor", 4) == 0 ||
        Name.compare(13, 4, "pavg", 4) == 0 ||
        Name.compare(13, 4, "pmax", 4) == 0 ||
        Name.compare(13, 4, "pmin", 4) == 0 ||
        Name.compare(13, 4, "psad", 4) == 0 ||
        Name.compare(13, 4, "psll", 4) == 0 ||
        Name.compare(13, 4, "psrl", 4) == 0 ||
        Name.compare(13, 4, "psra", 4) == 0 ||
        Name.compare(13, 4, "pack", 4) == 0 ||
        Name.compare(13, 6, "punpck", 6) == 0 ||
        Name.compare(13, 4, "pcmp", 4) == 0) {
      assert(FTy->getNumParams() == 2 && "MMX intrinsic takes 2 args!");
      const Type *SecondParamTy = X86_MMXTy;

      // The immediate-shift forms keep their original (scalar count)
      // second parameter rather than taking an MMX value.
      if (Name.compare(13, 5, "pslli", 5) == 0 ||
          Name.compare(13, 5, "psrli", 5) == 0 ||
          Name.compare(13, 5, "psrai", 5) == 0)
        SecondParamTy = FTy->getParamType(1);

      // Don't do anything if it has the correct types.
      if (FTy->getReturnType() == X86_MMXTy &&
          FTy->getParamType(0) == X86_MMXTy &&
          FTy->getParamType(1) == SecondParamTy)

      // We first need to change the name of the old (bad) intrinsic, because
      // its type is incorrect, but we cannot overload that name. We
      // arbitrarily unique it here allowing us to construct a correctly named
      // and typed function below.

      // Now construct the new intrinsic with the correct name and type. We
      // leave the old function around in order to query its type, whatever it
      // may be, and correctly convert up to the new type.
      NewFn = cast<Function>(M->getOrInsertFunction(Name,
                                                    X86_MMXTy, X86_MMXTy,
                                                    SecondParamTy, (Type*)0));

    if (Name.compare(13, 8, "maskmovq", 8) == 0) {
      // Don't do anything if it has the correct types.
      if (FTy->getParamType(0) == X86_MMXTy &&
          FTy->getParamType(1) == X86_MMXTy)

      NewFn = cast<Function>(M->getOrInsertFunction(Name,
                                                    FTy->getReturnType(),
                                                    FTy->getParamType(2),

    if (Name.compare(13, 8, "pmovmskb", 8) == 0) {
      if (FTy->getParamType(0) == X86_MMXTy)

      NewFn = cast<Function>(M->getOrInsertFunction(Name,
                                                    FTy->getReturnType(),

    if (Name.compare(13, 5, "movnt", 5) == 0) {
      if (FTy->getParamType(1) == X86_MMXTy)

      NewFn = cast<Function>(M->getOrInsertFunction(Name,
                                                    FTy->getReturnType(),
                                                    FTy->getParamType(0),

    if (Name.compare(13, 7, "palignr", 7) == 0) {
      if (FTy->getReturnType() == X86_MMXTy &&
          FTy->getParamType(0) == X86_MMXTy &&
          FTy->getParamType(1) == X86_MMXTy)

      NewFn = cast<Function>(M->getOrInsertFunction(Name,
                                                    FTy->getParamType(2),

    if (Name.compare(13, 5, "pextr", 5) == 0) {
      if (FTy->getParamType(0) == X86_MMXTy)

      NewFn = cast<Function>(M->getOrInsertFunction(Name,
                                                    FTy->getReturnType(),
                                                    FTy->getParamType(1),

    if (Name.compare(13, 5, "pinsr", 5) == 0) {
      if (FTy->getReturnType() == X86_MMXTy &&
          FTy->getParamType(0) == X86_MMXTy)

      NewFn = cast<Function>(M->getOrInsertFunction(Name,
                                                    FTy->getParamType(1),
                                                    FTy->getParamType(2),

    if (Name.compare(13, 12, "cvtsi32.si64", 12) == 0) {
      if (FTy->getReturnType() == X86_MMXTy)

      NewFn = cast<Function>(M->getOrInsertFunction(Name,
                                                    FTy->getParamType(0),

    if (Name.compare(13, 12, "cvtsi64.si32", 12) == 0) {
      if (FTy->getParamType(0) == X86_MMXTy)

      NewFn = cast<Function>(M->getOrInsertFunction(Name,
                                                    FTy->getReturnType(),

    if (Name.compare(13, 8, "vec.init", 8) == 0) {
      if (FTy->getReturnType() == X86_MMXTy)

      // The element-width suffix (".b"/".w"/".d") selects how many scalar
      // parameters the replacement declaration carries.
      if (Name.compare(21, 2, ".b", 2) == 0)
        NewFn = cast<Function>(M->getOrInsertFunction(Name,
                                                      FTy->getParamType(0),
                                                      FTy->getParamType(1),
                                                      FTy->getParamType(2),
                                                      FTy->getParamType(3),
                                                      FTy->getParamType(4),
                                                      FTy->getParamType(5),
                                                      FTy->getParamType(6),
                                                      FTy->getParamType(7),
      else if (Name.compare(21, 2, ".w", 2) == 0)
        NewFn = cast<Function>(M->getOrInsertFunction(Name,
                                                      FTy->getParamType(0),
                                                      FTy->getParamType(1),
                                                      FTy->getParamType(2),
                                                      FTy->getParamType(3),
      else if (Name.compare(21, 2, ".d", 2) == 0)
        NewFn = cast<Function>(M->getOrInsertFunction(Name,
                                                      FTy->getParamType(0),
                                                      FTy->getParamType(1),

    if (Name.compare(13, 9, "vec.ext.d", 9) == 0) {
      if (FTy->getReturnType() == X86_MMXTy &&
          FTy->getParamType(0) == X86_MMXTy)

      NewFn = cast<Function>(M->getOrInsertFunction(Name,
                                                    FTy->getParamType(1),

    // NOTE(review): the count argument (9) does not match the literal
    // lengths passed last (4 for "emms", 5 for "femms"); compare() clamps
    // the substring so exact names still match, but this looks like a
    // copy-paste of the "vec.ext.d" length above — confirm intent.
    if (Name.compare(13, 9, "emms", 4) == 0 ||
        Name.compare(13, 9, "femms", 5) == 0) {

    // We really shouldn't get here ever.
    assert(0 && "Invalid MMX intrinsic!");

  } else if (Name.compare(5,17,"x86.sse2.loadh.pd",17) == 0 ||
             Name.compare(5,17,"x86.sse2.loadl.pd",17) == 0 ||
             Name.compare(5,16,"x86.sse2.movl.dq",16) == 0 ||
             Name.compare(5,15,"x86.sse2.movs.d",15) == 0 ||
             Name.compare(5,16,"x86.sse2.shuf.pd",16) == 0 ||
             Name.compare(5,18,"x86.sse2.unpckh.pd",18) == 0 ||
             Name.compare(5,18,"x86.sse2.unpckl.pd",18) == 0 ||
             Name.compare(5,20,"x86.sse2.punpckh.qdq",20) == 0 ||
             Name.compare(5,20,"x86.sse2.punpckl.qdq",20) == 0) {
    // Calls to these intrinsics are transformed into ShuffleVector's.
  } else if (Name.compare(5, 16, "x86.sse41.pmulld", 16) == 0) {
    // Calls to these intrinsics are transformed into vector multiplies.
  } else if (Name.compare(5, 18, "x86.ssse3.palign.r", 18) == 0 ||
             Name.compare(5, 22, "x86.ssse3.palign.r.128", 22) == 0) {
    // Calls to these intrinsics are transformed into vector shuffles, shifts,

  //  This may not belong here. This function is effectively being overloaded
  //  to both detect an intrinsic which needs upgrading, and to provide the
  //  upgraded form of the intrinsic. We should perhaps have two separate
  //  functions for this.
543 bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
545 bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
547 // Upgrade intrinsic attributes. This does not change the function.
550 if (unsigned id = F->getIntrinsicID())
551 F->setAttributes(Intrinsic::getAttributes((Intrinsic::ID)id));
555 bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
556 StringRef Name(GV->getName());
558 // We are only upgrading one symbol here.
559 if (Name == ".llvm.eh.catch.all.value") {
560 GV->setName("llvm.eh.catch.all.value");
567 /// ExtendNEONArgs - For NEON "long" and "wide" operations, where the results
568 /// have vector elements twice as big as one or both source operands, do the
569 /// sign- or zero-extension that used to be handled by intrinsics. The
570 /// extended values are returned via V0 and V1.
571 static void ExtendNEONArgs(CallInst *CI, Value *Arg0, Value *Arg1,
572 Value *&V0, Value *&V1) {
573 Function *F = CI->getCalledFunction();
574 const std::string& Name = F->getName();
575 bool isLong = (Name.at(18) == 'l');
576 bool isSigned = (Name.at(19) == 's');
580 V0 = new SExtInst(Arg0, CI->getType(), "", CI);
583 V1 = new SExtInst(Arg1, CI->getType(), "", CI);
586 V0 = new ZExtInst(Arg0, CI->getType(), "", CI);
589 V1 = new ZExtInst(Arg1, CI->getType(), "", CI);
593 /// CallVABD - As part of expanding a call to one of the old NEON vabdl, vaba,
594 /// or vabal intrinsics, construct a call to a vabd intrinsic. Examine the
595 /// name of the old intrinsic to determine whether to use a signed or unsigned
596 /// vabd intrinsic. Get the type from the old call instruction, adjusted for
597 /// half-size vector elements if the old intrinsic was vabdl or vabal.
598 static Instruction *CallVABD(CallInst *CI, Value *Arg0, Value *Arg1) {
599 Function *F = CI->getCalledFunction();
600 const std::string& Name = F->getName();
601 bool isLong = (Name.at(18) == 'l');
602 bool isSigned = (Name.at(isLong ? 19 : 18) == 's');
606 intID = Intrinsic::arm_neon_vabds;
608 intID = Intrinsic::arm_neon_vabdu;
610 const Type *Ty = CI->getType();
612 Ty = VectorType::getTruncatedElementVectorType(cast<const VectorType>(Ty));
614 Function *VABD = Intrinsic::getDeclaration(F->getParent(), intID, &Ty, 1);
618 return CallInst::Create(VABD, Operands, Operands+2,
619 "upgraded."+CI->getName(), CI);
622 /// ConstructNewCallInst - Construct a new CallInst with the signature of NewFn.
623 static void ConstructNewCallInst(Function *NewFn, CallInst *OldCI,
624 Value **Operands, unsigned NumOps,
625 bool AssignName = true) {
626 // Construct a new CallInst.
628 CallInst::Create(NewFn, Operands, Operands + NumOps,
629 AssignName ? "upgraded." + OldCI->getName() : "", OldCI);
631 NewCI->setTailCall(OldCI->isTailCall());
632 NewCI->setCallingConv(OldCI->getCallingConv());
634 // Handle any uses of the old CallInst.
635 if (!OldCI->use_empty()) {
636 // If the type has changed, add a cast.
637 Instruction *I = OldCI;
638 if (OldCI->getType() != NewCI->getType()) {
639 Function *OldFn = OldCI->getCalledFunction();
641 CastInst::Create(CastInst::getCastOpcode(NewCI, true,
642 OldFn->getReturnType(), true),
643 NewCI, OldFn->getReturnType(), NewCI->getName(),OldCI);
646 // Replace all uses of the old call with the new cast which has the
648 OldCI->replaceAllUsesWith(I);
650 // Clean up the old call now that it has been completely upgraded.
651 OldCI->eraseFromParent();
654 // UpgradeIntrinsicCall - Upgrade a call to an old intrinsic to be a call the
655 // upgraded intrinsic. All argument and return casting must be provided in
656 // order to seamlessly integrate with existing context.
657 void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
658 Function *F = CI->getCalledFunction();
659 LLVMContext &C = CI->getContext();
660 ImmutableCallSite CS(CI);
662 assert(F && "CallInst has no function associated with it.");
665 // Get the Function's name.
666 const std::string& Name = F->getName();
668 // Upgrade ARM NEON intrinsics.
669 if (Name.compare(5, 9, "arm.neon.", 9) == 0) {
672 if (Name.compare(14, 7, "vmovls.", 7) == 0) {
673 NewI = new SExtInst(CI->getArgOperand(0), CI->getType(),
674 "upgraded." + CI->getName(), CI);
675 } else if (Name.compare(14, 7, "vmovlu.", 7) == 0) {
676 NewI = new ZExtInst(CI->getArgOperand(0), CI->getType(),
677 "upgraded." + CI->getName(), CI);
678 } else if (Name.compare(14, 4, "vadd", 4) == 0) {
679 ExtendNEONArgs(CI, CI->getArgOperand(0), CI->getArgOperand(1), V0, V1);
680 NewI = BinaryOperator::CreateAdd(V0, V1, "upgraded."+CI->getName(), CI);
681 } else if (Name.compare(14, 4, "vsub", 4) == 0) {
682 ExtendNEONArgs(CI, CI->getArgOperand(0), CI->getArgOperand(1), V0, V1);
683 NewI = BinaryOperator::CreateSub(V0, V1,"upgraded."+CI->getName(),CI);
684 } else if (Name.compare(14, 4, "vmul", 4) == 0) {
685 ExtendNEONArgs(CI, CI->getArgOperand(0), CI->getArgOperand(1), V0, V1);
686 NewI = BinaryOperator::CreateMul(V0, V1,"upgraded."+CI->getName(),CI);
687 } else if (Name.compare(14, 4, "vmla", 4) == 0) {
688 ExtendNEONArgs(CI, CI->getArgOperand(1), CI->getArgOperand(2), V0, V1);
689 Instruction *MulI = BinaryOperator::CreateMul(V0, V1, "", CI);
690 NewI = BinaryOperator::CreateAdd(CI->getArgOperand(0), MulI,
691 "upgraded."+CI->getName(), CI);
692 } else if (Name.compare(14, 4, "vmls", 4) == 0) {
693 ExtendNEONArgs(CI, CI->getArgOperand(1), CI->getArgOperand(2), V0, V1);
694 Instruction *MulI = BinaryOperator::CreateMul(V0, V1, "", CI);
695 NewI = BinaryOperator::CreateSub(CI->getArgOperand(0), MulI,
696 "upgraded."+CI->getName(), CI);
697 } else if (Name.compare(14, 4, "vabd", 4) == 0) {
698 NewI = CallVABD(CI, CI->getArgOperand(0), CI->getArgOperand(1));
699 NewI = new ZExtInst(NewI, CI->getType(), "upgraded."+CI->getName(), CI);
700 } else if (Name.compare(14, 4, "vaba", 4) == 0) {
701 NewI = CallVABD(CI, CI->getArgOperand(1), CI->getArgOperand(2));
702 if (Name.at(18) == 'l')
703 NewI = new ZExtInst(NewI, CI->getType(), "", CI);
704 NewI = BinaryOperator::CreateAdd(CI->getArgOperand(0), NewI,
705 "upgraded."+CI->getName(), CI);
706 } else if (Name.compare(14, 6, "vmovn.", 6) == 0) {
707 NewI = new TruncInst(CI->getArgOperand(0), CI->getType(),
708 "upgraded." + CI->getName(), CI);
710 llvm_unreachable("Unknown arm.neon function for CallInst upgrade.");
712 // Replace any uses of the old CallInst.
713 if (!CI->use_empty())
714 CI->replaceAllUsesWith(NewI);
715 CI->eraseFromParent();
719 bool isLoadH = false, isLoadL = false, isMovL = false;
720 bool isMovSD = false, isShufPD = false;
721 bool isUnpckhPD = false, isUnpcklPD = false;
722 bool isPunpckhQPD = false, isPunpcklQPD = false;
723 if (F->getName() == "llvm.x86.sse2.loadh.pd")
725 else if (F->getName() == "llvm.x86.sse2.loadl.pd")
727 else if (F->getName() == "llvm.x86.sse2.movl.dq")
729 else if (F->getName() == "llvm.x86.sse2.movs.d")
731 else if (F->getName() == "llvm.x86.sse2.shuf.pd")
733 else if (F->getName() == "llvm.x86.sse2.unpckh.pd")
735 else if (F->getName() == "llvm.x86.sse2.unpckl.pd")
737 else if (F->getName() == "llvm.x86.sse2.punpckh.qdq")
739 else if (F->getName() == "llvm.x86.sse2.punpckl.qdq")
742 if (isLoadH || isLoadL || isMovL || isMovSD || isShufPD ||
743 isUnpckhPD || isUnpcklPD || isPunpckhQPD || isPunpcklQPD) {
744 std::vector<Constant*> Idxs;
745 Value *Op0 = CI->getArgOperand(0);
746 ShuffleVectorInst *SI = NULL;
747 if (isLoadH || isLoadL) {
748 Value *Op1 = UndefValue::get(Op0->getType());
749 Value *Addr = new BitCastInst(CI->getArgOperand(1),
750 Type::getDoublePtrTy(C),
752 Value *Load = new LoadInst(Addr, "upgraded.", false, 8, CI);
753 Value *Idx = ConstantInt::get(Type::getInt32Ty(C), 0);
754 Op1 = InsertElementInst::Create(Op1, Load, Idx, "upgraded.", CI);
757 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
758 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
760 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
761 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
763 Value *Mask = ConstantVector::get(Idxs);
764 SI = new ShuffleVectorInst(Op0, Op1, Mask, "upgraded.", CI);
766 Constant *Zero = ConstantInt::get(Type::getInt32Ty(C), 0);
767 Idxs.push_back(Zero);
768 Idxs.push_back(Zero);
769 Idxs.push_back(Zero);
770 Idxs.push_back(Zero);
771 Value *ZeroV = ConstantVector::get(Idxs);
774 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 4));
775 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 5));
776 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
777 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 3));
778 Value *Mask = ConstantVector::get(Idxs);
779 SI = new ShuffleVectorInst(ZeroV, Op0, Mask, "upgraded.", CI);
780 } else if (isMovSD ||
781 isUnpckhPD || isUnpcklPD || isPunpckhQPD || isPunpcklQPD) {
782 Value *Op1 = CI->getArgOperand(1);
784 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
785 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
786 } else if (isUnpckhPD || isPunpckhQPD) {
787 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
788 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 3));
790 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
791 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
793 Value *Mask = ConstantVector::get(Idxs);
794 SI = new ShuffleVectorInst(Op0, Op1, Mask, "upgraded.", CI);
795 } else if (isShufPD) {
796 Value *Op1 = CI->getArgOperand(1);
798 cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
799 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), MaskVal & 1));
800 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C),
801 ((MaskVal >> 1) & 1)+2));
802 Value *Mask = ConstantVector::get(Idxs);
803 SI = new ShuffleVectorInst(Op0, Op1, Mask, "upgraded.", CI);
806 assert(SI && "Unexpected!");
808 // Handle any uses of the old CallInst.
809 if (!CI->use_empty())
810 // Replace all uses of the old call with the new cast which has the
812 CI->replaceAllUsesWith(SI);
814 // Clean up the old call now that it has been completely upgraded.
815 CI->eraseFromParent();
816 } else if (F->getName() == "llvm.x86.sse41.pmulld") {
817 // Upgrade this set of intrinsics into vector multiplies.
818 Instruction *Mul = BinaryOperator::CreateMul(CI->getArgOperand(0),
819 CI->getArgOperand(1),
822 // Fix up all the uses with our new multiply.
823 if (!CI->use_empty())
824 CI->replaceAllUsesWith(Mul);
826 // Remove upgraded multiply.
827 CI->eraseFromParent();
828 } else if (F->getName() == "llvm.x86.ssse3.palign.r") {
829 Value *Op1 = CI->getArgOperand(0);
830 Value *Op2 = CI->getArgOperand(1);
831 Value *Op3 = CI->getArgOperand(2);
832 unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
834 IRBuilder<> Builder(C);
835 Builder.SetInsertPoint(CI->getParent(), CI);
837 // If palignr is shifting the pair of input vectors less than 9 bytes,
838 // emit a shuffle instruction.
840 const Type *IntTy = Type::getInt32Ty(C);
841 const Type *EltTy = Type::getInt8Ty(C);
842 const Type *VecTy = VectorType::get(EltTy, 8);
844 Op2 = Builder.CreateBitCast(Op2, VecTy);
845 Op1 = Builder.CreateBitCast(Op1, VecTy);
847 llvm::SmallVector<llvm::Constant*, 8> Indices;
848 for (unsigned i = 0; i != 8; ++i)
849 Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
851 Value *SV = ConstantVector::get(Indices.begin(), Indices.size());
852 Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
853 Rep = Builder.CreateBitCast(Rep, F->getReturnType());
856 // If palignr is shifting the pair of input vectors more than 8 but less
857 // than 16 bytes, emit a logical right shift of the destination.
858 else if (shiftVal < 16) {
859 // MMX has these as 1 x i64 vectors for some odd optimization reasons.
860 const Type *EltTy = Type::getInt64Ty(C);
861 const Type *VecTy = VectorType::get(EltTy, 1);
863 Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
864 Op2 = ConstantInt::get(VecTy, (shiftVal-8) * 8);
866 // create i32 constant
868 Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_mmx_psrl_q);
869 Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");
872 // If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
874 Rep = Constant::getNullValue(F->getReturnType());
877 // Replace any uses with our new instruction.
878 if (!CI->use_empty())
879 CI->replaceAllUsesWith(Rep);
881 // Remove upgraded instruction.
882 CI->eraseFromParent();
884 } else if (F->getName() == "llvm.x86.ssse3.palign.r.128") {
885 Value *Op1 = CI->getArgOperand(0);
886 Value *Op2 = CI->getArgOperand(1);
887 Value *Op3 = CI->getArgOperand(2);
888 unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
890 IRBuilder<> Builder(C);
891 Builder.SetInsertPoint(CI->getParent(), CI);
893 // If palignr is shifting the pair of input vectors less than 17 bytes,
894 // emit a shuffle instruction.
895 if (shiftVal <= 16) {
896 const Type *IntTy = Type::getInt32Ty(C);
897 const Type *EltTy = Type::getInt8Ty(C);
898 const Type *VecTy = VectorType::get(EltTy, 16);
900 Op2 = Builder.CreateBitCast(Op2, VecTy);
901 Op1 = Builder.CreateBitCast(Op1, VecTy);
903 llvm::SmallVector<llvm::Constant*, 16> Indices;
904 for (unsigned i = 0; i != 16; ++i)
905 Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
907 Value *SV = ConstantVector::get(Indices.begin(), Indices.size());
908 Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
909 Rep = Builder.CreateBitCast(Rep, F->getReturnType());
912 // If palignr is shifting the pair of input vectors more than 16 but less
913 // than 32 bytes, emit a logical right shift of the destination.
914 else if (shiftVal < 32) {
915 const Type *EltTy = Type::getInt64Ty(C);
916 const Type *VecTy = VectorType::get(EltTy, 2);
917 const Type *IntTy = Type::getInt32Ty(C);
919 Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
920 Op2 = ConstantInt::get(IntTy, (shiftVal-16) * 8);
922 // create i32 constant
924 Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_sse2_psrl_dq);
925 Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");
928 // If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
930 Rep = Constant::getNullValue(F->getReturnType());
933 // Replace any uses with our new instruction.
934 if (!CI->use_empty())
935 CI->replaceAllUsesWith(Rep);
937 // Remove upgraded instruction.
938 CI->eraseFromParent();
941 llvm_unreachable("Unknown function for CallInst upgrade.");
946 switch (NewFn->getIntrinsicID()) {
947 default: llvm_unreachable("Unknown function for CallInst upgrade.");
948 case Intrinsic::arm_neon_vld1:
949 case Intrinsic::arm_neon_vld2:
950 case Intrinsic::arm_neon_vld3:
951 case Intrinsic::arm_neon_vld4:
952 case Intrinsic::arm_neon_vst1:
953 case Intrinsic::arm_neon_vst2:
954 case Intrinsic::arm_neon_vst3:
955 case Intrinsic::arm_neon_vst4:
956 case Intrinsic::arm_neon_vld2lane:
957 case Intrinsic::arm_neon_vld3lane:
958 case Intrinsic::arm_neon_vld4lane:
959 case Intrinsic::arm_neon_vst2lane:
960 case Intrinsic::arm_neon_vst3lane:
961 case Intrinsic::arm_neon_vst4lane: {
962 // Add a default alignment argument of 1.
963 SmallVector<Value*, 8> Operands(CS.arg_begin(), CS.arg_end());
964 Operands.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
965 CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
967 NewCI->setTailCall(CI->isTailCall());
968 NewCI->setCallingConv(CI->getCallingConv());
970 // Handle any uses of the old CallInst.
971 if (!CI->use_empty())
972 // Replace all uses of the old call with the new cast which has the
974 CI->replaceAllUsesWith(NewCI);
976 // Clean up the old call now that it has been completely upgraded.
977 CI->eraseFromParent();
981 case Intrinsic::x86_mmx_padd_b:
982 case Intrinsic::x86_mmx_padd_w:
983 case Intrinsic::x86_mmx_padd_d:
984 case Intrinsic::x86_mmx_padd_q:
985 case Intrinsic::x86_mmx_padds_b:
986 case Intrinsic::x86_mmx_padds_w:
987 case Intrinsic::x86_mmx_paddus_b:
988 case Intrinsic::x86_mmx_paddus_w:
989 case Intrinsic::x86_mmx_psub_b:
990 case Intrinsic::x86_mmx_psub_w:
991 case Intrinsic::x86_mmx_psub_d:
992 case Intrinsic::x86_mmx_psub_q:
993 case Intrinsic::x86_mmx_psubs_b:
994 case Intrinsic::x86_mmx_psubs_w:
995 case Intrinsic::x86_mmx_psubus_b:
996 case Intrinsic::x86_mmx_psubus_w:
997 case Intrinsic::x86_mmx_pmulh_w:
998 case Intrinsic::x86_mmx_pmull_w:
999 case Intrinsic::x86_mmx_pmulhu_w:
1000 case Intrinsic::x86_mmx_pmulu_dq:
1001 case Intrinsic::x86_mmx_pmadd_wd:
1002 case Intrinsic::x86_mmx_pand:
1003 case Intrinsic::x86_mmx_pandn:
1004 case Intrinsic::x86_mmx_por:
1005 case Intrinsic::x86_mmx_pxor:
1006 case Intrinsic::x86_mmx_pavg_b:
1007 case Intrinsic::x86_mmx_pavg_w:
1008 case Intrinsic::x86_mmx_pmaxu_b:
1009 case Intrinsic::x86_mmx_pmaxs_w:
1010 case Intrinsic::x86_mmx_pminu_b:
1011 case Intrinsic::x86_mmx_pmins_w:
1012 case Intrinsic::x86_mmx_psad_bw:
1013 case Intrinsic::x86_mmx_psll_w:
1014 case Intrinsic::x86_mmx_psll_d:
1015 case Intrinsic::x86_mmx_psll_q:
1016 case Intrinsic::x86_mmx_pslli_w:
1017 case Intrinsic::x86_mmx_pslli_d:
1018 case Intrinsic::x86_mmx_pslli_q:
1019 case Intrinsic::x86_mmx_psrl_w:
1020 case Intrinsic::x86_mmx_psrl_d:
1021 case Intrinsic::x86_mmx_psrl_q:
1022 case Intrinsic::x86_mmx_psrli_w:
1023 case Intrinsic::x86_mmx_psrli_d:
1024 case Intrinsic::x86_mmx_psrli_q:
1025 case Intrinsic::x86_mmx_psra_w:
1026 case Intrinsic::x86_mmx_psra_d:
1027 case Intrinsic::x86_mmx_psrai_w:
1028 case Intrinsic::x86_mmx_psrai_d:
1029 case Intrinsic::x86_mmx_packsswb:
1030 case Intrinsic::x86_mmx_packssdw:
1031 case Intrinsic::x86_mmx_packuswb:
1032 case Intrinsic::x86_mmx_punpckhbw:
1033 case Intrinsic::x86_mmx_punpckhwd:
1034 case Intrinsic::x86_mmx_punpckhdq:
1035 case Intrinsic::x86_mmx_punpcklbw:
1036 case Intrinsic::x86_mmx_punpcklwd:
1037 case Intrinsic::x86_mmx_punpckldq:
1038 case Intrinsic::x86_mmx_pcmpeq_b:
1039 case Intrinsic::x86_mmx_pcmpeq_w:
1040 case Intrinsic::x86_mmx_pcmpeq_d:
1041 case Intrinsic::x86_mmx_pcmpgt_b:
1042 case Intrinsic::x86_mmx_pcmpgt_w:
1043 case Intrinsic::x86_mmx_pcmpgt_d: {
1046 // Cast the operand to the X86 MMX type.
1047 Operands[0] = new BitCastInst(CI->getArgOperand(0),
1048 NewFn->getFunctionType()->getParamType(0),
1051 switch (NewFn->getIntrinsicID()) {
1053 // Cast to the X86 MMX type.
1054 Operands[1] = new BitCastInst(CI->getArgOperand(1),
1055 NewFn->getFunctionType()->getParamType(1),
1058 case Intrinsic::x86_mmx_pslli_w:
1059 case Intrinsic::x86_mmx_pslli_d:
1060 case Intrinsic::x86_mmx_pslli_q:
1061 case Intrinsic::x86_mmx_psrli_w:
1062 case Intrinsic::x86_mmx_psrli_d:
1063 case Intrinsic::x86_mmx_psrli_q:
1064 case Intrinsic::x86_mmx_psrai_w:
1065 case Intrinsic::x86_mmx_psrai_d:
1066 // These take an i32 as their second parameter.
1067 Operands[1] = CI->getArgOperand(1);
1071 ConstructNewCallInst(NewFn, CI, Operands, 2);
1074 case Intrinsic::x86_mmx_maskmovq: {
1077 // Cast the operands to the X86 MMX type.
1078 Operands[0] = new BitCastInst(CI->getArgOperand(0),
1079 NewFn->getFunctionType()->getParamType(0),
1081 Operands[1] = new BitCastInst(CI->getArgOperand(1),
1082 NewFn->getFunctionType()->getParamType(1),
1084 Operands[2] = CI->getArgOperand(2);
1086 ConstructNewCallInst(NewFn, CI, Operands, 3, false);
1089 case Intrinsic::x86_mmx_pmovmskb: {
1092 // Cast the operand to the X86 MMX type.
1093 Operands[0] = new BitCastInst(CI->getArgOperand(0),
1094 NewFn->getFunctionType()->getParamType(0),
1097 ConstructNewCallInst(NewFn, CI, Operands, 1);
1100 case Intrinsic::x86_mmx_movnt_dq: {
1103 Operands[0] = CI->getArgOperand(0);
1105 // Cast the operand to the X86 MMX type.
1106 Operands[1] = new BitCastInst(CI->getArgOperand(1),
1107 NewFn->getFunctionType()->getParamType(1),
1110 ConstructNewCallInst(NewFn, CI, Operands, 2, false);
1113 case Intrinsic::x86_mmx_palignr_b: {
1116 // Cast the operands to the X86 MMX type.
1117 Operands[0] = new BitCastInst(CI->getArgOperand(0),
1118 NewFn->getFunctionType()->getParamType(0),
1120 Operands[1] = new BitCastInst(CI->getArgOperand(1),
1121 NewFn->getFunctionType()->getParamType(1),
1123 Operands[2] = CI->getArgOperand(2);
1125 ConstructNewCallInst(NewFn, CI, Operands, 3);
1128 case Intrinsic::x86_mmx_pextr_w: {
1131 // Cast the operands to the X86 MMX type.
1132 Operands[0] = new BitCastInst(CI->getArgOperand(0),
1133 NewFn->getFunctionType()->getParamType(0),
1135 Operands[1] = CI->getArgOperand(1);
1137 ConstructNewCallInst(NewFn, CI, Operands, 2);
1140 case Intrinsic::x86_mmx_pinsr_w: {
1143 // Cast the operands to the X86 MMX type.
1144 Operands[0] = new BitCastInst(CI->getArgOperand(0),
1145 NewFn->getFunctionType()->getParamType(0),
1147 Operands[1] = CI->getArgOperand(1);
1148 Operands[2] = CI->getArgOperand(2);
1150 ConstructNewCallInst(NewFn, CI, Operands, 3);
1154 case Intrinsic::x86_mmx_cvtsi32_si64: {
1155 // The return type needs to be changed.
1157 Operands[0] = CI->getArgOperand(0);
1158 ConstructNewCallInst(NewFn, CI, Operands, 1);
1161 case Intrinsic::x86_mmx_cvtsi64_si32: {
1164 // Cast the operand to the X86 MMX type.
1165 Operands[0] = new BitCastInst(CI->getArgOperand(0),
1166 NewFn->getFunctionType()->getParamType(0),
1169 ConstructNewCallInst(NewFn, CI, Operands, 1);
1172 case Intrinsic::x86_mmx_vec_init_b:
1173 case Intrinsic::x86_mmx_vec_init_w:
1174 case Intrinsic::x86_mmx_vec_init_d: {
1175 // The return type needs to be changed.
1177 unsigned NumOps = 0;
1179 switch (NewFn->getIntrinsicID()) {
1181 case Intrinsic::x86_mmx_vec_init_b: NumOps = 8; break;
1182 case Intrinsic::x86_mmx_vec_init_w: NumOps = 4; break;
1183 case Intrinsic::x86_mmx_vec_init_d: NumOps = 2; break;
1186 switch (NewFn->getIntrinsicID()) {
1188 case Intrinsic::x86_mmx_vec_init_b:
1189 Operands[7] = CI->getArgOperand(7);
1190 Operands[6] = CI->getArgOperand(6);
1191 Operands[5] = CI->getArgOperand(5);
1192 Operands[4] = CI->getArgOperand(4);
1194 case Intrinsic::x86_mmx_vec_init_w:
1195 Operands[3] = CI->getArgOperand(3);
1196 Operands[2] = CI->getArgOperand(2);
1198 case Intrinsic::x86_mmx_vec_init_d:
1199 Operands[1] = CI->getArgOperand(1);
1200 Operands[0] = CI->getArgOperand(0);
1204 ConstructNewCallInst(NewFn, CI, Operands, NumOps);
1207 case Intrinsic::x86_mmx_vec_ext_d: {
1210 // Cast the operand to the X86 MMX type.
1211 Operands[0] = new BitCastInst(CI->getArgOperand(0),
1212 NewFn->getFunctionType()->getParamType(0),
1214 Operands[1] = CI->getArgOperand(1);
1216 ConstructNewCallInst(NewFn, CI, Operands, 2);
1221 case Intrinsic::ctlz:
1222 case Intrinsic::ctpop:
1223 case Intrinsic::cttz: {
1224 // Build a small vector of the original arguments.
1225 SmallVector<Value*, 8> Operands(CS.arg_begin(), CS.arg_end());
1227 // Construct a new CallInst
1228 CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
1229 "upgraded."+CI->getName(), CI);
1230 NewCI->setTailCall(CI->isTailCall());
1231 NewCI->setCallingConv(CI->getCallingConv());
1233 // Handle any uses of the old CallInst.
1234 if (!CI->use_empty()) {
1235 // Check for sign extend parameter attributes on the return values.
1236 bool SrcSExt = NewFn->getAttributes().paramHasAttr(0, Attribute::SExt);
1237 bool DestSExt = F->getAttributes().paramHasAttr(0, Attribute::SExt);
1239 // Construct an appropriate cast from the new return type to the old.
1240 CastInst *RetCast = CastInst::Create(
1241 CastInst::getCastOpcode(NewCI, SrcSExt,
1244 NewCI, F->getReturnType(),
1245 NewCI->getName(), CI);
1246 NewCI->moveBefore(RetCast);
1248 // Replace all uses of the old call with the new cast which has the
1250 CI->replaceAllUsesWith(RetCast);
1253 // Clean up the old call now that it has been completely upgraded.
1254 CI->eraseFromParent();
1257 case Intrinsic::eh_selector:
1258 case Intrinsic::eh_typeid_for: {
1259 // Only the return type changed.
1260 SmallVector<Value*, 8> Operands(CS.arg_begin(), CS.arg_end());
1261 CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
1262 "upgraded." + CI->getName(), CI);
1263 NewCI->setTailCall(CI->isTailCall());
1264 NewCI->setCallingConv(CI->getCallingConv());
1266 // Handle any uses of the old CallInst.
1267 if (!CI->use_empty()) {
1268 // Construct an appropriate cast from the new return type to the old.
1270 CastInst::Create(CastInst::getCastOpcode(NewCI, true,
1271 F->getReturnType(), true),
1272 NewCI, F->getReturnType(), NewCI->getName(), CI);
1273 CI->replaceAllUsesWith(RetCast);
1275 CI->eraseFromParent();
1278 case Intrinsic::memcpy:
1279 case Intrinsic::memmove:
1280 case Intrinsic::memset: {
1282 const llvm::Type *I1Ty = llvm::Type::getInt1Ty(CI->getContext());
1283 Value *Operands[5] = { CI->getArgOperand(0), CI->getArgOperand(1),
1284 CI->getArgOperand(2), CI->getArgOperand(3),
1285 llvm::ConstantInt::get(I1Ty, 0) };
1286 CallInst *NewCI = CallInst::Create(NewFn, Operands, Operands+5,
1288 NewCI->setTailCall(CI->isTailCall());
1289 NewCI->setCallingConv(CI->getCallingConv());
1290 // Handle any uses of the old CallInst.
1291 if (!CI->use_empty())
1292 // Replace all uses of the old call with the new cast which has the
1294 CI->replaceAllUsesWith(NewCI);
1296 // Clean up the old call now that it has been completely upgraded.
1297 CI->eraseFromParent();
1303 // This tests each Function to determine if it needs upgrading. When we find
1304 // one we are interested in, we then upgrade all calls to reflect the new
1306 void llvm::UpgradeCallsToIntrinsic(Function* F) {
1307 assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");
1309 // Upgrade the function and check if it is a totaly new function.
1311 if (UpgradeIntrinsicFunction(F, NewFn)) {
1313 // Replace all uses to the old function with the new one if necessary.
1314 for (Value::use_iterator UI = F->use_begin(), UE = F->use_end();
1316 if (CallInst* CI = dyn_cast<CallInst>(*UI++))
1317 UpgradeIntrinsicCall(CI, NewFn);
1319 // Remove old function, no longer used, from the module.
1320 F->eraseFromParent();
1325 /// This function strips all debug info intrinsics, except for llvm.dbg.declare.
1326 /// If an llvm.dbg.declare intrinsic is invalid, then this function simply
1327 /// strips that use.
1328 void llvm::CheckDebugInfoIntrinsics(Module *M) {
1331 if (Function *FuncStart = M->getFunction("llvm.dbg.func.start")) {
1332 while (!FuncStart->use_empty()) {
1333 CallInst *CI = cast<CallInst>(FuncStart->use_back());
1334 CI->eraseFromParent();
1336 FuncStart->eraseFromParent();
1339 if (Function *StopPoint = M->getFunction("llvm.dbg.stoppoint")) {
1340 while (!StopPoint->use_empty()) {
1341 CallInst *CI = cast<CallInst>(StopPoint->use_back());
1342 CI->eraseFromParent();
1344 StopPoint->eraseFromParent();
1347 if (Function *RegionStart = M->getFunction("llvm.dbg.region.start")) {
1348 while (!RegionStart->use_empty()) {
1349 CallInst *CI = cast<CallInst>(RegionStart->use_back());
1350 CI->eraseFromParent();
1352 RegionStart->eraseFromParent();
1355 if (Function *RegionEnd = M->getFunction("llvm.dbg.region.end")) {
1356 while (!RegionEnd->use_empty()) {
1357 CallInst *CI = cast<CallInst>(RegionEnd->use_back());
1358 CI->eraseFromParent();
1360 RegionEnd->eraseFromParent();
1363 if (Function *Declare = M->getFunction("llvm.dbg.declare")) {
1364 if (!Declare->use_empty()) {
1365 DbgDeclareInst *DDI = cast<DbgDeclareInst>(Declare->use_back());
1366 if (!isa<MDNode>(DDI->getArgOperand(0)) ||
1367 !isa<MDNode>(DDI->getArgOperand(1))) {
1368 while (!Declare->use_empty()) {
1369 CallInst *CI = cast<CallInst>(Declare->use_back());
1370 CI->eraseFromParent();
1372 Declare->eraseFromParent();