//===-- FunctionLoweringInfo.cpp ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating functions from LLVM IR into
// Machine IR.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "function-lowering-info"
#include "FunctionLoweringInfo.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
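///
/// For example, given the aggregate type { i32, { float, float }, i64 } and
/// the index sequence {1, 1} (identifying the second float), the linearized
/// index is 2: the i32 is leaf 0 and the first float is leaf 1.
///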
unsigned llvm::ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(TLI, *EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(TLI, *EI, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    const Type *EltTy = ATy->getElementType();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
      if (Indices && *Indices == i)
        return ComputeLinearIndex(TLI, EltTy, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(TLI, EltTy, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}
/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
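///
/// For example, { i32, [2 x float] } yields ValueVTs = { i32, f32, f32 };
/// assuming a layout where the array starts at byte offset 4, the
/// corresponding Offsets would be { 0, 4, 8 }.
///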
void llvm::ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
                           SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    const Type *EltTy = ATy->getElementType();
    uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}
/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it, or used by a
/// switch or atomic instruction, which may expand to multiple basic blocks.
static bool isUsedOutsideOfDefiningBlock(const Instruction *I) {
  if (isa<PHINode>(I)) return true;
  const BasicBlock *BB = I->getParent();
  for (Value::const_use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))
      return true;
  return false;
}
/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true. This includes arguments used by switches, since
/// the switch may expand into multiple basic blocks.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool EnableFastISel) {
  // With FastISel active, we may be splitting blocks, so force creation
  // of virtual registers for all non-dead arguments.
  // Don't force virtual registers for byval arguments though, because
  // fast-isel can't handle those in all cases.
  if (EnableFastISel && !A->hasByValAttr())
    return A->use_empty();

  const BasicBlock *Entry = A->getParent()->begin();
  for (Value::const_use_iterator UI = A->use_begin(), E = A->use_end();
       UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
      return false;  // Use not in entry block.
  return true;
}
FunctionLoweringInfo::FunctionLoweringInfo(const TargetLowering &tli)
  : TLI(tli) {
}
void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
                               bool EnableFastISel) {
  Fn = &fn;
  MF = &mf;
  RegInfo = &MF->getRegInfo();

  // Create a vreg for each argument register that is not dead and is used
  // outside of the entry block for the function.
  for (Function::const_arg_iterator AI = Fn->arg_begin(), E = Fn->arg_end();
       AI != E; ++AI)
    if (!isOnlyUsedInEntryBlock(AI, EnableFastISel))
      InitializeRegForValue(AI);

  // Initialize the mapping of values to registers. This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  Function::const_iterator BB = Fn->begin(), EB = Fn->end();
  for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (const AllocaInst *AI = dyn_cast<AllocaInst>(I))
      if (const ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        const Type *Ty = AI->getAllocatedType();
        uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
        unsigned Align =
          std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
                   AI->getAlignment());

        TySize *= CUI->getZExtValue();   // Get total allocated size.
        if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
        StaticAllocaMap[AI] =
          MF->getFrameInfo()->CreateStackObject(TySize, Align, false);
      }

  for (; BB != EB; ++BB)
    for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I)
      if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
        if (!isa<AllocaInst>(I) ||
            !StaticAllocaMap.count(cast<AllocaInst>(I)))
          InitializeRegForValue(I);

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (BB = Fn->begin(); BB != EB; ++BB) {
    MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(BB);
    MBBMap[BB] = MBB;
    MF->push_back(MBB);

    // Transfer the address-taken flag. This is necessary because there could
    // be multiple MachineBasicBlocks corresponding to one BasicBlock, and only
    // the first one should be marked.
    if (BB->hasAddressTaken())
      MBB->setHasAddressTaken();

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    for (BasicBlock::const_iterator I = BB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
      if (PN->use_empty()) continue;

      DebugLoc DL = PN->getDebugLoc();
      unsigned PHIReg = ValueMap[PN];
      assert(PHIReg && "PHI node does not have an assigned virtual register!");

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(TLI, PN->getType(), ValueVTs);
      for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
        EVT VT = ValueVTs[vti];
        // A value of type VT may need several machine registers (e.g. an i64
        // PHI on a 32-bit target); create one PHI instruction per register.
        unsigned NumRegisters = TLI.getNumRegisters(Fn->getContext(), VT);
        const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
        for (unsigned i = 0; i != NumRegisters; ++i)
          BuildMI(MBB, DL, TII->get(TargetOpcode::PHI), PHIReg + i);
        PHIReg += NumRegisters;
      }
    }
  }

  // Mark landing pad blocks.
  for (BB = Fn->begin(); BB != EB; ++BB)
    if (const InvokeInst *Invoke = dyn_cast<InvokeInst>(BB->getTerminator()))
      MBBMap[Invoke->getSuccessor(1)]->setIsLandingPad();
}
/// clear - Clear out all the function-specific state. This returns this
/// FunctionLoweringInfo to an empty state, ready to be used for a
/// different function.
void FunctionLoweringInfo::clear() {
  assert(CatchInfoFound.size() == CatchInfoLost.size() &&
         "Not all catch info was assigned to a landing pad!");

  MBBMap.clear();
  ValueMap.clear();
  StaticAllocaMap.clear();
#ifndef NDEBUG
  CatchInfoLost.clear();
  CatchInfoFound.clear();
#endif
  LiveOutRegInfo.clear();
}
unsigned FunctionLoweringInfo::MakeReg(EVT VT) {
  return RegInfo->createVirtualRegister(TLI.getRegClassFor(VT));
}
/// CreateRegForValue - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types. Assign these registers
/// consecutive vreg numbers and return the first assigned number.
///
/// In the case that the given value has struct or array type, this function
/// will assign registers for each member or element.
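///
/// For example, assuming a 32-bit target, a value of type { i64, float }
/// would get three consecutive vregs: two for the i64 (expanded to a pair
/// of i32 registers) and one for the float.
///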
unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, V->getType(), ValueVTs);

  unsigned FirstReg = 0;
  for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
    EVT ValueVT = ValueVTs[Value];
    EVT RegisterVT = TLI.getRegisterType(V->getContext(), ValueVT);

    unsigned NumRegs = TLI.getNumRegisters(V->getContext(), ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      unsigned R = MakeReg(RegisterVT);
      if (!FirstReg) FirstReg = R;
    }
  }
  return FirstReg;
}
/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
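///
/// For example, the operand "i8* bitcast (i8** @_ZTIi to i8*)" (a typical
/// C++ typeinfo reference) yields the global @_ZTIi.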
GlobalVariable *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalVariable *GV = dyn_cast<GlobalVariable>(V);

  if (GV && GV->getName() == ".llvm.eh.catch.all.value") {
    assert(GV->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = GV->getInitializer();
    GV = dyn_cast<GlobalVariable>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}
/// AddCatchInfo - Extract the personality and type infos from an eh.selector
/// call, and add them to the specified machine basic block.
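///
/// For example (in this release a CallInst's callee is operand 0 and its
/// arguments start at operand 1), a typical C++ "catch int" selector call is:
///   call i32 @llvm.eh.selector(i8* %eh_ptr,
///                              i8* bitcast (i32 (...)* @__gxx_personality_v0
///                                           to i8*),
///                              i8* bitcast (i8** @_ZTIi to i8*))
/// so the personality is operand 2 and the type infos start at operand 3.
/// A filter is encoded as a ConstantInt giving the number of following
/// filter typeinfos plus one, and a zero marks a cleanup.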
void llvm::AddCatchInfo(const CallInst &I, MachineModuleInfo *MMI,
                        MachineBasicBlock *MBB) {
  // Inform the MachineModuleInfo of the personality for this landing pad.
  const ConstantExpr *CE = cast<ConstantExpr>(I.getOperand(2));
  assert(CE->getOpcode() == Instruction::BitCast &&
         isa<Function>(CE->getOperand(0)) &&
         "Personality should be a function");
  MMI->addPersonality(MBB, cast<Function>(CE->getOperand(0)));

  // Gather all the type infos for this landing pad and pass them along to
  // MachineModuleInfo.
  std::vector<const GlobalVariable *> TyInfo;
  unsigned N = I.getNumOperands();

  for (unsigned i = N - 1; i > 2; --i) {
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(i))) {
      unsigned FilterLength = CI->getZExtValue();
      unsigned FirstCatch = i + FilterLength + !FilterLength;
      assert (FirstCatch <= N && "Invalid filter length");

      if (FirstCatch < N) {
        TyInfo.reserve(N - FirstCatch);
        for (unsigned j = FirstCatch; j < N; ++j)
          TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
        MMI->addCatchTypeInfo(MBB, TyInfo);
        TyInfo.clear();
      }

      if (!FilterLength) {
        // Cleanup.
        MMI->addCleanup(MBB);
      } else {
        // Filter.
        TyInfo.reserve(FilterLength - 1);
        for (unsigned j = i + 1; j < FirstCatch; ++j)
          TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
        MMI->addFilterTypeInfo(MBB, TyInfo);
        TyInfo.clear();
      }

      N = i;
    }
  }

  if (N > 3) {
    TyInfo.reserve(N - 3);
    for (unsigned j = 3; j < N; ++j)
      TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
    MMI->addCatchTypeInfo(MBB, TyInfo);
  }
}
void llvm::CopyCatchInfo(const BasicBlock *SrcBB, const BasicBlock *DestBB,
                         MachineModuleInfo *MMI, FunctionLoweringInfo &FLI) {
  for (BasicBlock::const_iterator I = SrcBB->begin(), E = --SrcBB->end();
       I != E; ++I)
    if (const EHSelectorInst *EHSel = dyn_cast<EHSelectorInst>(I)) {
      // Apply the catch info to DestBB.
      AddCatchInfo(*EHSel, MMI, FLI.MBBMap[DestBB]);
#ifndef NDEBUG
      if (!FLI.MBBMap[SrcBB]->isLandingPad())
        FLI.CatchInfoFound.insert(EHSel);
#endif
    }
}
/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
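///
/// For example, an operand with constraint code "m", or an indirect operand
/// such as "=*m", causes this to return true.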
bool
llvm::hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operand accesses access memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}
/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
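///
/// For example, FCMP_OLT normally maps to ISD::SETOLT, but when finite-only
/// FP math is enabled it maps to the plain ISD::SETLT, since ordered and
/// unordered comparisons are then equivalent.
///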
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  ISD::CondCode FPC, FOC;
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
  case FCmpInst::FCMP_OEQ:   FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
  case FCmpInst::FCMP_OGT:   FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
  case FCmpInst::FCMP_OGE:   FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
  case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
  case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
  case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
  case FCmpInst::FCMP_ORD:   FOC = FPC = ISD::SETO; break;
  case FCmpInst::FCMP_UNO:   FOC = FPC = ISD::SETUO; break;
  case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
  case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
  case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
  case FCmpInst::FCMP_ULT:   FOC = ISD::SETLT; FPC = ISD::SETULT; break;
  case FCmpInst::FCMP_ULE:   FOC = ISD::SETLE; FPC = ISD::SETULE; break;
  case FCmpInst::FCMP_UNE:   FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
  case FCmpInst::FCMP_TRUE:  FOC = FPC = ISD::SETTRUE; break;
  default:
    llvm_unreachable("Invalid FCmp predicate opcode!");
    FOC = FPC = ISD::SETFALSE;
    break;
  }
  if (FiniteOnlyFPMath())
    return FOC;
  else
    return FPC;
}
/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
    return ISD::SETNE;
  }
}
/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
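///
/// For example, the call below is in tail call position:
///   %ret = tail call i32 @callee()
///   ret i32 %ret
/// but it would not be if a store, for instance, were scheduled between the
/// call and the return.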
bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
                                const TargetLowering &TLI) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
  const Function *F = ExitBB->getParent();

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!GuaranteedTailCallOpt || !isa<UnreachableInst>(Term))) return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !I->isSafeToSpeculativelyExecute())
    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
         --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !BBI->isSafeToSpeculativelyExecute())
        return false;
    }

  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore noalias because it doesn't affect the call sequence.
  unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
  if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
    return false;

  // Otherwise, make sure the unmodified return value of I is the return value.
  for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
       U = dyn_cast<Instruction>(U->getOperand(0))) {
    if (!U)
      return false;
    if (!U->hasOneUse())
      return false;
    if (U == I)
      break;
    // Check for a truly no-op truncate.
    if (isa<TruncInst>(U) &&
        TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
      continue;
    // Check for a truly no-op bitcast.
    if (isa<BitCastInst>(U) &&
        (U->getOperand(0)->getType() == U->getType() ||
         (U->getOperand(0)->getType()->isPointerTy() &&
          U->getType()->isPointerTy())))
      continue;
    // Otherwise it's not a true no-op.
    return false;
  }

  return true;
}