1 //===-- SelectionDAGBuild.cpp - Selection-DAG building --------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements routines for translating from LLVM IR into SelectionDAG IR.
12 //===----------------------------------------------------------------------===//
14 #define DEBUG_TYPE "isel"
15 #include "SelectionDAGBuild.h"
16 #include "llvm/ADT/BitVector.h"
17 #include "llvm/ADT/SmallSet.h"
18 #include "llvm/Analysis/AliasAnalysis.h"
19 #include "llvm/Constants.h"
20 #include "llvm/Constants.h"
21 #include "llvm/CallingConv.h"
22 #include "llvm/DerivedTypes.h"
23 #include "llvm/Function.h"
24 #include "llvm/GlobalVariable.h"
25 #include "llvm/InlineAsm.h"
26 #include "llvm/Instructions.h"
27 #include "llvm/Intrinsics.h"
28 #include "llvm/IntrinsicInst.h"
29 #include "llvm/Module.h"
30 #include "llvm/CodeGen/FastISel.h"
31 #include "llvm/CodeGen/GCStrategy.h"
32 #include "llvm/CodeGen/GCMetadata.h"
33 #include "llvm/CodeGen/MachineFunction.h"
34 #include "llvm/CodeGen/MachineFrameInfo.h"
35 #include "llvm/CodeGen/MachineInstrBuilder.h"
36 #include "llvm/CodeGen/MachineJumpTableInfo.h"
37 #include "llvm/CodeGen/MachineModuleInfo.h"
38 #include "llvm/CodeGen/MachineRegisterInfo.h"
39 #include "llvm/CodeGen/PseudoSourceValue.h"
40 #include "llvm/CodeGen/SelectionDAG.h"
41 #include "llvm/CodeGen/DwarfWriter.h"
42 #include "llvm/Analysis/DebugInfo.h"
43 #include "llvm/Target/TargetRegisterInfo.h"
44 #include "llvm/Target/TargetData.h"
45 #include "llvm/Target/TargetFrameInfo.h"
46 #include "llvm/Target/TargetInstrInfo.h"
47 #include "llvm/Target/TargetIntrinsicInfo.h"
48 #include "llvm/Target/TargetLowering.h"
49 #include "llvm/Target/TargetOptions.h"
50 #include "llvm/Support/Compiler.h"
51 #include "llvm/Support/CommandLine.h"
52 #include "llvm/Support/Debug.h"
53 #include "llvm/Support/ErrorHandling.h"
54 #include "llvm/Support/MathExtras.h"
55 #include "llvm/Support/raw_ostream.h"
59 /// LimitFloatPrecision - Generate low-precision inline sequences for
60 /// some float libcalls (6, 8 or 12 bits).
61 static unsigned LimitFloatPrecision;
63 static cl::opt<unsigned, true>
64 LimitFPPrecision("limit-float-precision",
65 cl::desc("Generate low-precision inline sequences "
66 "for some float libcalls"),
67 cl::location(LimitFloatPrecision),
70 /// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
71 /// of insertvalue or extractvalue indices that identify a member, return
72 /// the linearized index of the start of the member.
74 static unsigned ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
75 const unsigned *Indices,
76 const unsigned *IndicesEnd,
77 unsigned CurIndex = 0) {
78 // Base case: We're done.
79 if (Indices && Indices == IndicesEnd)
82 // Given a struct type, recursively traverse the elements.
83 if (const StructType *STy = dyn_cast<StructType>(Ty)) {
84 for (StructType::element_iterator EB = STy->element_begin(),
86 EE = STy->element_end();
88 if (Indices && *Indices == unsigned(EI - EB))
89 return ComputeLinearIndex(TLI, *EI, Indices+1, IndicesEnd, CurIndex);
90 CurIndex = ComputeLinearIndex(TLI, *EI, 0, 0, CurIndex);
94 // Given an array type, recursively traverse the elements.
95 else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
96 const Type *EltTy = ATy->getElementType();
97 for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
98 if (Indices && *Indices == i)
99 return ComputeLinearIndex(TLI, EltTy, Indices+1, IndicesEnd, CurIndex);
100 CurIndex = ComputeLinearIndex(TLI, EltTy, 0, 0, CurIndex);
104 // We haven't found the type we're looking for, so keep searching.
108 /// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
109 /// EVTs that represent all the individual underlying
110 /// non-aggregate types that comprise it.
112 /// If Offsets is non-null, it points to a vector to be filled in
113 /// with the in-memory offsets of each of the individual values.
115 static void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
116 SmallVectorImpl<EVT> &ValueVTs,
117 SmallVectorImpl<uint64_t> *Offsets = 0,
118 uint64_t StartingOffset = 0) {
119 // Given a struct type, recursively traverse the elements.
120 if (const StructType *STy = dyn_cast<StructType>(Ty)) {
121 const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
122 for (StructType::element_iterator EB = STy->element_begin(),
124 EE = STy->element_end();
126 ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
127 StartingOffset + SL->getElementOffset(EI - EB));
130 // Given an array type, recursively traverse the elements.
131 if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
132 const Type *EltTy = ATy->getElementType();
133 uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
134 for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
135 ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
136 StartingOffset + i * EltSize);
139 // Interpret void as zero return values.
140 if (Ty == Type::getVoidTy(Ty->getContext()))
142 // Base case: we can get an EVT for this LLVM IR type.
143 ValueVTs.push_back(TLI.getValueType(Ty));
145 Offsets->push_back(StartingOffset);
149 /// RegsForValue - This struct represents the registers (physical or virtual)
150 /// that a particular set of values is assigned, and the type information about
151 /// the value. The most common situation is to represent one value at a time,
152 /// but struct or array values are handled element-wise as multiple values.
153 /// The splitting of aggregates is performed recursively, so that we never
154 /// have aggregate-typed registers. The values at this point do not necessarily
155 /// have legal types, so each value may require one or more registers of some
158 struct VISIBILITY_HIDDEN RegsForValue {
159 /// TLI - The TargetLowering object.
161 const TargetLowering *TLI;
163 /// ValueVTs - The value types of the values, which may not be legal, and
164 /// may need be promoted or synthesized from one or more registers.
166 SmallVector<EVT, 4> ValueVTs;
168 /// RegVTs - The value types of the registers. This is the same size as
169 /// ValueVTs and it records, for each value, what the type of the assigned
170 /// register or registers are. (Individual values are never synthesized
171 /// from more than one type of register.)
173 /// With virtual registers, the contents of RegVTs is redundant with TLI's
174 /// getRegisterType member function, however when with physical registers
175 /// it is necessary to have a separate record of the types.
177 SmallVector<EVT, 4> RegVTs;
179 /// Regs - This list holds the registers assigned to the values.
180 /// Each legal or promoted value requires one register, and each
181 /// expanded value requires multiple registers.
183 SmallVector<unsigned, 4> Regs;
185 RegsForValue() : TLI(0) {}
187 RegsForValue(const TargetLowering &tli,
188 const SmallVector<unsigned, 4> ®s,
189 EVT regvt, EVT valuevt)
190 : TLI(&tli), ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
191 RegsForValue(const TargetLowering &tli,
192 const SmallVector<unsigned, 4> ®s,
193 const SmallVector<EVT, 4> ®vts,
194 const SmallVector<EVT, 4> &valuevts)
195 : TLI(&tli), ValueVTs(valuevts), RegVTs(regvts), Regs(regs) {}
196 RegsForValue(LLVMContext &Context, const TargetLowering &tli,
197 unsigned Reg, const Type *Ty) : TLI(&tli) {
198 ComputeValueVTs(tli, Ty, ValueVTs);
200 for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
201 EVT ValueVT = ValueVTs[Value];
202 unsigned NumRegs = TLI->getNumRegisters(Context, ValueVT);
203 EVT RegisterVT = TLI->getRegisterType(Context, ValueVT);
204 for (unsigned i = 0; i != NumRegs; ++i)
205 Regs.push_back(Reg + i);
206 RegVTs.push_back(RegisterVT);
211 /// append - Add the specified values to this one.
212 void append(const RegsForValue &RHS) {
214 ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
215 RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
216 Regs.append(RHS.Regs.begin(), RHS.Regs.end());
220 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
221 /// this value and returns the result as a ValueVTs value. This uses
222 /// Chain/Flag as the input and updates them for the output Chain/Flag.
223 /// If the Flag pointer is NULL, no flag is used.
224 SDValue getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
225 SDValue &Chain, SDValue *Flag) const;
227 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
228 /// specified value into the registers specified by this object. This uses
229 /// Chain/Flag as the input and updates them for the output Chain/Flag.
230 /// If the Flag pointer is NULL, no flag is used.
231 void getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
232 SDValue &Chain, SDValue *Flag) const;
234 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
235 /// operand list. This adds the code marker, matching input operand index
236 /// (if applicable), and includes the number of values added into it.
237 void AddInlineAsmOperands(unsigned Code,
238 bool HasMatching, unsigned MatchingIdx,
239 SelectionDAG &DAG, std::vector<SDValue> &Ops) const;
243 /// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
244 /// PHI nodes or outside of the basic block that defines it, or used by a
245 /// switch or atomic instruction, which may expand to multiple basic blocks.
246 static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
247 if (isa<PHINode>(I)) return true;
248 BasicBlock *BB = I->getParent();
249 for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
250 if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))
255 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
256 /// entry block, return true. This includes arguments used by switches, since
257 /// the switch may expand into multiple basic blocks.
258 static bool isOnlyUsedInEntryBlock(Argument *A, bool EnableFastISel) {
259 // With FastISel active, we may be splitting blocks, so force creation
260 // of virtual registers for all non-dead arguments.
261 // Don't force virtual registers for byval arguments though, because
262 // fast-isel can't handle those in all cases.
263 if (EnableFastISel && !A->hasByValAttr())
264 return A->use_empty();
266 BasicBlock *Entry = A->getParent()->begin();
267 for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
268 if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
269 return false; // Use not in entry block.
273 FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli)
277 void FunctionLoweringInfo::set(Function &fn, MachineFunction &mf,
279 bool EnableFastISel) {
282 RegInfo = &MF->getRegInfo();
284 // Create a vreg for each argument register that is not dead and is used
285 // outside of the entry block for the function.
286 for (Function::arg_iterator AI = Fn->arg_begin(), E = Fn->arg_end();
288 if (!isOnlyUsedInEntryBlock(AI, EnableFastISel))
289 InitializeRegForValue(AI);
291 // Initialize the mapping of values to registers. This is only set up for
292 // instruction values that are used outside of the block that defines
294 Function::iterator BB = Fn->begin(), EB = Fn->end();
295 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
296 if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
297 if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
298 const Type *Ty = AI->getAllocatedType();
299 uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
301 std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
304 TySize *= CUI->getZExtValue(); // Get total allocated size.
305 if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
306 StaticAllocaMap[AI] =
307 MF->getFrameInfo()->CreateStackObject(TySize, Align);
310 for (; BB != EB; ++BB)
311 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
312 if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
313 if (!isa<AllocaInst>(I) ||
314 !StaticAllocaMap.count(cast<AllocaInst>(I)))
315 InitializeRegForValue(I);
317 // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
318 // also creates the initial PHI MachineInstrs, though none of the input
319 // operands are populated.
320 for (BB = Fn->begin(), EB = Fn->end(); BB != EB; ++BB) {
321 MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(BB);
325 // Transfer the address-taken flag. This is necessary because there could
326 // be multiple MachineBasicBlocks corresponding to one BasicBlock, and only
327 // the first one should be marked.
328 if (BB->hasAddressTaken())
329 MBB->setHasAddressTaken();
331 // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
335 for (BasicBlock::iterator
336 I = BB->begin(), E = BB->end(); I != E; ++I) {
337 if (CallInst *CI = dyn_cast<CallInst>(I)) {
338 if (Function *F = CI->getCalledFunction()) {
339 switch (F->getIntrinsicID()) {
341 case Intrinsic::dbg_stoppoint: {
342 DbgStopPointInst *SPI = cast<DbgStopPointInst>(I);
343 if (isValidDebugInfoIntrinsic(*SPI, CodeGenOpt::Default))
344 DL = ExtractDebugLocation(*SPI, MF->getDebugLocInfo());
347 case Intrinsic::dbg_func_start: {
348 DbgFuncStartInst *FSI = cast<DbgFuncStartInst>(I);
349 if (isValidDebugInfoIntrinsic(*FSI, CodeGenOpt::Default))
350 DL = ExtractDebugLocation(*FSI, MF->getDebugLocInfo());
357 PN = dyn_cast<PHINode>(I);
358 if (!PN || PN->use_empty()) continue;
360 unsigned PHIReg = ValueMap[PN];
361 assert(PHIReg && "PHI node does not have an assigned virtual register!");
363 SmallVector<EVT, 4> ValueVTs;
364 ComputeValueVTs(TLI, PN->getType(), ValueVTs);
365 for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
366 EVT VT = ValueVTs[vti];
367 unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
368 const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
369 for (unsigned i = 0; i != NumRegisters; ++i)
370 BuildMI(MBB, DL, TII->get(TargetInstrInfo::PHI), PHIReg + i);
371 PHIReg += NumRegisters;
377 unsigned FunctionLoweringInfo::MakeReg(EVT VT) {
378 return RegInfo->createVirtualRegister(TLI.getRegClassFor(VT));
381 /// CreateRegForValue - Allocate the appropriate number of virtual registers of
382 /// the correctly promoted or expanded types. Assign these registers
383 /// consecutive vreg numbers and return the first assigned number.
385 /// In the case that the given value has struct or array type, this function
386 /// will assign registers for each member or element.
388 unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
389 SmallVector<EVT, 4> ValueVTs;
390 ComputeValueVTs(TLI, V->getType(), ValueVTs);
392 unsigned FirstReg = 0;
393 for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
394 EVT ValueVT = ValueVTs[Value];
395 EVT RegisterVT = TLI.getRegisterType(V->getContext(), ValueVT);
397 unsigned NumRegs = TLI.getNumRegisters(V->getContext(), ValueVT);
398 for (unsigned i = 0; i != NumRegs; ++i) {
399 unsigned R = MakeReg(RegisterVT);
400 if (!FirstReg) FirstReg = R;
406 /// getCopyFromParts - Create a value that contains the specified legal parts
407 /// combined into the value they represent. If the parts combine to a type
408 /// larger then ValueVT then AssertOp can be used to specify whether the extra
409 /// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
410 /// (ISD::AssertSext).
411 static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
412 const SDValue *Parts,
413 unsigned NumParts, EVT PartVT, EVT ValueVT,
414 ISD::NodeType AssertOp = ISD::DELETED_NODE) {
415 assert(NumParts > 0 && "No parts to assemble!");
416 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
417 SDValue Val = Parts[0];
420 // Assemble the value from multiple parts.
421 if (!ValueVT.isVector() && ValueVT.isInteger()) {
422 unsigned PartBits = PartVT.getSizeInBits();
423 unsigned ValueBits = ValueVT.getSizeInBits();
425 // Assemble the power of 2 part.
426 unsigned RoundParts = NumParts & (NumParts - 1) ?
427 1 << Log2_32(NumParts) : NumParts;
428 unsigned RoundBits = PartBits * RoundParts;
429 EVT RoundVT = RoundBits == ValueBits ?
430 ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
433 EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);
435 if (RoundParts > 2) {
436 Lo = getCopyFromParts(DAG, dl, Parts, RoundParts/2, PartVT, HalfVT);
437 Hi = getCopyFromParts(DAG, dl, Parts+RoundParts/2, RoundParts/2,
440 Lo = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[0]);
441 Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[1]);
443 if (TLI.isBigEndian())
445 Val = DAG.getNode(ISD::BUILD_PAIR, dl, RoundVT, Lo, Hi);
447 if (RoundParts < NumParts) {
448 // Assemble the trailing non-power-of-2 part.
449 unsigned OddParts = NumParts - RoundParts;
450 EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
451 Hi = getCopyFromParts(DAG, dl,
452 Parts+RoundParts, OddParts, PartVT, OddVT);
454 // Combine the round and odd parts.
456 if (TLI.isBigEndian())
458 EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
459 Hi = DAG.getNode(ISD::ANY_EXTEND, dl, TotalVT, Hi);
460 Hi = DAG.getNode(ISD::SHL, dl, TotalVT, Hi,
461 DAG.getConstant(Lo.getValueType().getSizeInBits(),
462 TLI.getPointerTy()));
463 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, TotalVT, Lo);
464 Val = DAG.getNode(ISD::OR, dl, TotalVT, Lo, Hi);
466 } else if (ValueVT.isVector()) {
467 // Handle a multi-element vector.
468 EVT IntermediateVT, RegisterVT;
469 unsigned NumIntermediates;
471 TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
472 NumIntermediates, RegisterVT);
473 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
474 NumParts = NumRegs; // Silence a compiler warning.
475 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
476 assert(RegisterVT == Parts[0].getValueType() &&
477 "Part type doesn't match part!");
479 // Assemble the parts into intermediate operands.
480 SmallVector<SDValue, 8> Ops(NumIntermediates);
481 if (NumIntermediates == NumParts) {
482 // If the register was not expanded, truncate or copy the value,
484 for (unsigned i = 0; i != NumParts; ++i)
485 Ops[i] = getCopyFromParts(DAG, dl, &Parts[i], 1,
486 PartVT, IntermediateVT);
487 } else if (NumParts > 0) {
488 // If the intermediate type was expanded, build the intermediate operands
490 assert(NumParts % NumIntermediates == 0 &&
491 "Must expand into a divisible number of parts!");
492 unsigned Factor = NumParts / NumIntermediates;
493 for (unsigned i = 0; i != NumIntermediates; ++i)
494 Ops[i] = getCopyFromParts(DAG, dl, &Parts[i * Factor], Factor,
495 PartVT, IntermediateVT);
498 // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the intermediate
500 Val = DAG.getNode(IntermediateVT.isVector() ?
501 ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, dl,
502 ValueVT, &Ops[0], NumIntermediates);
503 } else if (PartVT.isFloatingPoint()) {
504 // FP split into multiple FP parts (for ppcf128)
505 assert(ValueVT == EVT(MVT::ppcf128) && PartVT == EVT(MVT::f64) &&
508 Lo = DAG.getNode(ISD::BIT_CONVERT, dl, EVT(MVT::f64), Parts[0]);
509 Hi = DAG.getNode(ISD::BIT_CONVERT, dl, EVT(MVT::f64), Parts[1]);
510 if (TLI.isBigEndian())
512 Val = DAG.getNode(ISD::BUILD_PAIR, dl, ValueVT, Lo, Hi);
514 // FP split into integer parts (soft fp)
515 assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
516 !PartVT.isVector() && "Unexpected split");
517 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
518 Val = getCopyFromParts(DAG, dl, Parts, NumParts, PartVT, IntVT);
522 // There is now one part, held in Val. Correct it to match ValueVT.
523 PartVT = Val.getValueType();
525 if (PartVT == ValueVT)
528 if (PartVT.isVector()) {
529 assert(ValueVT.isVector() && "Unknown vector conversion!");
530 return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
533 if (ValueVT.isVector()) {
534 assert(ValueVT.getVectorElementType() == PartVT &&
535 ValueVT.getVectorNumElements() == 1 &&
536 "Only trivial scalar-to-vector conversions should get here!");
537 return DAG.getNode(ISD::BUILD_VECTOR, dl, ValueVT, Val);
540 if (PartVT.isInteger() &&
541 ValueVT.isInteger()) {
542 if (ValueVT.bitsLT(PartVT)) {
543 // For a truncate, see if we have any information to
544 // indicate whether the truncated bits will always be
545 // zero or sign-extension.
546 if (AssertOp != ISD::DELETED_NODE)
547 Val = DAG.getNode(AssertOp, dl, PartVT, Val,
548 DAG.getValueType(ValueVT));
549 return DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
551 return DAG.getNode(ISD::ANY_EXTEND, dl, ValueVT, Val);
555 if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
556 if (ValueVT.bitsLT(Val.getValueType()))
557 // FP_ROUND's are always exact here.
558 return DAG.getNode(ISD::FP_ROUND, dl, ValueVT, Val,
559 DAG.getIntPtrConstant(1));
560 return DAG.getNode(ISD::FP_EXTEND, dl, ValueVT, Val);
563 if (PartVT.getSizeInBits() == ValueVT.getSizeInBits())
564 return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
566 llvm_unreachable("Unknown mismatch!");
570 /// getCopyToParts - Create a series of nodes that contain the specified value
571 /// split into legal parts. If the parts contain more bits than Val, then, for
572 /// integers, ExtendKind can be used to specify how to generate the extra bits.
573 static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val,
574 SDValue *Parts, unsigned NumParts, EVT PartVT,
575 ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
576 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
577 EVT PtrVT = TLI.getPointerTy();
578 EVT ValueVT = Val.getValueType();
579 unsigned PartBits = PartVT.getSizeInBits();
580 unsigned OrigNumParts = NumParts;
581 assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");
586 if (!ValueVT.isVector()) {
587 if (PartVT == ValueVT) {
588 assert(NumParts == 1 && "No-op copy with multiple parts!");
593 if (NumParts * PartBits > ValueVT.getSizeInBits()) {
594 // If the parts cover more bits than the value has, promote the value.
595 if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
596 assert(NumParts == 1 && "Do not know what to promote to!");
597 Val = DAG.getNode(ISD::FP_EXTEND, dl, PartVT, Val);
598 } else if (PartVT.isInteger() && ValueVT.isInteger()) {
599 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
600 Val = DAG.getNode(ExtendKind, dl, ValueVT, Val);
602 llvm_unreachable("Unknown mismatch!");
604 } else if (PartBits == ValueVT.getSizeInBits()) {
605 // Different types of the same size.
606 assert(NumParts == 1 && PartVT != ValueVT);
607 Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
608 } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
609 // If the parts cover less bits than value has, truncate the value.
610 if (PartVT.isInteger() && ValueVT.isInteger()) {
611 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
612 Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
614 llvm_unreachable("Unknown mismatch!");
618 // The value may have changed - recompute ValueVT.
619 ValueVT = Val.getValueType();
620 assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
621 "Failed to tile the value with PartVT!");
624 assert(PartVT == ValueVT && "Type conversion failed!");
629 // Expand the value into multiple parts.
630 if (NumParts & (NumParts - 1)) {
631 // The number of parts is not a power of 2. Split off and copy the tail.
632 assert(PartVT.isInteger() && ValueVT.isInteger() &&
633 "Do not know what to expand to!");
634 unsigned RoundParts = 1 << Log2_32(NumParts);
635 unsigned RoundBits = RoundParts * PartBits;
636 unsigned OddParts = NumParts - RoundParts;
637 SDValue OddVal = DAG.getNode(ISD::SRL, dl, ValueVT, Val,
638 DAG.getConstant(RoundBits,
639 TLI.getPointerTy()));
640 getCopyToParts(DAG, dl, OddVal, Parts + RoundParts, OddParts, PartVT);
641 if (TLI.isBigEndian())
642 // The odd parts were reversed by getCopyToParts - unreverse them.
643 std::reverse(Parts + RoundParts, Parts + NumParts);
644 NumParts = RoundParts;
645 ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
646 Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
649 // The number of parts is a power of 2. Repeatedly bisect the value using
651 Parts[0] = DAG.getNode(ISD::BIT_CONVERT, dl,
652 EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits()),
654 for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
655 for (unsigned i = 0; i < NumParts; i += StepSize) {
656 unsigned ThisBits = StepSize * PartBits / 2;
657 EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
658 SDValue &Part0 = Parts[i];
659 SDValue &Part1 = Parts[i+StepSize/2];
661 Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
663 DAG.getConstant(1, PtrVT));
664 Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
666 DAG.getConstant(0, PtrVT));
668 if (ThisBits == PartBits && ThisVT != PartVT) {
669 Part0 = DAG.getNode(ISD::BIT_CONVERT, dl,
671 Part1 = DAG.getNode(ISD::BIT_CONVERT, dl,
677 if (TLI.isBigEndian())
678 std::reverse(Parts, Parts + OrigNumParts);
685 if (PartVT != ValueVT) {
686 if (PartVT.isVector()) {
687 Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
689 assert(ValueVT.getVectorElementType() == PartVT &&
690 ValueVT.getVectorNumElements() == 1 &&
691 "Only trivial vector-to-scalar conversions should get here!");
692 Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
694 DAG.getConstant(0, PtrVT));
702 // Handle a multi-element vector.
703 EVT IntermediateVT, RegisterVT;
704 unsigned NumIntermediates;
705 unsigned NumRegs = TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT,
706 IntermediateVT, NumIntermediates, RegisterVT);
707 unsigned NumElements = ValueVT.getVectorNumElements();
709 assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
710 NumParts = NumRegs; // Silence a compiler warning.
711 assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
713 // Split the vector into intermediate operands.
714 SmallVector<SDValue, 8> Ops(NumIntermediates);
715 for (unsigned i = 0; i != NumIntermediates; ++i)
716 if (IntermediateVT.isVector())
717 Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
719 DAG.getConstant(i * (NumElements / NumIntermediates),
722 Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
724 DAG.getConstant(i, PtrVT));
726 // Split the intermediate operands into legal parts.
727 if (NumParts == NumIntermediates) {
728 // If the register was not expanded, promote or copy the value,
730 for (unsigned i = 0; i != NumParts; ++i)
731 getCopyToParts(DAG, dl, Ops[i], &Parts[i], 1, PartVT);
732 } else if (NumParts > 0) {
733 // If the intermediate type was expanded, split each the value into
735 assert(NumParts % NumIntermediates == 0 &&
736 "Must expand into a divisible number of parts!");
737 unsigned Factor = NumParts / NumIntermediates;
738 for (unsigned i = 0; i != NumIntermediates; ++i)
739 getCopyToParts(DAG, dl, Ops[i], &Parts[i * Factor], Factor, PartVT);
744 void SelectionDAGLowering::init(GCFunctionInfo *gfi, AliasAnalysis &aa) {
747 TD = DAG.getTarget().getTargetData();
750 /// clear - Clear out the curret SelectionDAG and the associated
751 /// state and prepare this SelectionDAGLowering object to be used
752 /// for a new block. This doesn't clear out information about
753 /// additional blocks that are needed to complete switch lowering
754 /// or PHI node updating; that information is cleared out as it is
756 void SelectionDAGLowering::clear() {
758 PendingLoads.clear();
759 PendingExports.clear();
762 CurDebugLoc = DebugLoc::getUnknownLoc();
766 /// getRoot - Return the current virtual root of the Selection DAG,
767 /// flushing any PendingLoad items. This must be done before emitting
768 /// a store or any other node that may need to be ordered after any
769 /// prior load instructions.
771 SDValue SelectionDAGLowering::getRoot() {
772 if (PendingLoads.empty())
773 return DAG.getRoot();
775 if (PendingLoads.size() == 1) {
776 SDValue Root = PendingLoads[0];
778 PendingLoads.clear();
782 // Otherwise, we have to make a token factor node.
783 SDValue Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
784 &PendingLoads[0], PendingLoads.size());
785 PendingLoads.clear();
790 /// getControlRoot - Similar to getRoot, but instead of flushing all the
791 /// PendingLoad items, flush all the PendingExports items. It is necessary
792 /// to do this before emitting a terminator instruction.
794 SDValue SelectionDAGLowering::getControlRoot() {
795 SDValue Root = DAG.getRoot();
797 if (PendingExports.empty())
800 // Turn all of the CopyToReg chains into one factored node.
801 if (Root.getOpcode() != ISD::EntryToken) {
802 unsigned i = 0, e = PendingExports.size();
803 for (; i != e; ++i) {
804 assert(PendingExports[i].getNode()->getNumOperands() > 1);
805 if (PendingExports[i].getNode()->getOperand(0) == Root)
806 break; // Don't add the root if we already indirectly depend on it.
810 PendingExports.push_back(Root);
813 Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
815 PendingExports.size());
816 PendingExports.clear();
821 void SelectionDAGLowering::visit(Instruction &I) {
822 visit(I.getOpcode(), I);
825 void SelectionDAGLowering::visit(unsigned Opcode, User &I) {
826 // Note: this doesn't use InstVisitor, because it has to work with
827 // ConstantExpr's in addition to instructions.
829 default: llvm_unreachable("Unknown instruction type encountered!");
830 // Build the switch statement using the Instruction.def file.
831 #define HANDLE_INST(NUM, OPCODE, CLASS) \
832 case Instruction::OPCODE:return visit##OPCODE((CLASS&)I);
833 #include "llvm/Instruction.def"
837 SDValue SelectionDAGLowering::getValue(const Value *V) {
838 SDValue &N = NodeMap[V];
839 if (N.getNode()) return N;
841 if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
842 EVT VT = TLI.getValueType(V->getType(), true);
844 if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
845 return N = DAG.getConstant(*CI, VT);
847 if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
848 return N = DAG.getGlobalAddress(GV, VT);
850 if (isa<ConstantPointerNull>(C))
851 return N = DAG.getConstant(0, TLI.getPointerTy());
853 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C))
854 return N = DAG.getConstantFP(*CFP, VT);
856 if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
857 return N = DAG.getUNDEF(VT);
859 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
860 visit(CE->getOpcode(), *CE);
861 SDValue N1 = NodeMap[V];
862 assert(N1.getNode() && "visit didn't populate the ValueMap!");
866 if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
867 SmallVector<SDValue, 4> Constants;
868 for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
870 SDNode *Val = getValue(*OI).getNode();
871 // If the operand is an empty aggregate, there are no values.
873 // Add each leaf value from the operand to the Constants list
874 // to form a flattened list of all the values.
875 for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
876 Constants.push_back(SDValue(Val, i));
878 return DAG.getMergeValues(&Constants[0], Constants.size(),
882 if (isa<StructType>(C->getType()) || isa<ArrayType>(C->getType())) {
883 assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
884 "Unknown struct or array constant!");
886 SmallVector<EVT, 4> ValueVTs;
887 ComputeValueVTs(TLI, C->getType(), ValueVTs);
888 unsigned NumElts = ValueVTs.size();
890 return SDValue(); // empty struct
891 SmallVector<SDValue, 4> Constants(NumElts);
892 for (unsigned i = 0; i != NumElts; ++i) {
893 EVT EltVT = ValueVTs[i];
894 if (isa<UndefValue>(C))
895 Constants[i] = DAG.getUNDEF(EltVT);
896 else if (EltVT.isFloatingPoint())
897 Constants[i] = DAG.getConstantFP(0, EltVT);
899 Constants[i] = DAG.getConstant(0, EltVT);
901 return DAG.getMergeValues(&Constants[0], NumElts, getCurDebugLoc());
904 if (BlockAddress *BA = dyn_cast<BlockAddress>(C))
905 return DAG.getBlockAddress(BA, getCurDebugLoc());
907 const VectorType *VecTy = cast<VectorType>(V->getType());
908 unsigned NumElements = VecTy->getNumElements();
910 // Now that we know the number and type of the elements, get that number of
911 // elements into the Ops array based on what kind of constant it is.
912 SmallVector<SDValue, 16> Ops;
913 if (ConstantVector *CP = dyn_cast<ConstantVector>(C)) {
914 for (unsigned i = 0; i != NumElements; ++i)
915 Ops.push_back(getValue(CP->getOperand(i)));
917 assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
918 EVT EltVT = TLI.getValueType(VecTy->getElementType());
921 if (EltVT.isFloatingPoint())
922 Op = DAG.getConstantFP(0, EltVT);
924 Op = DAG.getConstant(0, EltVT);
925 Ops.assign(NumElements, Op);
928 // Create a BUILD_VECTOR node.
929 return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
930 VT, &Ops[0], Ops.size());
933 // If this is a static alloca, generate it as the frameindex instead of
935 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
936 DenseMap<const AllocaInst*, int>::iterator SI =
937 FuncInfo.StaticAllocaMap.find(AI);
938 if (SI != FuncInfo.StaticAllocaMap.end())
939 return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
942 unsigned InReg = FuncInfo.ValueMap[V];
943 assert(InReg && "Value not in map!");
945 RegsForValue RFV(*DAG.getContext(), TLI, InReg, V->getType());
946 SDValue Chain = DAG.getEntryNode();
947 return RFV.getCopyFromRegs(DAG, getCurDebugLoc(), Chain, NULL);
951 void SelectionDAGLowering::visitRet(ReturnInst &I) {
952 SDValue Chain = getControlRoot();
953 SmallVector<ISD::OutputArg, 8> Outs;
954 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
955 SmallVector<EVT, 4> ValueVTs;
956 ComputeValueVTs(TLI, I.getOperand(i)->getType(), ValueVTs);
957 unsigned NumValues = ValueVTs.size();
958 if (NumValues == 0) continue;
960 SDValue RetOp = getValue(I.getOperand(i));
961 for (unsigned j = 0, f = NumValues; j != f; ++j) {
962 EVT VT = ValueVTs[j];
964 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
966 const Function *F = I.getParent()->getParent();
967 if (F->paramHasAttr(0, Attribute::SExt))
968 ExtendKind = ISD::SIGN_EXTEND;
969 else if (F->paramHasAttr(0, Attribute::ZExt))
970 ExtendKind = ISD::ZERO_EXTEND;
972 // FIXME: C calling convention requires the return type to be promoted to
973 // at least 32-bit. But this is not necessary for non-C calling
974 // conventions. The frontend should mark functions whose return values
975 // require promoting with signext or zeroext attributes.
976 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
977 EVT MinVT = TLI.getRegisterType(*DAG.getContext(), MVT::i32);
978 if (VT.bitsLT(MinVT))
982 unsigned NumParts = TLI.getNumRegisters(*DAG.getContext(), VT);
983 EVT PartVT = TLI.getRegisterType(*DAG.getContext(), VT);
984 SmallVector<SDValue, 4> Parts(NumParts);
985 getCopyToParts(DAG, getCurDebugLoc(),
986 SDValue(RetOp.getNode(), RetOp.getResNo() + j),
987 &Parts[0], NumParts, PartVT, ExtendKind);
989 // 'inreg' on function refers to return value
990 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
991 if (F->paramHasAttr(0, Attribute::InReg))
994 // Propagate extension type if any
995 if (F->paramHasAttr(0, Attribute::SExt))
997 else if (F->paramHasAttr(0, Attribute::ZExt))
1000 for (unsigned i = 0; i < NumParts; ++i)
1001 Outs.push_back(ISD::OutputArg(Flags, Parts[i], /*isfixed=*/true));
1005 bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
1006 CallingConv::ID CallConv =
1007 DAG.getMachineFunction().getFunction()->getCallingConv();
1008 Chain = TLI.LowerReturn(Chain, CallConv, isVarArg,
1009 Outs, getCurDebugLoc(), DAG);
1011 // Verify that the target's LowerReturn behaved as expected.
1012 assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
1013 "LowerReturn didn't return a valid chain!");
1015 // Update the DAG with the new chain value resulting from return lowering.
1019 /// CopyToExportRegsIfNeeded - If the given value has virtual registers
1020 /// created for it, emit nodes to copy the value into the virtual
1022 void SelectionDAGLowering::CopyToExportRegsIfNeeded(Value *V) {
1023 if (!V->use_empty()) {
1024 DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
1025 if (VMI != FuncInfo.ValueMap.end())
1026 CopyValueToVirtualRegister(V, VMI->second);
1030 /// ExportFromCurrentBlock - If this condition isn't known to be exported from
1031 /// the current basic block, add it to ValueMap now so that we'll get a
1033 void SelectionDAGLowering::ExportFromCurrentBlock(Value *V) {
1034 // No need to export constants.
1035 if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
1037 // Already exported?
1038 if (FuncInfo.isExportedInst(V)) return;
1040 unsigned Reg = FuncInfo.InitializeRegForValue(V);
1041 CopyValueToVirtualRegister(V, Reg);
1044 bool SelectionDAGLowering::isExportableFromCurrentBlock(Value *V,
1045 const BasicBlock *FromBB) {
1046 // The operands of the setcc have to be in this block. We don't know
1047 // how to export them from some other block.
1048 if (Instruction *VI = dyn_cast<Instruction>(V)) {
1049 // Can export from current BB.
1050 if (VI->getParent() == FromBB)
1053 // Is already exported, noop.
1054 return FuncInfo.isExportedInst(V);
1057 // If this is an argument, we can export it if the BB is the entry block or
1058 // if it is already exported.
1059 if (isa<Argument>(V)) {
1060 if (FromBB == &FromBB->getParent()->getEntryBlock())
1063 // Otherwise, can only export this if it is already exported.
1064 return FuncInfo.isExportedInst(V);
1067 // Otherwise, constants can always be exported.
1071 static bool InBlock(const Value *V, const BasicBlock *BB) {
1072 if (const Instruction *I = dyn_cast<Instruction>(V))
1073 return I->getParent() == BB;
1077 /// getFCmpCondCode - Return the ISD condition code corresponding to
1078 /// the given LLVM IR floating-point condition code. This includes
1079 /// consideration of global floating-point math flags.
1081 static ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred) {
1082 ISD::CondCode FPC, FOC;
1084 case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
1085 case FCmpInst::FCMP_OEQ: FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
1086 case FCmpInst::FCMP_OGT: FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
1087 case FCmpInst::FCMP_OGE: FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
1088 case FCmpInst::FCMP_OLT: FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
1089 case FCmpInst::FCMP_OLE: FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
1090 case FCmpInst::FCMP_ONE: FOC = ISD::SETNE; FPC = ISD::SETONE; break;
1091 case FCmpInst::FCMP_ORD: FOC = FPC = ISD::SETO; break;
1092 case FCmpInst::FCMP_UNO: FOC = FPC = ISD::SETUO; break;
1093 case FCmpInst::FCMP_UEQ: FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
1094 case FCmpInst::FCMP_UGT: FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
1095 case FCmpInst::FCMP_UGE: FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
1096 case FCmpInst::FCMP_ULT: FOC = ISD::SETLT; FPC = ISD::SETULT; break;
1097 case FCmpInst::FCMP_ULE: FOC = ISD::SETLE; FPC = ISD::SETULE; break;
1098 case FCmpInst::FCMP_UNE: FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
1099 case FCmpInst::FCMP_TRUE: FOC = FPC = ISD::SETTRUE; break;
1101 llvm_unreachable("Invalid FCmp predicate opcode!");
1102 FOC = FPC = ISD::SETFALSE;
1105 if (FiniteOnlyFPMath())
1111 /// getICmpCondCode - Return the ISD condition code corresponding to
1112 /// the given LLVM IR integer condition code.
1114 static ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred) {
1116 case ICmpInst::ICMP_EQ: return ISD::SETEQ;
1117 case ICmpInst::ICMP_NE: return ISD::SETNE;
1118 case ICmpInst::ICMP_SLE: return ISD::SETLE;
1119 case ICmpInst::ICMP_ULE: return ISD::SETULE;
1120 case ICmpInst::ICMP_SGE: return ISD::SETGE;
1121 case ICmpInst::ICMP_UGE: return ISD::SETUGE;
1122 case ICmpInst::ICMP_SLT: return ISD::SETLT;
1123 case ICmpInst::ICMP_ULT: return ISD::SETULT;
1124 case ICmpInst::ICMP_SGT: return ISD::SETGT;
1125 case ICmpInst::ICMP_UGT: return ISD::SETUGT;
1127 llvm_unreachable("Invalid ICmp predicate opcode!");
1132 /// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
1133 /// This function emits a branch and is used at the leaves of an OR or an
1134 /// AND operator tree.
1137 SelectionDAGLowering::EmitBranchForMergedCondition(Value *Cond,
1138 MachineBasicBlock *TBB,
1139 MachineBasicBlock *FBB,
1140 MachineBasicBlock *CurBB) {
1141 const BasicBlock *BB = CurBB->getBasicBlock();
1143 // If the leaf of the tree is a comparison, merge the condition into
1145 if (CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
1146 // The operands of the cmp have to be in this block. We don't know
1147 // how to export them from some other block. If this is the first block
1148 // of the sequence, no exporting is needed.
1149 if (CurBB == CurMBB ||
1150 (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
1151 isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
1152 ISD::CondCode Condition;
1153 if (ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
1154 Condition = getICmpCondCode(IC->getPredicate());
1155 } else if (FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
1156 Condition = getFCmpCondCode(FC->getPredicate());
1158 Condition = ISD::SETEQ; // silence warning.
1159 llvm_unreachable("Unknown compare instruction");
1162 CaseBlock CB(Condition, BOp->getOperand(0),
1163 BOp->getOperand(1), NULL, TBB, FBB, CurBB);
1164 SwitchCases.push_back(CB);
1169 // Create a CaseBlock record representing this branch.
1170 CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(*DAG.getContext()),
1171 NULL, TBB, FBB, CurBB);
1172 SwitchCases.push_back(CB);
1175 /// FindMergedConditions - If Cond is an expression like
1176 void SelectionDAGLowering::FindMergedConditions(Value *Cond,
1177 MachineBasicBlock *TBB,
1178 MachineBasicBlock *FBB,
1179 MachineBasicBlock *CurBB,
1181 // If this node is not part of the or/and tree, emit it as a branch.
1182 Instruction *BOp = dyn_cast<Instruction>(Cond);
1183 if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
1184 (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
1185 BOp->getParent() != CurBB->getBasicBlock() ||
1186 !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
1187 !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
1188 EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB);
1192 // Create TmpBB after CurBB.
1193 MachineFunction::iterator BBI = CurBB;
1194 MachineFunction &MF = DAG.getMachineFunction();
1195 MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
1196 CurBB->getParent()->insert(++BBI, TmpBB);
1198 if (Opc == Instruction::Or) {
1199 // Codegen X | Y as:
1207 // Emit the LHS condition.
1208 FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, Opc);
1210 // Emit the RHS condition into TmpBB.
1211 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
1213 assert(Opc == Instruction::And && "Unknown merge op!");
1214 // Codegen X & Y as:
1221 // This requires creation of TmpBB after CurBB.
1223 // Emit the LHS condition.
1224 FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, Opc);
1226 // Emit the RHS condition into TmpBB.
1227 FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
1231 /// If the set of cases should be emitted as a series of branches, return true.
1232 /// If we should emit this as a bunch of and/or'd together conditions, return
1235 SelectionDAGLowering::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases){
1236 if (Cases.size() != 2) return true;
1238 // If this is two comparisons of the same values or'd or and'd together, they
1239 // will get folded into a single comparison, so don't emit two blocks.
1240 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
1241 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
1242 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
1243 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
1250 void SelectionDAGLowering::visitBr(BranchInst &I) {
1251 // Update machine-CFG edges.
1252 MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
1254 // Figure out which block is immediately after the current one.
1255 MachineBasicBlock *NextBlock = 0;
1256 MachineFunction::iterator BBI = CurMBB;
1257 if (++BBI != FuncInfo.MF->end())
1260 if (I.isUnconditional()) {
1261 // Update machine-CFG edges.
1262 CurMBB->addSuccessor(Succ0MBB);
1264 // If this is not a fall-through branch, emit the branch.
1265 if (Succ0MBB != NextBlock)
1266 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
1267 MVT::Other, getControlRoot(),
1268 DAG.getBasicBlock(Succ0MBB)));
1272 // If this condition is one of the special cases we handle, do special stuff
1274 Value *CondVal = I.getCondition();
1275 MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
1277 // If this is a series of conditions that are or'd or and'd together, emit
1278 // this as a sequence of branches instead of setcc's with and/or operations.
1279 // For example, instead of something like:
1292 if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
1293 if (BOp->hasOneUse() &&
1294 (BOp->getOpcode() == Instruction::And ||
1295 BOp->getOpcode() == Instruction::Or)) {
1296 FindMergedConditions(BOp, Succ0MBB, Succ1MBB, CurMBB, BOp->getOpcode());
1297 // If the compares in later blocks need to use values not currently
1298 // exported from this block, export them now. This block should always
1299 // be the first entry.
1300 assert(SwitchCases[0].ThisBB == CurMBB && "Unexpected lowering!");
1302 // Allow some cases to be rejected.
1303 if (ShouldEmitAsBranches(SwitchCases)) {
1304 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
1305 ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
1306 ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
1309 // Emit the branch for this block.
1310 visitSwitchCase(SwitchCases[0]);
1311 SwitchCases.erase(SwitchCases.begin());
1315 // Okay, we decided not to do this, remove any inserted MBB's and clear
1317 for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
1318 FuncInfo.MF->erase(SwitchCases[i].ThisBB);
1320 SwitchCases.clear();
1324 // Create a CaseBlock record representing this branch.
1325 CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
1326 NULL, Succ0MBB, Succ1MBB, CurMBB);
1327 // Use visitSwitchCase to actually insert the fast branch sequence for this
1329 visitSwitchCase(CB);
1332 /// visitSwitchCase - Emits the necessary code to represent a single node in
1333 /// the binary search tree resulting from lowering a switch instruction.
1334 void SelectionDAGLowering::visitSwitchCase(CaseBlock &CB) {
1336 SDValue CondLHS = getValue(CB.CmpLHS);
1337 DebugLoc dl = getCurDebugLoc();
1339 // Build the setcc now.
1340 if (CB.CmpMHS == NULL) {
1341 // Fold "(X == true)" to X and "(X == false)" to !X to
1342 // handle common cases produced by branch lowering.
1343 if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
1344 CB.CC == ISD::SETEQ)
1346 else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
1347 CB.CC == ISD::SETEQ) {
1348 SDValue True = DAG.getConstant(1, CondLHS.getValueType());
1349 Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
1351 Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
1353 assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
1355 const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
1356 const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
1358 SDValue CmpOp = getValue(CB.CmpMHS);
1359 EVT VT = CmpOp.getValueType();
1361 if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
1362 Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, VT),
1365 SDValue SUB = DAG.getNode(ISD::SUB, dl,
1366 VT, CmpOp, DAG.getConstant(Low, VT));
1367 Cond = DAG.getSetCC(dl, MVT::i1, SUB,
1368 DAG.getConstant(High-Low, VT), ISD::SETULE);
1372 // Update successor info
1373 CurMBB->addSuccessor(CB.TrueBB);
1374 CurMBB->addSuccessor(CB.FalseBB);
1376 // Set NextBlock to be the MBB immediately after the current one, if any.
1377 // This is used to avoid emitting unnecessary branches to the next block.
1378 MachineBasicBlock *NextBlock = 0;
1379 MachineFunction::iterator BBI = CurMBB;
1380 if (++BBI != FuncInfo.MF->end())
1383 // If the lhs block is the next block, invert the condition so that we can
1384 // fall through to the lhs instead of the rhs block.
1385 if (CB.TrueBB == NextBlock) {
1386 std::swap(CB.TrueBB, CB.FalseBB);
1387 SDValue True = DAG.getConstant(1, Cond.getValueType());
1388 Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
1390 SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
1391 MVT::Other, getControlRoot(), Cond,
1392 DAG.getBasicBlock(CB.TrueBB));
1394 // If the branch was constant folded, fix up the CFG.
1395 if (BrCond.getOpcode() == ISD::BR) {
1396 CurMBB->removeSuccessor(CB.FalseBB);
1397 DAG.setRoot(BrCond);
1399 // Otherwise, go ahead and insert the false branch.
1400 if (BrCond == getControlRoot())
1401 CurMBB->removeSuccessor(CB.TrueBB);
1403 if (CB.FalseBB == NextBlock)
1404 DAG.setRoot(BrCond);
1406 DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
1407 DAG.getBasicBlock(CB.FalseBB)));
1411 /// visitJumpTable - Emit JumpTable node in the current MBB
1412 void SelectionDAGLowering::visitJumpTable(JumpTable &JT) {
1413 // Emit the code for the jump table
1414 assert(JT.Reg != -1U && "Should lower JT Header first!");
1415 EVT PTy = TLI.getPointerTy();
1416 SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(),
1418 SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
1419 DAG.setRoot(DAG.getNode(ISD::BR_JT, getCurDebugLoc(),
1420 MVT::Other, Index.getValue(1),
1424 /// visitJumpTableHeader - This function emits necessary code to produce index
1425 /// in the JumpTable from switch case.
1426 void SelectionDAGLowering::visitJumpTableHeader(JumpTable &JT,
1427 JumpTableHeader &JTH) {
1428 // Subtract the lowest switch case value from the value being switched on and
1429 // conditional branch to default mbb if the result is greater than the
1430 // difference between smallest and largest cases.
1431 SDValue SwitchOp = getValue(JTH.SValue);
1432 EVT VT = SwitchOp.getValueType();
1433 SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
1434 DAG.getConstant(JTH.First, VT));
1436 // The SDNode we just created, which holds the value being switched on minus
1437 // the the smallest case value, needs to be copied to a virtual register so it
1438 // can be used as an index into the jump table in a subsequent basic block.
1439 // This value may be smaller or larger than the target's pointer type, and
1440 // therefore require extension or truncating.
1441 SwitchOp = DAG.getZExtOrTrunc(SUB, getCurDebugLoc(), TLI.getPointerTy());
1443 unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
1444 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
1445 JumpTableReg, SwitchOp);
1446 JT.Reg = JumpTableReg;
1448 // Emit the range check for the jump table, and branch to the default block
1449 // for the switch statement if the value being switched on exceeds the largest
1450 // case in the switch.
1451 SDValue CMP = DAG.getSetCC(getCurDebugLoc(),
1452 TLI.getSetCCResultType(SUB.getValueType()), SUB,
1453 DAG.getConstant(JTH.Last-JTH.First,VT),
1456 // Set NextBlock to be the MBB immediately after the current one, if any.
1457 // This is used to avoid emitting unnecessary branches to the next block.
1458 MachineBasicBlock *NextBlock = 0;
1459 MachineFunction::iterator BBI = CurMBB;
1460 if (++BBI != FuncInfo.MF->end())
1463 SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1464 MVT::Other, CopyTo, CMP,
1465 DAG.getBasicBlock(JT.Default));
1467 if (JT.MBB == NextBlock)
1468 DAG.setRoot(BrCond);
1470 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrCond,
1471 DAG.getBasicBlock(JT.MBB)));
1474 /// visitBitTestHeader - This function emits necessary code to produce value
1475 /// suitable for "bit tests"
1476 void SelectionDAGLowering::visitBitTestHeader(BitTestBlock &B) {
1477 // Subtract the minimum value
1478 SDValue SwitchOp = getValue(B.SValue);
1479 EVT VT = SwitchOp.getValueType();
1480 SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
1481 DAG.getConstant(B.First, VT));
1484 SDValue RangeCmp = DAG.getSetCC(getCurDebugLoc(),
1485 TLI.getSetCCResultType(SUB.getValueType()),
1486 SUB, DAG.getConstant(B.Range, VT),
1489 SDValue ShiftOp = DAG.getZExtOrTrunc(SUB, getCurDebugLoc(), TLI.getPointerTy());
1491 B.Reg = FuncInfo.MakeReg(TLI.getPointerTy());
1492 SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
1495 // Set NextBlock to be the MBB immediately after the current one, if any.
1496 // This is used to avoid emitting unnecessary branches to the next block.
1497 MachineBasicBlock *NextBlock = 0;
1498 MachineFunction::iterator BBI = CurMBB;
1499 if (++BBI != FuncInfo.MF->end())
1502 MachineBasicBlock* MBB = B.Cases[0].ThisBB;
1504 CurMBB->addSuccessor(B.Default);
1505 CurMBB->addSuccessor(MBB);
1507 SDValue BrRange = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1508 MVT::Other, CopyTo, RangeCmp,
1509 DAG.getBasicBlock(B.Default));
1511 if (MBB == NextBlock)
1512 DAG.setRoot(BrRange);
1514 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, CopyTo,
1515 DAG.getBasicBlock(MBB)));
1518 /// visitBitTestCase - this function produces one "bit test"
1519 void SelectionDAGLowering::visitBitTestCase(MachineBasicBlock* NextMBB,
1522 // Make desired shift
1523 SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(), Reg,
1524 TLI.getPointerTy());
1525 SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(),
1527 DAG.getConstant(1, TLI.getPointerTy()),
1530 // Emit bit tests and jumps
1531 SDValue AndOp = DAG.getNode(ISD::AND, getCurDebugLoc(),
1532 TLI.getPointerTy(), SwitchVal,
1533 DAG.getConstant(B.Mask, TLI.getPointerTy()));
1534 SDValue AndCmp = DAG.getSetCC(getCurDebugLoc(),
1535 TLI.getSetCCResultType(AndOp.getValueType()),
1536 AndOp, DAG.getConstant(0, TLI.getPointerTy()),
1539 CurMBB->addSuccessor(B.TargetBB);
1540 CurMBB->addSuccessor(NextMBB);
1542 SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
1543 MVT::Other, getControlRoot(),
1544 AndCmp, DAG.getBasicBlock(B.TargetBB));
1546 // Set NextBlock to be the MBB immediately after the current one, if any.
1547 // This is used to avoid emitting unnecessary branches to the next block.
1548 MachineBasicBlock *NextBlock = 0;
1549 MachineFunction::iterator BBI = CurMBB;
1550 if (++BBI != FuncInfo.MF->end())
1553 if (NextMBB == NextBlock)
1556 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrAnd,
1557 DAG.getBasicBlock(NextMBB)));
1560 void SelectionDAGLowering::visitInvoke(InvokeInst &I) {
1561 // Retrieve successors.
1562 MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
1563 MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];
1565 const Value *Callee(I.getCalledValue());
1566 if (isa<InlineAsm>(Callee))
1569 LowerCallTo(&I, getValue(Callee), false, LandingPad);
1571 // If the value of the invoke is used outside of its defining block, make it
1572 // available as a virtual register.
1573 CopyToExportRegsIfNeeded(&I);
1575 // Update successor info
1576 CurMBB->addSuccessor(Return);
1577 CurMBB->addSuccessor(LandingPad);
1579 // Drop into normal successor.
1580 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
1581 MVT::Other, getControlRoot(),
1582 DAG.getBasicBlock(Return)));
1585 void SelectionDAGLowering::visitUnwind(UnwindInst &I) {
1588 /// handleSmallSwitchCaseRange - Emit a series of specific tests (suitable for
1589 /// small case ranges).
1590 bool SelectionDAGLowering::handleSmallSwitchRange(CaseRec& CR,
1591 CaseRecVector& WorkList,
1593 MachineBasicBlock* Default) {
1594 Case& BackCase = *(CR.Range.second-1);
1596 // Size is the number of Cases represented by this range.
1597 size_t Size = CR.Range.second - CR.Range.first;
1601 // Get the MachineFunction which holds the current MBB. This is used when
1602 // inserting any additional MBBs necessary to represent the switch.
1603 MachineFunction *CurMF = FuncInfo.MF;
1605 // Figure out which block is immediately after the current one.
1606 MachineBasicBlock *NextBlock = 0;
1607 MachineFunction::iterator BBI = CR.CaseBB;
1609 if (++BBI != FuncInfo.MF->end())
1612 // TODO: If any two of the cases has the same destination, and if one value
1613 // is the same as the other, but has one bit unset that the other has set,
1614 // use bit manipulation to do two compares at once. For example:
1615 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
1617 // Rearrange the case blocks so that the last one falls through if possible.
1618 if (NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
1619 // The last case block won't fall through into 'NextBlock' if we emit the
1620 // branches in this order. See if rearranging a case value would help.
1621 for (CaseItr I = CR.Range.first, E = CR.Range.second-1; I != E; ++I) {
1622 if (I->BB == NextBlock) {
1623 std::swap(*I, BackCase);
1629 // Create a CaseBlock record representing a conditional branch to
1630 // the Case's target mbb if the value being switched on SV is equal
1632 MachineBasicBlock *CurBlock = CR.CaseBB;
1633 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
1634 MachineBasicBlock *FallThrough;
1636 FallThrough = CurMF->CreateMachineBasicBlock(CurBlock->getBasicBlock());
1637 CurMF->insert(BBI, FallThrough);
1639 // Put SV in a virtual register to make it available from the new blocks.
1640 ExportFromCurrentBlock(SV);
1642 // If the last case doesn't match, go to the default block.
1643 FallThrough = Default;
1646 Value *RHS, *LHS, *MHS;
1648 if (I->High == I->Low) {
1649 // This is just small small case range :) containing exactly 1 case
1651 LHS = SV; RHS = I->High; MHS = NULL;
1654 LHS = I->Low; MHS = SV; RHS = I->High;
1656 CaseBlock CB(CC, LHS, RHS, MHS, I->BB, FallThrough, CurBlock);
1658 // If emitting the first comparison, just call visitSwitchCase to emit the
1659 // code into the current block. Otherwise, push the CaseBlock onto the
1660 // vector to be later processed by SDISel, and insert the node's MBB
1661 // before the next MBB.
1662 if (CurBlock == CurMBB)
1663 visitSwitchCase(CB);
1665 SwitchCases.push_back(CB);
1667 CurBlock = FallThrough;
1673 static inline bool areJTsAllowed(const TargetLowering &TLI) {
1674 return !DisableJumpTables &&
1675 (TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
1676 TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
1679 static APInt ComputeRange(const APInt &First, const APInt &Last) {
1680 APInt LastExt(Last), FirstExt(First);
1681 uint32_t BitWidth = std::max(Last.getBitWidth(), First.getBitWidth()) + 1;
1682 LastExt.sext(BitWidth); FirstExt.sext(BitWidth);
1683 return (LastExt - FirstExt + 1ULL);
1686 /// handleJTSwitchCase - Emit jumptable for current switch case range
1687 bool SelectionDAGLowering::handleJTSwitchCase(CaseRec& CR,
1688 CaseRecVector& WorkList,
1690 MachineBasicBlock* Default) {
1691 Case& FrontCase = *CR.Range.first;
1692 Case& BackCase = *(CR.Range.second-1);
1694 const APInt& First = cast<ConstantInt>(FrontCase.Low)->getValue();
1695 const APInt& Last = cast<ConstantInt>(BackCase.High)->getValue();
1698 for (CaseItr I = CR.Range.first, E = CR.Range.second;
1702 if (!areJTsAllowed(TLI) || TSize <= 3)
1705 APInt Range = ComputeRange(First, Last);
1706 double Density = (double)TSize / Range.roundToDouble();
1710 DEBUG(errs() << "Lowering jump table\n"
1711 << "First entry: " << First << ". Last entry: " << Last << '\n'
1712 << "Range: " << Range
1713 << "Size: " << TSize << ". Density: " << Density << "\n\n");
1715 // Get the MachineFunction which holds the current MBB. This is used when
1716 // inserting any additional MBBs necessary to represent the switch.
1717 MachineFunction *CurMF = FuncInfo.MF;
1719 // Figure out which block is immediately after the current one.
1720 MachineFunction::iterator BBI = CR.CaseBB;
1723 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1725 // Create a new basic block to hold the code for loading the address
1726 // of the jump table, and jumping to it. Update successor information;
1727 // we will either branch to the default case for the switch, or the jump
1729 MachineBasicBlock *JumpTableBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1730 CurMF->insert(BBI, JumpTableBB);
1731 CR.CaseBB->addSuccessor(Default);
1732 CR.CaseBB->addSuccessor(JumpTableBB);
1734 // Build a vector of destination BBs, corresponding to each target
1735 // of the jump table. If the value of the jump table slot corresponds to
1736 // a case statement, push the case's BB onto the vector, otherwise, push
1738 std::vector<MachineBasicBlock*> DestBBs;
1740 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++TEI) {
1741 const APInt& Low = cast<ConstantInt>(I->Low)->getValue();
1742 const APInt& High = cast<ConstantInt>(I->High)->getValue();
1744 if (Low.sle(TEI) && TEI.sle(High)) {
1745 DestBBs.push_back(I->BB);
1749 DestBBs.push_back(Default);
1753 // Update successor info. Add one edge to each unique successor.
1754 BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs());
1755 for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
1756 E = DestBBs.end(); I != E; ++I) {
1757 if (!SuccsHandled[(*I)->getNumber()]) {
1758 SuccsHandled[(*I)->getNumber()] = true;
1759 JumpTableBB->addSuccessor(*I);
1763 // Create a jump table index for this jump table, or return an existing
1765 unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);
1767 // Set the jump table information so that we can codegen it as a second
1768 // MachineBasicBlock
1769 JumpTable JT(-1U, JTI, JumpTableBB, Default);
1770 JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == CurMBB));
1771 if (CR.CaseBB == CurMBB)
1772 visitJumpTableHeader(JT, JTH);
1774 JTCases.push_back(JumpTableBlock(JTH, JT));
1779 /// handleBTSplitSwitchCase - emit comparison and split binary search tree into
1781 bool SelectionDAGLowering::handleBTSplitSwitchCase(CaseRec& CR,
1782 CaseRecVector& WorkList,
1784 MachineBasicBlock* Default) {
1785 // Get the MachineFunction which holds the current MBB. This is used when
1786 // inserting any additional MBBs necessary to represent the switch.
1787 MachineFunction *CurMF = FuncInfo.MF;
1789 // Figure out which block is immediately after the current one.
1790 MachineFunction::iterator BBI = CR.CaseBB;
1793 Case& FrontCase = *CR.Range.first;
1794 Case& BackCase = *(CR.Range.second-1);
1795 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1797 // Size is the number of Cases represented by this range.
1798 unsigned Size = CR.Range.second - CR.Range.first;
1800 const APInt& First = cast<ConstantInt>(FrontCase.Low)->getValue();
1801 const APInt& Last = cast<ConstantInt>(BackCase.High)->getValue();
1803 CaseItr Pivot = CR.Range.first + Size/2;
1805 // Select optimal pivot, maximizing sum density of LHS and RHS. This will
1806 // (heuristically) allow us to emit JumpTable's later.
1808 for (CaseItr I = CR.Range.first, E = CR.Range.second;
1812 size_t LSize = FrontCase.size();
1813 size_t RSize = TSize-LSize;
1814 DEBUG(errs() << "Selecting best pivot: \n"
1815 << "First: " << First << ", Last: " << Last <<'\n'
1816 << "LSize: " << LSize << ", RSize: " << RSize << '\n');
1817 for (CaseItr I = CR.Range.first, J=I+1, E = CR.Range.second;
1819 const APInt& LEnd = cast<ConstantInt>(I->High)->getValue();
1820 const APInt& RBegin = cast<ConstantInt>(J->Low)->getValue();
1821 APInt Range = ComputeRange(LEnd, RBegin);
1822 assert((Range - 2ULL).isNonNegative() &&
1823 "Invalid case distance");
1824 double LDensity = (double)LSize / (LEnd - First + 1ULL).roundToDouble();
1825 double RDensity = (double)RSize / (Last - RBegin + 1ULL).roundToDouble();
1826 double Metric = Range.logBase2()*(LDensity+RDensity);
1827 // Should always split in some non-trivial place
1828 DEBUG(errs() <<"=>Step\n"
1829 << "LEnd: " << LEnd << ", RBegin: " << RBegin << '\n'
1830 << "LDensity: " << LDensity
1831 << ", RDensity: " << RDensity << '\n'
1832 << "Metric: " << Metric << '\n');
1833 if (FMetric < Metric) {
1836 DEBUG(errs() << "Current metric set to: " << FMetric << '\n');
1842 if (areJTsAllowed(TLI)) {
1843 // If our case is dense we *really* should handle it earlier!
1844 assert((FMetric > 0) && "Should handle dense range earlier!");
1846 Pivot = CR.Range.first + Size/2;
1849 CaseRange LHSR(CR.Range.first, Pivot);
1850 CaseRange RHSR(Pivot, CR.Range.second);
1851 Constant *C = Pivot->Low;
1852 MachineBasicBlock *FalseBB = 0, *TrueBB = 0;
1854 // We know that we branch to the LHS if the Value being switched on is
1855 // less than the Pivot value, C. We use this to optimize our binary
1856 // tree a bit, by recognizing that if SV is greater than or equal to the
1857 // LHS's Case Value, and that Case Value is exactly one less than the
1858 // Pivot's Value, then we can branch directly to the LHS's Target,
1859 // rather than creating a leaf node for it.
1860 if ((LHSR.second - LHSR.first) == 1 &&
1861 LHSR.first->High == CR.GE &&
1862 cast<ConstantInt>(C)->getValue() ==
1863 (cast<ConstantInt>(CR.GE)->getValue() + 1LL)) {
1864 TrueBB = LHSR.first->BB;
1866 TrueBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1867 CurMF->insert(BBI, TrueBB);
1868 WorkList.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
1870 // Put SV in a virtual register to make it available from the new blocks.
1871 ExportFromCurrentBlock(SV);
1874 // Similar to the optimization above, if the Value being switched on is
1875 // known to be less than the Constant CR.LT, and the current Case Value
1876 // is CR.LT - 1, then we can branch directly to the target block for
1877 // the current Case Value, rather than emitting a RHS leaf node for it.
1878 if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
1879 cast<ConstantInt>(RHSR.first->Low)->getValue() ==
1880 (cast<ConstantInt>(CR.LT)->getValue() - 1LL)) {
1881 FalseBB = RHSR.first->BB;
1883 FalseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1884 CurMF->insert(BBI, FalseBB);
1885 WorkList.push_back(CaseRec(FalseBB,CR.LT,C,RHSR));
1887 // Put SV in a virtual register to make it available from the new blocks.
1888 ExportFromCurrentBlock(SV);
1891 // Create a CaseBlock record representing a conditional branch to
1892 // the LHS node if the value being switched on SV is less than C.
1893 // Otherwise, branch to LHS.
1894 CaseBlock CB(ISD::SETLT, SV, C, NULL, TrueBB, FalseBB, CR.CaseBB);
1896 if (CR.CaseBB == CurMBB)
1897 visitSwitchCase(CB);
1899 SwitchCases.push_back(CB);
1904 /// handleBitTestsSwitchCase - if current case range has few destination and
1905 /// range span less, than machine word bitwidth, encode case range into series
1906 /// of masks and emit bit tests with these masks.
1907 bool SelectionDAGLowering::handleBitTestsSwitchCase(CaseRec& CR,
1908 CaseRecVector& WorkList,
1910 MachineBasicBlock* Default){
1911 EVT PTy = TLI.getPointerTy();
1912 unsigned IntPtrBits = PTy.getSizeInBits();
1914 Case& FrontCase = *CR.Range.first;
1915 Case& BackCase = *(CR.Range.second-1);
1917 // Get the MachineFunction which holds the current MBB. This is used when
1918 // inserting any additional MBBs necessary to represent the switch.
1919 MachineFunction *CurMF = FuncInfo.MF;
1921 // If target does not have legal shift left, do not emit bit tests at all.
1922 if (!TLI.isOperationLegal(ISD::SHL, TLI.getPointerTy()))
1926 for (CaseItr I = CR.Range.first, E = CR.Range.second;
1928 // Single case counts one, case range - two.
1929 numCmps += (I->Low == I->High ? 1 : 2);
1932 // Count unique destinations
1933 SmallSet<MachineBasicBlock*, 4> Dests;
1934 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1935 Dests.insert(I->BB);
1936 if (Dests.size() > 3)
1937 // Don't bother the code below, if there are too much unique destinations
1940 DEBUG(errs() << "Total number of unique destinations: " << Dests.size() << '\n'
1941 << "Total number of comparisons: " << numCmps << '\n');
1943 // Compute span of values.
1944 const APInt& minValue = cast<ConstantInt>(FrontCase.Low)->getValue();
1945 const APInt& maxValue = cast<ConstantInt>(BackCase.High)->getValue();
1946 APInt cmpRange = maxValue - minValue;
1948 DEBUG(errs() << "Compare range: " << cmpRange << '\n'
1949 << "Low bound: " << minValue << '\n'
1950 << "High bound: " << maxValue << '\n');
1952 if (cmpRange.uge(APInt(cmpRange.getBitWidth(), IntPtrBits)) ||
1953 (!(Dests.size() == 1 && numCmps >= 3) &&
1954 !(Dests.size() == 2 && numCmps >= 5) &&
1955 !(Dests.size() >= 3 && numCmps >= 6)))
1958 DEBUG(errs() << "Emitting bit tests\n");
1959 APInt lowBound = APInt::getNullValue(cmpRange.getBitWidth());
1961 // Optimize the case where all the case values fit in a
1962 // word without having to subtract minValue. In this case,
1963 // we can optimize away the subtraction.
1964 if (minValue.isNonNegative() &&
1965 maxValue.slt(APInt(maxValue.getBitWidth(), IntPtrBits))) {
1966 cmpRange = maxValue;
1968 lowBound = minValue;
1971 CaseBitsVector CasesBits;
1972 unsigned i, count = 0;
1974 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1975 MachineBasicBlock* Dest = I->BB;
1976 for (i = 0; i < count; ++i)
1977 if (Dest == CasesBits[i].BB)
1981 assert((count < 3) && "Too much destinations to test!");
1982 CasesBits.push_back(CaseBits(0, Dest, 0));
1986 const APInt& lowValue = cast<ConstantInt>(I->Low)->getValue();
1987 const APInt& highValue = cast<ConstantInt>(I->High)->getValue();
1989 uint64_t lo = (lowValue - lowBound).getZExtValue();
1990 uint64_t hi = (highValue - lowBound).getZExtValue();
1992 for (uint64_t j = lo; j <= hi; j++) {
1993 CasesBits[i].Mask |= 1ULL << j;
1994 CasesBits[i].Bits++;
1998 std::sort(CasesBits.begin(), CasesBits.end(), CaseBitsCmp());
2002 // Figure out which block is immediately after the current one.
2003 MachineFunction::iterator BBI = CR.CaseBB;
2006 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
2008 DEBUG(errs() << "Cases:\n");
2009 for (unsigned i = 0, e = CasesBits.size(); i!=e; ++i) {
2010 DEBUG(errs() << "Mask: " << CasesBits[i].Mask
2011 << ", Bits: " << CasesBits[i].Bits
2012 << ", BB: " << CasesBits[i].BB << '\n');
2014 MachineBasicBlock *CaseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
2015 CurMF->insert(BBI, CaseBB);
2016 BTC.push_back(BitTestCase(CasesBits[i].Mask,
2020 // Put SV in a virtual register to make it available from the new blocks.
2021 ExportFromCurrentBlock(SV);
2024 BitTestBlock BTB(lowBound, cmpRange, SV,
2025 -1U, (CR.CaseBB == CurMBB),
2026 CR.CaseBB, Default, BTC);
2028 if (CR.CaseBB == CurMBB)
2029 visitBitTestHeader(BTB);
2031 BitTestCases.push_back(BTB);
2037 /// Clusterify - Transform simple list of Cases into list of CaseRange's
2038 size_t SelectionDAGLowering::Clusterify(CaseVector& Cases,
2039 const SwitchInst& SI) {
2042 // Start with "simple" cases
2043 for (size_t i = 1; i < SI.getNumSuccessors(); ++i) {
2044 MachineBasicBlock *SMBB = FuncInfo.MBBMap[SI.getSuccessor(i)];
2045 Cases.push_back(Case(SI.getSuccessorValue(i),
2046 SI.getSuccessorValue(i),
2049 std::sort(Cases.begin(), Cases.end(), CaseCmp());
2051 // Merge case into clusters
2052 if (Cases.size() >= 2)
2053 // Must recompute end() each iteration because it may be
2054 // invalidated by erase if we hold on to it
2055 for (CaseItr I = Cases.begin(), J = ++(Cases.begin()); J != Cases.end(); ) {
2056 const APInt& nextValue = cast<ConstantInt>(J->Low)->getValue();
2057 const APInt& currentValue = cast<ConstantInt>(I->High)->getValue();
2058 MachineBasicBlock* nextBB = J->BB;
2059 MachineBasicBlock* currentBB = I->BB;
2061 // If the two neighboring cases go to the same destination, merge them
2062 // into a single case.
2063 if ((nextValue - currentValue == 1) && (currentBB == nextBB)) {
2071 for (CaseItr I=Cases.begin(), E=Cases.end(); I!=E; ++I, ++numCmps) {
2072 if (I->Low != I->High)
2073 // A range counts double, since it requires two compares.
2080 void SelectionDAGLowering::visitSwitch(SwitchInst &SI) {
2081 // Figure out which block is immediately after the current one.
2082 MachineBasicBlock *NextBlock = 0;
2084 MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
2086 // If there is only the default destination, branch to it if it is not the
2087 // next basic block. Otherwise, just fall through.
2088 if (SI.getNumOperands() == 2) {
2089 // Update machine-CFG edges.
2091 // If this is not a fall-through branch, emit the branch.
2092 CurMBB->addSuccessor(Default);
2093 if (Default != NextBlock)
2094 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
2095 MVT::Other, getControlRoot(),
2096 DAG.getBasicBlock(Default)));
2100 // If there are any non-default case statements, create a vector of Cases
2101 // representing each one, and sort the vector so that we can efficiently
2102 // create a binary search tree from them.
2104 size_t numCmps = Clusterify(Cases, SI);
2105 DEBUG(errs() << "Clusterify finished. Total clusters: " << Cases.size()
2106 << ". Total compares: " << numCmps << '\n');
2109 // Get the Value to be switched on and default basic blocks, which will be
2110 // inserted into CaseBlock records, representing basic blocks in the binary
2112 Value *SV = SI.getOperand(0);
2114 // Push the initial CaseRec onto the worklist
2115 CaseRecVector WorkList;
2116 WorkList.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));
2118 while (!WorkList.empty()) {
2119 // Grab a record representing a case range to process off the worklist
2120 CaseRec CR = WorkList.back();
2121 WorkList.pop_back();
2123 if (handleBitTestsSwitchCase(CR, WorkList, SV, Default))
2126 // If the range has few cases (two or less) emit a series of specific
2128 if (handleSmallSwitchRange(CR, WorkList, SV, Default))
2131 // If the switch has more than 5 blocks, and at least 40% dense, and the
2132 // target supports indirect branches, then emit a jump table rather than
2133 // lowering the switch to a binary tree of conditional branches.
2134 if (handleJTSwitchCase(CR, WorkList, SV, Default))
2137 // Emit binary tree. We need to pick a pivot, and push left and right ranges
2138 // onto the worklist. Leafs are handled via handleSmallSwitchRange() call.
2139 handleBTSplitSwitchCase(CR, WorkList, SV, Default);
2143 void SelectionDAGLowering::visitIndirectBr(IndirectBrInst &I) {
2144 // Update machine-CFG edges.
2145 for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i)
2146 CurMBB->addSuccessor(FuncInfo.MBBMap[I.getSuccessor(i)]);
2148 DAG.setRoot(DAG.getNode(ISD::BRIND, getCurDebugLoc(),
2149 MVT::Other, getControlRoot(),
2150 getValue(I.getAddress())));
2154 void SelectionDAGLowering::visitFSub(User &I) {
2155 // -0.0 - X --> fneg
2156 const Type *Ty = I.getType();
2157 if (isa<VectorType>(Ty)) {
2158 if (ConstantVector *CV = dyn_cast<ConstantVector>(I.getOperand(0))) {
2159 const VectorType *DestTy = cast<VectorType>(I.getType());
2160 const Type *ElTy = DestTy->getElementType();
2161 unsigned VL = DestTy->getNumElements();
2162 std::vector<Constant*> NZ(VL, ConstantFP::getNegativeZero(ElTy));
2163 Constant *CNZ = ConstantVector::get(&NZ[0], NZ.size());
2165 SDValue Op2 = getValue(I.getOperand(1));
2166 setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2167 Op2.getValueType(), Op2));
2172 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
2173 if (CFP->isExactlyValue(ConstantFP::getNegativeZero(Ty)->getValueAPF())) {
2174 SDValue Op2 = getValue(I.getOperand(1));
2175 setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2176 Op2.getValueType(), Op2));
2180 visitBinary(I, ISD::FSUB);
2183 void SelectionDAGLowering::visitBinary(User &I, unsigned OpCode) {
2184 SDValue Op1 = getValue(I.getOperand(0));
2185 SDValue Op2 = getValue(I.getOperand(1));
2187 setValue(&I, DAG.getNode(OpCode, getCurDebugLoc(),
2188 Op1.getValueType(), Op1, Op2));
2191 void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
2192 SDValue Op1 = getValue(I.getOperand(0));
2193 SDValue Op2 = getValue(I.getOperand(1));
2194 if (!isa<VectorType>(I.getType()) &&
2195 Op2.getValueType() != TLI.getShiftAmountTy()) {
2196 // If the operand is smaller than the shift count type, promote it.
2197 EVT PTy = TLI.getPointerTy();
2198 EVT STy = TLI.getShiftAmountTy();
2199 if (STy.bitsGT(Op2.getValueType()))
2200 Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2201 TLI.getShiftAmountTy(), Op2);
2202 // If the operand is larger than the shift count type but the shift
2203 // count type has enough bits to represent any shift value, truncate
2204 // it now. This is a common case and it exposes the truncate to
2205 // optimization early.
2206 else if (STy.getSizeInBits() >=
2207 Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
2208 Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2209 TLI.getShiftAmountTy(), Op2);
2210 // Otherwise we'll need to temporarily settle for some other
2211 // convenient type; type legalization will make adjustments as
2213 else if (PTy.bitsLT(Op2.getValueType()))
2214 Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2215 TLI.getPointerTy(), Op2);
2216 else if (PTy.bitsGT(Op2.getValueType()))
2217 Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2218 TLI.getPointerTy(), Op2);
2221 setValue(&I, DAG.getNode(Opcode, getCurDebugLoc(),
2222 Op1.getValueType(), Op1, Op2));
2225 void SelectionDAGLowering::visitICmp(User &I) {
2226 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2227 if (ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2228 predicate = IC->getPredicate();
2229 else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2230 predicate = ICmpInst::Predicate(IC->getPredicate());
2231 SDValue Op1 = getValue(I.getOperand(0));
2232 SDValue Op2 = getValue(I.getOperand(1));
2233 ISD::CondCode Opcode = getICmpCondCode(predicate);
2235 EVT DestVT = TLI.getValueType(I.getType());
2236 setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Opcode));
2239 void SelectionDAGLowering::visitFCmp(User &I) {
2240 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2241 if (FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2242 predicate = FC->getPredicate();
2243 else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2244 predicate = FCmpInst::Predicate(FC->getPredicate());
2245 SDValue Op1 = getValue(I.getOperand(0));
2246 SDValue Op2 = getValue(I.getOperand(1));
2247 ISD::CondCode Condition = getFCmpCondCode(predicate);
2248 EVT DestVT = TLI.getValueType(I.getType());
2249 setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Condition));
2252 void SelectionDAGLowering::visitSelect(User &I) {
2253 SmallVector<EVT, 4> ValueVTs;
2254 ComputeValueVTs(TLI, I.getType(), ValueVTs);
2255 unsigned NumValues = ValueVTs.size();
2256 if (NumValues != 0) {
2257 SmallVector<SDValue, 4> Values(NumValues);
2258 SDValue Cond = getValue(I.getOperand(0));
2259 SDValue TrueVal = getValue(I.getOperand(1));
2260 SDValue FalseVal = getValue(I.getOperand(2));
2262 for (unsigned i = 0; i != NumValues; ++i)
2263 Values[i] = DAG.getNode(ISD::SELECT, getCurDebugLoc(),
2264 TrueVal.getValueType(), Cond,
2265 SDValue(TrueVal.getNode(), TrueVal.getResNo() + i),
2266 SDValue(FalseVal.getNode(), FalseVal.getResNo() + i));
2268 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2269 DAG.getVTList(&ValueVTs[0], NumValues),
2270 &Values[0], NumValues));
2275 void SelectionDAGLowering::visitTrunc(User &I) {
2276 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
2277 SDValue N = getValue(I.getOperand(0));
2278 EVT DestVT = TLI.getValueType(I.getType());
2279 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
2282 void SelectionDAGLowering::visitZExt(User &I) {
2283 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2284 // ZExt also can't be a cast to bool for same reason. So, nothing much to do
2285 SDValue N = getValue(I.getOperand(0));
2286 EVT DestVT = TLI.getValueType(I.getType());
2287 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N));
2290 void SelectionDAGLowering::visitSExt(User &I) {
2291 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2292 // SExt also can't be a cast to bool for same reason. So, nothing much to do
2293 SDValue N = getValue(I.getOperand(0));
2294 EVT DestVT = TLI.getValueType(I.getType());
2295 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(), DestVT, N));
2298 void SelectionDAGLowering::visitFPTrunc(User &I) {
2299 // FPTrunc is never a no-op cast, no need to check
2300 SDValue N = getValue(I.getOperand(0));
2301 EVT DestVT = TLI.getValueType(I.getType());
2302 setValue(&I, DAG.getNode(ISD::FP_ROUND, getCurDebugLoc(),
2303 DestVT, N, DAG.getIntPtrConstant(0)));
2306 void SelectionDAGLowering::visitFPExt(User &I){
2307 // FPTrunc is never a no-op cast, no need to check
2308 SDValue N = getValue(I.getOperand(0));
2309 EVT DestVT = TLI.getValueType(I.getType());
2310 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurDebugLoc(), DestVT, N));
2313 void SelectionDAGLowering::visitFPToUI(User &I) {
2314 // FPToUI is never a no-op cast, no need to check
2315 SDValue N = getValue(I.getOperand(0));
2316 EVT DestVT = TLI.getValueType(I.getType());
2317 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurDebugLoc(), DestVT, N));
2320 void SelectionDAGLowering::visitFPToSI(User &I) {
2321 // FPToSI is never a no-op cast, no need to check
2322 SDValue N = getValue(I.getOperand(0));
2323 EVT DestVT = TLI.getValueType(I.getType());
2324 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurDebugLoc(), DestVT, N));
2327 void SelectionDAGLowering::visitUIToFP(User &I) {
2328 // UIToFP is never a no-op cast, no need to check
2329 SDValue N = getValue(I.getOperand(0));
2330 EVT DestVT = TLI.getValueType(I.getType());
2331 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurDebugLoc(), DestVT, N));
2334 void SelectionDAGLowering::visitSIToFP(User &I){
2335 // SIToFP is never a no-op cast, no need to check
2336 SDValue N = getValue(I.getOperand(0));
2337 EVT DestVT = TLI.getValueType(I.getType());
2338 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurDebugLoc(), DestVT, N));
2341 void SelectionDAGLowering::visitPtrToInt(User &I) {
2342 // What to do depends on the size of the integer and the size of the pointer.
2343 // We can either truncate, zero extend, or no-op, accordingly.
2344 SDValue N = getValue(I.getOperand(0));
2345 EVT SrcVT = N.getValueType();
2346 EVT DestVT = TLI.getValueType(I.getType());
2347 SDValue Result = DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT);
2348 setValue(&I, Result);
2351 void SelectionDAGLowering::visitIntToPtr(User &I) {
2352 // What to do depends on the size of the integer and the size of the pointer.
2353 // We can either truncate, zero extend, or no-op, accordingly.
2354 SDValue N = getValue(I.getOperand(0));
2355 EVT SrcVT = N.getValueType();
2356 EVT DestVT = TLI.getValueType(I.getType());
2357 setValue(&I, DAG.getZExtOrTrunc(N, getCurDebugLoc(), DestVT));
2360 void SelectionDAGLowering::visitBitCast(User &I) {
2361 SDValue N = getValue(I.getOperand(0));
2362 EVT DestVT = TLI.getValueType(I.getType());
2364 // BitCast assures us that source and destination are the same size so this
2365 // is either a BIT_CONVERT or a no-op.
2366 if (DestVT != N.getValueType())
2367 setValue(&I, DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
2368 DestVT, N)); // convert types
2370 setValue(&I, N); // noop cast.
2373 void SelectionDAGLowering::visitInsertElement(User &I) {
2374 SDValue InVec = getValue(I.getOperand(0));
2375 SDValue InVal = getValue(I.getOperand(1));
2376 SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2378 getValue(I.getOperand(2)));
2380 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurDebugLoc(),
2381 TLI.getValueType(I.getType()),
2382 InVec, InVal, InIdx));
2385 void SelectionDAGLowering::visitExtractElement(User &I) {
2386 SDValue InVec = getValue(I.getOperand(0));
2387 SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2389 getValue(I.getOperand(1)));
2390 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2391 TLI.getValueType(I.getType()), InVec, InIdx));
2395 // Utility for visitShuffleVector - Returns true if the mask is mask starting
2396 // from SIndx and increasing to the element length (undefs are allowed).
2397 static bool SequentialMask(SmallVectorImpl<int> &Mask, unsigned SIndx) {
2398 unsigned MaskNumElts = Mask.size();
2399 for (unsigned i = 0; i != MaskNumElts; ++i)
2400 if ((Mask[i] >= 0) && (Mask[i] != (int)(i + SIndx)))
2405 void SelectionDAGLowering::visitShuffleVector(User &I) {
2406 SmallVector<int, 8> Mask;
2407 SDValue Src1 = getValue(I.getOperand(0));
2408 SDValue Src2 = getValue(I.getOperand(1));
2410 // Convert the ConstantVector mask operand into an array of ints, with -1
2411 // representing undef values.
2412 SmallVector<Constant*, 8> MaskElts;
2413 cast<Constant>(I.getOperand(2))->getVectorElements(*DAG.getContext(),
2415 unsigned MaskNumElts = MaskElts.size();
2416 for (unsigned i = 0; i != MaskNumElts; ++i) {
2417 if (isa<UndefValue>(MaskElts[i]))
2420 Mask.push_back(cast<ConstantInt>(MaskElts[i])->getSExtValue());
2423 EVT VT = TLI.getValueType(I.getType());
2424 EVT SrcVT = Src1.getValueType();
2425 unsigned SrcNumElts = SrcVT.getVectorNumElements();
2427 if (SrcNumElts == MaskNumElts) {
2428 setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2433 // Normalize the shuffle vector since mask and vector length don't match.
2434 if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) {
2435 // Mask is longer than the source vectors and is a multiple of the source
2436 // vectors. We can use concatenate vector to make the mask and vectors
2438 if (SrcNumElts*2 == MaskNumElts && SequentialMask(Mask, 0)) {
2439 // The shuffle is concatenating two vectors together.
2440 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(),
2445 // Pad both vectors with undefs to make them the same length as the mask.
2446 unsigned NumConcat = MaskNumElts / SrcNumElts;
2447 bool Src1U = Src1.getOpcode() == ISD::UNDEF;
2448 bool Src2U = Src2.getOpcode() == ISD::UNDEF;
2449 SDValue UndefVal = DAG.getUNDEF(SrcVT);
2451 SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
2452 SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
2456 Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2457 getCurDebugLoc(), VT,
2458 &MOps1[0], NumConcat);
2459 Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2460 getCurDebugLoc(), VT,
2461 &MOps2[0], NumConcat);
2463 // Readjust mask for new input vector length.
2464 SmallVector<int, 8> MappedOps;
2465 for (unsigned i = 0; i != MaskNumElts; ++i) {
2467 if (Idx < (int)SrcNumElts)
2468 MappedOps.push_back(Idx);
2470 MappedOps.push_back(Idx + MaskNumElts - SrcNumElts);
2472 setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2477 if (SrcNumElts > MaskNumElts) {
2478 // Analyze the access pattern of the vector to see if we can extract
2479 // two subvectors and do the shuffle. The analysis is done by calculating
2480 // the range of elements the mask access on both vectors.
2481 int MinRange[2] = { SrcNumElts+1, SrcNumElts+1};
2482 int MaxRange[2] = {-1, -1};
2484 for (unsigned i = 0; i != MaskNumElts; ++i) {
2490 if (Idx >= (int)SrcNumElts) {
2494 if (Idx > MaxRange[Input])
2495 MaxRange[Input] = Idx;
2496 if (Idx < MinRange[Input])
2497 MinRange[Input] = Idx;
2500 // Check if the access is smaller than the vector size and can we find
2501 // a reasonable extract index.
2502 int RangeUse[2] = { 2, 2 }; // 0 = Unused, 1 = Extract, 2 = Can not Extract.
2503 int StartIdx[2]; // StartIdx to extract from
2504 for (int Input=0; Input < 2; ++Input) {
2505 if (MinRange[Input] == (int)(SrcNumElts+1) && MaxRange[Input] == -1) {
2506 RangeUse[Input] = 0; // Unused
2507 StartIdx[Input] = 0;
2508 } else if (MaxRange[Input] - MinRange[Input] < (int)MaskNumElts) {
2509 // Fits within range but we should see if we can find a good
2510 // start index that is a multiple of the mask length.
2511 if (MaxRange[Input] < (int)MaskNumElts) {
2512 RangeUse[Input] = 1; // Extract from beginning of the vector
2513 StartIdx[Input] = 0;
2515 StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
2516 if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
2517 StartIdx[Input] + MaskNumElts < SrcNumElts)
2518 RangeUse[Input] = 1; // Extract from a multiple of the mask length.
2523 if (RangeUse[0] == 0 && RangeUse[1] == 0) {
2524 setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
2527 else if (RangeUse[0] < 2 && RangeUse[1] < 2) {
2528 // Extract appropriate subvector and generate a vector shuffle
2529 for (int Input=0; Input < 2; ++Input) {
2530 SDValue& Src = Input == 0 ? Src1 : Src2;
2531 if (RangeUse[Input] == 0) {
2532 Src = DAG.getUNDEF(VT);
2534 Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, getCurDebugLoc(), VT,
2535 Src, DAG.getIntPtrConstant(StartIdx[Input]));
2538 // Calculate new mask.
2539 SmallVector<int, 8> MappedOps;
2540 for (unsigned i = 0; i != MaskNumElts; ++i) {
2543 MappedOps.push_back(Idx);
2544 else if (Idx < (int)SrcNumElts)
2545 MappedOps.push_back(Idx - StartIdx[0]);
2547 MappedOps.push_back(Idx - SrcNumElts - StartIdx[1] + MaskNumElts);
2549 setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2555 // We can't use either concat vectors or extract subvectors so fall back to
2556 // replacing the shuffle with extract and build vector.
2557 // to insert and build vector.
2558 EVT EltVT = VT.getVectorElementType();
2559 EVT PtrVT = TLI.getPointerTy();
2560 SmallVector<SDValue,8> Ops;
2561 for (unsigned i = 0; i != MaskNumElts; ++i) {
2563 Ops.push_back(DAG.getUNDEF(EltVT));
2566 if (Idx < (int)SrcNumElts)
2567 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2568 EltVT, Src1, DAG.getConstant(Idx, PtrVT)));
2570 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2572 DAG.getConstant(Idx - SrcNumElts, PtrVT)));
2575 setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
2576 VT, &Ops[0], Ops.size()));
2579 void SelectionDAGLowering::visitInsertValue(InsertValueInst &I) {
2580 const Value *Op0 = I.getOperand(0);
2581 const Value *Op1 = I.getOperand(1);
2582 const Type *AggTy = I.getType();
2583 const Type *ValTy = Op1->getType();
2584 bool IntoUndef = isa<UndefValue>(Op0);
2585 bool FromUndef = isa<UndefValue>(Op1);
2587 unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2588 I.idx_begin(), I.idx_end());
2590 SmallVector<EVT, 4> AggValueVTs;
2591 ComputeValueVTs(TLI, AggTy, AggValueVTs);
2592 SmallVector<EVT, 4> ValValueVTs;
2593 ComputeValueVTs(TLI, ValTy, ValValueVTs);
2595 unsigned NumAggValues = AggValueVTs.size();
2596 unsigned NumValValues = ValValueVTs.size();
2597 SmallVector<SDValue, 4> Values(NumAggValues);
2599 SDValue Agg = getValue(Op0);
2600 SDValue Val = getValue(Op1);
2602 // Copy the beginning value(s) from the original aggregate.
2603 for (; i != LinearIndex; ++i)
2604 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2605 SDValue(Agg.getNode(), Agg.getResNo() + i);
2606 // Copy values from the inserted value(s).
2607 for (; i != LinearIndex + NumValValues; ++i)
2608 Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2609 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
2610 // Copy remaining value(s) from the original aggregate.
2611 for (; i != NumAggValues; ++i)
2612 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2613 SDValue(Agg.getNode(), Agg.getResNo() + i);
2615 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2616 DAG.getVTList(&AggValueVTs[0], NumAggValues),
2617 &Values[0], NumAggValues));
2620 void SelectionDAGLowering::visitExtractValue(ExtractValueInst &I) {
2621 const Value *Op0 = I.getOperand(0);
2622 const Type *AggTy = Op0->getType();
2623 const Type *ValTy = I.getType();
2624 bool OutOfUndef = isa<UndefValue>(Op0);
2626 unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2627 I.idx_begin(), I.idx_end());
2629 SmallVector<EVT, 4> ValValueVTs;
2630 ComputeValueVTs(TLI, ValTy, ValValueVTs);
2632 unsigned NumValValues = ValValueVTs.size();
2633 SmallVector<SDValue, 4> Values(NumValValues);
2635 SDValue Agg = getValue(Op0);
2636 // Copy out the selected value(s).
2637 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
2638 Values[i - LinearIndex] =
2640 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
2641 SDValue(Agg.getNode(), Agg.getResNo() + i);
2643 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2644 DAG.getVTList(&ValValueVTs[0], NumValValues),
2645 &Values[0], NumValValues));
2649 void SelectionDAGLowering::visitGetElementPtr(User &I) {
2650 SDValue N = getValue(I.getOperand(0));
2651 const Type *Ty = I.getOperand(0)->getType();
2653 for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
2656 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
2657 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
2660 uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field);
2661 N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2662 DAG.getIntPtrConstant(Offset));
2664 Ty = StTy->getElementType(Field);
2666 Ty = cast<SequentialType>(Ty)->getElementType();
2668 // If this is a constant subscript, handle it quickly.
2669 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
2670 if (CI->getZExtValue() == 0) continue;
2672 TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
2674 EVT PTy = TLI.getPointerTy();
2675 unsigned PtrBits = PTy.getSizeInBits();
2677 OffsVal = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2679 DAG.getConstant(Offs, MVT::i64));
2681 OffsVal = DAG.getIntPtrConstant(Offs);
2682 N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2687 // N = N + Idx * ElementSize;
2688 APInt ElementSize = APInt(TLI.getPointerTy().getSizeInBits(),
2689 TD->getTypeAllocSize(Ty));
2690 SDValue IdxN = getValue(Idx);
2692 // If the index is smaller or larger than intptr_t, truncate or extend
2694 IdxN = DAG.getSExtOrTrunc(IdxN, getCurDebugLoc(), N.getValueType());
2696 // If this is a multiply by a power of two, turn it into a shl
2697 // immediately. This is a very common case.
2698 if (ElementSize != 1) {
2699 if (ElementSize.isPowerOf2()) {
2700 unsigned Amt = ElementSize.logBase2();
2701 IdxN = DAG.getNode(ISD::SHL, getCurDebugLoc(),
2702 N.getValueType(), IdxN,
2703 DAG.getConstant(Amt, TLI.getPointerTy()));
2705 SDValue Scale = DAG.getConstant(ElementSize, TLI.getPointerTy());
2706 IdxN = DAG.getNode(ISD::MUL, getCurDebugLoc(),
2707 N.getValueType(), IdxN, Scale);
2711 N = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2712 N.getValueType(), N, IdxN);
2718 void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
2719 // If this is a fixed sized alloca in the entry block of the function,
2720 // allocate it statically on the stack.
2721 if (FuncInfo.StaticAllocaMap.count(&I))
2722 return; // getValue will auto-populate this.
2724 const Type *Ty = I.getAllocatedType();
2725 uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
2727 std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
2730 SDValue AllocSize = getValue(I.getArraySize());
2732 AllocSize = DAG.getNode(ISD::MUL, getCurDebugLoc(), AllocSize.getValueType(),
2734 DAG.getConstant(TySize, AllocSize.getValueType()));
2738 EVT IntPtr = TLI.getPointerTy();
2739 AllocSize = DAG.getZExtOrTrunc(AllocSize, getCurDebugLoc(), IntPtr);
2741 // Handle alignment. If the requested alignment is less than or equal to
2742 // the stack alignment, ignore it. If the size is greater than or equal to
2743 // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
2744 unsigned StackAlign =
2745 TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
2746 if (Align <= StackAlign)
2749 // Round the size of the allocation up to the stack alignment size
2750 // by add SA-1 to the size.
2751 AllocSize = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2752 AllocSize.getValueType(), AllocSize,
2753 DAG.getIntPtrConstant(StackAlign-1));
2754 // Mask out the low bits for alignment purposes.
2755 AllocSize = DAG.getNode(ISD::AND, getCurDebugLoc(),
2756 AllocSize.getValueType(), AllocSize,
2757 DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1)));
2759 SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
2760 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
2761 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, getCurDebugLoc(),
2764 DAG.setRoot(DSA.getValue(1));
2766 // Inform the Frame Information that we have just allocated a variable-sized
2768 FuncInfo.MF->getFrameInfo()->CreateVariableSizedObject();
2771 void SelectionDAGLowering::visitLoad(LoadInst &I) {
2772 const Value *SV = I.getOperand(0);
2773 SDValue Ptr = getValue(SV);
2775 const Type *Ty = I.getType();
2776 bool isVolatile = I.isVolatile();
2777 unsigned Alignment = I.getAlignment();
2779 SmallVector<EVT, 4> ValueVTs;
2780 SmallVector<uint64_t, 4> Offsets;
2781 ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
2782 unsigned NumValues = ValueVTs.size();
2787 bool ConstantMemory = false;
2789 // Serialize volatile loads with other side effects.
2791 else if (AA->pointsToConstantMemory(SV)) {
2792 // Do not serialize (non-volatile) loads of constant memory with anything.
2793 Root = DAG.getEntryNode();
2794 ConstantMemory = true;
2796 // Do not serialize non-volatile loads against each other.
2797 Root = DAG.getRoot();
2800 SmallVector<SDValue, 4> Values(NumValues);
2801 SmallVector<SDValue, 4> Chains(NumValues);
2802 EVT PtrVT = Ptr.getValueType();
2803 for (unsigned i = 0; i != NumValues; ++i) {
2804 SDValue L = DAG.getLoad(ValueVTs[i], getCurDebugLoc(), Root,
2805 DAG.getNode(ISD::ADD, getCurDebugLoc(),
2807 DAG.getConstant(Offsets[i], PtrVT)),
2808 SV, Offsets[i], isVolatile, Alignment);
2810 Chains[i] = L.getValue(1);
2813 if (!ConstantMemory) {
2814 SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
2816 &Chains[0], NumValues);
2820 PendingLoads.push_back(Chain);
2823 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2824 DAG.getVTList(&ValueVTs[0], NumValues),
2825 &Values[0], NumValues));
2829 void SelectionDAGLowering::visitStore(StoreInst &I) {
2830 Value *SrcV = I.getOperand(0);
2831 Value *PtrV = I.getOperand(1);
2833 SmallVector<EVT, 4> ValueVTs;
2834 SmallVector<uint64_t, 4> Offsets;
2835 ComputeValueVTs(TLI, SrcV->getType(), ValueVTs, &Offsets);
2836 unsigned NumValues = ValueVTs.size();
2840 // Get the lowered operands. Note that we do this after
2841 // checking if NumResults is zero, because with zero results
2842 // the operands won't have values in the map.
2843 SDValue Src = getValue(SrcV);
2844 SDValue Ptr = getValue(PtrV);
2846 SDValue Root = getRoot();
2847 SmallVector<SDValue, 4> Chains(NumValues);
2848 EVT PtrVT = Ptr.getValueType();
2849 bool isVolatile = I.isVolatile();
2850 unsigned Alignment = I.getAlignment();
2851 for (unsigned i = 0; i != NumValues; ++i)
2852 Chains[i] = DAG.getStore(Root, getCurDebugLoc(),
2853 SDValue(Src.getNode(), Src.getResNo() + i),
2854 DAG.getNode(ISD::ADD, getCurDebugLoc(),
2856 DAG.getConstant(Offsets[i], PtrVT)),
2857 PtrV, Offsets[i], isVolatile, Alignment);
2859 DAG.setRoot(DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
2860 MVT::Other, &Chains[0], NumValues));
2863 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
2865 void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
2866 unsigned Intrinsic) {
2867 bool HasChain = !I.doesNotAccessMemory();
2868 bool OnlyLoad = HasChain && I.onlyReadsMemory();
2870 // Build the operand list.
2871 SmallVector<SDValue, 8> Ops;
2872 if (HasChain) { // If this intrinsic has side-effects, chainify it.
2874 // We don't need to serialize loads against other loads.
2875 Ops.push_back(DAG.getRoot());
2877 Ops.push_back(getRoot());
2881 // Info is set by getTgtMemInstrinsic
2882 TargetLowering::IntrinsicInfo Info;
2883 bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);
2885 // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
2886 if (!IsTgtIntrinsic)
2887 Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
2889 // Add all operands of the call to the operand list.
2890 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
2891 SDValue Op = getValue(I.getOperand(i));
2892 assert(TLI.isTypeLegal(Op.getValueType()) &&
2893 "Intrinsic uses a non-legal type?");
2897 SmallVector<EVT, 4> ValueVTs;
2898 ComputeValueVTs(TLI, I.getType(), ValueVTs);
2900 for (unsigned Val = 0, E = ValueVTs.size(); Val != E; ++Val) {
2901 assert(TLI.isTypeLegal(ValueVTs[Val]) &&
2902 "Intrinsic uses a non-legal type?");
2906 ValueVTs.push_back(MVT::Other);
2908 SDVTList VTs = DAG.getVTList(ValueVTs.data(), ValueVTs.size());
2912 if (IsTgtIntrinsic) {
2913 // This is target intrinsic that touches memory
2914 Result = DAG.getMemIntrinsicNode(Info.opc, getCurDebugLoc(),
2915 VTs, &Ops[0], Ops.size(),
2916 Info.memVT, Info.ptrVal, Info.offset,
2917 Info.align, Info.vol,
2918 Info.readMem, Info.writeMem);
2921 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurDebugLoc(),
2922 VTs, &Ops[0], Ops.size());
2923 else if (I.getType() != Type::getVoidTy(*DAG.getContext()))
2924 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurDebugLoc(),
2925 VTs, &Ops[0], Ops.size());
2927 Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurDebugLoc(),
2928 VTs, &Ops[0], Ops.size());
2931 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
2933 PendingLoads.push_back(Chain);
2937 if (I.getType() != Type::getVoidTy(*DAG.getContext())) {
2938 if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
2939 EVT VT = TLI.getValueType(PTy);
2940 Result = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), VT, Result);
2942 setValue(&I, Result);
2946 /// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
2947 static GlobalVariable *ExtractTypeInfo(Value *V) {
2948 V = V->stripPointerCasts();
2949 GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
2950 assert ((GV || isa<ConstantPointerNull>(V)) &&
2951 "TypeInfo must be a global variable or NULL");
2957 /// AddCatchInfo - Extract the personality and type infos from an eh.selector
2958 /// call, and add them to the specified machine basic block.
2959 void AddCatchInfo(CallInst &I, MachineModuleInfo *MMI,
2960 MachineBasicBlock *MBB) {
2961 // Inform the MachineModuleInfo of the personality for this landing pad.
2962 ConstantExpr *CE = cast<ConstantExpr>(I.getOperand(2));
2963 assert(CE->getOpcode() == Instruction::BitCast &&
2964 isa<Function>(CE->getOperand(0)) &&
2965 "Personality should be a function");
2966 MMI->addPersonality(MBB, cast<Function>(CE->getOperand(0)));
2968 // Gather all the type infos for this landing pad and pass them along to
2969 // MachineModuleInfo.
2970 std::vector<GlobalVariable *> TyInfo;
2971 unsigned N = I.getNumOperands();
2973 for (unsigned i = N - 1; i > 2; --i) {
2974 if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(i))) {
2975 unsigned FilterLength = CI->getZExtValue();
2976 unsigned FirstCatch = i + FilterLength + !FilterLength;
2977 assert (FirstCatch <= N && "Invalid filter length");
2979 if (FirstCatch < N) {
2980 TyInfo.reserve(N - FirstCatch);
2981 for (unsigned j = FirstCatch; j < N; ++j)
2982 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
2983 MMI->addCatchTypeInfo(MBB, TyInfo);
2987 if (!FilterLength) {
2989 MMI->addCleanup(MBB);
2992 TyInfo.reserve(FilterLength - 1);
2993 for (unsigned j = i + 1; j < FirstCatch; ++j)
2994 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
2995 MMI->addFilterTypeInfo(MBB, TyInfo);
3004 TyInfo.reserve(N - 3);
3005 for (unsigned j = 3; j < N; ++j)
3006 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
3007 MMI->addCatchTypeInfo(MBB, TyInfo);
3013 /// GetSignificand - Get the significand and build it into a floating-point
3014 /// number with exponent of 1:
3016 /// Op = (Op & 0x007fffff) | 0x3f800000;
3018 /// where Op is the hexidecimal representation of floating point value.
3020 GetSignificand(SelectionDAG &DAG, SDValue Op, DebugLoc dl) {
3021 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3022 DAG.getConstant(0x007fffff, MVT::i32));
3023 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
3024 DAG.getConstant(0x3f800000, MVT::i32));
3025 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t2);
3028 /// GetExponent - Get the exponent:
3030 /// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
3032 /// where Op is the hexidecimal representation of floating point value.
3034 GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
3036 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3037 DAG.getConstant(0x7f800000, MVT::i32));
3038 SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0,
3039 DAG.getConstant(23, TLI.getPointerTy()));
3040 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
3041 DAG.getConstant(127, MVT::i32));
3042 return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
3045 /// getF32Constant - Get 32-bit floating point constant.
3047 getF32Constant(SelectionDAG &DAG, unsigned Flt) {
3048 return DAG.getConstantFP(APFloat(APInt(32, Flt)), MVT::f32);
3051 /// Inlined utility function to implement binary input atomic intrinsics for
3052 /// visitIntrinsicCall: I is a call instruction
3053 /// Op is the associated NodeType for I
3055 SelectionDAGLowering::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) {
3056 SDValue Root = getRoot();
3058 DAG.getAtomic(Op, getCurDebugLoc(),
3059 getValue(I.getOperand(2)).getValueType().getSimpleVT(),
3061 getValue(I.getOperand(1)),
3062 getValue(I.getOperand(2)),
3065 DAG.setRoot(L.getValue(1));
3069 // implVisitAluOverflow - Lower arithmetic overflow instrinsics.
3071 SelectionDAGLowering::implVisitAluOverflow(CallInst &I, ISD::NodeType Op) {
3072 SDValue Op1 = getValue(I.getOperand(1));
3073 SDValue Op2 = getValue(I.getOperand(2));
3075 SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
3076 SDValue Result = DAG.getNode(Op, getCurDebugLoc(), VTs, Op1, Op2);
3078 setValue(&I, Result);
3082 /// visitExp - Lower an exp intrinsic. Handles the special sequences for
3083 /// limited-precision mode.
3085 SelectionDAGLowering::visitExp(CallInst &I) {
3087 DebugLoc dl = getCurDebugLoc();
3089 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3090 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3091 SDValue Op = getValue(I.getOperand(1));
3093 // Put the exponent in the right bit position for later addition to the
3096 // #define LOG2OFe 1.4426950f
3097 // IntegerPartOfX = ((int32_t)(X * LOG2OFe));
3098 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3099 getF32Constant(DAG, 0x3fb8aa3b));
3100 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3102 // FractionalPartOfX = (X * LOG2OFe) - (float)IntegerPartOfX;
3103 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3104 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3106 // IntegerPartOfX <<= 23;
3107 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3108 DAG.getConstant(23, TLI.getPointerTy()));
3110 if (LimitFloatPrecision <= 6) {
3111 // For floating-point precision of 6:
3113 // TwoToFractionalPartOfX =
3115 // (0.735607626f + 0.252464424f * x) * x;
3117 // error 0.0144103317, which is 6 bits
3118 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3119 getF32Constant(DAG, 0x3e814304));
3120 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3121 getF32Constant(DAG, 0x3f3c50c8));
3122 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3123 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3124 getF32Constant(DAG, 0x3f7f5e7e));
3125 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t5);
3127 // Add the exponent into the result in integer domain.
3128 SDValue t6 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3129 TwoToFracPartOfX, IntegerPartOfX);
3131 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t6);
3132 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3133 // For floating-point precision of 12:
3135 // TwoToFractionalPartOfX =
3138 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3140 // 0.000107046256 error, which is 13 to 14 bits
3141 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3142 getF32Constant(DAG, 0x3da235e3));
3143 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3144 getF32Constant(DAG, 0x3e65b8f3));
3145 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3146 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3147 getF32Constant(DAG, 0x3f324b07));
3148 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3149 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3150 getF32Constant(DAG, 0x3f7ff8fd));
3151 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t7);
3153 // Add the exponent into the result in integer domain.
3154 SDValue t8 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3155 TwoToFracPartOfX, IntegerPartOfX);
3157 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t8);
3158 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3159 // For floating-point precision of 18:
3161 // TwoToFractionalPartOfX =
3165 // (0.554906021e-1f +
3166 // (0.961591928e-2f +
3167 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3169 // error 2.47208000*10^(-7), which is better than 18 bits
3170 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3171 getF32Constant(DAG, 0x3924b03e));
3172 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3173 getF32Constant(DAG, 0x3ab24b87));
3174 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3175 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3176 getF32Constant(DAG, 0x3c1d8c17));
3177 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3178 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3179 getF32Constant(DAG, 0x3d634a1d));
3180 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3181 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3182 getF32Constant(DAG, 0x3e75fe14));
3183 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3184 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3185 getF32Constant(DAG, 0x3f317234));
3186 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3187 SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3188 getF32Constant(DAG, 0x3f800000));
3189 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,
3192 // Add the exponent into the result in integer domain.
3193 SDValue t14 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3194 TwoToFracPartOfX, IntegerPartOfX);
3196 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t14);
3199 // No special expansion.
3200 result = DAG.getNode(ISD::FEXP, dl,
3201 getValue(I.getOperand(1)).getValueType(),
3202 getValue(I.getOperand(1)));
3205 setValue(&I, result);
3208 /// visitLog - Lower a log intrinsic. Handles the special sequences for
3209 /// limited-precision mode.
3211 SelectionDAGLowering::visitLog(CallInst &I) {
3213 DebugLoc dl = getCurDebugLoc();
3215 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3216 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3217 SDValue Op = getValue(I.getOperand(1));
3218 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3220 // Scale the exponent by log(2) [0.69314718f].
3221 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
3222 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3223 getF32Constant(DAG, 0x3f317218));
3225 // Get the significand and build it into a floating-point number with
3227 SDValue X = GetSignificand(DAG, Op1, dl);
3229 if (LimitFloatPrecision <= 6) {
3230 // For floating-point precision of 6:
3234 // (1.4034025f - 0.23903021f * x) * x;
3236 // error 0.0034276066, which is better than 8 bits
3237 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3238 getF32Constant(DAG, 0xbe74c456));
3239 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3240 getF32Constant(DAG, 0x3fb3a2b1));
3241 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3242 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3243 getF32Constant(DAG, 0x3f949a29));
3245 result = DAG.getNode(ISD::FADD, dl,
3246 MVT::f32, LogOfExponent, LogOfMantissa);
3247 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3248 // For floating-point precision of 12:
3254 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
3256 // error 0.000061011436, which is 14 bits
3257 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3258 getF32Constant(DAG, 0xbd67b6d6));
3259 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3260 getF32Constant(DAG, 0x3ee4f4b8));
3261 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3262 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3263 getF32Constant(DAG, 0x3fbc278b));
3264 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3265 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3266 getF32Constant(DAG, 0x40348e95));
3267 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3268 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3269 getF32Constant(DAG, 0x3fdef31a));
3271 result = DAG.getNode(ISD::FADD, dl,
3272 MVT::f32, LogOfExponent, LogOfMantissa);
3273 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3274 // For floating-point precision of 18:
3282 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
3284 // error 0.0000023660568, which is better than 18 bits
3285 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3286 getF32Constant(DAG, 0xbc91e5ac));
3287 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3288 getF32Constant(DAG, 0x3e4350aa));
3289 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3290 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3291 getF32Constant(DAG, 0x3f60d3e3));
3292 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3293 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3294 getF32Constant(DAG, 0x4011cdf0));
3295 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3296 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3297 getF32Constant(DAG, 0x406cfd1c));
3298 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3299 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3300 getF32Constant(DAG, 0x408797cb));
3301 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3302 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3303 getF32Constant(DAG, 0x4006dcab));
3305 result = DAG.getNode(ISD::FADD, dl,
3306 MVT::f32, LogOfExponent, LogOfMantissa);
3309 // No special expansion.
3310 result = DAG.getNode(ISD::FLOG, dl,
3311 getValue(I.getOperand(1)).getValueType(),
3312 getValue(I.getOperand(1)));
3315 setValue(&I, result);
3318 /// visitLog2 - Lower a log2 intrinsic. Handles the special sequences for
3319 /// limited-precision mode.
3321 SelectionDAGLowering::visitLog2(CallInst &I) {
3323 DebugLoc dl = getCurDebugLoc();
3325 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3326 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3327 SDValue Op = getValue(I.getOperand(1));
3328 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3330 // Get the exponent.
3331 SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
3333 // Get the significand and build it into a floating-point number with
3335 SDValue X = GetSignificand(DAG, Op1, dl);
3337 // Different possible minimax approximations of significand in
3338 // floating-point for various degrees of accuracy over [1,2].
3339 if (LimitFloatPrecision <= 6) {
3340 // For floating-point precision of 6:
3342 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
3344 // error 0.0049451742, which is more than 7 bits
3345 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3346 getF32Constant(DAG, 0xbeb08fe0));
3347 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3348 getF32Constant(DAG, 0x40019463));
3349 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3350 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3351 getF32Constant(DAG, 0x3fd6633d));
3353 result = DAG.getNode(ISD::FADD, dl,
3354 MVT::f32, LogOfExponent, Log2ofMantissa);
3355 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3356 // For floating-point precision of 12:
3362 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
3364 // error 0.0000876136000, which is better than 13 bits
3365 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3366 getF32Constant(DAG, 0xbda7262e));
3367 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3368 getF32Constant(DAG, 0x3f25280b));
3369 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3370 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3371 getF32Constant(DAG, 0x4007b923));
3372 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3373 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3374 getF32Constant(DAG, 0x40823e2f));
3375 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3376 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3377 getF32Constant(DAG, 0x4020d29c));
3379 result = DAG.getNode(ISD::FADD, dl,
3380 MVT::f32, LogOfExponent, Log2ofMantissa);
3381 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3382 // For floating-point precision of 18:
3391 // 0.25691327e-1f * x) * x) * x) * x) * x) * x;
3393 // error 0.0000018516, which is better than 18 bits
3394 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3395 getF32Constant(DAG, 0xbcd2769e));
3396 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3397 getF32Constant(DAG, 0x3e8ce0b9));
3398 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3399 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3400 getF32Constant(DAG, 0x3fa22ae7));
3401 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3402 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3403 getF32Constant(DAG, 0x40525723));
3404 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3405 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3406 getF32Constant(DAG, 0x40aaf200));
3407 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3408 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3409 getF32Constant(DAG, 0x40c39dad));
3410 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3411 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3412 getF32Constant(DAG, 0x4042902c));
3414 result = DAG.getNode(ISD::FADD, dl,
3415 MVT::f32, LogOfExponent, Log2ofMantissa);
3418 // No special expansion.
3419 result = DAG.getNode(ISD::FLOG2, dl,
3420 getValue(I.getOperand(1)).getValueType(),
3421 getValue(I.getOperand(1)));
3424 setValue(&I, result);
3427 /// visitLog10 - Lower a log10 intrinsic. Handles the special sequences for
3428 /// limited-precision mode.
3430 SelectionDAGLowering::visitLog10(CallInst &I) {
3432 DebugLoc dl = getCurDebugLoc();
3434 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3435 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3436 SDValue Op = getValue(I.getOperand(1));
3437 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3439 // Scale the exponent by log10(2) [0.30102999f].
3440 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
3441 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3442 getF32Constant(DAG, 0x3e9a209a));
3444 // Get the significand and build it into a floating-point number with
3446 SDValue X = GetSignificand(DAG, Op1, dl);
3448 if (LimitFloatPrecision <= 6) {
3449 // For floating-point precision of 6:
3451 // Log10ofMantissa =
3453 // (0.60948995f - 0.10380950f * x) * x;
3455 // error 0.0014886165, which is 6 bits
3456 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3457 getF32Constant(DAG, 0xbdd49a13));
3458 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3459 getF32Constant(DAG, 0x3f1c0789));
3460 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3461 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3462 getF32Constant(DAG, 0x3f011300));
3464 result = DAG.getNode(ISD::FADD, dl,
3465 MVT::f32, LogOfExponent, Log10ofMantissa);
3466 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3467 // For floating-point precision of 12:
3469 // Log10ofMantissa =
3472 // (-0.31664806f + 0.47637168e-1f * x) * x) * x;
3474 // error 0.00019228036, which is better than 12 bits
3475 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3476 getF32Constant(DAG, 0x3d431f31));
3477 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3478 getF32Constant(DAG, 0x3ea21fb2));
3479 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3480 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3481 getF32Constant(DAG, 0x3f6ae232));
3482 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3483 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3484 getF32Constant(DAG, 0x3f25f7c3));
3486 result = DAG.getNode(ISD::FADD, dl,
3487 MVT::f32, LogOfExponent, Log10ofMantissa);
3488 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3489 // For floating-point precision of 18:
3491 // Log10ofMantissa =
3496 // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
3498 // error 0.0000037995730, which is better than 18 bits
3499 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3500 getF32Constant(DAG, 0x3c5d51ce));
3501 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3502 getF32Constant(DAG, 0x3e00685a));
3503 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3504 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3505 getF32Constant(DAG, 0x3efb6798));
3506 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3507 SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3508 getF32Constant(DAG, 0x3f88d192));
3509 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3510 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3511 getF32Constant(DAG, 0x3fc4316c));
3512 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3513 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
3514 getF32Constant(DAG, 0x3f57ce70));
3516 result = DAG.getNode(ISD::FADD, dl,
3517 MVT::f32, LogOfExponent, Log10ofMantissa);
3520 // No special expansion.
3521 result = DAG.getNode(ISD::FLOG10, dl,
3522 getValue(I.getOperand(1)).getValueType(),
3523 getValue(I.getOperand(1)));
3526 setValue(&I, result);
3529 /// visitExp2 - Lower an exp2 intrinsic. Handles the special sequences for
3530 /// limited-precision mode.
3532 SelectionDAGLowering::visitExp2(CallInst &I) {
3534 DebugLoc dl = getCurDebugLoc();
3536 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3537 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3538 SDValue Op = getValue(I.getOperand(1));
3540 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op);
3542 // FractionalPartOfX = x - (float)IntegerPartOfX;
3543 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3544 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, Op, t1);
3546 // IntegerPartOfX <<= 23;
3547 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3548 DAG.getConstant(23, TLI.getPointerTy()));
3550 if (LimitFloatPrecision <= 6) {
3551 // For floating-point precision of 6:
3553 // TwoToFractionalPartOfX =
3555 // (0.735607626f + 0.252464424f * x) * x;
3557 // error 0.0144103317, which is 6 bits
3558 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3559 getF32Constant(DAG, 0x3e814304));
3560 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3561 getF32Constant(DAG, 0x3f3c50c8));
3562 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3563 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3564 getF32Constant(DAG, 0x3f7f5e7e));
3565 SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
3566 SDValue TwoToFractionalPartOfX =
3567 DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
3569 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3570 MVT::f32, TwoToFractionalPartOfX);
3571 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3572 // For floating-point precision of 12:
3574 // TwoToFractionalPartOfX =
3577 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3579 // error 0.000107046256, which is 13 to 14 bits
3580 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3581 getF32Constant(DAG, 0x3da235e3));
3582 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3583 getF32Constant(DAG, 0x3e65b8f3));
3584 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3585 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3586 getF32Constant(DAG, 0x3f324b07));
3587 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3588 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3589 getF32Constant(DAG, 0x3f7ff8fd));
3590 SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
3591 SDValue TwoToFractionalPartOfX =
3592 DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
3594 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3595 MVT::f32, TwoToFractionalPartOfX);
3596 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3597 // For floating-point precision of 18:
3599 // TwoToFractionalPartOfX =
3603 // (0.554906021e-1f +
3604 // (0.961591928e-2f +
3605 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3606 // error 2.47208000*10^(-7), which is better than 18 bits
3607 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3608 getF32Constant(DAG, 0x3924b03e));
3609 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3610 getF32Constant(DAG, 0x3ab24b87));
3611 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3612 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3613 getF32Constant(DAG, 0x3c1d8c17));
3614 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3615 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3616 getF32Constant(DAG, 0x3d634a1d));
3617 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3618 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3619 getF32Constant(DAG, 0x3e75fe14));
3620 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3621 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3622 getF32Constant(DAG, 0x3f317234));
3623 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3624 SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3625 getF32Constant(DAG, 0x3f800000));
3626 SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
3627 SDValue TwoToFractionalPartOfX =
3628 DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
3630 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3631 MVT::f32, TwoToFractionalPartOfX);
3634 // No special expansion.
3635 result = DAG.getNode(ISD::FEXP2, dl,
3636 getValue(I.getOperand(1)).getValueType(),
3637 getValue(I.getOperand(1)));
3640 setValue(&I, result);
3643 /// visitPow - Lower a pow intrinsic. Handles the special sequences for
3644 /// limited-precision mode with x == 10.0f.
3646 SelectionDAGLowering::visitPow(CallInst &I) {
3648 Value *Val = I.getOperand(1);
3649 DebugLoc dl = getCurDebugLoc();
3650 bool IsExp10 = false;
3652 if (getValue(Val).getValueType() == MVT::f32 &&
3653 getValue(I.getOperand(2)).getValueType() == MVT::f32 &&
3654 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3655 if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(Val))) {
3656 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3658 IsExp10 = CFP->getValueAPF().bitwiseIsEqual(Ten);
3663 if (IsExp10 && LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3664 SDValue Op = getValue(I.getOperand(2));
3666 // Put the exponent in the right bit position for later addition to the
3669 // #define LOG2OF10 3.3219281f
3670 // IntegerPartOfX = (int32_t)(x * LOG2OF10);
3671 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3672 getF32Constant(DAG, 0x40549a78));
3673 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3675 // FractionalPartOfX = x - (float)IntegerPartOfX;
3676 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3677 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3679 // IntegerPartOfX <<= 23;
3680 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3681 DAG.getConstant(23, TLI.getPointerTy()));
3683 if (LimitFloatPrecision <= 6) {
3684 // For floating-point precision of 6:
3686 // twoToFractionalPartOfX =
3688 // (0.735607626f + 0.252464424f * x) * x;
3690 // error 0.0144103317, which is 6 bits
3691 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3692 getF32Constant(DAG, 0x3e814304));
3693 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3694 getF32Constant(DAG, 0x3f3c50c8));
3695 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3696 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3697 getF32Constant(DAG, 0x3f7f5e7e));
3698 SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
3699 SDValue TwoToFractionalPartOfX =
3700 DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
3702 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3703 MVT::f32, TwoToFractionalPartOfX);
3704 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3705 // For floating-point precision of 12:
3707 // TwoToFractionalPartOfX =
3710 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3712 // error 0.000107046256, which is 13 to 14 bits
3713 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3714 getF32Constant(DAG, 0x3da235e3));
3715 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3716 getF32Constant(DAG, 0x3e65b8f3));
3717 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3718 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3719 getF32Constant(DAG, 0x3f324b07));
3720 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3721 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3722 getF32Constant(DAG, 0x3f7ff8fd));
3723 SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
3724 SDValue TwoToFractionalPartOfX =
3725 DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
3727 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3728 MVT::f32, TwoToFractionalPartOfX);
3729 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3730 // For floating-point precision of 18:
3732 // TwoToFractionalPartOfX =
3736 // (0.554906021e-1f +
3737 // (0.961591928e-2f +
3738 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3739 // error 2.47208000*10^(-7), which is better than 18 bits
3740 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3741 getF32Constant(DAG, 0x3924b03e));
3742 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3743 getF32Constant(DAG, 0x3ab24b87));
3744 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3745 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3746 getF32Constant(DAG, 0x3c1d8c17));
3747 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3748 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3749 getF32Constant(DAG, 0x3d634a1d));
3750 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3751 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3752 getF32Constant(DAG, 0x3e75fe14));
3753 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3754 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3755 getF32Constant(DAG, 0x3f317234));
3756 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3757 SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3758 getF32Constant(DAG, 0x3f800000));
3759 SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
3760 SDValue TwoToFractionalPartOfX =
3761 DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
3763 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3764 MVT::f32, TwoToFractionalPartOfX);
3767 // No special expansion.
3768 result = DAG.getNode(ISD::FPOW, dl,
3769 getValue(I.getOperand(1)).getValueType(),
3770 getValue(I.getOperand(1)),
3771 getValue(I.getOperand(2)));
3774 setValue(&I, result);
3777 /// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
3778 /// we want to emit this as a call to a named external function, return the name
3779 /// otherwise lower it and return null.
3781 SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
3782 DebugLoc dl = getCurDebugLoc();
3783 switch (Intrinsic) {
3785 // By default, turn this into a target intrinsic node.
3786 visitTargetIntrinsic(I, Intrinsic);
3788 case Intrinsic::vastart: visitVAStart(I); return 0;
3789 case Intrinsic::vaend: visitVAEnd(I); return 0;
3790 case Intrinsic::vacopy: visitVACopy(I); return 0;
3791 case Intrinsic::returnaddress:
3792 setValue(&I, DAG.getNode(ISD::RETURNADDR, dl, TLI.getPointerTy(),
3793 getValue(I.getOperand(1))));
3795 case Intrinsic::frameaddress:
3796 setValue(&I, DAG.getNode(ISD::FRAMEADDR, dl, TLI.getPointerTy(),
3797 getValue(I.getOperand(1))));
3799 case Intrinsic::setjmp:
3800 return "_setjmp"+!TLI.usesUnderscoreSetJmp();
3802 case Intrinsic::longjmp:
3803 return "_longjmp"+!TLI.usesUnderscoreLongJmp();
3805 case Intrinsic::memcpy: {
3806 SDValue Op1 = getValue(I.getOperand(1));
3807 SDValue Op2 = getValue(I.getOperand(2));
3808 SDValue Op3 = getValue(I.getOperand(3));
3809 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3810 DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
3811 I.getOperand(1), 0, I.getOperand(2), 0));
3814 case Intrinsic::memset: {
3815 SDValue Op1 = getValue(I.getOperand(1));
3816 SDValue Op2 = getValue(I.getOperand(2));
3817 SDValue Op3 = getValue(I.getOperand(3));
3818 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3819 DAG.setRoot(DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align,
3820 I.getOperand(1), 0));
3823 case Intrinsic::memmove: {
3824 SDValue Op1 = getValue(I.getOperand(1));
3825 SDValue Op2 = getValue(I.getOperand(2));
3826 SDValue Op3 = getValue(I.getOperand(3));
3827 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3829 // If the source and destination are known to not be aliases, we can
3830 // lower memmove as memcpy.
3831 uint64_t Size = -1ULL;
3832 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op3))
3833 Size = C->getZExtValue();
3834 if (AA->alias(I.getOperand(1), Size, I.getOperand(2), Size) ==
3835 AliasAnalysis::NoAlias) {
3836 DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
3837 I.getOperand(1), 0, I.getOperand(2), 0));
3841 DAG.setRoot(DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align,
3842 I.getOperand(1), 0, I.getOperand(2), 0));
3845 case Intrinsic::dbg_stoppoint: {
3846 DbgStopPointInst &SPI = cast<DbgStopPointInst>(I);
3847 if (isValidDebugInfoIntrinsic(SPI, CodeGenOpt::Default)) {
3848 MachineFunction &MF = DAG.getMachineFunction();
3849 DebugLoc Loc = ExtractDebugLocation(SPI, MF.getDebugLocInfo());
3850 setCurDebugLoc(Loc);
3852 if (OptLevel == CodeGenOpt::None)
3853 DAG.setRoot(DAG.getDbgStopPoint(Loc, getRoot(),
3860 case Intrinsic::dbg_region_start: {
3861 DwarfWriter *DW = DAG.getDwarfWriter();
3862 DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I);
3863 if (isValidDebugInfoIntrinsic(RSI, OptLevel) && DW
3864 && DW->ShouldEmitDwarfDebug()) {
3866 DW->RecordRegionStart(RSI.getContext());
3867 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3868 getRoot(), LabelID));
3872 case Intrinsic::dbg_region_end: {
3873 DwarfWriter *DW = DAG.getDwarfWriter();
3874 DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I);
3876 if (!isValidDebugInfoIntrinsic(REI, OptLevel) || !DW
3877 || !DW->ShouldEmitDwarfDebug())
3880 MachineFunction &MF = DAG.getMachineFunction();
3881 DISubprogram Subprogram(REI.getContext());
3883 if (isInlinedFnEnd(REI, MF.getFunction())) {
3884 // This is end of inlined function. Debugging information for inlined
3885 // function is not handled yet (only supported by FastISel).
3886 if (OptLevel == CodeGenOpt::None) {
3887 unsigned ID = DW->RecordInlinedFnEnd(Subprogram);
3889 // Returned ID is 0 if this is unbalanced "end of inlined
3890 // scope". This could happen if optimizer eats dbg intrinsics or
3891 // "beginning of inlined scope" is not recoginized due to missing
3892 // location info. In such cases, do ignore this region.end.
3893 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3900 DW->RecordRegionEnd(REI.getContext());
3901 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3902 getRoot(), LabelID));
3905 case Intrinsic::dbg_func_start: {
3906 DwarfWriter *DW = DAG.getDwarfWriter();
3907 DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I);
3908 if (!isValidDebugInfoIntrinsic(FSI, CodeGenOpt::None))
3911 MachineFunction &MF = DAG.getMachineFunction();
3912 // This is a beginning of an inlined function.
3913 if (isInlinedFnStart(FSI, MF.getFunction())) {
3914 if (OptLevel != CodeGenOpt::None)
3915 // FIXME: Debugging informaation for inlined function is only
3916 // supported at CodeGenOpt::Node.
3919 DebugLoc PrevLoc = CurDebugLoc;
3920 // If llvm.dbg.func.start is seen in a new block before any
3921 // llvm.dbg.stoppoint intrinsic then the location info is unknown.
3922 // FIXME : Why DebugLoc is reset at the beginning of each block ?
3923 if (PrevLoc.isUnknown())
3926 // Record the source line.
3927 setCurDebugLoc(ExtractDebugLocation(FSI, MF.getDebugLocInfo()));
3929 if (!DW || !DW->ShouldEmitDwarfDebug())
3931 DebugLocTuple PrevLocTpl = MF.getDebugLocTuple(PrevLoc);
3932 DISubprogram SP(FSI.getSubprogram());
3933 DICompileUnit CU(PrevLocTpl.Scope);
3934 unsigned LabelID = DW->RecordInlinedFnStart(SP, CU,
3937 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3938 getRoot(), LabelID));
3942 // This is a beginning of a new function.
3943 MF.setDefaultDebugLoc(ExtractDebugLocation(FSI, MF.getDebugLocInfo()));
3945 if (!DW || !DW->ShouldEmitDwarfDebug())
3947 // llvm.dbg.func_start also defines beginning of function scope.
3948 DW->RecordRegionStart(FSI.getSubprogram());
3951 case Intrinsic::dbg_declare: {
3952 if (OptLevel != CodeGenOpt::None)
3953 // FIXME: Variable debug info is not supported here.
3955 DwarfWriter *DW = DAG.getDwarfWriter();
3958 DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
3959 if (!isValidDebugInfoIntrinsic(DI, CodeGenOpt::None))
3962 MDNode *Variable = DI.getVariable();
3963 Value *Address = DI.getAddress();
3964 if (BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
3965 Address = BCI->getOperand(0);
3966 AllocaInst *AI = dyn_cast<AllocaInst>(Address);
3967 // Don't handle byval struct arguments or VLAs, for example.
3970 DenseMap<const AllocaInst*, int>::iterator SI =
3971 FuncInfo.StaticAllocaMap.find(AI);
3972 if (SI == FuncInfo.StaticAllocaMap.end())
3974 int FI = SI->second;
3975 #ifdef ATTACH_DEBUG_INFO_TO_AN_INSN
3976 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
3978 MMI->setVariableDbgInfo(Variable, FI);
3980 DW->RecordVariable(Variable, FI);
3984 case Intrinsic::eh_exception: {
3985 // Insert the EXCEPTIONADDR instruction.
3986 assert(CurMBB->isLandingPad() &&"Call to eh.exception not in landing pad!");
3987 SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
3989 Ops[0] = DAG.getRoot();
3990 SDValue Op = DAG.getNode(ISD::EXCEPTIONADDR, dl, VTs, Ops, 1);
3992 DAG.setRoot(Op.getValue(1));
3996 case Intrinsic::eh_selector: {
3997 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
3999 if (CurMBB->isLandingPad())
4000 AddCatchInfo(I, MMI, CurMBB);
4003 FuncInfo.CatchInfoLost.insert(&I);
4005 // FIXME: Mark exception selector register as live in. Hack for PR1508.
4006 unsigned Reg = TLI.getExceptionSelectorRegister();
4007 if (Reg) CurMBB->addLiveIn(Reg);
4010 // Insert the EHSELECTION instruction.
4011 SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
4013 Ops[0] = getValue(I.getOperand(1));
4015 SDValue Op = DAG.getNode(ISD::EHSELECTION, dl, VTs, Ops, 2);
4017 DAG.setRoot(Op.getValue(1));
4019 setValue(&I, DAG.getSExtOrTrunc(Op, dl, MVT::i32));
4023 case Intrinsic::eh_typeid_for: {
4024 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4027 // Find the type id for the given typeinfo.
4028 GlobalVariable *GV = ExtractTypeInfo(I.getOperand(1));
4030 unsigned TypeID = MMI->getTypeIDFor(GV);
4031 setValue(&I, DAG.getConstant(TypeID, MVT::i32));
4033 // Return something different to eh_selector.
4034 setValue(&I, DAG.getConstant(1, MVT::i32));
4040 case Intrinsic::eh_return_i32:
4041 case Intrinsic::eh_return_i64:
4042 if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
4043 MMI->setCallsEHReturn(true);
4044 DAG.setRoot(DAG.getNode(ISD::EH_RETURN, dl,
4047 getValue(I.getOperand(1)),
4048 getValue(I.getOperand(2))));
4050 setValue(&I, DAG.getConstant(0, TLI.getPointerTy()));
4054 case Intrinsic::eh_unwind_init:
4055 if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
4056 MMI->setCallsUnwindInit(true);
4061 case Intrinsic::eh_dwarf_cfa: {
4062 EVT VT = getValue(I.getOperand(1)).getValueType();
4063 SDValue CfaArg = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), dl,
4064 TLI.getPointerTy());
4066 SDValue Offset = DAG.getNode(ISD::ADD, dl,
4068 DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, dl,
4069 TLI.getPointerTy()),
4071 setValue(&I, DAG.getNode(ISD::ADD, dl,
4073 DAG.getNode(ISD::FRAMEADDR, dl,
4076 TLI.getPointerTy())),
4080 case Intrinsic::convertff:
4081 case Intrinsic::convertfsi:
4082 case Intrinsic::convertfui:
4083 case Intrinsic::convertsif:
4084 case Intrinsic::convertuif:
4085 case Intrinsic::convertss:
4086 case Intrinsic::convertsu:
4087 case Intrinsic::convertus:
4088 case Intrinsic::convertuu: {
4089 ISD::CvtCode Code = ISD::CVT_INVALID;
4090 switch (Intrinsic) {
4091 case Intrinsic::convertff: Code = ISD::CVT_FF; break;
4092 case Intrinsic::convertfsi: Code = ISD::CVT_FS; break;
4093 case Intrinsic::convertfui: Code = ISD::CVT_FU; break;
4094 case Intrinsic::convertsif: Code = ISD::CVT_SF; break;
4095 case Intrinsic::convertuif: Code = ISD::CVT_UF; break;
4096 case Intrinsic::convertss: Code = ISD::CVT_SS; break;
4097 case Intrinsic::convertsu: Code = ISD::CVT_SU; break;
4098 case Intrinsic::convertus: Code = ISD::CVT_US; break;
4099 case Intrinsic::convertuu: Code = ISD::CVT_UU; break;
4101 EVT DestVT = TLI.getValueType(I.getType());
4102 Value* Op1 = I.getOperand(1);
4103 setValue(&I, DAG.getConvertRndSat(DestVT, getCurDebugLoc(), getValue(Op1),
4104 DAG.getValueType(DestVT),
4105 DAG.getValueType(getValue(Op1).getValueType()),
4106 getValue(I.getOperand(2)),
4107 getValue(I.getOperand(3)),
4112 case Intrinsic::sqrt:
4113 setValue(&I, DAG.getNode(ISD::FSQRT, dl,
4114 getValue(I.getOperand(1)).getValueType(),
4115 getValue(I.getOperand(1))));
4117 case Intrinsic::powi:
4118 setValue(&I, DAG.getNode(ISD::FPOWI, dl,
4119 getValue(I.getOperand(1)).getValueType(),
4120 getValue(I.getOperand(1)),
4121 getValue(I.getOperand(2))));
4123 case Intrinsic::sin:
4124 setValue(&I, DAG.getNode(ISD::FSIN, dl,
4125 getValue(I.getOperand(1)).getValueType(),
4126 getValue(I.getOperand(1))));
4128 case Intrinsic::cos:
4129 setValue(&I, DAG.getNode(ISD::FCOS, dl,
4130 getValue(I.getOperand(1)).getValueType(),
4131 getValue(I.getOperand(1))));
4133 case Intrinsic::log:
4136 case Intrinsic::log2:
4139 case Intrinsic::log10:
4142 case Intrinsic::exp:
4145 case Intrinsic::exp2:
4148 case Intrinsic::pow:
4151 case Intrinsic::pcmarker: {
4152 SDValue Tmp = getValue(I.getOperand(1));
4153 DAG.setRoot(DAG.getNode(ISD::PCMARKER, dl, MVT::Other, getRoot(), Tmp));
4156 case Intrinsic::readcyclecounter: {
4157 SDValue Op = getRoot();
4158 SDValue Tmp = DAG.getNode(ISD::READCYCLECOUNTER, dl,
4159 DAG.getVTList(MVT::i64, MVT::Other),
4162 DAG.setRoot(Tmp.getValue(1));
4165 case Intrinsic::bswap:
4166 setValue(&I, DAG.getNode(ISD::BSWAP, dl,
4167 getValue(I.getOperand(1)).getValueType(),
4168 getValue(I.getOperand(1))));
4170 case Intrinsic::cttz: {
4171 SDValue Arg = getValue(I.getOperand(1));
4172 EVT Ty = Arg.getValueType();
4173 SDValue result = DAG.getNode(ISD::CTTZ, dl, Ty, Arg);
4174 setValue(&I, result);
4177 case Intrinsic::ctlz: {
4178 SDValue Arg = getValue(I.getOperand(1));
4179 EVT Ty = Arg.getValueType();
4180 SDValue result = DAG.getNode(ISD::CTLZ, dl, Ty, Arg);
4181 setValue(&I, result);
4184 case Intrinsic::ctpop: {
4185 SDValue Arg = getValue(I.getOperand(1));
4186 EVT Ty = Arg.getValueType();
4187 SDValue result = DAG.getNode(ISD::CTPOP, dl, Ty, Arg);
4188 setValue(&I, result);
4191 case Intrinsic::stacksave: {
4192 SDValue Op = getRoot();
4193 SDValue Tmp = DAG.getNode(ISD::STACKSAVE, dl,
4194 DAG.getVTList(TLI.getPointerTy(), MVT::Other), &Op, 1);
4196 DAG.setRoot(Tmp.getValue(1));
4199 case Intrinsic::stackrestore: {
4200 SDValue Tmp = getValue(I.getOperand(1));
4201 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, dl, MVT::Other, getRoot(), Tmp));
4204 case Intrinsic::stackprotector: {
4205 // Emit code into the DAG to store the stack guard onto the stack.
4206 MachineFunction &MF = DAG.getMachineFunction();
4207 MachineFrameInfo *MFI = MF.getFrameInfo();
4208 EVT PtrTy = TLI.getPointerTy();
4210 SDValue Src = getValue(I.getOperand(1)); // The guard's value.
4211 AllocaInst *Slot = cast<AllocaInst>(I.getOperand(2));
4213 int FI = FuncInfo.StaticAllocaMap[Slot];
4214 MFI->setStackProtectorIndex(FI);
4216 SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
4218 // Store the stack protector onto the stack.
4219 SDValue Result = DAG.getStore(getRoot(), getCurDebugLoc(), Src, FIN,
4220 PseudoSourceValue::getFixedStack(FI),
4222 setValue(&I, Result);
4223 DAG.setRoot(Result);
4226 case Intrinsic::objectsize: {
4227 // If we don't know by now, we're never going to know.
4228 ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(2));
4230 assert(CI && "Non-constant type in __builtin_object_size?");
4232 SDValue Arg = getValue(I.getOperand(0));
4233 EVT Ty = Arg.getValueType();
4235 if (CI->getZExtValue() < 2)
4236 setValue(&I, DAG.getConstant(-1, Ty));
4238 setValue(&I, DAG.getConstant(0, Ty));
4241 case Intrinsic::var_annotation:
4242 // Discard annotate attributes
4245 case Intrinsic::init_trampoline: {
4246 const Function *F = cast<Function>(I.getOperand(2)->stripPointerCasts());
4250 Ops[1] = getValue(I.getOperand(1));
4251 Ops[2] = getValue(I.getOperand(2));
4252 Ops[3] = getValue(I.getOperand(3));
4253 Ops[4] = DAG.getSrcValue(I.getOperand(1));
4254 Ops[5] = DAG.getSrcValue(F);
4256 SDValue Tmp = DAG.getNode(ISD::TRAMPOLINE, dl,
4257 DAG.getVTList(TLI.getPointerTy(), MVT::Other),
4261 DAG.setRoot(Tmp.getValue(1));
4265 case Intrinsic::gcroot:
4267 Value *Alloca = I.getOperand(1);
4268 Constant *TypeMap = cast<Constant>(I.getOperand(2));
4270 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
4271 GFI->addStackRoot(FI->getIndex(), TypeMap);
4275 case Intrinsic::gcread:
4276 case Intrinsic::gcwrite:
4277 llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
4280 case Intrinsic::flt_rounds: {
4281 setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, dl, MVT::i32));
4285 case Intrinsic::trap: {
4286 DAG.setRoot(DAG.getNode(ISD::TRAP, dl,MVT::Other, getRoot()));
4290 case Intrinsic::uadd_with_overflow:
4291 return implVisitAluOverflow(I, ISD::UADDO);
4292 case Intrinsic::sadd_with_overflow:
4293 return implVisitAluOverflow(I, ISD::SADDO);
4294 case Intrinsic::usub_with_overflow:
4295 return implVisitAluOverflow(I, ISD::USUBO);
4296 case Intrinsic::ssub_with_overflow:
4297 return implVisitAluOverflow(I, ISD::SSUBO);
4298 case Intrinsic::umul_with_overflow:
4299 return implVisitAluOverflow(I, ISD::UMULO);
4300 case Intrinsic::smul_with_overflow:
4301 return implVisitAluOverflow(I, ISD::SMULO);
4303 case Intrinsic::prefetch: {
4306 Ops[1] = getValue(I.getOperand(1));
4307 Ops[2] = getValue(I.getOperand(2));
4308 Ops[3] = getValue(I.getOperand(3));
4309 DAG.setRoot(DAG.getNode(ISD::PREFETCH, dl, MVT::Other, &Ops[0], 4));
4313 case Intrinsic::memory_barrier: {
4316 for (int x = 1; x < 6; ++x)
4317 Ops[x] = getValue(I.getOperand(x));
4319 DAG.setRoot(DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, &Ops[0], 6));
4322 case Intrinsic::atomic_cmp_swap: {
4323 SDValue Root = getRoot();
4325 DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, getCurDebugLoc(),
4326 getValue(I.getOperand(2)).getValueType().getSimpleVT(),
4328 getValue(I.getOperand(1)),
4329 getValue(I.getOperand(2)),
4330 getValue(I.getOperand(3)),
4333 DAG.setRoot(L.getValue(1));
4336 case Intrinsic::atomic_load_add:
4337 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD);
4338 case Intrinsic::atomic_load_sub:
4339 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB);
4340 case Intrinsic::atomic_load_or:
4341 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR);
4342 case Intrinsic::atomic_load_xor:
4343 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR);
4344 case Intrinsic::atomic_load_and:
4345 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND);
4346 case Intrinsic::atomic_load_nand:
4347 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND);
4348 case Intrinsic::atomic_load_max:
4349 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX);
4350 case Intrinsic::atomic_load_min:
4351 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN);
4352 case Intrinsic::atomic_load_umin:
4353 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN);
4354 case Intrinsic::atomic_load_umax:
4355 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX);
4356 case Intrinsic::atomic_swap:
4357 return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP);
4361 /// Test if the given instruction is in a position to be optimized
4362 /// with a tail-call. This roughly means that it's in a block with
4363 /// a return and there's nothing that needs to be scheduled
4364 /// between it and the return.
4366 /// This function only tests target-independent requirements.
4367 /// For target-dependent requirements, a target should override
4368 /// TargetLowering::IsEligibleForTailCallOptimization.
4371 isInTailCallPosition(const Instruction *I, Attributes RetAttr,
4372 const TargetLowering &TLI) {
4373 const BasicBlock *ExitBB = I->getParent();
4374 const TerminatorInst *Term = ExitBB->getTerminator();
4375 const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
4376 const Function *F = ExitBB->getParent();
4378 // The block must end in a return statement or an unreachable.
4379 if (!Ret && !isa<UnreachableInst>(Term)) return false;
4381 // If I will have a chain, make sure no other instruction that will have a
4382 // chain interposes between I and the return.
4383 if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
4384 !I->isSafeToSpeculativelyExecute())
4385 for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
4389 if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
4390 !BBI->isSafeToSpeculativelyExecute())
4394 // If the block ends with a void return or unreachable, it doesn't matter
4395 // what the call's return type is.
4396 if (!Ret || Ret->getNumOperands() == 0) return true;
4398 // Conservatively require the attributes of the call to match those of
4400 if (F->getAttributes().getRetAttributes() != RetAttr)
4403 // Otherwise, make sure the unmodified return value of I is the return value.
4404 for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
4405 U = dyn_cast<Instruction>(U->getOperand(0))) {
4408 if (!U->hasOneUse())
4412 // Check for a truly no-op truncate.
4413 if (isa<TruncInst>(U) &&
4414 TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
4416 // Check for a truly no-op bitcast.
4417 if (isa<BitCastInst>(U) &&
4418 (U->getOperand(0)->getType() == U->getType() ||
4419 (isa<PointerType>(U->getOperand(0)->getType()) &&
4420 isa<PointerType>(U->getType()))))
4422 // Otherwise it's not a true no-op.
4429 void SelectionDAGLowering::LowerCallTo(CallSite CS, SDValue Callee,
4431 MachineBasicBlock *LandingPad) {
4432 const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
4433 const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
4434 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4435 unsigned BeginLabel = 0, EndLabel = 0;
4437 TargetLowering::ArgListTy Args;
4438 TargetLowering::ArgListEntry Entry;
4439 Args.reserve(CS.arg_size());
4441 for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
4443 SDValue ArgNode = getValue(*i);
4444 Entry.Node = ArgNode; Entry.Ty = (*i)->getType();
4446 unsigned attrInd = i - CS.arg_begin() + 1;
4447 Entry.isSExt = CS.paramHasAttr(attrInd, Attribute::SExt);
4448 Entry.isZExt = CS.paramHasAttr(attrInd, Attribute::ZExt);
4449 Entry.isInReg = CS.paramHasAttr(attrInd, Attribute::InReg);
4450 Entry.isSRet = CS.paramHasAttr(attrInd, Attribute::StructRet);
4451 Entry.isNest = CS.paramHasAttr(attrInd, Attribute::Nest);
4452 Entry.isByVal = CS.paramHasAttr(attrInd, Attribute::ByVal);
4453 Entry.Alignment = CS.getParamAlignment(attrInd);
4454 Args.push_back(Entry);
4457 if (LandingPad && MMI) {
4458 // Insert a label before the invoke call to mark the try range. This can be
4459 // used to detect deletion of the invoke via the MachineModuleInfo.
4460 BeginLabel = MMI->NextLabelID();
4462 // Both PendingLoads and PendingExports must be flushed here;
4463 // this call might not return.
4465 DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
4466 getControlRoot(), BeginLabel));
4469 // Check if target-independent constraints permit a tail call here.
4470 // Target-dependent constraints are checked within TLI.LowerCallTo.
4472 !isInTailCallPosition(CS.getInstruction(),
4473 CS.getAttributes().getRetAttributes(),
4477 std::pair<SDValue,SDValue> Result =
4478 TLI.LowerCallTo(getRoot(), CS.getType(),
4479 CS.paramHasAttr(0, Attribute::SExt),
4480 CS.paramHasAttr(0, Attribute::ZExt), FTy->isVarArg(),
4481 CS.paramHasAttr(0, Attribute::InReg), FTy->getNumParams(),
4482 CS.getCallingConv(),
4484 !CS.getInstruction()->use_empty(),
4485 Callee, Args, DAG, getCurDebugLoc());
4486 assert((isTailCall || Result.second.getNode()) &&
4487 "Non-null chain expected with non-tail call!");
4488 assert((Result.second.getNode() || !Result.first.getNode()) &&
4489 "Null value expected with tail call!");
4490 if (Result.first.getNode())
4491 setValue(CS.getInstruction(), Result.first);
4492 // As a special case, a null chain means that a tail call has
4493 // been emitted and the DAG root is already updated.
4494 if (Result.second.getNode())
4495 DAG.setRoot(Result.second);
4499 if (LandingPad && MMI) {
4500 // Insert a label at the end of the invoke call to mark the try range. This
4501 // can be used to detect deletion of the invoke via the MachineModuleInfo.
4502 EndLabel = MMI->NextLabelID();
4503 DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
4504 getRoot(), EndLabel));
4506 // Inform MachineModuleInfo of range.
4507 MMI->addInvoke(LandingPad, BeginLabel, EndLabel);
4512 void SelectionDAGLowering::visitCall(CallInst &I) {
4513 const char *RenameFn = 0;
4514 if (Function *F = I.getCalledFunction()) {
4515 if (F->isDeclaration()) {
4516 const TargetIntrinsicInfo *II = TLI.getTargetMachine().getIntrinsicInfo();
4518 if (unsigned IID = II->getIntrinsicID(F)) {
4519 RenameFn = visitIntrinsicCall(I, IID);
4524 if (unsigned IID = F->getIntrinsicID()) {
4525 RenameFn = visitIntrinsicCall(I, IID);
4531 // Check for well-known libc/libm calls. If the function is internal, it
4532 // can't be a library call.
4533 if (!F->hasLocalLinkage() && F->hasName()) {
4534 StringRef Name = F->getName();
4535 if (Name == "copysign" || Name == "copysignf") {
4536 if (I.getNumOperands() == 3 && // Basic sanity checks.
4537 I.getOperand(1)->getType()->isFloatingPoint() &&
4538 I.getType() == I.getOperand(1)->getType() &&
4539 I.getType() == I.getOperand(2)->getType()) {
4540 SDValue LHS = getValue(I.getOperand(1));
4541 SDValue RHS = getValue(I.getOperand(2));
4542 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurDebugLoc(),
4543 LHS.getValueType(), LHS, RHS));
4546 } else if (Name == "fabs" || Name == "fabsf" || Name == "fabsl") {
4547 if (I.getNumOperands() == 2 && // Basic sanity checks.
4548 I.getOperand(1)->getType()->isFloatingPoint() &&
4549 I.getType() == I.getOperand(1)->getType()) {
4550 SDValue Tmp = getValue(I.getOperand(1));
4551 setValue(&I, DAG.getNode(ISD::FABS, getCurDebugLoc(),
4552 Tmp.getValueType(), Tmp));
4555 } else if (Name == "sin" || Name == "sinf" || Name == "sinl") {
4556 if (I.getNumOperands() == 2 && // Basic sanity checks.
4557 I.getOperand(1)->getType()->isFloatingPoint() &&
4558 I.getType() == I.getOperand(1)->getType() &&
4559 I.onlyReadsMemory()) {
4560 SDValue Tmp = getValue(I.getOperand(1));
4561 setValue(&I, DAG.getNode(ISD::FSIN, getCurDebugLoc(),
4562 Tmp.getValueType(), Tmp));
4565 } else if (Name == "cos" || Name == "cosf" || Name == "cosl") {
4566 if (I.getNumOperands() == 2 && // Basic sanity checks.
4567 I.getOperand(1)->getType()->isFloatingPoint() &&
4568 I.getType() == I.getOperand(1)->getType() &&
4569 I.onlyReadsMemory()) {
4570 SDValue Tmp = getValue(I.getOperand(1));
4571 setValue(&I, DAG.getNode(ISD::FCOS, getCurDebugLoc(),
4572 Tmp.getValueType(), Tmp));
4575 } else if (Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl") {
4576 if (I.getNumOperands() == 2 && // Basic sanity checks.
4577 I.getOperand(1)->getType()->isFloatingPoint() &&
4578 I.getType() == I.getOperand(1)->getType() &&
4579 I.onlyReadsMemory()) {
4580 SDValue Tmp = getValue(I.getOperand(1));
4581 setValue(&I, DAG.getNode(ISD::FSQRT, getCurDebugLoc(),
4582 Tmp.getValueType(), Tmp));
4587 } else if (isa<InlineAsm>(I.getOperand(0))) {
4594 Callee = getValue(I.getOperand(0));
4596 Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
4598 // Check if we can potentially perform a tail call. More detailed
4599 // checking is be done within LowerCallTo, after more information
4600 // about the call is known.
4601 bool isTailCall = PerformTailCallOpt && I.isTailCall();
4603 LowerCallTo(&I, Callee, isTailCall);
4607 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
4608 /// this value and returns the result as a ValueVT value. This uses
4609 /// Chain/Flag as the input and updates them for the output Chain/Flag.
4610 /// If the Flag pointer is NULL, no flag is used.
4611 SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
4613 SDValue *Flag) const {
4614 // Assemble the legal parts into the final values.
4615 SmallVector<SDValue, 4> Values(ValueVTs.size());
4616 SmallVector<SDValue, 8> Parts;
4617 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
4618 // Copy the legal parts from the registers.
4619 EVT ValueVT = ValueVTs[Value];
4620 unsigned NumRegs = TLI->getNumRegisters(*DAG.getContext(), ValueVT);
4621 EVT RegisterVT = RegVTs[Value];
4623 Parts.resize(NumRegs);
4624 for (unsigned i = 0; i != NumRegs; ++i) {
4627 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
4629 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
4630 *Flag = P.getValue(2);
4632 Chain = P.getValue(1);
4634 // If the source register was virtual and if we know something about it,
4635 // add an assert node.
4636 if (TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) &&
4637 RegisterVT.isInteger() && !RegisterVT.isVector()) {
4638 unsigned SlotNo = Regs[Part+i]-TargetRegisterInfo::FirstVirtualRegister;
4639 FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
4640 if (FLI.LiveOutRegInfo.size() > SlotNo) {
4641 FunctionLoweringInfo::LiveOutInfo &LOI = FLI.LiveOutRegInfo[SlotNo];
4643 unsigned RegSize = RegisterVT.getSizeInBits();
4644 unsigned NumSignBits = LOI.NumSignBits;
4645 unsigned NumZeroBits = LOI.KnownZero.countLeadingOnes();
4647 // FIXME: We capture more information than the dag can represent. For
4648 // now, just use the tightest assertzext/assertsext possible.
4650 EVT FromVT(MVT::Other);
4651 if (NumSignBits == RegSize)
4652 isSExt = true, FromVT = MVT::i1; // ASSERT SEXT 1
4653 else if (NumZeroBits >= RegSize-1)
4654 isSExt = false, FromVT = MVT::i1; // ASSERT ZEXT 1
4655 else if (NumSignBits > RegSize-8)
4656 isSExt = true, FromVT = MVT::i8; // ASSERT SEXT 8
4657 else if (NumZeroBits >= RegSize-8)
4658 isSExt = false, FromVT = MVT::i8; // ASSERT ZEXT 8
4659 else if (NumSignBits > RegSize-16)
4660 isSExt = true, FromVT = MVT::i16; // ASSERT SEXT 16
4661 else if (NumZeroBits >= RegSize-16)
4662 isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
4663 else if (NumSignBits > RegSize-32)
4664 isSExt = true, FromVT = MVT::i32; // ASSERT SEXT 32
4665 else if (NumZeroBits >= RegSize-32)
4666 isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
4668 if (FromVT != MVT::Other) {
4669 P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
4670 RegisterVT, P, DAG.getValueType(FromVT));
4679 Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
4680 NumRegs, RegisterVT, ValueVT);
4685 return DAG.getNode(ISD::MERGE_VALUES, dl,
4686 DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
4687 &Values[0], ValueVTs.size());
4690 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
4691 /// specified value into the registers specified by this object. This uses
4692 /// Chain/Flag as the input and updates them for the output Chain/Flag.
4693 /// If the Flag pointer is NULL, no flag is used.
4694 void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
4695 SDValue &Chain, SDValue *Flag) const {
4696 // Get the list of the values's legal parts.
4697 unsigned NumRegs = Regs.size();
4698 SmallVector<SDValue, 8> Parts(NumRegs);
4699 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
4700 EVT ValueVT = ValueVTs[Value];
4701 unsigned NumParts = TLI->getNumRegisters(*DAG.getContext(), ValueVT);
4702 EVT RegisterVT = RegVTs[Value];
4704 getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
4705 &Parts[Part], NumParts, RegisterVT);
4709 // Copy the parts into the registers.
4710 SmallVector<SDValue, 8> Chains(NumRegs);
4711 for (unsigned i = 0; i != NumRegs; ++i) {
4714 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
4716 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
4717 *Flag = Part.getValue(1);
4719 Chains[i] = Part.getValue(0);
4722 if (NumRegs == 1 || Flag)
4723 // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
4724 // flagged to it. That is the CopyToReg nodes and the user are considered
4725 // a single scheduling unit. If we create a TokenFactor and return it as
4726 // chain, then the TokenFactor is both a predecessor (operand) of the
4727 // user as well as a successor (the TF operands are flagged to the user).
4728 // c1, f1 = CopyToReg
4729 // c2, f2 = CopyToReg
4730 // c3 = TokenFactor c1, c2
4733 Chain = Chains[NumRegs-1];
4735 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs);
4738 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
4739 /// operand list. This adds the code marker and includes the number of
4740 /// values added into it.
4741 void RegsForValue::AddInlineAsmOperands(unsigned Code,
4742 bool HasMatching,unsigned MatchingIdx,
4744 std::vector<SDValue> &Ops) const {
4745 EVT IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy();
4746 assert(Regs.size() < (1 << 13) && "Too many inline asm outputs!");
4747 unsigned Flag = Code | (Regs.size() << 3);
4749 Flag |= 0x80000000 | (MatchingIdx << 16);
4750 Ops.push_back(DAG.getTargetConstant(Flag, IntPtrTy));
4751 for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
4752 unsigned NumRegs = TLI->getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
4753 EVT RegisterVT = RegVTs[Value];
4754 for (unsigned i = 0; i != NumRegs; ++i) {
4755 assert(Reg < Regs.size() && "Mismatch in # registers expected");
4756 Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
4761 /// isAllocatableRegister - If the specified register is safe to allocate,
4762 /// i.e. it isn't a stack pointer or some other special register, return the
4763 /// register class for the register. Otherwise, return null.
4764 static const TargetRegisterClass *
4765 isAllocatableRegister(unsigned Reg, MachineFunction &MF,
4766 const TargetLowering &TLI,
4767 const TargetRegisterInfo *TRI) {
4768 EVT FoundVT = MVT::Other;
4769 const TargetRegisterClass *FoundRC = 0;
4770 for (TargetRegisterInfo::regclass_iterator RCI = TRI->regclass_begin(),
4771 E = TRI->regclass_end(); RCI != E; ++RCI) {
4772 EVT ThisVT = MVT::Other;
4774 const TargetRegisterClass *RC = *RCI;
4775 // If none of the the value types for this register class are valid, we
4776 // can't use it. For example, 64-bit reg classes on 32-bit targets.
4777 for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
4779 if (TLI.isTypeLegal(*I)) {
4780 // If we have already found this register in a different register class,
4781 // choose the one with the largest VT specified. For example, on
4782 // PowerPC, we favor f64 register classes over f32.
4783 if (FoundVT == MVT::Other || FoundVT.bitsLT(*I)) {
4790 if (ThisVT == MVT::Other) continue;
4792 // NOTE: This isn't ideal. In particular, this might allocate the
4793 // frame pointer in functions that need it (due to them not being taken
4794 // out of allocation, because a variable sized allocation hasn't been seen
4795 // yet). This is a slight code pessimization, but should still work.
4796 for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
4797 E = RC->allocation_order_end(MF); I != E; ++I)
4799 // We found a matching register class. Keep looking at others in case
4800 // we find one with larger registers that this physreg is also in.
4811 /// AsmOperandInfo - This contains information for each constraint that we are
4813 class VISIBILITY_HIDDEN SDISelAsmOperandInfo :
4814 public TargetLowering::AsmOperandInfo {
4816 /// CallOperand - If this is the result output operand or a clobber
4817 /// this is null, otherwise it is the incoming operand to the CallInst.
4818 /// This gets modified as the asm is processed.
4819 SDValue CallOperand;
4821 /// AssignedRegs - If this is a register or register class operand, this
4822 /// contains the set of register corresponding to the operand.
4823 RegsForValue AssignedRegs;
4825 explicit SDISelAsmOperandInfo(const InlineAsm::ConstraintInfo &info)
4826 : TargetLowering::AsmOperandInfo(info), CallOperand(0,0) {
4829 /// MarkAllocatedRegs - Once AssignedRegs is set, mark the assigned registers
4830 /// busy in OutputRegs/InputRegs.
4831 void MarkAllocatedRegs(bool isOutReg, bool isInReg,
4832 std::set<unsigned> &OutputRegs,
4833 std::set<unsigned> &InputRegs,
4834 const TargetRegisterInfo &TRI) const {
4836 for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
4837 MarkRegAndAliases(AssignedRegs.Regs[i], OutputRegs, TRI);
4840 for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
4841 MarkRegAndAliases(AssignedRegs.Regs[i], InputRegs, TRI);
4845 /// getCallOperandValEVT - Return the EVT of the Value* that this operand
4846 /// corresponds to. If there is no Value* for this operand, it returns
4848 EVT getCallOperandValEVT(LLVMContext &Context,
4849 const TargetLowering &TLI,
4850 const TargetData *TD) const {
4851 if (CallOperandVal == 0) return MVT::Other;
4853 if (isa<BasicBlock>(CallOperandVal))
4854 return TLI.getPointerTy();
4856 const llvm::Type *OpTy = CallOperandVal->getType();
4858 // If this is an indirect operand, the operand is a pointer to the
4861 OpTy = cast<PointerType>(OpTy)->getElementType();
4863 // If OpTy is not a single value, it may be a struct/union that we
4864 // can tile with integers.
4865 if (!OpTy->isSingleValueType() && OpTy->isSized()) {
4866 unsigned BitSize = TD->getTypeSizeInBits(OpTy);
4875 OpTy = IntegerType::get(Context, BitSize);
4880 return TLI.getValueType(OpTy, true);
4884 /// MarkRegAndAliases - Mark the specified register and all aliases in the
4886 static void MarkRegAndAliases(unsigned Reg, std::set<unsigned> &Regs,
4887 const TargetRegisterInfo &TRI) {
4888 assert(TargetRegisterInfo::isPhysicalRegister(Reg) && "Isn't a physreg");
4890 if (const unsigned *Aliases = TRI.getAliasSet(Reg))
4891 for (; *Aliases; ++Aliases)
4892 Regs.insert(*Aliases);
4895 } // end llvm namespace.
4898 /// GetRegistersForValue - Assign registers (virtual or physical) for the
4899 /// specified operand. We prefer to assign virtual registers, to allow the
4900 /// register allocator handle the assignment process. However, if the asm uses
4901 /// features that we can't model on machineinstrs, we have SDISel do the
4902 /// allocation. This produces generally horrible, but correct, code.
4904 /// OpInfo describes the operand.
4905 /// Input and OutputRegs are the set of already allocated physical registers.
4907 void SelectionDAGLowering::
4908 GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
4909 std::set<unsigned> &OutputRegs,
4910 std::set<unsigned> &InputRegs) {
4911 LLVMContext &Context = FuncInfo.Fn->getContext();
4913 // Compute whether this value requires an input register, an output register,
4915 bool isOutReg = false;
4916 bool isInReg = false;
4917 switch (OpInfo.Type) {
4918 case InlineAsm::isOutput:
4921 // If there is an input constraint that matches this, we need to reserve
4922 // the input register so no other inputs allocate to it.
4923 isInReg = OpInfo.hasMatchingInput();
4925 case InlineAsm::isInput:
4929 case InlineAsm::isClobber:
4936 MachineFunction &MF = DAG.getMachineFunction();
4937 SmallVector<unsigned, 4> Regs;
4939 // If this is a constraint for a single physreg, or a constraint for a
4940 // register class, find it.
4941 std::pair<unsigned, const TargetRegisterClass*> PhysReg =
4942 TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
4943 OpInfo.ConstraintVT);
4945 unsigned NumRegs = 1;
4946 if (OpInfo.ConstraintVT != MVT::Other) {
4947 // If this is a FP input in an integer register (or visa versa) insert a bit
4948 // cast of the input value. More generally, handle any case where the input
4949 // value disagrees with the register class we plan to stick this in.
4950 if (OpInfo.Type == InlineAsm::isInput &&
4951 PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) {
4952 // Try to convert to the first EVT that the reg class contains. If the
4953 // types are identical size, use a bitcast to convert (e.g. two differing
4955 EVT RegVT = *PhysReg.second->vt_begin();
4956 if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
4957 OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
4958 RegVT, OpInfo.CallOperand);
4959 OpInfo.ConstraintVT = RegVT;
4960 } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
4961 // If the input is a FP value and we want it in FP registers, do a
4962 // bitcast to the corresponding integer type. This turns an f64 value
4963 // into i64, which can be passed with two i32 values on a 32-bit
4965 RegVT = EVT::getIntegerVT(Context,
4966 OpInfo.ConstraintVT.getSizeInBits());
4967 OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
4968 RegVT, OpInfo.CallOperand);
4969 OpInfo.ConstraintVT = RegVT;
4973 NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
4977 EVT ValueVT = OpInfo.ConstraintVT;
4979 // If this is a constraint for a specific physical register, like {r17},
4981 if (unsigned AssignedReg = PhysReg.first) {
4982 const TargetRegisterClass *RC = PhysReg.second;
4983 if (OpInfo.ConstraintVT == MVT::Other)
4984 ValueVT = *RC->vt_begin();
4986 // Get the actual register value type. This is important, because the user
4987 // may have asked for (e.g.) the AX register in i32 type. We need to
4988 // remember that AX is actually i16 to get the right extension.
4989 RegVT = *RC->vt_begin();
4991 // This is a explicit reference to a physical register.
4992 Regs.push_back(AssignedReg);
4994 // If this is an expanded reference, add the rest of the regs to Regs.
4996 TargetRegisterClass::iterator I = RC->begin();
4997 for (; *I != AssignedReg; ++I)
4998 assert(I != RC->end() && "Didn't find reg!");
5000 // Already added the first reg.
5002 for (; NumRegs; --NumRegs, ++I) {
5003 assert(I != RC->end() && "Ran out of registers to allocate!");
5007 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
5008 const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
5009 OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
5013 // Otherwise, if this was a reference to an LLVM register class, create vregs
5014 // for this reference.
5015 if (const TargetRegisterClass *RC = PhysReg.second) {
5016 RegVT = *RC->vt_begin();
5017 if (OpInfo.ConstraintVT == MVT::Other)
5020 // Create the appropriate number of virtual registers.
5021 MachineRegisterInfo &RegInfo = MF.getRegInfo();
5022 for (; NumRegs; --NumRegs)
5023 Regs.push_back(RegInfo.createVirtualRegister(RC));
5025 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
5029 // This is a reference to a register class that doesn't directly correspond
5030 // to an LLVM register class. Allocate NumRegs consecutive, available,
5031 // registers from the class.
5032 std::vector<unsigned> RegClassRegs
5033 = TLI.getRegClassForInlineAsmConstraint(OpInfo.ConstraintCode,
5034 OpInfo.ConstraintVT);
5036 const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
5037 unsigned NumAllocated = 0;
5038 for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
5039 unsigned Reg = RegClassRegs[i];
5040 // See if this register is available.
5041 if ((isOutReg && OutputRegs.count(Reg)) || // Already used.
5042 (isInReg && InputRegs.count(Reg))) { // Already used.
5043 // Make sure we find consecutive registers.
5048 // Check to see if this register is allocatable (i.e. don't give out the
5050 const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, TRI);
5051 if (!RC) { // Couldn't allocate this register.
5052 // Reset NumAllocated to make sure we return consecutive registers.
5057 // Okay, this register is good, we can use it.
5060 // If we allocated enough consecutive registers, succeed.
5061 if (NumAllocated == NumRegs) {
5062 unsigned RegStart = (i-NumAllocated)+1;
5063 unsigned RegEnd = i+1;
5064 // Mark all of the allocated registers used.
5065 for (unsigned i = RegStart; i != RegEnd; ++i)
5066 Regs.push_back(RegClassRegs[i]);
5068 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, *RC->vt_begin(),
5069 OpInfo.ConstraintVT);
5070 OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
5075 // Otherwise, we couldn't allocate enough registers for this.
5078 /// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
5079 /// processed uses a memory 'm' constraint.
5081 hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
5082 const TargetLowering &TLI) {
5083 for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
5084 InlineAsm::ConstraintInfo &CI = CInfos[i];
5085 for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
5086 TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
5087 if (CType == TargetLowering::C_Memory)
5091 // Indirect operand accesses access memory.
5099 /// visitInlineAsm - Handle a call to an InlineAsm object.
5101 void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
5102 InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
5104 /// ConstraintOperands - Information about all of the constraints.
5105 std::vector<SDISelAsmOperandInfo> ConstraintOperands;
5107 std::set<unsigned> OutputRegs, InputRegs;
5109 // Do a prepass over the constraints, canonicalizing them, and building up the
5110 // ConstraintOperands list.
5111 std::vector<InlineAsm::ConstraintInfo>
5112 ConstraintInfos = IA->ParseConstraints();
5114 bool hasMemory = hasInlineAsmMemConstraint(ConstraintInfos, TLI);
5116 SDValue Chain, Flag;
5118 // We won't need to flush pending loads if this asm doesn't touch
5119 // memory and is nonvolatile.
5120 if (hasMemory || IA->hasSideEffects())
5123 Chain = DAG.getRoot();
5125 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
5126 unsigned ResNo = 0; // ResNo - The result number of the next output.
5127 for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
5128 ConstraintOperands.push_back(SDISelAsmOperandInfo(ConstraintInfos[i]));
5129 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
5131 EVT OpVT = MVT::Other;
5133 // Compute the value type for each operand.
5134 switch (OpInfo.Type) {
5135 case InlineAsm::isOutput:
5136 // Indirect outputs just consume an argument.
5137 if (OpInfo.isIndirect) {
5138 OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
5142 // The return value of the call is this value. As such, there is no
5143 // corresponding argument.
5144 assert(CS.getType() != Type::getVoidTy(*DAG.getContext()) &&
5146 if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
5147 OpVT = TLI.getValueType(STy->getElementType(ResNo));
5149 assert(ResNo == 0 && "Asm only has one result!");
5150 OpVT = TLI.getValueType(CS.getType());
5154 case InlineAsm::isInput:
5155 OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
5157 case InlineAsm::isClobber:
5162 // If this is an input or an indirect output, process the call argument.
5163 // BasicBlocks are labels, currently appearing only in asm's.
5164 if (OpInfo.CallOperandVal) {
5165 // Strip bitcasts, if any. This mostly comes up for functions.
5166 OpInfo.CallOperandVal = OpInfo.CallOperandVal->stripPointerCasts();
5168 if (BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
5169 OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
5171 OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
5174 OpVT = OpInfo.getCallOperandValEVT(*DAG.getContext(), TLI, TD);
5177 OpInfo.ConstraintVT = OpVT;
5180 // Second pass over the constraints: compute which constraint option to use
5181 // and assign registers to constraints that want a specific physreg.
5182 for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
5183 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5185 // If this is an output operand with a matching input operand, look up the
5186 // matching input. If their types mismatch, e.g. one is an integer, the
5187 // other is floating point, or their sizes are different, flag it as an
5189 if (OpInfo.hasMatchingInput()) {
5190 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
5191 if (OpInfo.ConstraintVT != Input.ConstraintVT) {
5192 if ((OpInfo.ConstraintVT.isInteger() !=
5193 Input.ConstraintVT.isInteger()) ||
5194 (OpInfo.ConstraintVT.getSizeInBits() !=
5195 Input.ConstraintVT.getSizeInBits())) {
5196 llvm_report_error("Unsupported asm: input constraint"
5197 " with a matching output constraint of incompatible"
5200 Input.ConstraintVT = OpInfo.ConstraintVT;
5204 // Compute the constraint code and ConstraintType to use.
5205 TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, hasMemory, &DAG);
5207 // If this is a memory input, and if the operand is not indirect, do what we
5208 // need to to provide an address for the memory input.
5209 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
5210 !OpInfo.isIndirect) {
5211 assert(OpInfo.Type == InlineAsm::isInput &&
5212 "Can only indirectify direct input operands!");
5214 // Memory operands really want the address of the value. If we don't have
5215 // an indirect input, put it in the constpool if we can, otherwise spill
5216 // it to a stack slot.
5218 // If the operand is a float, integer, or vector constant, spill to a
5219 // constant pool entry to get its address.
5220 Value *OpVal = OpInfo.CallOperandVal;
5221 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
5222 isa<ConstantVector>(OpVal)) {
5223 OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
5224 TLI.getPointerTy());
5226 // Otherwise, create a stack slot and emit a store to it before the
5228 const Type *Ty = OpVal->getType();
5229 uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
5230 unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(Ty);
5231 MachineFunction &MF = DAG.getMachineFunction();
5232 int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align);
5233 SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
5234 Chain = DAG.getStore(Chain, getCurDebugLoc(),
5235 OpInfo.CallOperand, StackSlot, NULL, 0);
5236 OpInfo.CallOperand = StackSlot;
5239 // There is no longer a Value* corresponding to this operand.
5240 OpInfo.CallOperandVal = 0;
5241 // It is now an indirect operand.
5242 OpInfo.isIndirect = true;
5245 // If this constraint is for a specific register, allocate it before
5247 if (OpInfo.ConstraintType == TargetLowering::C_Register)
5248 GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
5250 ConstraintInfos.clear();
5253 // Second pass - Loop over all of the operands, assigning virtual or physregs
5254 // to register class operands.
5255 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
5256 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5258 // C_Register operands have already been allocated, Other/Memory don't need
5260 if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
5261 GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
5264 // AsmNodeOperands - The operands for the ISD::INLINEASM node.
5265 std::vector<SDValue> AsmNodeOperands;
5266 AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
5267 AsmNodeOperands.push_back(
5268 DAG.getTargetExternalSymbol(IA->getAsmString().c_str(), MVT::Other));
5271 // Loop over all of the inputs, copying the operand values into the
5272 // appropriate registers and processing the output regs.
5273 RegsForValue RetValRegs;
5275 // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
5276 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
5278 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
5279 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5281 switch (OpInfo.Type) {
5282 case InlineAsm::isOutput: {
5283 if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
5284 OpInfo.ConstraintType != TargetLowering::C_Register) {
5285 // Memory output, or 'other' output (e.g. 'X' constraint).
5286 assert(OpInfo.isIndirect && "Memory output must be indirect operand");
5288 // Add information to the INLINEASM node to know about this output.
5289 unsigned ResOpType = 4/*MEM*/ | (1<<3);
5290 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5291 TLI.getPointerTy()));
5292 AsmNodeOperands.push_back(OpInfo.CallOperand);
5296 // Otherwise, this is a register or register class output.
5298 // Copy the output from the appropriate register. Find a register that
5300 if (OpInfo.AssignedRegs.Regs.empty()) {
5301 llvm_report_error("Couldn't allocate output reg for"
5302 " constraint '" + OpInfo.ConstraintCode + "'!");
5305 // If this is an indirect operand, store through the pointer after the
5307 if (OpInfo.isIndirect) {
5308 IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
5309 OpInfo.CallOperandVal));
5311 // This is the result value of the call.
5312 assert(CS.getType() != Type::getVoidTy(*DAG.getContext()) &&
5314 // Concatenate this output onto the outputs list.
5315 RetValRegs.append(OpInfo.AssignedRegs);
5318 // Add information to the INLINEASM node to know that this register is
5320 OpInfo.AssignedRegs.AddInlineAsmOperands(OpInfo.isEarlyClobber ?
5321 6 /* EARLYCLOBBER REGDEF */ :
5325 DAG, AsmNodeOperands);
5328 case InlineAsm::isInput: {
5329 SDValue InOperandVal = OpInfo.CallOperand;
5331 if (OpInfo.isMatchingInputConstraint()) { // Matching constraint?
5332 // If this is required to match an output register we have already set,
5333 // just use its register.
5334 unsigned OperandNo = OpInfo.getMatchedOperand();
5336 // Scan until we find the definition we already emitted of this operand.
5337 // When we find it, create a RegsForValue operand.
5338 unsigned CurOp = 2; // The first operand.
5339 for (; OperandNo; --OperandNo) {
5340 // Advance to the next operand.
5342 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
5343 assert(((OpFlag & 7) == 2 /*REGDEF*/ ||
5344 (OpFlag & 7) == 6 /*EARLYCLOBBER REGDEF*/ ||
5345 (OpFlag & 7) == 4 /*MEM*/) &&
5346 "Skipped past definitions?");
5347 CurOp += InlineAsm::getNumOperandRegisters(OpFlag)+1;
5351 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
5352 if ((OpFlag & 7) == 2 /*REGDEF*/
5353 || (OpFlag & 7) == 6 /* EARLYCLOBBER REGDEF */) {
5354 // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
5355 if (OpInfo.isIndirect) {
5356 llvm_report_error("Don't know how to handle tied indirect "
5357 "register inputs yet!");
5359 RegsForValue MatchedRegs;
5360 MatchedRegs.TLI = &TLI;
5361 MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
5362 EVT RegVT = AsmNodeOperands[CurOp+1].getValueType();
5363 MatchedRegs.RegVTs.push_back(RegVT);
5364 MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
5365 for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag);
5368 push_back(RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT)));
5370 // Use the produced MatchedRegs object to
5371 MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
5373 MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/,
5374 true, OpInfo.getMatchedOperand(),
5375 DAG, AsmNodeOperands);
5378 assert(((OpFlag & 7) == 4) && "Unknown matching constraint!");
5379 assert((InlineAsm::getNumOperandRegisters(OpFlag)) == 1 &&
5380 "Unexpected number of operands");
5381 // Add information to the INLINEASM node to know about this input.
5382 // See InlineAsm.h isUseOperandTiedToDef.
5383 OpFlag |= 0x80000000 | (OpInfo.getMatchedOperand() << 16);
5384 AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag,
5385 TLI.getPointerTy()));
5386 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
5391 if (OpInfo.ConstraintType == TargetLowering::C_Other) {
5392 assert(!OpInfo.isIndirect &&
5393 "Don't know how to handle indirect other inputs yet!");
5395 std::vector<SDValue> Ops;
5396 TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode[0],
5397 hasMemory, Ops, DAG);
5399 llvm_report_error("Invalid operand for inline asm"
5400 " constraint '" + OpInfo.ConstraintCode + "'!");
5403 // Add information to the INLINEASM node to know about this input.
5404 unsigned ResOpType = 3 /*IMM*/ | (Ops.size() << 3);
5405 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5406 TLI.getPointerTy()));
5407 AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
5409 } else if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
5410 assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
5411 assert(InOperandVal.getValueType() == TLI.getPointerTy() &&
5412 "Memory operands expect pointer values");
5414 // Add information to the INLINEASM node to know about this input.
5415 unsigned ResOpType = 4/*MEM*/ | (1<<3);
5416 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5417 TLI.getPointerTy()));
5418 AsmNodeOperands.push_back(InOperandVal);
5422 assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
5423 OpInfo.ConstraintType == TargetLowering::C_Register) &&
5424 "Unknown constraint type!");
5425 assert(!OpInfo.isIndirect &&
5426 "Don't know how to handle indirect register inputs yet!");
5428 // Copy the input into the appropriate registers.
5429 if (OpInfo.AssignedRegs.Regs.empty()) {
5430 llvm_report_error("Couldn't allocate input reg for"
5431 " constraint '"+ OpInfo.ConstraintCode +"'!");
5434 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
5437 OpInfo.AssignedRegs.AddInlineAsmOperands(1/*REGUSE*/, false, 0,
5438 DAG, AsmNodeOperands);
5441 case InlineAsm::isClobber: {
5442 // Add the clobbered value to the operand list, so that the register
5443 // allocator is aware that the physreg got clobbered.
5444 if (!OpInfo.AssignedRegs.Regs.empty())
5445 OpInfo.AssignedRegs.AddInlineAsmOperands(6 /* EARLYCLOBBER REGDEF */,
5446 false, 0, DAG,AsmNodeOperands);
5452 // Finish up input operands.
5453 AsmNodeOperands[0] = Chain;
5454 if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
5456 Chain = DAG.getNode(ISD::INLINEASM, getCurDebugLoc(),
5457 DAG.getVTList(MVT::Other, MVT::Flag),
5458 &AsmNodeOperands[0], AsmNodeOperands.size());
5459 Flag = Chain.getValue(1);
5461 // If this asm returns a register value, copy the result from that register
5462 // and set it as the value of the call.
5463 if (!RetValRegs.Regs.empty()) {
5464 SDValue Val = RetValRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
5467 // FIXME: Why don't we do this for inline asms with MRVs?
5468 if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
5469 EVT ResultType = TLI.getValueType(CS.getType());
5471 // If any of the results of the inline asm is a vector, it may have the
5472 // wrong width/num elts. This can happen for register classes that can
5473 // contain multiple different value types. The preg or vreg allocated may
5474 // not have the same VT as was expected. Convert it to the right type
5475 // with bit_convert.
5476 if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
5477 Val = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
5480 } else if (ResultType != Val.getValueType() &&
5481 ResultType.isInteger() && Val.getValueType().isInteger()) {
5482 // If a result value was tied to an input value, the computed result may
5483 // have a wider width than the expected result. Extract the relevant
5485 Val = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), ResultType, Val);
5488 assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
5491 setValue(CS.getInstruction(), Val);
5492 // Don't need to use this as a chain in this case.
5493 if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
5497 std::vector<std::pair<SDValue, Value*> > StoresToEmit;
5499 // Process indirect outputs, first output all of the flagged copies out of
5501 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
5502 RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
5503 Value *Ptr = IndirectStoresToEmit[i].second;
5504 SDValue OutVal = OutRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
5506 StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
5510 // Emit the non-flagged stores from the physregs.
5511 SmallVector<SDValue, 8> OutChains;
5512 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i)
5513 OutChains.push_back(DAG.getStore(Chain, getCurDebugLoc(),
5514 StoresToEmit[i].first,
5515 getValue(StoresToEmit[i].second),
5516 StoresToEmit[i].second, 0));
5517 if (!OutChains.empty())
5518 Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
5519 &OutChains[0], OutChains.size());
5523 void SelectionDAGLowering::visitVAStart(CallInst &I) {
5524 DAG.setRoot(DAG.getNode(ISD::VASTART, getCurDebugLoc(),
5525 MVT::Other, getRoot(),
5526 getValue(I.getOperand(1)),
5527 DAG.getSrcValue(I.getOperand(1))));
5530 void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
5531 SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurDebugLoc(),
5532 getRoot(), getValue(I.getOperand(0)),
5533 DAG.getSrcValue(I.getOperand(0)));
5535 DAG.setRoot(V.getValue(1));
5538 void SelectionDAGLowering::visitVAEnd(CallInst &I) {
5539 DAG.setRoot(DAG.getNode(ISD::VAEND, getCurDebugLoc(),
5540 MVT::Other, getRoot(),
5541 getValue(I.getOperand(1)),
5542 DAG.getSrcValue(I.getOperand(1))));
5545 void SelectionDAGLowering::visitVACopy(CallInst &I) {
5546 DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurDebugLoc(),
5547 MVT::Other, getRoot(),
5548 getValue(I.getOperand(1)),
5549 getValue(I.getOperand(2)),
5550 DAG.getSrcValue(I.getOperand(1)),
5551 DAG.getSrcValue(I.getOperand(2))));
5554 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
5555 /// implementation, which just calls LowerCall.
5556 /// FIXME: When all targets are
5557 /// migrated to using LowerCall, this hook should be integrated into SDISel.
5558 std::pair<SDValue, SDValue>
5559 TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
5560 bool RetSExt, bool RetZExt, bool isVarArg,
5561 bool isInreg, unsigned NumFixedArgs,
5562 CallingConv::ID CallConv, bool isTailCall,
5563 bool isReturnValueUsed,
5565 ArgListTy &Args, SelectionDAG &DAG, DebugLoc dl) {
5567 assert((!isTailCall || PerformTailCallOpt) &&
5568 "isTailCall set when tail-call optimizations are disabled!");
5570 // Handle all of the outgoing arguments.
5571 SmallVector<ISD::OutputArg, 32> Outs;
5572 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
5573 SmallVector<EVT, 4> ValueVTs;
5574 ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
5575 for (unsigned Value = 0, NumValues = ValueVTs.size();
5576 Value != NumValues; ++Value) {
5577 EVT VT = ValueVTs[Value];
5578 const Type *ArgTy = VT.getTypeForEVT(RetTy->getContext());
5579 SDValue Op = SDValue(Args[i].Node.getNode(),
5580 Args[i].Node.getResNo() + Value);
5581 ISD::ArgFlagsTy Flags;
5582 unsigned OriginalAlignment =
5583 getTargetData()->getABITypeAlignment(ArgTy);
5589 if (Args[i].isInReg)
5593 if (Args[i].isByVal) {
5595 const PointerType *Ty = cast<PointerType>(Args[i].Ty);
5596 const Type *ElementTy = Ty->getElementType();
5597 unsigned FrameAlign = getByValTypeAlignment(ElementTy);
5598 unsigned FrameSize = getTargetData()->getTypeAllocSize(ElementTy);
5599 // For ByVal, alignment should come from FE. BE will guess if this
5600 // info is not there but there are cases it cannot get right.
5601 if (Args[i].Alignment)
5602 FrameAlign = Args[i].Alignment;
5603 Flags.setByValAlign(FrameAlign);
5604 Flags.setByValSize(FrameSize);
5608 Flags.setOrigAlign(OriginalAlignment);
5610 EVT PartVT = getRegisterType(RetTy->getContext(), VT);
5611 unsigned NumParts = getNumRegisters(RetTy->getContext(), VT);
5612 SmallVector<SDValue, 4> Parts(NumParts);
5613 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
5616 ExtendKind = ISD::SIGN_EXTEND;
5617 else if (Args[i].isZExt)
5618 ExtendKind = ISD::ZERO_EXTEND;
5620 getCopyToParts(DAG, dl, Op, &Parts[0], NumParts, PartVT, ExtendKind);
5622 for (unsigned j = 0; j != NumParts; ++j) {
5623 // if it isn't first piece, alignment must be 1
5624 ISD::OutputArg MyFlags(Flags, Parts[j], i < NumFixedArgs);
5625 if (NumParts > 1 && j == 0)
5626 MyFlags.Flags.setSplit();
5628 MyFlags.Flags.setOrigAlign(1);
5630 Outs.push_back(MyFlags);
5635 // Handle the incoming return values from the call.
5636 SmallVector<ISD::InputArg, 32> Ins;
5637 SmallVector<EVT, 4> RetTys;
5638 ComputeValueVTs(*this, RetTy, RetTys);
5639 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
5641 EVT RegisterVT = getRegisterType(RetTy->getContext(), VT);
5642 unsigned NumRegs = getNumRegisters(RetTy->getContext(), VT);
5643 for (unsigned i = 0; i != NumRegs; ++i) {
5644 ISD::InputArg MyFlags;
5645 MyFlags.VT = RegisterVT;
5646 MyFlags.Used = isReturnValueUsed;
5648 MyFlags.Flags.setSExt();
5650 MyFlags.Flags.setZExt();
5652 MyFlags.Flags.setInReg();
5653 Ins.push_back(MyFlags);
5657 // Check if target-dependent constraints permit a tail call here.
5658 // Target-independent constraints should be checked by the caller.
5660 !IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, Ins, DAG))
5663 SmallVector<SDValue, 4> InVals;
5664 Chain = LowerCall(Chain, Callee, CallConv, isVarArg, isTailCall,
5665 Outs, Ins, dl, DAG, InVals);
5667 // Verify that the target's LowerCall behaved as expected.
5668 assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
5669 "LowerCall didn't return a valid chain!");
5670 assert((!isTailCall || InVals.empty()) &&
5671 "LowerCall emitted a return value for a tail call!");
5672 assert((isTailCall || InVals.size() == Ins.size()) &&
5673 "LowerCall didn't emit the correct number of values!");
5674 DEBUG(for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
5675 assert(InVals[i].getNode() &&
5676 "LowerCall emitted a null value!");
5677 assert(Ins[i].VT == InVals[i].getValueType() &&
5678 "LowerCall emitted a value with the wrong type!");
5681 // For a tail call, the return value is merely live-out and there aren't
5682 // any nodes in the DAG representing it. Return a special value to
5683 // indicate that a tail call has been emitted and no more Instructions
5684 // should be processed in the current block.
5687 return std::make_pair(SDValue(), SDValue());
5690 // Collect the legal value parts into potentially illegal values
5691 // that correspond to the original function's return values.
5692 ISD::NodeType AssertOp = ISD::DELETED_NODE;
5694 AssertOp = ISD::AssertSext;
5696 AssertOp = ISD::AssertZext;
5697 SmallVector<SDValue, 4> ReturnValues;
5698 unsigned CurReg = 0;
5699 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
5701 EVT RegisterVT = getRegisterType(RetTy->getContext(), VT);
5702 unsigned NumRegs = getNumRegisters(RetTy->getContext(), VT);
5704 SDValue ReturnValue =
5705 getCopyFromParts(DAG, dl, &InVals[CurReg], NumRegs, RegisterVT, VT,
5707 ReturnValues.push_back(ReturnValue);
5711 // For a function returning void, there is no return value. We can't create
5712 // such a node, so we just return a null return value in that case. In
5713 // that case, nothing will actualy look at the value.
5714 if (ReturnValues.empty())
5715 return std::make_pair(SDValue(), Chain);
5717 SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
5718 DAG.getVTList(&RetTys[0], RetTys.size()),
5719 &ReturnValues[0], ReturnValues.size());
5721 return std::make_pair(Res, Chain);
5724 void TargetLowering::LowerOperationWrapper(SDNode *N,
5725 SmallVectorImpl<SDValue> &Results,
5726 SelectionDAG &DAG) {
5727 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
5729 Results.push_back(Res);
5732 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
5733 llvm_unreachable("LowerOperation not implemented for this target!");
5738 void SelectionDAGLowering::CopyValueToVirtualRegister(Value *V, unsigned Reg) {
5739 SDValue Op = getValue(V);
5740 assert((Op.getOpcode() != ISD::CopyFromReg ||
5741 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
5742 "Copy from a reg to the same reg!");
5743 assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
5745 RegsForValue RFV(V->getContext(), TLI, Reg, V->getType());
5746 SDValue Chain = DAG.getEntryNode();
5747 RFV.getCopyToRegs(Op, DAG, getCurDebugLoc(), Chain, 0);
5748 PendingExports.push_back(Chain);
5751 #include "llvm/CodeGen/SelectionDAGISel.h"
5753 void SelectionDAGISel::LowerArguments(BasicBlock *LLVMBB) {
5754 // If this is the entry block, emit arguments.
5755 Function &F = *LLVMBB->getParent();
5756 SelectionDAG &DAG = SDL->DAG;
5757 SDValue OldRoot = DAG.getRoot();
5758 DebugLoc dl = SDL->getCurDebugLoc();
5759 const TargetData *TD = TLI.getTargetData();
5761 // Set up the incoming argument description vector.
5762 SmallVector<ISD::InputArg, 16> Ins;
5764 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
5765 I != E; ++I, ++Idx) {
5766 SmallVector<EVT, 4> ValueVTs;
5767 ComputeValueVTs(TLI, I->getType(), ValueVTs);
5768 bool isArgValueUsed = !I->use_empty();
5769 for (unsigned Value = 0, NumValues = ValueVTs.size();
5770 Value != NumValues; ++Value) {
5771 EVT VT = ValueVTs[Value];
5772 const Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
5773 ISD::ArgFlagsTy Flags;
5774 unsigned OriginalAlignment =
5775 TD->getABITypeAlignment(ArgTy);
5777 if (F.paramHasAttr(Idx, Attribute::ZExt))
5779 if (F.paramHasAttr(Idx, Attribute::SExt))
5781 if (F.paramHasAttr(Idx, Attribute::InReg))
5783 if (F.paramHasAttr(Idx, Attribute::StructRet))
5785 if (F.paramHasAttr(Idx, Attribute::ByVal)) {
5787 const PointerType *Ty = cast<PointerType>(I->getType());
5788 const Type *ElementTy = Ty->getElementType();
5789 unsigned FrameAlign = TLI.getByValTypeAlignment(ElementTy);
5790 unsigned FrameSize = TD->getTypeAllocSize(ElementTy);
5791 // For ByVal, alignment should be passed from FE. BE will guess if
5792 // this info is not there but there are cases it cannot get right.
5793 if (F.getParamAlignment(Idx))
5794 FrameAlign = F.getParamAlignment(Idx);
5795 Flags.setByValAlign(FrameAlign);
5796 Flags.setByValSize(FrameSize);
5798 if (F.paramHasAttr(Idx, Attribute::Nest))
5800 Flags.setOrigAlign(OriginalAlignment);
5802 EVT RegisterVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
5803 unsigned NumRegs = TLI.getNumRegisters(*CurDAG->getContext(), VT);
5804 for (unsigned i = 0; i != NumRegs; ++i) {
5805 ISD::InputArg MyFlags(Flags, RegisterVT, isArgValueUsed);
5806 if (NumRegs > 1 && i == 0)
5807 MyFlags.Flags.setSplit();
5808 // if it isn't first piece, alignment must be 1
5810 MyFlags.Flags.setOrigAlign(1);
5811 Ins.push_back(MyFlags);
5816 // Call the target to set up the argument values.
5817 SmallVector<SDValue, 8> InVals;
5818 SDValue NewRoot = TLI.LowerFormalArguments(DAG.getRoot(), F.getCallingConv(),
5822 // Verify that the target's LowerFormalArguments behaved as expected.
5823 assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
5824 "LowerFormalArguments didn't return a valid chain!");
5825 assert(InVals.size() == Ins.size() &&
5826 "LowerFormalArguments didn't emit the correct number of values!");
5827 DEBUG(for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
5828 assert(InVals[i].getNode() &&
5829 "LowerFormalArguments emitted a null value!");
5830 assert(Ins[i].VT == InVals[i].getValueType() &&
5831 "LowerFormalArguments emitted a value with the wrong type!");
5834 // Update the DAG with the new chain value resulting from argument lowering.
5835 DAG.setRoot(NewRoot);
5837 // Set up the argument values.
5840 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
5842 SmallVector<SDValue, 4> ArgValues;
5843 SmallVector<EVT, 4> ValueVTs;
5844 ComputeValueVTs(TLI, I->getType(), ValueVTs);
5845 unsigned NumValues = ValueVTs.size();
5846 for (unsigned Value = 0; Value != NumValues; ++Value) {
5847 EVT VT = ValueVTs[Value];
5848 EVT PartVT = TLI.getRegisterType(*CurDAG->getContext(), VT);
5849 unsigned NumParts = TLI.getNumRegisters(*CurDAG->getContext(), VT);
5851 if (!I->use_empty()) {
5852 ISD::NodeType AssertOp = ISD::DELETED_NODE;
5853 if (F.paramHasAttr(Idx, Attribute::SExt))
5854 AssertOp = ISD::AssertSext;
5855 else if (F.paramHasAttr(Idx, Attribute::ZExt))
5856 AssertOp = ISD::AssertZext;
5858 ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
5859 PartVT, VT, AssertOp));
5863 if (!I->use_empty()) {
5864 SDL->setValue(I, DAG.getMergeValues(&ArgValues[0], NumValues,
5865 SDL->getCurDebugLoc()));
5866 // If this argument is live outside of the entry block, insert a copy from
5867 // whereever we got it to the vreg that other BB's will reference it as.
5868 SDL->CopyToExportRegsIfNeeded(I);
5871 assert(i == InVals.size() && "Argument register count mismatch!");
5873 // Finally, if the target has anything special to do, allow it to do so.
5874 // FIXME: this should insert code into the DAG!
5875 EmitFunctionEntryCode(F, SDL->DAG.getMachineFunction());
5878 /// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
5879 /// ensure constants are generated when needed. Remember the virtual registers
5880 /// that need to be added to the Machine PHI nodes as input. We cannot just
5881 /// directly add them, because expansion might result in multiple MBB's for one
5882 /// BB. As such, the start of the BB might correspond to a different MBB than
5886 SelectionDAGISel::HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB) {
5887 TerminatorInst *TI = LLVMBB->getTerminator();
5889 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
5891 // Check successor nodes' PHI nodes that expect a constant to be available
5893 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
5894 BasicBlock *SuccBB = TI->getSuccessor(succ);
5895 if (!isa<PHINode>(SuccBB->begin())) continue;
5896 MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
5898 // If this terminator has multiple identical successors (common for
5899 // switches), only handle each succ once.
5900 if (!SuccsHandled.insert(SuccMBB)) continue;
5902 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
5905 // At this point we know that there is a 1-1 correspondence between LLVM PHI
5906 // nodes and Machine PHI nodes, but the incoming operands have not been
5908 for (BasicBlock::iterator I = SuccBB->begin();
5909 (PN = dyn_cast<PHINode>(I)); ++I) {
5910 // Ignore dead phi's.
5911 if (PN->use_empty()) continue;
5914 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
5916 if (Constant *C = dyn_cast<Constant>(PHIOp)) {
5917 unsigned &RegOut = SDL->ConstantsOut[C];
5919 RegOut = FuncInfo->CreateRegForValue(C);
5920 SDL->CopyValueToVirtualRegister(C, RegOut);
5924 Reg = FuncInfo->ValueMap[PHIOp];
5926 assert(isa<AllocaInst>(PHIOp) &&
5927 FuncInfo->StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
5928 "Didn't codegen value into a register!??");
5929 Reg = FuncInfo->CreateRegForValue(PHIOp);
5930 SDL->CopyValueToVirtualRegister(PHIOp, Reg);
5934 // Remember that this register needs to added to the machine PHI node as
5935 // the input for this MBB.
5936 SmallVector<EVT, 4> ValueVTs;
5937 ComputeValueVTs(TLI, PN->getType(), ValueVTs);
5938 for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
5939 EVT VT = ValueVTs[vti];
5940 unsigned NumRegisters = TLI.getNumRegisters(*CurDAG->getContext(), VT);
5941 for (unsigned i = 0, e = NumRegisters; i != e; ++i)
5942 SDL->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
5943 Reg += NumRegisters;
5947 SDL->ConstantsOut.clear();
5950 /// This is the Fast-ISel version of HandlePHINodesInSuccessorBlocks. It only
5951 /// supports legal types, and it emits MachineInstrs directly instead of
5952 /// creating SelectionDAG nodes.
5955 SelectionDAGISel::HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB,
5957 TerminatorInst *TI = LLVMBB->getTerminator();
5959 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
5960 unsigned OrigNumPHINodesToUpdate = SDL->PHINodesToUpdate.size();
5962 // Check successor nodes' PHI nodes that expect a constant to be available
5964 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
5965 BasicBlock *SuccBB = TI->getSuccessor(succ);
5966 if (!isa<PHINode>(SuccBB->begin())) continue;
5967 MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
5969 // If this terminator has multiple identical successors (common for
5970 // switches), only handle each succ once.
5971 if (!SuccsHandled.insert(SuccMBB)) continue;
5973 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
5976 // At this point we know that there is a 1-1 correspondence between LLVM PHI
5977 // nodes and Machine PHI nodes, but the incoming operands have not been
5979 for (BasicBlock::iterator I = SuccBB->begin();
5980 (PN = dyn_cast<PHINode>(I)); ++I) {
5981 // Ignore dead phi's.
5982 if (PN->use_empty()) continue;
5984 // Only handle legal types. Two interesting things to note here. First,
5985 // by bailing out early, we may leave behind some dead instructions,
5986 // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
5987 // own moves. Second, this check is necessary becuase FastISel doesn't
5988 // use CreateRegForValue to create registers, so it always creates
5989 // exactly one register for each non-void instruction.
5990 EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
5991 if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
5994 VT = TLI.getTypeToTransformTo(*CurDAG->getContext(), VT);
5996 SDL->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
6001 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
6003 unsigned Reg = F->getRegForValue(PHIOp);
6005 SDL->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
6008 SDL->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));