//===-- SelectionDAGBuild.cpp - Selection-DAG building --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "isel"
#include "SelectionDAGBuild.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Constants.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/DwarfWriter.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<unsigned, true>
LimitFPPrecision("limit-float-precision",
                 cl::desc("Generate low-precision inline sequences "
                          "for some float libcalls"),
                 cl::location(LimitFloatPrecision),
/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
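///
/// For example, given the aggregate type {i32, {float, double}}, the leaf
/// members are numbered 0, 1 and 2, so the index path {1, 1} (naming the
/// double) yields a linearized index of 2.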
static unsigned ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
                                   const unsigned *Indices,
                                   const unsigned *IndicesEnd,
                                   unsigned CurIndex = 0) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)

  // Given a struct type, recursively traverse the elements.
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EE = STy->element_end();
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(TLI, *EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(TLI, *EI, 0, 0, CurIndex);

  // Given an array type, recursively traverse the elements.
  else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    const Type *EltTy = ATy->getElementType();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
      if (Indices && *Indices == i)
        return ComputeLinearIndex(TLI, EltTy, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(TLI, EltTy, 0, 0, CurIndex);

  // We haven't found the type we're looking for, so keep searching.
/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// MVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
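///
/// For example, the type {i32, [2 x float]} yields the MVT sequence
/// {i32, f32, f32}; with a typical layout, the corresponding offsets would
/// be 0, 4 and 8.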
static void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
                            SmallVectorImpl<MVT> &ValueVTs,
                            SmallVectorImpl<uint64_t> *Offsets = 0,
                            uint64_t StartingOffset = 0) {
  // Given a struct type, recursively traverse the elements.
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EE = STy->element_end();
      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));

  // Given an array type, recursively traverse the elements.
  if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    const Type *EltTy = ATy->getElementType();
    uint64_t EltSize = TLI.getTargetData()->getTypePaddedSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);

  // Interpret void as zero return values.
  if (Ty == Type::VoidTy)

  // Base case: we can get an MVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(Ty));
    Offsets->push_back(StartingOffset);
/// RegsForValue - This struct represents the registers (physical or virtual)
/// that a particular set of values is assigned, and the type information about
/// the value. The most common situation is to represent one value at a time,
/// but struct or array values are handled element-wise as multiple values.
/// The splitting of aggregates is performed recursively, so that we never
/// have aggregate-typed registers. The values at this point do not necessarily
/// have legal types, so each value may require one or more registers of
/// some legal type.
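///
/// For example, an i64 value on a target with only 32-bit registers is
/// described by a single ValueVT of i64, a single RegVT of i32, and two
/// entries in Regs.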
struct VISIBILITY_HIDDEN RegsForValue {
  /// TLI - The TargetLowering object.
  const TargetLowering *TLI;

  /// ValueVTs - The value types of the values, which may not be legal, and
  /// may need to be promoted or synthesized from one or more registers.
  SmallVector<MVT, 4> ValueVTs;

  /// RegVTs - The value types of the registers. This is the same size as
  /// ValueVTs and it records, for each value, what the type of the assigned
  /// register or registers are. (Individual values are never synthesized
  /// from more than one type of register.)
  ///
  /// With virtual registers, the contents of RegVTs is redundant with TLI's
  /// getRegisterType member function; with physical registers, however,
  /// it is necessary to have a separate record of the types.
  SmallVector<MVT, 4> RegVTs;

  /// Regs - This list holds the registers assigned to the values.
  /// Each legal or promoted value requires one register, and each
  /// expanded value requires multiple registers.
  SmallVector<unsigned, 4> Regs;

  RegsForValue() : TLI(0) {}

  RegsForValue(const TargetLowering &tli,
               const SmallVector<unsigned, 4> &regs,
               MVT regvt, MVT valuevt)
    : TLI(&tli), ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
  RegsForValue(const TargetLowering &tli,
               const SmallVector<unsigned, 4> &regs,
               const SmallVector<MVT, 4> &regvts,
               const SmallVector<MVT, 4> &valuevts)
    : TLI(&tli), ValueVTs(valuevts), RegVTs(regvts), Regs(regs) {}
  RegsForValue(const TargetLowering &tli,
               unsigned Reg, const Type *Ty) : TLI(&tli) {
    ComputeValueVTs(tli, Ty, ValueVTs);

    for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
      MVT ValueVT = ValueVTs[Value];
      unsigned NumRegs = TLI->getNumRegisters(ValueVT);
      MVT RegisterVT = TLI->getRegisterType(ValueVT);
      for (unsigned i = 0; i != NumRegs; ++i)
        Regs.push_back(Reg + i);
      RegVTs.push_back(RegisterVT);
  /// append - Add the specified values to this one.
  void append(const RegsForValue &RHS) {
    ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
    RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
    Regs.append(RHS.Regs.begin(), RHS.Regs.end());

  /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
  /// this value and returns the result as a ValueVTs value. This uses
  /// Chain/Flag as the input and updates them for the output Chain/Flag.
  /// If the Flag pointer is NULL, no flag is used.
  SDValue getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
                          SDValue &Chain, SDValue *Flag) const;

  /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
  /// specified value into the registers specified by this object. This uses
  /// Chain/Flag as the input and updates them for the output Chain/Flag.
  /// If the Flag pointer is NULL, no flag is used.
  void getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
                     SDValue &Chain, SDValue *Flag) const;

  /// AddInlineAsmOperands - Add this value to the specified inlineasm node
  /// operand list. This adds the code marker, matching input operand index
  /// (if applicable), and includes the number of values added into it.
  void AddInlineAsmOperands(unsigned Code,
                            bool HasMatching, unsigned MatchingIdx,
                            SelectionDAG &DAG, std::vector<SDValue> &Ops) const;
/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it, or used by a
/// switch or atomic instruction, which may expand to multiple basic blocks.
static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
  if (isa<PHINode>(I)) return true;
  BasicBlock *BB = I->getParent();
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))

/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true. This includes arguments used by switches, since
/// the switch may expand into multiple basic blocks.
static bool isOnlyUsedInEntryBlock(Argument *A, bool EnableFastISel) {
  // With FastISel active, we may be splitting blocks, so force creation
  // of virtual registers for all non-dead arguments.
  // Don't force virtual registers for byval arguments though, because
  // fast-isel can't handle those in all cases.
  if (EnableFastISel && !A->hasByValAttr())
    return A->use_empty();

  BasicBlock *Entry = A->getParent()->begin();
  for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
      return false; // Use not in entry block.
FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli)

void FunctionLoweringInfo::set(Function &fn, MachineFunction &mf,
                               bool EnableFastISel) {
  RegInfo = &MF->getRegInfo();

  // Create a vreg for each argument register that is not dead and is used
  // outside of the entry block for the function.
  for (Function::arg_iterator AI = Fn->arg_begin(), E = Fn->arg_end();
    if (!isOnlyUsedInEntryBlock(AI, EnableFastISel))
      InitializeRegForValue(AI);

  // Initialize the mapping of values to registers. This is only set up for
  // instruction values that are used outside of the block that defines
  Function::iterator BB = Fn->begin(), EB = Fn->end();
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        const Type *Ty = AI->getAllocatedType();
        uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
          std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
        TySize *= CUI->getZExtValue(); // Get total allocated size.
        if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
        StaticAllocaMap[AI] =
          MF->getFrameInfo()->CreateStackObject(TySize, Align);

  for (; BB != EB; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
      if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
        if (!isa<AllocaInst>(I) ||
            !StaticAllocaMap.count(cast<AllocaInst>(I)))
          InitializeRegForValue(I);
  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (BB = Fn->begin(), EB = Fn->end(); BB != EB; ++BB) {
    MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(BB);

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    for (BasicBlock::iterator
           I = BB->begin(), E = BB->end(); I != E; ++I) {
      if (CallInst *CI = dyn_cast<CallInst>(I)) {
        if (Function *F = CI->getCalledFunction()) {
          switch (F->getIntrinsicID()) {
          case Intrinsic::dbg_stoppoint: {
            DbgStopPointInst *SPI = cast<DbgStopPointInst>(I);
            if (DIDescriptor::ValidDebugInfo(SPI->getContext(),
                                             CodeGenOpt::Default)) {
              DICompileUnit CU(cast<GlobalVariable>(SPI->getContext()));
              unsigned idx = MF->getOrCreateDebugLocID(CU.getGV(),
              DL = DebugLoc::get(idx);
          case Intrinsic::dbg_func_start: {
            DbgFuncStartInst *FSI = cast<DbgFuncStartInst>(I);
            Value *SP = FSI->getSubprogram();
            if (DIDescriptor::ValidDebugInfo(SP, CodeGenOpt::Default)) {
              DISubprogram Subprogram(cast<GlobalVariable>(SP));
              DICompileUnit CU(Subprogram.getCompileUnit());
              unsigned Line = Subprogram.getLineNumber();
              DL = DebugLoc::get(MF->getOrCreateDebugLocID(CU.getGV(),

      PN = dyn_cast<PHINode>(I);
      if (!PN || PN->use_empty()) continue;

      unsigned PHIReg = ValueMap[PN];
      assert(PHIReg && "PHI node does not have an assigned virtual register!");

      SmallVector<MVT, 4> ValueVTs;
      ComputeValueVTs(TLI, PN->getType(), ValueVTs);
      for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
        MVT VT = ValueVTs[vti];
        unsigned NumRegisters = TLI.getNumRegisters(VT);
        const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
        for (unsigned i = 0; i != NumRegisters; ++i)
          BuildMI(MBB, DL, TII->get(TargetInstrInfo::PHI), PHIReg + i);
        PHIReg += NumRegisters;
unsigned FunctionLoweringInfo::MakeReg(MVT VT) {
  return RegInfo->createVirtualRegister(TLI.getRegClassFor(VT));

/// CreateRegForValue - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types. Assign these registers
/// consecutive vreg numbers and return the first assigned number.
///
/// In the case that the given value has struct or array type, this function
/// will assign registers for each member or element.
unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
  SmallVector<MVT, 4> ValueVTs;
  ComputeValueVTs(TLI, V->getType(), ValueVTs);

  unsigned FirstReg = 0;
  for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
    MVT ValueVT = ValueVTs[Value];
    MVT RegisterVT = TLI.getRegisterType(ValueVT);

    unsigned NumRegs = TLI.getNumRegisters(ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      unsigned R = MakeReg(RegisterVT);
      if (!FirstReg) FirstReg = R;
/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent. If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
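///
/// For example, two i32 parts holding the halves of an i64 are combined with
/// a BUILD_PAIR node into a single i64 value.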
static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
                                const SDValue *Parts,
                                unsigned NumParts, MVT PartVT, MVT ValueVT,
                                ISD::NodeType AssertOp = ISD::DELETED_NODE) {
  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Assemble the value from multiple parts.
  if (!ValueVT.isVector()) {
    unsigned PartBits = PartVT.getSizeInBits();
    unsigned ValueBits = ValueVT.getSizeInBits();

    // Assemble the power of 2 part.
    unsigned RoundParts = NumParts & (NumParts - 1) ?
      1 << Log2_32(NumParts) : NumParts;
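    // E.g. with NumParts == 3, RoundParts is 2; the one remaining odd part is
    // assembled separately below and then merged in.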
    unsigned RoundBits = PartBits * RoundParts;
    MVT RoundVT = RoundBits == ValueBits ?
      ValueVT : MVT::getIntegerVT(RoundBits);

    MVT HalfVT = ValueVT.isInteger() ?
      MVT::getIntegerVT(RoundBits/2) :
      MVT::getFloatingPointVT(RoundBits/2);

    if (RoundParts > 2) {
      Lo = getCopyFromParts(DAG, dl, Parts, RoundParts/2, PartVT, HalfVT);
      Hi = getCopyFromParts(DAG, dl, Parts+RoundParts/2, RoundParts/2,
      Lo = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[0]);
      Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[1]);

    if (TLI.isBigEndian())

    Val = DAG.getNode(ISD::BUILD_PAIR, dl, RoundVT, Lo, Hi);

    if (RoundParts < NumParts) {
      // Assemble the trailing non-power-of-2 part.
      unsigned OddParts = NumParts - RoundParts;
      MVT OddVT = MVT::getIntegerVT(OddParts * PartBits);
      Hi = getCopyFromParts(DAG, dl,
                            Parts+RoundParts, OddParts, PartVT, OddVT);

      // Combine the round and odd parts.
      if (TLI.isBigEndian())

      MVT TotalVT = MVT::getIntegerVT(NumParts * PartBits);
      Hi = DAG.getNode(ISD::ANY_EXTEND, dl, TotalVT, Hi);
      Hi = DAG.getNode(ISD::SHL, dl, TotalVT, Hi,
                       DAG.getConstant(Lo.getValueType().getSizeInBits(),
                                       TLI.getPointerTy()));
      Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, TotalVT, Lo);
      Val = DAG.getNode(ISD::OR, dl, TotalVT, Lo, Hi);
    // Handle a multi-element vector.
    MVT IntermediateVT, RegisterVT;
    unsigned NumIntermediates;
    TLI.getVectorTypeBreakdown(ValueVT, IntermediateVT, NumIntermediates,
    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT == Parts[0].getValueType() &&
           "Part type doesn't match part!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, dl, &Parts[i], 1,
                                  PartVT, IntermediateVT);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate operands
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, dl, &Parts[i * Factor], Factor,
                                  PartVT, IntermediateVT);

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the intermediate
    Val = DAG.getNode(IntermediateVT.isVector() ?
                      ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, dl,
                      ValueVT, &Ops[0], NumIntermediates);
  // There is now one part, held in Val. Correct it to match ValueVT.
  PartVT = Val.getValueType();

  if (PartVT == ValueVT)

  if (PartVT.isVector()) {
    assert(ValueVT.isVector() && "Unknown vector conversion!");
    return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);

  if (ValueVT.isVector()) {
    assert(ValueVT.getVectorElementType() == PartVT &&
           ValueVT.getVectorNumElements() == 1 &&
           "Only trivial scalar-to-vector conversions should get here!");
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ValueVT, Val);

  if (PartVT.isInteger() &&
      ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp != ISD::DELETED_NODE)
        Val = DAG.getNode(AssertOp, dl, PartVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
    return DAG.getNode(ISD::ANY_EXTEND, dl, ValueVT, Val);

  if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    if (ValueVT.bitsLT(Val.getValueType()))
      // FP_ROUND's are always exact here.
      return DAG.getNode(ISD::FP_ROUND, dl, ValueVT, Val,
                         DAG.getIntPtrConstant(1));
    return DAG.getNode(ISD::FP_EXTEND, dl, ValueVT, Val);

  if (PartVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);

  assert(0 && "Unknown mismatch!");
/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts. If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
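///
/// For example, an i64 value copied into two i32 parts on a 32-bit target is
/// split into its low and high halves with EXTRACT_ELEMENT nodes.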
static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val,
                           SDValue *Parts, unsigned NumParts, MVT PartVT,
                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  MVT PtrVT = TLI.getPointerTy();
  MVT ValueVT = Val.getValueType();
  unsigned PartBits = PartVT.getSizeInBits();
  unsigned OrigNumParts = NumParts;
  assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");

  if (!ValueVT.isVector()) {
    if (PartVT == ValueVT) {
      assert(NumParts == 1 && "No-op copy with multiple parts!");

    if (NumParts * PartBits > ValueVT.getSizeInBits()) {
      // If the parts cover more bits than the value has, promote the value.
      if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
        assert(NumParts == 1 && "Do not know what to promote to!");
        Val = DAG.getNode(ISD::FP_EXTEND, dl, PartVT, Val);
      } else if (PartVT.isInteger() && ValueVT.isInteger()) {
        ValueVT = MVT::getIntegerVT(NumParts * PartBits);
        Val = DAG.getNode(ExtendKind, dl, ValueVT, Val);
        assert(0 && "Unknown mismatch!");
    } else if (PartBits == ValueVT.getSizeInBits()) {
      // Different types of the same size.
      assert(NumParts == 1 && PartVT != ValueVT);
      Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
    } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
      // If the parts cover fewer bits than the value has, truncate the value.
      if (PartVT.isInteger() && ValueVT.isInteger()) {
        ValueVT = MVT::getIntegerVT(NumParts * PartBits);
        Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
        assert(0 && "Unknown mismatch!");

    // The value may have changed - recompute ValueVT.
    ValueVT = Val.getValueType();
    assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
           "Failed to tile the value with PartVT!");

      assert(PartVT == ValueVT && "Type conversion failed!");

    // Expand the value into multiple parts.
    if (NumParts & (NumParts - 1)) {
      // The number of parts is not a power of 2. Split off and copy the tail.
      assert(PartVT.isInteger() && ValueVT.isInteger() &&
             "Do not know what to expand to!");
      unsigned RoundParts = 1 << Log2_32(NumParts);
      unsigned RoundBits = RoundParts * PartBits;
      unsigned OddParts = NumParts - RoundParts;
      SDValue OddVal = DAG.getNode(ISD::SRL, dl, ValueVT, Val,
                                   DAG.getConstant(RoundBits,
                                                   TLI.getPointerTy()));
      getCopyToParts(DAG, dl, OddVal, Parts + RoundParts, OddParts, PartVT);
      if (TLI.isBigEndian())
        // The odd parts were reversed by getCopyToParts - unreverse them.
        std::reverse(Parts + RoundParts, Parts + NumParts);
      NumParts = RoundParts;
      ValueVT = MVT::getIntegerVT(NumParts * PartBits);
      Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);

    // The number of parts is a power of 2. Repeatedly bisect the value using
    Parts[0] = DAG.getNode(ISD::BIT_CONVERT, dl,
                           MVT::getIntegerVT(ValueVT.getSizeInBits()),
    for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
      for (unsigned i = 0; i < NumParts; i += StepSize) {
        unsigned ThisBits = StepSize * PartBits / 2;
        MVT ThisVT = MVT::getIntegerVT(ThisBits);
        SDValue &Part0 = Parts[i];
        SDValue &Part1 = Parts[i+StepSize/2];

        Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                            DAG.getConstant(1, PtrVT));
        Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                            DAG.getConstant(0, PtrVT));

        if (ThisBits == PartBits && ThisVT != PartVT) {
          Part0 = DAG.getNode(ISD::BIT_CONVERT, dl,
          Part1 = DAG.getNode(ISD::BIT_CONVERT, dl,

  if (TLI.isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
  if (PartVT != ValueVT) {
    if (PartVT.isVector()) {
      Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
      assert(ValueVT.getVectorElementType() == PartVT &&
             ValueVT.getVectorNumElements() == 1 &&
             "Only trivial vector-to-scalar conversions should get here!");
      Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                        DAG.getConstant(0, PtrVT));
  // Handle a multi-element vector.
  MVT IntermediateVT, RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs = TLI
    .getVectorTypeBreakdown(ValueVT, IntermediateVT, NumIntermediates,
  unsigned NumElements = ValueVT.getVectorNumElements();

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i)
    if (IntermediateVT.isVector())
      Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
                           DAG.getConstant(i * (NumElements / NumIntermediates),
      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                           DAG.getConstant(i, PtrVT));

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, dl, Ops[i], &Parts[i], 1, PartVT);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each value into
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, dl, Ops[i], &Parts[i * Factor], Factor, PartVT);
void SelectionDAGLowering::init(GCFunctionInfo *gfi, AliasAnalysis &aa) {
  TD = DAG.getTarget().getTargetData();
/// clear - Clear out the current SelectionDAG and the associated
/// state and prepare this SelectionDAGLowering object to be used
/// for a new block. This doesn't clear out information about
/// additional blocks that are needed to complete switch lowering
/// or PHI node updating; that information is cleared out as it is
/// consumed.
void SelectionDAGLowering::clear() {
  PendingLoads.clear();
  PendingExports.clear();
  CurDebugLoc = DebugLoc::getUnknownLoc();
/// getRoot - Return the current virtual root of the Selection DAG,
/// flushing any PendingLoad items. This must be done before emitting
/// a store or any other node that may need to be ordered after any
/// prior load instructions.
SDValue SelectionDAGLowering::getRoot() {
  if (PendingLoads.empty())
    return DAG.getRoot();

  if (PendingLoads.size() == 1) {
    SDValue Root = PendingLoads[0];
    PendingLoads.clear();

  // Otherwise, we have to make a token factor node.
  SDValue Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
                             &PendingLoads[0], PendingLoads.size());
  PendingLoads.clear();
/// getControlRoot - Similar to getRoot, but instead of flushing all the
/// PendingLoad items, flush all the PendingExports items. It is necessary
/// to do this before emitting a terminator instruction.
SDValue SelectionDAGLowering::getControlRoot() {
  SDValue Root = DAG.getRoot();

  if (PendingExports.empty())

  // Turn all of the CopyToReg chains into one factored node.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = PendingExports.size();
    for (; i != e; ++i) {
      assert(PendingExports[i].getNode()->getNumOperands() > 1);
      if (PendingExports[i].getNode()->getOperand(0) == Root)
        break; // Don't add the root if we already indirectly depend on it.

    PendingExports.push_back(Root);

  Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
                     PendingExports.size());
  PendingExports.clear();
void SelectionDAGLowering::visit(Instruction &I) {
  visit(I.getOpcode(), I);

void SelectionDAGLowering::visit(unsigned Opcode, User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  default: assert(0 && "Unknown instruction type encountered!");
  // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
  case Instruction::OPCODE: return visit##OPCODE((CLASS&)I);
#include "llvm/Instruction.def"

void SelectionDAGLowering::visitAdd(User &I) {
  if (I.getType()->isFPOrFPVector())
    visitBinary(I, ISD::FADD);
  visitBinary(I, ISD::ADD);

void SelectionDAGLowering::visitMul(User &I) {
  if (I.getType()->isFPOrFPVector())
    visitBinary(I, ISD::FMUL);
  visitBinary(I, ISD::MUL);
SDValue SelectionDAGLowering::getValue(const Value *V) {
  SDValue &N = NodeMap[V];
  if (N.getNode()) return N;

  if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
    MVT VT = TLI.getValueType(V->getType(), true);

    if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
      return N = DAG.getConstant(*CI, VT);

    if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
      return N = DAG.getGlobalAddress(GV, VT);

    if (isa<ConstantPointerNull>(C))
      return N = DAG.getConstant(0, TLI.getPointerTy());

    if (ConstantFP *CFP = dyn_cast<ConstantFP>(C))
      return N = DAG.getConstantFP(*CFP, VT);

    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
      return N = DAG.getUNDEF(VT);

    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      visit(CE->getOpcode(), *CE);
      SDValue N1 = NodeMap[V];
      assert(N1.getNode() && "visit didn't populate the ValueMap!");

    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
      SmallVector<SDValue, 4> Constants;
      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
        SDNode *Val = getValue(*OI).getNode();
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Constants.push_back(SDValue(Val, i));
      return DAG.getMergeValues(&Constants[0], Constants.size(),

    if (isa<StructType>(C->getType()) || isa<ArrayType>(C->getType())) {
      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
             "Unknown struct or array constant!");

      SmallVector<MVT, 4> ValueVTs;
      ComputeValueVTs(TLI, C->getType(), ValueVTs);
      unsigned NumElts = ValueVTs.size();
        return SDValue(); // empty struct
      SmallVector<SDValue, 4> Constants(NumElts);
      for (unsigned i = 0; i != NumElts; ++i) {
        MVT EltVT = ValueVTs[i];
        if (isa<UndefValue>(C))
          Constants[i] = DAG.getUNDEF(EltVT);
        else if (EltVT.isFloatingPoint())
          Constants[i] = DAG.getConstantFP(0, EltVT);
          Constants[i] = DAG.getConstant(0, EltVT);
      return DAG.getMergeValues(&Constants[0], NumElts, getCurDebugLoc());

    const VectorType *VecTy = cast<VectorType>(V->getType());
    unsigned NumElements = VecTy->getNumElements();

    // Now that we know the number and type of the elements, get that number of
    // elements into the Ops array based on what kind of constant it is.
    SmallVector<SDValue, 16> Ops;
    if (ConstantVector *CP = dyn_cast<ConstantVector>(C)) {
      for (unsigned i = 0; i != NumElements; ++i)
        Ops.push_back(getValue(CP->getOperand(i)));
      assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
      MVT EltVT = TLI.getValueType(VecTy->getElementType());

      if (EltVT.isFloatingPoint())
        Op = DAG.getConstantFP(0, EltVT);
        Op = DAG.getConstant(0, EltVT);
      Ops.assign(NumElements, Op);

    // Create a BUILD_VECTOR node.
    return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
                                    VT, &Ops[0], Ops.size());

  // If this is a static alloca, generate it as the frameindex instead of
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end())
      return DAG.getFrameIndex(SI->second, TLI.getPointerTy());

  unsigned InReg = FuncInfo.ValueMap[V];
  assert(InReg && "Value not in map!");

  RegsForValue RFV(TLI, InReg, V->getType());
  SDValue Chain = DAG.getEntryNode();
  return RFV.getCopyFromRegs(DAG, getCurDebugLoc(), Chain, NULL);
void SelectionDAGLowering::visitRet(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    DAG.setRoot(DAG.getNode(ISD::RET, getCurDebugLoc(),
                            MVT::Other, getControlRoot()));

  SmallVector<SDValue, 8> NewValues;
  NewValues.push_back(getControlRoot());
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    SmallVector<MVT, 4> ValueVTs;
    ComputeValueVTs(TLI, I.getOperand(i)->getType(), ValueVTs);
    unsigned NumValues = ValueVTs.size();
    if (NumValues == 0) continue;

    SDValue RetOp = getValue(I.getOperand(i));
    for (unsigned j = 0, f = NumValues; j != f; ++j) {
      MVT VT = ValueVTs[j];

      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

      const Function *F = I.getParent()->getParent();
      if (F->paramHasAttr(0, Attribute::SExt))
        ExtendKind = ISD::SIGN_EXTEND;
      else if (F->paramHasAttr(0, Attribute::ZExt))
        ExtendKind = ISD::ZERO_EXTEND;

      // FIXME: C calling convention requires the return type to be promoted to
      // at least 32-bit. But this is not necessary for non-C calling
      // conventions. The frontend should mark functions whose return values
      // require promoting with signext or zeroext attributes.
      if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
        MVT MinVT = TLI.getRegisterType(MVT::i32);
        if (VT.bitsLT(MinVT))

      unsigned NumParts = TLI.getNumRegisters(VT);
      MVT PartVT = TLI.getRegisterType(VT);
      SmallVector<SDValue, 4> Parts(NumParts);
      getCopyToParts(DAG, getCurDebugLoc(),
                     SDValue(RetOp.getNode(), RetOp.getResNo() + j),
                     &Parts[0], NumParts, PartVT, ExtendKind);

      // 'inreg' on function refers to return value
      ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
      if (F->paramHasAttr(0, Attribute::InReg))

      for (unsigned i = 0; i < NumParts; ++i) {
        NewValues.push_back(Parts[i]);
        NewValues.push_back(DAG.getArgFlags(Flags));

  DAG.setRoot(DAG.getNode(ISD::RET, getCurDebugLoc(), MVT::Other,
                          &NewValues[0], NewValues.size()));
/// CopyToExportRegsIfNeeded - If the given value has virtual registers
/// created for it, emit nodes to copy the value into the virtual
/// registers.
void SelectionDAGLowering::CopyToExportRegsIfNeeded(Value *V) {
  if (!V->use_empty()) {
    DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
    if (VMI != FuncInfo.ValueMap.end())
      CopyValueToVirtualRegister(V, VMI->second);
/// ExportFromCurrentBlock - If this condition isn't known to be exported from
/// the current basic block, add it to ValueMap now so that we'll get a
/// CopyTo/CopyFromReg pair for it.
void SelectionDAGLowering::ExportFromCurrentBlock(Value *V) {
  // No need to export constants.
  if (!isa<Instruction>(V) && !isa<Argument>(V)) return;

  // Already exported?
  if (FuncInfo.isExportedInst(V)) return;

  unsigned Reg = FuncInfo.InitializeRegForValue(V);
  CopyValueToVirtualRegister(V, Reg);

bool SelectionDAGLowering::isExportableFromCurrentBlock(Value *V,
                                                        const BasicBlock *FromBB) {
  // The operands of the setcc have to be in this block. We don't know
  // how to export them from some other block.
  if (Instruction *VI = dyn_cast<Instruction>(V)) {
    // Can export from current BB.
    if (VI->getParent() == FromBB)

    // Is already exported, noop.
    return FuncInfo.isExportedInst(V);

  // If this is an argument, we can export it if the BB is the entry block or
  // if it is already exported.
  if (isa<Argument>(V)) {
    if (FromBB == &FromBB->getParent()->getEntryBlock())

    // Otherwise, can only export this if it is already exported.
    return FuncInfo.isExportedInst(V);

  // Otherwise, constants can always be exported.

static bool InBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
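///
/// For example, FCMP_OLT maps to SETOLT, or to the plain SETLT form when
/// finite-only FP math is enabled.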
static ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred) {
  ISD::CondCode FPC, FOC;
  case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
  case FCmpInst::FCMP_OEQ:   FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
  case FCmpInst::FCMP_OGT:   FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
  case FCmpInst::FCMP_OGE:   FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
  case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
  case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
  case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
  case FCmpInst::FCMP_ORD:   FOC = FPC = ISD::SETO;   break;
  case FCmpInst::FCMP_UNO:   FOC = FPC = ISD::SETUO;  break;
  case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
  case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
  case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
  case FCmpInst::FCMP_ULT:   FOC = ISD::SETLT; FPC = ISD::SETULT; break;
  case FCmpInst::FCMP_ULE:   FOC = ISD::SETLE; FPC = ISD::SETULE; break;
  case FCmpInst::FCMP_UNE:   FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
  case FCmpInst::FCMP_TRUE:  FOC = FPC = ISD::SETTRUE; break;
    assert(0 && "Invalid FCmp predicate opcode!");
    FOC = FPC = ISD::SETFALSE;

  if (FiniteOnlyFPMath())
/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
static ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
    assert(0 && "Invalid ICmp predicate opcode!");
/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
/// This function emits a branch and is used at the leaves of an OR or an
/// AND operator tree.
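///
/// For example, for the leaf (a < b) of the tree (a < b) && (c == d), this
/// records a CaseBlock comparing a and b whose true and false edges lead to
/// TBB and FBB.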
SelectionDAGLowering::EmitBranchForMergedCondition(Value *Cond,
                                                   MachineBasicBlock *TBB,
                                                   MachineBasicBlock *FBB,
                                                   MachineBasicBlock *CurBB) {
  const BasicBlock *BB = CurBB->getBasicBlock();

  // If the leaf of the tree is a comparison, merge the condition into
  if (CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    // The operands of the cmp have to be in this block. We don't know
    // how to export them from some other block. If this is the first block
    // of the sequence, no exporting is needed.
    if (CurBB == CurMBB ||
        (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
         isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
      ISD::CondCode Condition;
      if (ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
        Condition = getICmpCondCode(IC->getPredicate());
      } else if (FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
        Condition = getFCmpCondCode(FC->getPredicate());
        Condition = ISD::SETEQ; // silence warning.
        assert(0 && "Unknown compare instruction");

      CaseBlock CB(Condition, BOp->getOperand(0),
                   BOp->getOperand(1), NULL, TBB, FBB, CurBB);
      SwitchCases.push_back(CB);

  // Create a CaseBlock record representing this branch.
  CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(),
               NULL, TBB, FBB, CurBB);
  SwitchCases.push_back(CB);
/// FindMergedConditions - If Cond is an expression like (X && Y) or (X || Y),
/// lower the and/or tree into a sequence of conditional branches, one per
/// leaf of the tree.
void SelectionDAGLowering::FindMergedConditions(Value *Cond,
                                                MachineBasicBlock *TBB,
                                                MachineBasicBlock *FBB,
                                                MachineBasicBlock *CurBB,
  // If this node is not part of the or/and tree, emit it as a branch.
  Instruction *BOp = dyn_cast<Instruction>(Cond);
  if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
      (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
      BOp->getParent() != CurBB->getBasicBlock() ||
      !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
      !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
    EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB);

  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI = CurBB;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:

    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, Opc);

    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:

    // This requires creation of TmpBB after CurBB.

    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, Opc);

    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
/// If the set of cases should be emitted as a series of branches, return true.
/// If we should emit this as a bunch of and/or'd together conditions, return
/// false.
SelectionDAGLowering::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
  if (Cases.size() != 2) return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
void SelectionDAGLowering::visitBr(BranchInst &I) {
  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];

  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())

  if (I.isUnconditional()) {
    // Update machine-CFG edges.
    CurMBB->addSuccessor(Succ0MBB);

    // If this is not a fall-through branch, emit the branch.
    if (Succ0MBB != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
                              MVT::Other, getControlRoot(),
                              DAG.getBasicBlock(Succ0MBB)));

  // If this condition is one of the special cases we handle, do special stuff
  Value *CondVal = I.getCondition();
  MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  // For example, instead of something like:
  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
    if (BOp->hasOneUse() &&
        (BOp->getOpcode() == Instruction::And ||
         BOp->getOpcode() == Instruction::Or)) {
      FindMergedConditions(BOp, Succ0MBB, Succ1MBB, CurMBB, BOp->getOpcode());
      // If the compares in later blocks need to use values not currently
      // exported from this block, export them now. This block should always
      // be the first entry.
      assert(SwitchCases[0].ThisBB == CurMBB && "Unexpected lowering!");

      // Allow some cases to be rejected.
      if (ShouldEmitAsBranches(SwitchCases)) {
        for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
          ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
          ExportFromCurrentBlock(SwitchCases[i].CmpRHS);

        // Emit the branch for this block.
        visitSwitchCase(SwitchCases[0]);
        SwitchCases.erase(SwitchCases.begin());

      // Okay, we decided not to do this, remove any inserted MBB's and clear
      for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
        CurMBB->getParent()->erase(SwitchCases[i].ThisBB);

      SwitchCases.clear();

  // Create a CaseBlock record representing this branch.
  CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(),
               NULL, Succ0MBB, Succ1MBB, CurMBB);
  // Use visitSwitchCase to actually insert the fast branch sequence for this
  visitSwitchCase(CB);
/// visitSwitchCase - Emits the necessary code to represent a single node in
/// the binary search tree resulting from lowering a switch instruction.
void SelectionDAGLowering::visitSwitchCase(CaseBlock &CB) {
  SDValue CondLHS = getValue(CB.CmpLHS);
  DebugLoc dl = getCurDebugLoc();

  // Build the setcc now.
  if (CB.CmpMHS == NULL) {
    // Fold "(X == true)" to X and "(X == false)" to !X to
    // handle common cases produced by branch lowering.
    if (CB.CmpRHS == ConstantInt::getTrue() && CB.CC == ISD::SETEQ)
    else if (CB.CmpRHS == ConstantInt::getFalse() && CB.CC == ISD::SETEQ) {
      SDValue True = DAG.getConstant(1, CondLHS.getValueType());
      Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
      Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
    assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");

    const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    SDValue CmpOp = getValue(CB.CmpMHS);
    MVT VT = CmpOp.getValueType();

    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, VT),
      SDValue SUB = DAG.getNode(ISD::SUB, dl,
                                VT, CmpOp, DAG.getConstant(Low, VT));
      Cond = DAG.getSetCC(dl, MVT::i1, SUB,
                          DAG.getConstant(High-Low, VT), ISD::SETULE);

  // Update successor info
  CurMBB->addSuccessor(CB.TrueBB);
  CurMBB->addSuccessor(CB.FalseBB);

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())

  // If the lhs block is the next block, invert the condition so that we can
  // fall through to the lhs instead of the rhs block.
  if (CB.TrueBB == NextBlock) {
    std::swap(CB.TrueBB, CB.FalseBB);
    SDValue True = DAG.getConstant(1, Cond.getValueType());
    Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);

  SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
                               MVT::Other, getControlRoot(), Cond,
                               DAG.getBasicBlock(CB.TrueBB));

  // If the branch was constant folded, fix up the CFG.
  if (BrCond.getOpcode() == ISD::BR) {
    CurMBB->removeSuccessor(CB.FalseBB);
    DAG.setRoot(BrCond);
    // Otherwise, go ahead and insert the false branch.
    if (BrCond == getControlRoot())
      CurMBB->removeSuccessor(CB.TrueBB);

    if (CB.FalseBB == NextBlock)
      DAG.setRoot(BrCond);
      DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
                              DAG.getBasicBlock(CB.FalseBB)));
/// visitJumpTable - Emit JumpTable node in the current MBB
void SelectionDAGLowering::visitJumpTable(JumpTable &JT) {
  // Emit the code for the jump table
  assert(JT.Reg != -1U && "Should lower JT Header first!");
  MVT PTy = TLI.getPointerTy();
  SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(),
  SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
  DAG.setRoot(DAG.getNode(ISD::BR_JT, getCurDebugLoc(),
                          MVT::Other, Index.getValue(1),
/// visitJumpTableHeader - This function emits the code needed to produce an
/// index into the jump table from the value being switched on.
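///
/// For example, with cases spanning 10..30, the header computes (X - 10),
/// copies it into a virtual register for the jump table, and branches to the
/// default block when the result is greater than 20.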
void SelectionDAGLowering::visitJumpTableHeader(JumpTable &JT,
                                                JumpTableHeader &JTH) {
  // Subtract the lowest switch case value from the value being switched on and
  // conditional branch to default mbb if the result is greater than the
  // difference between smallest and largest cases.
  SDValue SwitchOp = getValue(JTH.SValue);
  MVT VT = SwitchOp.getValueType();
  SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
                            DAG.getConstant(JTH.First, VT));
  // The SDNode we just created, which holds the value being switched on minus
  // the smallest case value, needs to be copied to a virtual register so it
  // can be used as an index into the jump table in a subsequent basic block.
  // This value may be smaller or larger than the target's pointer type, and
  // may therefore require extension or truncation.
  if (VT.bitsGT(TLI.getPointerTy()))
    SwitchOp = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
                           TLI.getPointerTy(), SUB);
    SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
                           TLI.getPointerTy(), SUB);

  unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
                                    JumpTableReg, SwitchOp);
  JT.Reg = JumpTableReg;

  // Emit the range check for the jump table, and branch to the default block
  // for the switch statement if the value being switched on exceeds the largest
  // case in the switch.
  SDValue CMP = DAG.getSetCC(getCurDebugLoc(),
                             TLI.getSetCCResultType(SUB.getValueType()), SUB,
                             DAG.getConstant(JTH.Last-JTH.First, VT),

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())

  SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
                               MVT::Other, CopyTo, CMP,
                               DAG.getBasicBlock(JT.Default));

  if (JT.MBB == NextBlock)
    DAG.setRoot(BrCond);
    DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrCond,
                            DAG.getBasicBlock(JT.MBB)));
/// visitBitTestHeader - This function emits the code needed to produce a value
/// suitable for "bit tests".
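///
/// For example, for a switch over the values {0, 3, 5}, the header subtracts
/// the minimum case value, rejects anything outside the case range, and passes
/// the result on so each case block can test ((1 << X) & Mask) against zero.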
void SelectionDAGLowering::visitBitTestHeader(BitTestBlock &B) {
  // Subtract the minimum value
  SDValue SwitchOp = getValue(B.SValue);
  MVT VT = SwitchOp.getValueType();
  SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
                            DAG.getConstant(B.First, VT));

  SDValue RangeCmp = DAG.getSetCC(getCurDebugLoc(),
                                  TLI.getSetCCResultType(SUB.getValueType()),
                                  SUB, DAG.getConstant(B.Range, VT),

  if (VT.bitsGT(TLI.getPointerTy()))
    ShiftOp = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
                          TLI.getPointerTy(), SUB);
    ShiftOp = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
                          TLI.getPointerTy(), SUB);

  B.Reg = FuncInfo.MakeReg(TLI.getPointerTy());
  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())

  MachineBasicBlock* MBB = B.Cases[0].ThisBB;

  CurMBB->addSuccessor(B.Default);
  CurMBB->addSuccessor(MBB);

  SDValue BrRange = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
                                MVT::Other, CopyTo, RangeCmp,
                                DAG.getBasicBlock(B.Default));

  if (MBB == NextBlock)
    DAG.setRoot(BrRange);
    DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, CopyTo,
                            DAG.getBasicBlock(MBB)));
/// visitBitTestCase - This function produces one "bit test" and its
/// conditional branch.
void SelectionDAGLowering::visitBitTestCase(MachineBasicBlock* NextMBB,
  // Make desired shift
  SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(), Reg,
                                       TLI.getPointerTy());
  SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(),
                                  DAG.getConstant(1, TLI.getPointerTy()),

  // Emit bit tests and jumps
  SDValue AndOp = DAG.getNode(ISD::AND, getCurDebugLoc(),
                              TLI.getPointerTy(), SwitchVal,
                              DAG.getConstant(B.Mask, TLI.getPointerTy()));
  SDValue AndCmp = DAG.getSetCC(getCurDebugLoc(),
                                TLI.getSetCCResultType(AndOp.getValueType()),
                                AndOp, DAG.getConstant(0, TLI.getPointerTy()),

  CurMBB->addSuccessor(B.TargetBB);
  CurMBB->addSuccessor(NextMBB);

  SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
                              MVT::Other, getControlRoot(),
                              AndCmp, DAG.getBasicBlock(B.TargetBB));

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())

  if (NextMBB == NextBlock)

  DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrAnd,
                          DAG.getBasicBlock(NextMBB)));
void SelectionDAGLowering::visitInvoke(InvokeInst &I) {
  // Retrieve successors.
  MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
  MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];

  const Value *Callee(I.getCalledValue());
  if (isa<InlineAsm>(Callee))
  LowerCallTo(&I, getValue(Callee), false, LandingPad);

  // If the value of the invoke is used outside of its defining block, make it
  // available as a virtual register.
  CopyToExportRegsIfNeeded(&I);

  // Update successor info
  CurMBB->addSuccessor(Return);
  CurMBB->addSuccessor(LandingPad);

  // Drop into normal successor.
  DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
                          MVT::Other, getControlRoot(),
                          DAG.getBasicBlock(Return)));

void SelectionDAGLowering::visitUnwind(UnwindInst &I) {
/// handleSmallSwitchRange - Emit a series of specific tests (suitable for
/// small case ranges).
bool SelectionDAGLowering::handleSmallSwitchRange(CaseRec& CR,
                                                  CaseRecVector& WorkList,
                                                  MachineBasicBlock* Default) {
  Case& BackCase = *(CR.Range.second-1);

  // Size is the number of Cases represented by this range.
  size_t Size = CR.Range.second - CR.Range.first;

  // Get the MachineFunction which holds the current MBB. This is used when
  // inserting any additional MBBs necessary to represent the switch.
  MachineFunction *CurMF = CurMBB->getParent();

  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CR.CaseBB;

  if (++BBI != CurMBB->getParent()->end())
  // TODO: If any two of the cases have the same destination, and if one value
  // is the same as the other, but has one bit unset that the other has set,
  // use bit manipulation to do two compares at once. For example:
  // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"

  // Rearrange the case blocks so that the last one falls through if possible.
  if (NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
    // The last case block won't fall through into 'NextBlock' if we emit the
    // branches in this order. See if rearranging a case value would help.
    for (CaseItr I = CR.Range.first, E = CR.Range.second-1; I != E; ++I) {
      if (I->BB == NextBlock) {
        std::swap(*I, BackCase);

  // Create a CaseBlock record representing a conditional branch to
  // the Case's target mbb if the value being switched on SV is equal
  MachineBasicBlock *CurBlock = CR.CaseBB;
  for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
    MachineBasicBlock *FallThrough;
      FallThrough = CurMF->CreateMachineBasicBlock(CurBlock->getBasicBlock());
      CurMF->insert(BBI, FallThrough);

      // Put SV in a virtual register to make it available from the new blocks.
      ExportFromCurrentBlock(SV);
      // If the last case doesn't match, go to the default block.
      FallThrough = Default;

    Value *RHS, *LHS, *MHS;
    if (I->High == I->Low) {
      // This is just a small case range :) containing exactly 1 case
1651 LHS = SV; RHS = I->High; MHS = NULL;
1654 LHS = I->Low; MHS = SV; RHS = I->High;
1656 CaseBlock CB(CC, LHS, RHS, MHS, I->BB, FallThrough, CurBlock);
1658 // If emitting the first comparison, just call visitSwitchCase to emit the
1659 // code into the current block. Otherwise, push the CaseBlock onto the
1660 // vector to be later processed by SDISel, and insert the node's MBB
1661 // before the next MBB.
1662 if (CurBlock == CurMBB)
1663 visitSwitchCase(CB);
1665 SwitchCases.push_back(CB);
1667 CurBlock = FallThrough;
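// areJTsAllowed - Return true if jump tables are not disabled and the target
// can lower either BR_JT or BRIND (legal or custom).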
1673 static inline bool areJTsAllowed(const TargetLowering &TLI) {
1674 return !DisableJumpTables &&
1675 (TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
1676 TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
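// ComputeRange - Return the number of values in the closed interval
// [First, Last], computed in a bitwidth wide enough to avoid overflow.
// For example, First = 0 and Last = 9 gives a range of 10.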
1679 static APInt ComputeRange(const APInt &First, const APInt &Last) {
1680 APInt LastExt(Last), FirstExt(First);
1681 uint32_t BitWidth = std::max(Last.getBitWidth(), First.getBitWidth()) + 1;
1682 LastExt.sext(BitWidth); FirstExt.sext(BitWidth);
1683 return (LastExt - FirstExt + 1ULL);
1686 /// handleJTSwitchCase - Emit a jump table for the current switch case range.
1687 bool SelectionDAGLowering::handleJTSwitchCase(CaseRec& CR,
1688 CaseRecVector& WorkList,
1689 Value* SV,
1690 MachineBasicBlock* Default) {
1691 Case& FrontCase = *CR.Range.first;
1692 Case& BackCase = *(CR.Range.second-1);
1694 const APInt& First = cast<ConstantInt>(FrontCase.Low)->getValue();
1695 const APInt& Last = cast<ConstantInt>(BackCase.High)->getValue();
1698 for (CaseItr I = CR.Range.first, E = CR.Range.second;
1702 if (!areJTsAllowed(TLI) || TSize <= 3)
1705 APInt Range = ComputeRange(First, Last);
1706 double Density = (double)TSize / Range.roundToDouble();
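// Density is the fraction of the value range that is actually covered by
// case values. For example, 40 single-value cases spread over the values
// 0..99 give a density of 0.40, the rough threshold mentioned in
// visitSwitch for emitting a jump table.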
1710 DEBUG(errs() << "Lowering jump table\n"
1711 << "First entry: " << First << ". Last entry: " << Last << '\n'
1712 << "Range: " << Range
1713 << "Size: " << TSize << ". Density: " << Density << "\n\n");
1715 // Get the MachineFunction which holds the current MBB. This is used when
1716 // inserting any additional MBBs necessary to represent the switch.
1717 MachineFunction *CurMF = CurMBB->getParent();
1719 // Figure out which block is immediately after the current one.
1720 MachineBasicBlock *NextBlock = 0;
1721 MachineFunction::iterator BBI = CR.CaseBB;
1723 if (++BBI != CurMBB->getParent()->end())
1726 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1728 // Create a new basic block to hold the code for loading the address
1729 // of the jump table, and jumping to it. Update successor information;
1730 // we will either branch to the default case for the switch, or the jump
1732 MachineBasicBlock *JumpTableBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1733 CurMF->insert(BBI, JumpTableBB);
1734 CR.CaseBB->addSuccessor(Default);
1735 CR.CaseBB->addSuccessor(JumpTableBB);
1737 // Build a vector of destination BBs, corresponding to each target
1738 // of the jump table. If the value of the jump table slot corresponds to
1739 // a case statement, push the case's BB onto the vector, otherwise, push
1741 std::vector<MachineBasicBlock*> DestBBs;
1743 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++TEI) {
1744 const APInt& Low = cast<ConstantInt>(I->Low)->getValue();
1745 const APInt& High = cast<ConstantInt>(I->High)->getValue();
1747 if (Low.sle(TEI) && TEI.sle(High)) {
1748 DestBBs.push_back(I->BB);
1752 DestBBs.push_back(Default);
1756 // Update successor info. Add one edge to each unique successor.
1757 BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs());
1758 for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
1759 E = DestBBs.end(); I != E; ++I) {
1760 if (!SuccsHandled[(*I)->getNumber()]) {
1761 SuccsHandled[(*I)->getNumber()] = true;
1762 JumpTableBB->addSuccessor(*I);
1766 // Create a jump table index for this jump table, or return an existing
1768 unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);
1770 // Set the jump table information so that we can codegen it as a second
1771 // MachineBasicBlock
1772 JumpTable JT(-1U, JTI, JumpTableBB, Default);
1773 JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == CurMBB));
1774 if (CR.CaseBB == CurMBB)
1775 visitJumpTableHeader(JT, JTH);
1777 JTCases.push_back(JumpTableBlock(JTH, JT));
1782 /// handleBTSplitSwitchCase - Emit a comparison and split the binary search tree into two subtrees.
1784 bool SelectionDAGLowering::handleBTSplitSwitchCase(CaseRec& CR,
1785 CaseRecVector& WorkList,
1786 Value* SV,
1787 MachineBasicBlock* Default) {
1788 // Get the MachineFunction which holds the current MBB. This is used when
1789 // inserting any additional MBBs necessary to represent the switch.
1790 MachineFunction *CurMF = CurMBB->getParent();
1792 // Figure out which block is immediately after the current one.
1793 MachineBasicBlock *NextBlock = 0;
1794 MachineFunction::iterator BBI = CR.CaseBB;
1796 if (++BBI != CurMBB->getParent()->end())
1799 Case& FrontCase = *CR.Range.first;
1800 Case& BackCase = *(CR.Range.second-1);
1801 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1803 // Size is the number of Cases represented by this range.
1804 unsigned Size = CR.Range.second - CR.Range.first;
1806 const APInt& First = cast<ConstantInt>(FrontCase.Low)->getValue();
1807 const APInt& Last = cast<ConstantInt>(BackCase.High)->getValue();
1809 CaseItr Pivot = CR.Range.first + Size/2;
1811 // Select the optimal pivot, maximizing the sum density of the LHS and RHS.
1812 // This will (heuristically) allow us to emit jump tables later.
1814 for (CaseItr I = CR.Range.first, E = CR.Range.second;
1818 size_t LSize = FrontCase.size();
1819 size_t RSize = TSize-LSize;
1820 DEBUG(errs() << "Selecting best pivot: \n"
1821 << "First: " << First << ", Last: " << Last <<'\n'
1822 << "LSize: " << LSize << ", RSize: " << RSize << '\n');
1823 for (CaseItr I = CR.Range.first, J=I+1, E = CR.Range.second;
1825 const APInt& LEnd = cast<ConstantInt>(I->High)->getValue();
1826 const APInt& RBegin = cast<ConstantInt>(J->Low)->getValue();
1827 APInt Range = ComputeRange(LEnd, RBegin);
1828 assert((Range - 2ULL).isNonNegative() &&
1829 "Invalid case distance");
1830 double LDensity = (double)LSize / (LEnd - First + 1ULL).roundToDouble();
1831 double RDensity = (double)RSize / (Last - RBegin + 1ULL).roundToDouble();
1832 double Metric = Range.logBase2()*(LDensity+RDensity);
1833 // Should always split in some non-trivial place
1834 DEBUG(errs() <<"=>Step\n"
1835 << "LEnd: " << LEnd << ", RBegin: " << RBegin << '\n'
1836 << "LDensity: " << LDensity
1837 << ", RDensity: " << RDensity << '\n'
1838 << "Metric: " << Metric << '\n');
1839 if (FMetric < Metric) {
1842 DEBUG(errs() << "Current metric set to: " << FMetric << '\n');
1848 if (areJTsAllowed(TLI)) {
1849 // If our case is dense we *really* should handle it earlier!
1850 assert((FMetric > 0) && "Should handle dense range earlier!");
1851 } else {
1852 Pivot = CR.Range.first + Size/2;
1855 CaseRange LHSR(CR.Range.first, Pivot);
1856 CaseRange RHSR(Pivot, CR.Range.second);
1857 Constant *C = Pivot->Low;
1858 MachineBasicBlock *FalseBB = 0, *TrueBB = 0;
1860 // We know that we branch to the LHS if the Value being switched on is
1861 // less than the Pivot value, C. We use this to optimize our binary
1862 // tree a bit, by recognizing that if SV is greater than or equal to the
1863 // LHS's Case Value, and that Case Value is exactly one less than the
1864 // Pivot's Value, then we can branch directly to the LHS's Target,
1865 // rather than creating a leaf node for it.
1866 if ((LHSR.second - LHSR.first) == 1 &&
1867 LHSR.first->High == CR.GE &&
1868 cast<ConstantInt>(C)->getValue() ==
1869 (cast<ConstantInt>(CR.GE)->getValue() + 1LL)) {
1870 TrueBB = LHSR.first->BB;
1871 } else {
1872 TrueBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1873 CurMF->insert(BBI, TrueBB);
1874 WorkList.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
1876 // Put SV in a virtual register to make it available from the new blocks.
1877 ExportFromCurrentBlock(SV);
1880 // Similar to the optimization above, if the Value being switched on is
1881 // known to be less than the Constant CR.LT, and the current Case Value
1882 // is CR.LT - 1, then we can branch directly to the target block for
1883 // the current Case Value, rather than emitting a RHS leaf node for it.
1884 if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
1885 cast<ConstantInt>(RHSR.first->Low)->getValue() ==
1886 (cast<ConstantInt>(CR.LT)->getValue() - 1LL)) {
1887 FalseBB = RHSR.first->BB;
1888 } else {
1889 FalseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1890 CurMF->insert(BBI, FalseBB);
1891 WorkList.push_back(CaseRec(FalseBB,CR.LT,C,RHSR));
1893 // Put SV in a virtual register to make it available from the new blocks.
1894 ExportFromCurrentBlock(SV);
1897 // Create a CaseBlock record representing a conditional branch to
1898 // the LHS node if the value being switched on SV is less than C.
1899 // Otherwise, branch to the RHS node.
1900 CaseBlock CB(ISD::SETLT, SV, C, NULL, TrueBB, FalseBB, CR.CaseBB);
1902 if (CR.CaseBB == CurMBB)
1903 visitSwitchCase(CB);
1905 SwitchCases.push_back(CB);
1910 /// handleBitTestsSwitchCase - If the current case range has few destinations
1911 /// and spans less than the machine word bitwidth, encode the case range into
1912 /// a series of masks and emit bit tests with these masks.
1913 bool SelectionDAGLowering::handleBitTestsSwitchCase(CaseRec& CR,
1914 CaseRecVector& WorkList,
1916 MachineBasicBlock* Default){
1917 unsigned IntPtrBits = TLI.getPointerTy().getSizeInBits();
1919 Case& FrontCase = *CR.Range.first;
1920 Case& BackCase = *(CR.Range.second-1);
1922 // Get the MachineFunction which holds the current MBB. This is used when
1923 // inserting any additional MBBs necessary to represent the switch.
1924 MachineFunction *CurMF = CurMBB->getParent();
1926 // If the target does not have a legal shift left, do not emit bit tests at all.
1927 if (!TLI.isOperationLegal(ISD::SHL, TLI.getPointerTy()))
1931 for (CaseItr I = CR.Range.first, E = CR.Range.second;
1933 // A single case counts as one comparison; a case range counts as two.
1934 numCmps += (I->Low == I->High ? 1 : 2);
1937 // Count unique destinations
1938 SmallSet<MachineBasicBlock*, 4> Dests;
1939 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1940 Dests.insert(I->BB);
1941 if (Dests.size() > 3)
1942 // Don't bother with the code below if there are too many unique destinations.
1945 DEBUG(errs() << "Total number of unique destinations: " << Dests.size() << '\n'
1946 << "Total number of comparisons: " << numCmps << '\n');
1948 // Compute span of values.
1949 const APInt& minValue = cast<ConstantInt>(FrontCase.Low)->getValue();
1950 const APInt& maxValue = cast<ConstantInt>(BackCase.High)->getValue();
1951 APInt cmpRange = maxValue - minValue;
1953 DEBUG(errs() << "Compare range: " << cmpRange << '\n'
1954 << "Low bound: " << minValue << '\n'
1955 << "High bound: " << maxValue << '\n');
1957 if (cmpRange.uge(APInt(cmpRange.getBitWidth(), IntPtrBits)) ||
1958 (!(Dests.size() == 1 && numCmps >= 3) &&
1959 !(Dests.size() == 2 && numCmps >= 5) &&
1960 !(Dests.size() >= 3 && numCmps >= 6)))
1963 DEBUG(errs() << "Emitting bit tests\n");
1964 APInt lowBound = APInt::getNullValue(cmpRange.getBitWidth());
1966 // Optimize the case where all the case values fit in a
1967 // word without having to subtract minValue. In this case,
1968 // we can optimize away the subtraction.
1969 if (minValue.isNonNegative() &&
1970 maxValue.slt(APInt(maxValue.getBitWidth(), IntPtrBits))) {
1971 cmpRange = maxValue;
1972 } else
1973 lowBound = minValue;
1976 CaseBitsVector CasesBits;
1977 unsigned i, count = 0;
1979 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1980 MachineBasicBlock* Dest = I->BB;
1981 for (i = 0; i < count; ++i)
1982 if (Dest == CasesBits[i].BB)
1986 assert((count < 3) && "Too many destinations to test!");
1987 CasesBits.push_back(CaseBits(0, Dest, 0));
1991 const APInt& lowValue = cast<ConstantInt>(I->Low)->getValue();
1992 const APInt& highValue = cast<ConstantInt>(I->High)->getValue();
1994 uint64_t lo = (lowValue - lowBound).getZExtValue();
1995 uint64_t hi = (highValue - lowBound).getZExtValue();
1997 for (uint64_t j = lo; j <= hi; j++) {
1998 CasesBits[i].Mask |= 1ULL << j;
1999 CasesBits[i].Bits++;
2003 std::sort(CasesBits.begin(), CasesBits.end(), CaseBitsCmp());
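// At this point each CasesBits entry describes one destination block: Mask
// has a bit set for every case value (relative to lowBound) that branches
// there, and Bits counts those values. For example, case values 1, 3 and 5
// targeting a single block produce Mask 0x2A and Bits == 3.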
2007 // Figure out which block is immediately after the current one.
2008 MachineFunction::iterator BBI = CR.CaseBB;
2011 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
2013 DEBUG(errs() << "Cases:\n");
2014 for (unsigned i = 0, e = CasesBits.size(); i!=e; ++i) {
2015 DEBUG(errs() << "Mask: " << CasesBits[i].Mask
2016 << ", Bits: " << CasesBits[i].Bits
2017 << ", BB: " << CasesBits[i].BB << '\n');
2019 MachineBasicBlock *CaseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
2020 CurMF->insert(BBI, CaseBB);
2021 BTC.push_back(BitTestCase(CasesBits[i].Mask,
2025 // Put SV in a virtual register to make it available from the new blocks.
2026 ExportFromCurrentBlock(SV);
2029 BitTestBlock BTB(lowBound, cmpRange, SV,
2030 -1U, (CR.CaseBB == CurMBB),
2031 CR.CaseBB, Default, BTC);
2033 if (CR.CaseBB == CurMBB)
2034 visitBitTestHeader(BTB);
2036 BitTestCases.push_back(BTB);
2042 /// Clusterify - Transform a simple list of Cases into a list of CaseRanges.
2043 size_t SelectionDAGLowering::Clusterify(CaseVector& Cases,
2044 const SwitchInst& SI) {
2047 // Start with "simple" cases
2048 for (size_t i = 1; i < SI.getNumSuccessors(); ++i) {
2049 MachineBasicBlock *SMBB = FuncInfo.MBBMap[SI.getSuccessor(i)];
2050 Cases.push_back(Case(SI.getSuccessorValue(i),
2051 SI.getSuccessorValue(i),
2054 std::sort(Cases.begin(), Cases.end(), CaseCmp());
2056 // Merge cases into clusters.
2057 if (Cases.size() >= 2)
2058 // Must recompute end() each iteration because it may be
2059 // invalidated by erase if we hold on to it
2060 for (CaseItr I = Cases.begin(), J = ++(Cases.begin()); J != Cases.end(); ) {
2061 const APInt& nextValue = cast<ConstantInt>(J->Low)->getValue();
2062 const APInt& currentValue = cast<ConstantInt>(I->High)->getValue();
2063 MachineBasicBlock* nextBB = J->BB;
2064 MachineBasicBlock* currentBB = I->BB;
2066 // If the two neighboring cases go to the same destination, merge them
2067 // into a single case.
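// For example, cases 3 and 4 that both branch to BB1 become the single
// range [3, 4] -> BB1.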
2068 if ((nextValue - currentValue == 1) && (currentBB == nextBB)) {
2076 for (CaseItr I=Cases.begin(), E=Cases.end(); I!=E; ++I, ++numCmps) {
2077 if (I->Low != I->High)
2078 // A range counts double, since it requires two compares.
2085 void SelectionDAGLowering::visitSwitch(SwitchInst &SI) {
2086 // Figure out which block is immediately after the current one.
2087 MachineBasicBlock *NextBlock = 0;
2088 MachineFunction::iterator BBI = CurMBB;
2090 MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
2092 // If there is only the default destination, branch to it if it is not the
2093 // next basic block. Otherwise, just fall through.
2094 if (SI.getNumOperands() == 2) {
2095 // Update machine-CFG edges.
2097 // If this is not a fall-through branch, emit the branch.
2098 CurMBB->addSuccessor(Default);
2099 if (Default != NextBlock)
2100 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
2101 MVT::Other, getControlRoot(),
2102 DAG.getBasicBlock(Default)));
2106 // If there are any non-default case statements, create a vector of Cases
2107 // representing each one, and sort the vector so that we can efficiently
2108 // create a binary search tree from them.
2110 size_t numCmps = Clusterify(Cases, SI);
2111 DEBUG(errs() << "Clusterify finished. Total clusters: " << Cases.size()
2112 << ". Total compares: " << numCmps << '\n');
2115 // Get the Value to be switched on and default basic blocks, which will be
2116 // inserted into CaseBlock records, representing basic blocks in the binary
2117 // search tree.
2118 Value *SV = SI.getOperand(0);
2120 // Push the initial CaseRec onto the worklist
2121 CaseRecVector WorkList;
2122 WorkList.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));
2124 while (!WorkList.empty()) {
2125 // Grab a record representing a case range to process off the worklist
2126 CaseRec CR = WorkList.back();
2127 WorkList.pop_back();
2129 if (handleBitTestsSwitchCase(CR, WorkList, SV, Default))
2132 // If the range has few cases (two or fewer), emit a series of specific
2133 // tests.
2134 if (handleSmallSwitchRange(CR, WorkList, SV, Default))
2137 // If the switch has more than 5 blocks, is at least 40% dense, and the
2138 // target supports indirect branches, then emit a jump table rather than
2139 // lowering the switch to a binary tree of conditional branches.
2140 if (handleJTSwitchCase(CR, WorkList, SV, Default))
2143 // Emit binary tree. We need to pick a pivot, and push left and right ranges
2144 // onto the worklist. Leaves are handled via the handleSmallSwitchRange() call.
2145 handleBTSplitSwitchCase(CR, WorkList, SV, Default);
2150 void SelectionDAGLowering::visitSub(User &I) {
2151 // -0.0 - X --> fneg
2152 const Type *Ty = I.getType();
2153 if (isa<VectorType>(Ty)) {
2154 if (ConstantVector *CV = dyn_cast<ConstantVector>(I.getOperand(0))) {
2155 const VectorType *DestTy = cast<VectorType>(I.getType());
2156 const Type *ElTy = DestTy->getElementType();
2157 if (ElTy->isFloatingPoint()) {
2158 unsigned VL = DestTy->getNumElements();
2159 std::vector<Constant*> NZ(VL, ConstantFP::getNegativeZero(ElTy));
2160 Constant *CNZ = ConstantVector::get(&NZ[0], NZ.size());
2162 SDValue Op2 = getValue(I.getOperand(1));
2163 setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2164 Op2.getValueType(), Op2));
2170 if (Ty->isFloatingPoint()) {
2171 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
2172 if (CFP->isExactlyValue(ConstantFP::getNegativeZero(Ty)->getValueAPF())) {
2173 SDValue Op2 = getValue(I.getOperand(1));
2174 setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2175 Op2.getValueType(), Op2));
2180 visitBinary(I, Ty->isFPOrFPVector() ? ISD::FSUB : ISD::SUB);
2183 void SelectionDAGLowering::visitBinary(User &I, unsigned OpCode) {
2184 SDValue Op1 = getValue(I.getOperand(0));
2185 SDValue Op2 = getValue(I.getOperand(1));
2187 setValue(&I, DAG.getNode(OpCode, getCurDebugLoc(),
2188 Op1.getValueType(), Op1, Op2));
2191 void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
2192 SDValue Op1 = getValue(I.getOperand(0));
2193 SDValue Op2 = getValue(I.getOperand(1));
2194 if (!isa<VectorType>(I.getType()) &&
2195 Op2.getValueType() != TLI.getShiftAmountTy()) {
2196 // If the operand is smaller than the shift count type, promote it.
2197 if (TLI.getShiftAmountTy().bitsGT(Op2.getValueType()))
2198 Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2199 TLI.getShiftAmountTy(), Op2);
2200 // If the operand is larger than the shift count type but the shift
2201 // count type has enough bits to represent any shift value, truncate
2202 // it now. This is a common case and it exposes the truncate to
2203 // optimization early.
2204 else if (TLI.getShiftAmountTy().getSizeInBits() >=
2205 Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
2206 Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2207 TLI.getShiftAmountTy(), Op2);
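// For example, when shifting an i64 value on a target whose shift-amount
// type is i8, every legal shift amount (0..63) fits in 8 bits, so the
// truncation above loses nothing.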
2208 // Otherwise we'll need to temporarily settle for some other
2209 // convenient type; type legalization will make adjustments as
2211 else if (TLI.getPointerTy().bitsLT(Op2.getValueType()))
2212 Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2213 TLI.getPointerTy(), Op2);
2214 else if (TLI.getPointerTy().bitsGT(Op2.getValueType()))
2215 Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2216 TLI.getPointerTy(), Op2);
2219 setValue(&I, DAG.getNode(Opcode, getCurDebugLoc(),
2220 Op1.getValueType(), Op1, Op2));
2223 void SelectionDAGLowering::visitICmp(User &I) {
2224 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2225 if (ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2226 predicate = IC->getPredicate();
2227 else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2228 predicate = ICmpInst::Predicate(IC->getPredicate());
2229 SDValue Op1 = getValue(I.getOperand(0));
2230 SDValue Op2 = getValue(I.getOperand(1));
2231 ISD::CondCode Opcode = getICmpCondCode(predicate);
2232 setValue(&I, DAG.getSetCC(getCurDebugLoc(),MVT::i1, Op1, Op2, Opcode));
2235 void SelectionDAGLowering::visitFCmp(User &I) {
2236 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2237 if (FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2238 predicate = FC->getPredicate();
2239 else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2240 predicate = FCmpInst::Predicate(FC->getPredicate());
2241 SDValue Op1 = getValue(I.getOperand(0));
2242 SDValue Op2 = getValue(I.getOperand(1));
2243 ISD::CondCode Condition = getFCmpCondCode(predicate);
2244 setValue(&I, DAG.getSetCC(getCurDebugLoc(), MVT::i1, Op1, Op2, Condition));
2247 void SelectionDAGLowering::visitVICmp(User &I) {
2248 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2249 if (VICmpInst *IC = dyn_cast<VICmpInst>(&I))
2250 predicate = IC->getPredicate();
2251 else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2252 predicate = ICmpInst::Predicate(IC->getPredicate());
2253 SDValue Op1 = getValue(I.getOperand(0));
2254 SDValue Op2 = getValue(I.getOperand(1));
2255 ISD::CondCode Opcode = getICmpCondCode(predicate);
2256 setValue(&I, DAG.getVSetCC(getCurDebugLoc(), Op1.getValueType(),
2260 void SelectionDAGLowering::visitVFCmp(User &I) {
2261 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2262 if (VFCmpInst *FC = dyn_cast<VFCmpInst>(&I))
2263 predicate = FC->getPredicate();
2264 else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2265 predicate = FCmpInst::Predicate(FC->getPredicate());
2266 SDValue Op1 = getValue(I.getOperand(0));
2267 SDValue Op2 = getValue(I.getOperand(1));
2268 ISD::CondCode Condition = getFCmpCondCode(predicate);
2269 MVT DestVT = TLI.getValueType(I.getType());
2271 setValue(&I, DAG.getVSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Condition));
2274 void SelectionDAGLowering::visitSelect(User &I) {
2275 SmallVector<MVT, 4> ValueVTs;
2276 ComputeValueVTs(TLI, I.getType(), ValueVTs);
2277 unsigned NumValues = ValueVTs.size();
2278 if (NumValues != 0) {
2279 SmallVector<SDValue, 4> Values(NumValues);
2280 SDValue Cond = getValue(I.getOperand(0));
2281 SDValue TrueVal = getValue(I.getOperand(1));
2282 SDValue FalseVal = getValue(I.getOperand(2));
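// Emit one SELECT node per constituent value of the (possibly aggregate)
// result type; the i1 condition is shared by all of them.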
2284 for (unsigned i = 0; i != NumValues; ++i)
2285 Values[i] = DAG.getNode(ISD::SELECT, getCurDebugLoc(),
2286 TrueVal.getValueType(), Cond,
2287 SDValue(TrueVal.getNode(), TrueVal.getResNo() + i),
2288 SDValue(FalseVal.getNode(), FalseVal.getResNo() + i));
2290 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2291 DAG.getVTList(&ValueVTs[0], NumValues),
2292 &Values[0], NumValues));
2297 void SelectionDAGLowering::visitTrunc(User &I) {
2298 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
2299 SDValue N = getValue(I.getOperand(0));
2300 MVT DestVT = TLI.getValueType(I.getType());
2301 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
2304 void SelectionDAGLowering::visitZExt(User &I) {
2305 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2306 // ZExt also can't be a cast to bool for the same reason, so there is not much to do.
2307 SDValue N = getValue(I.getOperand(0));
2308 MVT DestVT = TLI.getValueType(I.getType());
2309 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N));
2312 void SelectionDAGLowering::visitSExt(User &I) {
2313 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2314 // SExt also can't be a cast to bool for the same reason, so there is not much to do.
2315 SDValue N = getValue(I.getOperand(0));
2316 MVT DestVT = TLI.getValueType(I.getType());
2317 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(), DestVT, N));
2320 void SelectionDAGLowering::visitFPTrunc(User &I) {
2321 // FPTrunc is never a no-op cast, no need to check
2322 SDValue N = getValue(I.getOperand(0));
2323 MVT DestVT = TLI.getValueType(I.getType());
2324 setValue(&I, DAG.getNode(ISD::FP_ROUND, getCurDebugLoc(),
2325 DestVT, N, DAG.getIntPtrConstant(0)));
2328 void SelectionDAGLowering::visitFPExt(User &I){
2329 // FPExt is never a no-op cast, no need to check
2330 SDValue N = getValue(I.getOperand(0));
2331 MVT DestVT = TLI.getValueType(I.getType());
2332 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurDebugLoc(), DestVT, N));
2335 void SelectionDAGLowering::visitFPToUI(User &I) {
2336 // FPToUI is never a no-op cast, no need to check
2337 SDValue N = getValue(I.getOperand(0));
2338 MVT DestVT = TLI.getValueType(I.getType());
2339 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurDebugLoc(), DestVT, N));
2342 void SelectionDAGLowering::visitFPToSI(User &I) {
2343 // FPToSI is never a no-op cast, no need to check
2344 SDValue N = getValue(I.getOperand(0));
2345 MVT DestVT = TLI.getValueType(I.getType());
2346 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurDebugLoc(), DestVT, N));
2349 void SelectionDAGLowering::visitUIToFP(User &I) {
2350 // UIToFP is never a no-op cast, no need to check
2351 SDValue N = getValue(I.getOperand(0));
2352 MVT DestVT = TLI.getValueType(I.getType());
2353 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurDebugLoc(), DestVT, N));
2356 void SelectionDAGLowering::visitSIToFP(User &I){
2357 // SIToFP is never a no-op cast, no need to check
2358 SDValue N = getValue(I.getOperand(0));
2359 MVT DestVT = TLI.getValueType(I.getType());
2360 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurDebugLoc(), DestVT, N));
2363 void SelectionDAGLowering::visitPtrToInt(User &I) {
2364 // What to do depends on the size of the integer and the size of the pointer.
2365 // We can either truncate, zero extend, or no-op, accordingly.
2366 SDValue N = getValue(I.getOperand(0));
2367 MVT SrcVT = N.getValueType();
2368 MVT DestVT = TLI.getValueType(I.getType());
2370 if (DestVT.bitsLT(SrcVT))
2371 Result = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N);
2373 // Note: ZERO_EXTEND can handle cases where the sizes are equal too
2374 Result = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N);
2375 setValue(&I, Result);
2378 void SelectionDAGLowering::visitIntToPtr(User &I) {
2379 // What to do depends on the size of the integer and the size of the pointer.
2380 // We can either truncate, zero extend, or no-op, accordingly.
2381 SDValue N = getValue(I.getOperand(0));
2382 MVT SrcVT = N.getValueType();
2383 MVT DestVT = TLI.getValueType(I.getType());
2384 if (DestVT.bitsLT(SrcVT))
2385 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
2387 // Note: ZERO_EXTEND can handle cases where the sizes are equal too
2388 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2392 void SelectionDAGLowering::visitBitCast(User &I) {
2393 SDValue N = getValue(I.getOperand(0));
2394 MVT DestVT = TLI.getValueType(I.getType());
2396 // BitCast assures us that source and destination are the same size so this
2397 // is either a BIT_CONVERT or a no-op.
2398 if (DestVT != N.getValueType())
2399 setValue(&I, DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
2400 DestVT, N)); // convert types
2402 setValue(&I, N); // noop cast.
2405 void SelectionDAGLowering::visitInsertElement(User &I) {
2406 SDValue InVec = getValue(I.getOperand(0));
2407 SDValue InVal = getValue(I.getOperand(1));
2408 SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2410 getValue(I.getOperand(2)));
2412 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurDebugLoc(),
2413 TLI.getValueType(I.getType()),
2414 InVec, InVal, InIdx));
2417 void SelectionDAGLowering::visitExtractElement(User &I) {
2418 SDValue InVec = getValue(I.getOperand(0));
2419 SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2421 getValue(I.getOperand(1)));
2422 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2423 TLI.getValueType(I.getType()), InVec, InIdx));
2427 // Utility for visitShuffleVector - Returns true if the mask selects elements
2428 // sequentially starting from SIndx (undefs are allowed).
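// For example, <0, 1, 2, 3> and <0, -1, 2, -1> are sequential from index 0,
// while <0, 2, 1, 3> is not.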
2429 static bool SequentialMask(SmallVectorImpl<int> &Mask, unsigned SIndx) {
2430 unsigned MaskNumElts = Mask.size();
2431 for (unsigned i = 0; i != MaskNumElts; ++i)
2432 if ((Mask[i] >= 0) && (Mask[i] != (int)(i + SIndx)))
2437 void SelectionDAGLowering::visitShuffleVector(User &I) {
2438 SmallVector<int, 8> Mask;
2439 SDValue Src1 = getValue(I.getOperand(0));
2440 SDValue Src2 = getValue(I.getOperand(1));
2442 // Convert the ConstantVector mask operand into an array of ints, with -1
2443 // representing undef values.
2444 SmallVector<Constant*, 8> MaskElts;
2445 cast<Constant>(I.getOperand(2))->getVectorElements(MaskElts);
2446 unsigned MaskNumElts = MaskElts.size();
2447 for (unsigned i = 0; i != MaskNumElts; ++i) {
2448 if (isa<UndefValue>(MaskElts[i]))
2449 Mask.push_back(-1);
2450 else
2451 Mask.push_back(cast<ConstantInt>(MaskElts[i])->getSExtValue());
2454 MVT VT = TLI.getValueType(I.getType());
2455 MVT SrcVT = Src1.getValueType();
2456 unsigned SrcNumElts = SrcVT.getVectorNumElements();
2458 if (SrcNumElts == MaskNumElts) {
2459 setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2464 // Normalize the shuffle vector since mask and vector length don't match.
2465 if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) {
2466 // Mask is longer than the source vectors and is a multiple of the source
2467 // vectors. We can use concatenate vectors to make the mask and vector
2468 // lengths match.
2469 if (SrcNumElts*2 == MaskNumElts && SequentialMask(Mask, 0)) {
2470 // The shuffle is concatenating two vectors together.
2471 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(),
2476 // Pad both vectors with undefs to make them the same length as the mask.
2477 unsigned NumConcat = MaskNumElts / SrcNumElts;
2478 bool Src1U = Src1.getOpcode() == ISD::UNDEF;
2479 bool Src2U = Src2.getOpcode() == ISD::UNDEF;
2480 SDValue UndefVal = DAG.getUNDEF(SrcVT);
2482 SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
2483 SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
2487 Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2488 getCurDebugLoc(), VT,
2489 &MOps1[0], NumConcat);
2490 Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2491 getCurDebugLoc(), VT,
2492 &MOps2[0], NumConcat);
2494 // Readjust mask for new input vector length.
2495 SmallVector<int, 8> MappedOps;
2496 for (unsigned i = 0; i != MaskNumElts; ++i) {
2498 if (Idx < (int)SrcNumElts)
2499 MappedOps.push_back(Idx);
2501 MappedOps.push_back(Idx + MaskNumElts - SrcNumElts);
2503 setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2508 if (SrcNumElts > MaskNumElts) {
2509 // Analyze the access pattern of the vector to see if we can extract
2510 // two subvectors and do the shuffle. The analysis is done by calculating
2511 // the range of elements the mask accesses on both vectors.
2512 int MinRange[2] = { SrcNumElts+1, SrcNumElts+1};
2513 int MaxRange[2] = {-1, -1};
2515 for (unsigned i = 0; i != MaskNumElts; ++i) {
2521 if (Idx >= (int)SrcNumElts) {
2525 if (Idx > MaxRange[Input])
2526 MaxRange[Input] = Idx;
2527 if (Idx < MinRange[Input])
2528 MinRange[Input] = Idx;
2531 // Check whether the access is smaller than the vector size and whether we
2532 // can find a reasonable extract index.
2533 int RangeUse[2] = { 2, 2 }; // 0 = Unused, 1 = Extract, 2 = Cannot extract.
2534 int StartIdx[2]; // StartIdx to extract from
2535 for (int Input=0; Input < 2; ++Input) {
2536 if (MinRange[Input] == (int)(SrcNumElts+1) && MaxRange[Input] == -1) {
2537 RangeUse[Input] = 0; // Unused
2538 StartIdx[Input] = 0;
2539 } else if (MaxRange[Input] - MinRange[Input] < (int)MaskNumElts) {
2540 // Fits within range but we should see if we can find a good
2541 // start index that is a multiple of the mask length.
2542 if (MaxRange[Input] < (int)MaskNumElts) {
2543 RangeUse[Input] = 1; // Extract from beginning of the vector
2544 StartIdx[Input] = 0;
2546 StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
2547 if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
2548 StartIdx[Input] + MaskNumElts < SrcNumElts)
2549 RangeUse[Input] = 1; // Extract from a multiple of the mask length.
2554 if (RangeUse[0] == 0 && RangeUse[1] == 0) {
2555 setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
2558 else if (RangeUse[0] < 2 && RangeUse[1] < 2) {
2559 // Extract appropriate subvector and generate a vector shuffle
2560 for (int Input=0; Input < 2; ++Input) {
2561 SDValue& Src = Input == 0 ? Src1 : Src2;
2562 if (RangeUse[Input] == 0) {
2563 Src = DAG.getUNDEF(VT);
2565 Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, getCurDebugLoc(), VT,
2566 Src, DAG.getIntPtrConstant(StartIdx[Input]));
2569 // Calculate new mask.
2570 SmallVector<int, 8> MappedOps;
2571 for (unsigned i = 0; i != MaskNumElts; ++i) {
2574 MappedOps.push_back(Idx);
2575 else if (Idx < (int)SrcNumElts)
2576 MappedOps.push_back(Idx - StartIdx[0]);
2578 MappedOps.push_back(Idx - SrcNumElts - StartIdx[1] + MaskNumElts);
2580 setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2586 // We can't use either concat vectors or extract subvectors, so fall back
2587 // to replacing the shuffle with individual extracts and a build vector.
2589 MVT EltVT = VT.getVectorElementType();
2590 MVT PtrVT = TLI.getPointerTy();
2591 SmallVector<SDValue,8> Ops;
2592 for (unsigned i = 0; i != MaskNumElts; ++i) {
2594 Ops.push_back(DAG.getUNDEF(EltVT));
2597 if (Idx < (int)SrcNumElts)
2598 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2599 EltVT, Src1, DAG.getConstant(Idx, PtrVT)));
2601 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2603 DAG.getConstant(Idx - SrcNumElts, PtrVT)));
2606 setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
2607 VT, &Ops[0], Ops.size()));
2610 void SelectionDAGLowering::visitInsertValue(InsertValueInst &I) {
2611 const Value *Op0 = I.getOperand(0);
2612 const Value *Op1 = I.getOperand(1);
2613 const Type *AggTy = I.getType();
2614 const Type *ValTy = Op1->getType();
2615 bool IntoUndef = isa<UndefValue>(Op0);
2616 bool FromUndef = isa<UndefValue>(Op1);
2618 unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2619 I.idx_begin(), I.idx_end());
2621 SmallVector<MVT, 4> AggValueVTs;
2622 ComputeValueVTs(TLI, AggTy, AggValueVTs);
2623 SmallVector<MVT, 4> ValValueVTs;
2624 ComputeValueVTs(TLI, ValTy, ValValueVTs);
2626 unsigned NumAggValues = AggValueVTs.size();
2627 unsigned NumValValues = ValValueVTs.size();
2628 SmallVector<SDValue, 4> Values(NumAggValues);
2630 SDValue Agg = getValue(Op0);
2631 SDValue Val = getValue(Op1);
2633 // Copy the beginning value(s) from the original aggregate.
2634 for (; i != LinearIndex; ++i)
2635 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2636 SDValue(Agg.getNode(), Agg.getResNo() + i);
2637 // Copy values from the inserted value(s).
2638 for (; i != LinearIndex + NumValValues; ++i)
2639 Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2640 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
2641 // Copy remaining value(s) from the original aggregate.
2642 for (; i != NumAggValues; ++i)
2643 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2644 SDValue(Agg.getNode(), Agg.getResNo() + i);
2646 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2647 DAG.getVTList(&AggValueVTs[0], NumAggValues),
2648 &Values[0], NumAggValues));
2651 void SelectionDAGLowering::visitExtractValue(ExtractValueInst &I) {
2652 const Value *Op0 = I.getOperand(0);
2653 const Type *AggTy = Op0->getType();
2654 const Type *ValTy = I.getType();
2655 bool OutOfUndef = isa<UndefValue>(Op0);
2657 unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2658 I.idx_begin(), I.idx_end());
2660 SmallVector<MVT, 4> ValValueVTs;
2661 ComputeValueVTs(TLI, ValTy, ValValueVTs);
2663 unsigned NumValValues = ValValueVTs.size();
2664 SmallVector<SDValue, 4> Values(NumValValues);
2666 SDValue Agg = getValue(Op0);
2667 // Copy out the selected value(s).
2668 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
2669 Values[i - LinearIndex] =
2671 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
2672 SDValue(Agg.getNode(), Agg.getResNo() + i);
2674 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2675 DAG.getVTList(&ValValueVTs[0], NumValValues),
2676 &Values[0], NumValValues));
2680 void SelectionDAGLowering::visitGetElementPtr(User &I) {
2681 SDValue N = getValue(I.getOperand(0));
2682 const Type *Ty = I.getOperand(0)->getType();
2684 for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
2687 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
2688 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
2691 uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field);
2692 N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2693 DAG.getIntPtrConstant(Offset));
2695 Ty = StTy->getElementType(Field);
2697 Ty = cast<SequentialType>(Ty)->getElementType();
2699 // If this is a constant subscript, handle it quickly.
2700 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
2701 if (CI->getZExtValue() == 0) continue;
2703 TD->getTypePaddedSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
2705 unsigned PtrBits = TLI.getPointerTy().getSizeInBits();
2707 OffsVal = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2709 DAG.getConstant(Offs, MVT::i64));
2711 OffsVal = DAG.getIntPtrConstant(Offs);
2712 N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2717 // N = N + Idx * ElementSize;
2718 uint64_t ElementSize = TD->getTypePaddedSize(Ty);
2719 SDValue IdxN = getValue(Idx);
2721 // If the index is smaller or larger than intptr_t, truncate or extend
2722 // it.
2723 if (IdxN.getValueType().bitsLT(N.getValueType()))
2724 IdxN = DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(),
2725 N.getValueType(), IdxN);
2726 else if (IdxN.getValueType().bitsGT(N.getValueType()))
2727 IdxN = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2728 N.getValueType(), IdxN);
2730 // If this is a multiply by a power of two, turn it into a shl
2731 // immediately. This is a very common case.
2732 if (ElementSize != 1) {
2733 if (isPowerOf2_64(ElementSize)) {
2734 unsigned Amt = Log2_64(ElementSize);
2735 IdxN = DAG.getNode(ISD::SHL, getCurDebugLoc(),
2736 N.getValueType(), IdxN,
2737 DAG.getConstant(Amt, TLI.getPointerTy()));
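// For example, indexing an array of i32 elements (ElementSize == 4) turns
// the index into IdxN << 2.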
2739 SDValue Scale = DAG.getIntPtrConstant(ElementSize);
2740 IdxN = DAG.getNode(ISD::MUL, getCurDebugLoc(),
2741 N.getValueType(), IdxN, Scale);
2745 N = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2746 N.getValueType(), N, IdxN);
2752 void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
2753 // If this is a fixed sized alloca in the entry block of the function,
2754 // allocate it statically on the stack.
2755 if (FuncInfo.StaticAllocaMap.count(&I))
2756 return; // getValue will auto-populate this.
2758 const Type *Ty = I.getAllocatedType();
2759 uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
2761 std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
2764 SDValue AllocSize = getValue(I.getArraySize());
2766 AllocSize = DAG.getNode(ISD::MUL, getCurDebugLoc(), AllocSize.getValueType(),
2768 DAG.getConstant(TySize, AllocSize.getValueType()));
2772 MVT IntPtr = TLI.getPointerTy();
2773 if (IntPtr.bitsLT(AllocSize.getValueType()))
2774 AllocSize = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2776 else if (IntPtr.bitsGT(AllocSize.getValueType()))
2777 AllocSize = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2780 // Handle alignment. If the requested alignment is less than or equal to
2781 // the stack alignment, ignore it. If the requested alignment is greater than
2782 // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
2783 unsigned StackAlign =
2784 TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
2785 if (Align <= StackAlign)
2789 // Round the size of the allocation up to the stack alignment size
2790 // by adding SA-1 to the size.
2790 AllocSize = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2791 AllocSize.getValueType(), AllocSize,
2792 DAG.getIntPtrConstant(StackAlign-1));
2793 // Mask out the low bits for alignment purposes.
2794 AllocSize = DAG.getNode(ISD::AND, getCurDebugLoc(),
2795 AllocSize.getValueType(), AllocSize,
2796 DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1)));
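// For example, with a 16-byte stack alignment a request for 20 bytes
// becomes (20 + 15) & ~15 == 32.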
2798 SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
2799 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
2800 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, getCurDebugLoc(),
2803 DAG.setRoot(DSA.getValue(1));
2805 // Inform the Frame Information that we have just allocated a variable-sized
2806 // object.
2807 CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
2810 void SelectionDAGLowering::visitLoad(LoadInst &I) {
2811 const Value *SV = I.getOperand(0);
2812 SDValue Ptr = getValue(SV);
2814 const Type *Ty = I.getType();
2815 bool isVolatile = I.isVolatile();
2816 unsigned Alignment = I.getAlignment();
2818 SmallVector<MVT, 4> ValueVTs;
2819 SmallVector<uint64_t, 4> Offsets;
2820 ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
2821 unsigned NumValues = ValueVTs.size();
2826 bool ConstantMemory = false;
2827 if (I.isVolatile())
2828 // Serialize volatile loads with other side effects.
2829 Root = getRoot();
2830 else if (AA->pointsToConstantMemory(SV)) {
2831 // Do not serialize (non-volatile) loads of constant memory with anything.
2832 Root = DAG.getEntryNode();
2833 ConstantMemory = true;
2835 // Do not serialize non-volatile loads against each other.
2836 Root = DAG.getRoot();
2839 SmallVector<SDValue, 4> Values(NumValues);
2840 SmallVector<SDValue, 4> Chains(NumValues);
2841 MVT PtrVT = Ptr.getValueType();
2842 for (unsigned i = 0; i != NumValues; ++i) {
2843 SDValue L = DAG.getLoad(ValueVTs[i], getCurDebugLoc(), Root,
2844 DAG.getNode(ISD::ADD, getCurDebugLoc(),
2846 DAG.getConstant(Offsets[i], PtrVT)),
2848 isVolatile, Alignment);
2850 Chains[i] = L.getValue(1);
2853 if (!ConstantMemory) {
2854 SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
2856 &Chains[0], NumValues);
2860 PendingLoads.push_back(Chain);
2863 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2864 DAG.getVTList(&ValueVTs[0], NumValues),
2865 &Values[0], NumValues));
2869 void SelectionDAGLowering::visitStore(StoreInst &I) {
2870 Value *SrcV = I.getOperand(0);
2871 Value *PtrV = I.getOperand(1);
2873 SmallVector<MVT, 4> ValueVTs;
2874 SmallVector<uint64_t, 4> Offsets;
2875 ComputeValueVTs(TLI, SrcV->getType(), ValueVTs, &Offsets);
2876 unsigned NumValues = ValueVTs.size();
2880 // Get the lowered operands. Note that we do this after
2881 // checking if NumValues is zero, because with zero results
2882 // the operands won't have values in the map.
2883 SDValue Src = getValue(SrcV);
2884 SDValue Ptr = getValue(PtrV);
2886 SDValue Root = getRoot();
2887 SmallVector<SDValue, 4> Chains(NumValues);
2888 MVT PtrVT = Ptr.getValueType();
2889 bool isVolatile = I.isVolatile();
2890 unsigned Alignment = I.getAlignment();
2891 for (unsigned i = 0; i != NumValues; ++i)
2892 Chains[i] = DAG.getStore(Root, getCurDebugLoc(),
2893 SDValue(Src.getNode(), Src.getResNo() + i),
2894 DAG.getNode(ISD::ADD, getCurDebugLoc(),
2896 DAG.getConstant(Offsets[i], PtrVT)),
2898 isVolatile, Alignment);
2900 DAG.setRoot(DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
2901 MVT::Other, &Chains[0], NumValues));
2904 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC node.
2906 void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
2907 unsigned Intrinsic) {
2908 bool HasChain = !I.doesNotAccessMemory();
2909 bool OnlyLoad = HasChain && I.onlyReadsMemory();
2911 // Build the operand list.
2912 SmallVector<SDValue, 8> Ops;
2913 if (HasChain) { // If this intrinsic has side-effects, chainify it.
2915 // We don't need to serialize loads against other loads.
2916 Ops.push_back(DAG.getRoot());
2918 Ops.push_back(getRoot());
2922 // Info is set by getTgtMemIntrinsic.
2923 TargetLowering::IntrinsicInfo Info;
2924 bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);
2926 // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
2927 if (!IsTgtIntrinsic)
2928 Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
2930 // Add all operands of the call to the operand list.
2931 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
2932 SDValue Op = getValue(I.getOperand(i));
2933 assert(TLI.isTypeLegal(Op.getValueType()) &&
2934 "Intrinsic uses a non-legal type?");
2938 std::vector<MVT> VTArray;
2939 if (I.getType() != Type::VoidTy) {
2940 MVT VT = TLI.getValueType(I.getType());
2941 if (VT.isVector()) {
2942 const VectorType *DestTy = cast<VectorType>(I.getType());
2943 MVT EltVT = TLI.getValueType(DestTy->getElementType());
2945 VT = MVT::getVectorVT(EltVT, DestTy->getNumElements());
2946 assert(VT != MVT::Other && "Intrinsic uses a non-legal type?");
2949 assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?");
2950 VTArray.push_back(VT);
2953 VTArray.push_back(MVT::Other);
2955 SDVTList VTs = DAG.getVTList(&VTArray[0], VTArray.size());
2959 if (IsTgtIntrinsic) {
2960 // This is a target intrinsic that touches memory.
2961 Result = DAG.getMemIntrinsicNode(Info.opc, getCurDebugLoc(),
2962 VTs, &Ops[0], Ops.size(),
2963 Info.memVT, Info.ptrVal, Info.offset,
2964 Info.align, Info.vol,
2965 Info.readMem, Info.writeMem);
2968 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurDebugLoc(),
2969 VTs, &Ops[0], Ops.size());
2970 else if (I.getType() != Type::VoidTy)
2971 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurDebugLoc(),
2972 VTs, &Ops[0], Ops.size());
2974 Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurDebugLoc(),
2975 VTs, &Ops[0], Ops.size());
2978 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
2980 PendingLoads.push_back(Chain);
2984 if (I.getType() != Type::VoidTy) {
2985 if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
2986 MVT VT = TLI.getValueType(PTy);
2987 Result = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), VT, Result);
2989 setValue(&I, Result);
2993 /// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
2994 static GlobalVariable *ExtractTypeInfo(Value *V) {
2995 V = V->stripPointerCasts();
2996 GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
2997 assert ((GV || isa<ConstantPointerNull>(V)) &&
2998 "TypeInfo must be a global variable or NULL");
3004 /// AddCatchInfo - Extract the personality and type infos from an eh.selector
3005 /// call, and add them to the specified machine basic block.
3006 void AddCatchInfo(CallInst &I, MachineModuleInfo *MMI,
3007 MachineBasicBlock *MBB) {
3008 // Inform the MachineModuleInfo of the personality for this landing pad.
3009 ConstantExpr *CE = cast<ConstantExpr>(I.getOperand(2));
3010 assert(CE->getOpcode() == Instruction::BitCast &&
3011 isa<Function>(CE->getOperand(0)) &&
3012 "Personality should be a function");
3013 MMI->addPersonality(MBB, cast<Function>(CE->getOperand(0)));
3015 // Gather all the type infos for this landing pad and pass them along to
3016 // MachineModuleInfo.
3017 std::vector<GlobalVariable *> TyInfo;
3018 unsigned N = I.getNumOperands();
3020 for (unsigned i = N - 1; i > 2; --i) {
3021 if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(i))) {
3022 unsigned FilterLength = CI->getZExtValue();
3023 unsigned FirstCatch = i + FilterLength + !FilterLength;
3024 assert (FirstCatch <= N && "Invalid filter length");
3026 if (FirstCatch < N) {
3027 TyInfo.reserve(N - FirstCatch);
3028 for (unsigned j = FirstCatch; j < N; ++j)
3029 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
3030 MMI->addCatchTypeInfo(MBB, TyInfo);
3034 if (!FilterLength) {
3036 MMI->addCleanup(MBB);
3039 TyInfo.reserve(FilterLength - 1);
3040 for (unsigned j = i + 1; j < FirstCatch; ++j)
3041 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
3042 MMI->addFilterTypeInfo(MBB, TyInfo);
3051 TyInfo.reserve(N - 3);
3052 for (unsigned j = 3; j < N; ++j)
3053 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
3054 MMI->addCatchTypeInfo(MBB, TyInfo);
3060 /// GetSignificand - Get the significand and build it into a floating-point
3061 /// number with exponent of 1:
3063 /// Op = (Op & 0x007fffff) | 0x3f800000;
3065 /// where Op is the hexadecimal representation of the floating-point value.
3067 GetSignificand(SelectionDAG &DAG, SDValue Op, DebugLoc dl) {
3068 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3069 DAG.getConstant(0x007fffff, MVT::i32));
3070 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
3071 DAG.getConstant(0x3f800000, MVT::i32));
3072 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t2);
3075 /// GetExponent - Get the exponent:
3077 /// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
3079 /// where Op is the hexadecimal representation of the floating-point value.
3081 GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
3083 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3084 DAG.getConstant(0x7f800000, MVT::i32));
3085 SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0,
3086 DAG.getConstant(23, TLI.getPointerTy()));
3087 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
3088 DAG.getConstant(127, MVT::i32));
3089 return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
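// For example, for Op holding the bits of 8.0f (0x41000000) the biased
// exponent field is 130, so this returns (float)(130 - 127) == 3.0f.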
3092 /// getF32Constant - Get 32-bit floating point constant.
3094 getF32Constant(SelectionDAG &DAG, unsigned Flt) {
3095 return DAG.getConstantFP(APFloat(APInt(32, Flt)), MVT::f32);
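// For example, getF32Constant(DAG, 0x3f800000) yields 1.0f.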
3098 /// Inlined utility function to implement binary input atomic intrinsics for
3099 /// visitIntrinsicCall: I is a call instruction, and
3100 /// Op is the associated NodeType for I.
3102 SelectionDAGLowering::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) {
3103 SDValue Root = getRoot();
3105 DAG.getAtomic(Op, getCurDebugLoc(),
3106 getValue(I.getOperand(2)).getValueType().getSimpleVT(),
3108 getValue(I.getOperand(1)),
3109 getValue(I.getOperand(2)),
3112 DAG.setRoot(L.getValue(1));
3116 // implVisitAluOverflow - Lower arithmetic overflow intrinsics.
3118 SelectionDAGLowering::implVisitAluOverflow(CallInst &I, ISD::NodeType Op) {
3119 SDValue Op1 = getValue(I.getOperand(1));
3120 SDValue Op2 = getValue(I.getOperand(2));
3122 SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
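// The node produces two results: the arithmetic value and an i1 flag that
// is set when the operation overflows.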
3123 SDValue Result = DAG.getNode(Op, getCurDebugLoc(), VTs, Op1, Op2);
3125 setValue(&I, Result);
3129 /// visitExp - Lower an exp intrinsic. Handles the special sequences for
3130 /// limited-precision mode.
3132 SelectionDAGLowering::visitExp(CallInst &I) {
3134 DebugLoc dl = getCurDebugLoc();
3136 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3137 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3138 SDValue Op = getValue(I.getOperand(1));
3140 // Put the exponent in the right bit position for later addition to the final result:
3143 // #define LOG2OFe 1.4426950f
3144 // IntegerPartOfX = ((int32_t)(X * LOG2OFe));
3145 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3146 getF32Constant(DAG, 0x3fb8aa3b));
3147 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3149 // FractionalPartOfX = (X * LOG2OFe) - (float)IntegerPartOfX;
3150 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3151 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3153 // IntegerPartOfX <<= 23;
3154 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3155 DAG.getConstant(23, TLI.getPointerTy()));
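// Shifting by 23 places the integer part directly in the exponent field of
// an IEEE-754 single-precision value, so it can later be added, in the
// integer domain, to the polynomial approximation of 2^FractionalPartOfX.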
3157 if (LimitFloatPrecision <= 6) {
3158 // For floating-point precision of 6:
3160 // TwoToFractionalPartOfX =
3162 // (0.735607626f + 0.252464424f * x) * x;
3164 // error 0.0144103317, which is 6 bits
3165 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3166 getF32Constant(DAG, 0x3e814304));
3167 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3168 getF32Constant(DAG, 0x3f3c50c8));
3169 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3170 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3171 getF32Constant(DAG, 0x3f7f5e7e));
3172 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t5);
3174 // Add the exponent into the result in integer domain.
3175 SDValue t6 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3176 TwoToFracPartOfX, IntegerPartOfX);
3178 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t6);
3179 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3180 // For floating-point precision of 12:
3182 // TwoToFractionalPartOfX =
3185 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3187 // 0.000107046256 error, which is 13 to 14 bits
3188 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3189 getF32Constant(DAG, 0x3da235e3));
3190 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3191 getF32Constant(DAG, 0x3e65b8f3));
3192 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3193 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3194 getF32Constant(DAG, 0x3f324b07));
3195 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3196 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3197 getF32Constant(DAG, 0x3f7ff8fd));
3198 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t7);
3200 // Add the exponent into the result in integer domain.
3201 SDValue t8 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3202 TwoToFracPartOfX, IntegerPartOfX);
3204 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t8);
3205 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3206 // For floating-point precision of 18:
3208 // TwoToFractionalPartOfX =
3212 // (0.554906021e-1f +
3213 // (0.961591928e-2f +
3214 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3216 // error 2.47208000*10^(-7), which is better than 18 bits
3217 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3218 getF32Constant(DAG, 0x3924b03e));
3219 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3220 getF32Constant(DAG, 0x3ab24b87));
3221 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3222 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3223 getF32Constant(DAG, 0x3c1d8c17));
3224 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3225 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3226 getF32Constant(DAG, 0x3d634a1d));
3227 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3228 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3229 getF32Constant(DAG, 0x3e75fe14));
3230 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3231 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3232 getF32Constant(DAG, 0x3f317234));
3233 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3234 SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3235 getF32Constant(DAG, 0x3f800000));
3236 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,
3239 // Add the exponent into the result in integer domain.
3240 SDValue t14 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3241 TwoToFracPartOfX, IntegerPartOfX);
3243 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t14);
3246 // No special expansion.
3247 result = DAG.getNode(ISD::FEXP, dl,
3248 getValue(I.getOperand(1)).getValueType(),
3249 getValue(I.getOperand(1)));
3252 setValue(&I, result);
3255 /// visitLog - Lower a log intrinsic. Handles the special sequences for
3256 /// limited-precision mode.
3258 SelectionDAGLowering::visitLog(CallInst &I) {
3260 DebugLoc dl = getCurDebugLoc();
3262 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3263 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3264 SDValue Op = getValue(I.getOperand(1));
3265 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3267 // Scale the exponent by log(2) [0.69314718f].
3268 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
3269 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3270 getF32Constant(DAG, 0x3f317218));
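// 0x3f317218 is the single-precision encoding of ln(2) ~= 0.693147f, so
// LogOfExponent == Exponent * ln(2).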
3272 // Get the significand and build it into a floating-point number with
3274 SDValue X = GetSignificand(DAG, Op1, dl);
3276 if (LimitFloatPrecision <= 6) {
3277 // For floating-point precision of 6:
3281 // (1.4034025f - 0.23903021f * x) * x;
3283 // error 0.0034276066, which is better than 8 bits
3284 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3285 getF32Constant(DAG, 0xbe74c456));
3286 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3287 getF32Constant(DAG, 0x3fb3a2b1));
3288 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3289 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3290 getF32Constant(DAG, 0x3f949a29));
3292 result = DAG.getNode(ISD::FADD, dl,
3293 MVT::f32, LogOfExponent, LogOfMantissa);
3294 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3295 // For floating-point precision of 12:
3301 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
3303 // error 0.000061011436, which is 14 bits
3304 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3305 getF32Constant(DAG, 0xbd67b6d6));
3306 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3307 getF32Constant(DAG, 0x3ee4f4b8));
3308 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3309 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3310 getF32Constant(DAG, 0x3fbc278b));
3311 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3312 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3313 getF32Constant(DAG, 0x40348e95));
3314 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3315 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3316 getF32Constant(DAG, 0x3fdef31a));
3318 result = DAG.getNode(ISD::FADD, dl,
3319 MVT::f32, LogOfExponent, LogOfMantissa);
3320 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3321 // For floating-point precision of 18:
3329 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
3331 // error 0.0000023660568, which is better than 18 bits
3332 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3333 getF32Constant(DAG, 0xbc91e5ac));
3334 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3335 getF32Constant(DAG, 0x3e4350aa));
3336 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3337 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3338 getF32Constant(DAG, 0x3f60d3e3));
3339 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3340 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3341 getF32Constant(DAG, 0x4011cdf0));
3342 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3343 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3344 getF32Constant(DAG, 0x406cfd1c));
3345 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3346 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3347 getF32Constant(DAG, 0x408797cb));
3348 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3349 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3350 getF32Constant(DAG, 0x4006dcab));
3352 result = DAG.getNode(ISD::FADD, dl,
3353 MVT::f32, LogOfExponent, LogOfMantissa);
3356 // No special expansion.
3357 result = DAG.getNode(ISD::FLOG, dl,
3358 getValue(I.getOperand(1)).getValueType(),
3359 getValue(I.getOperand(1)));
3362 setValue(&I, result);
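// Illustrative sketch: assuming GetExponent() extracts the unbiased exponent
// e as a float and GetSignificand() rebuilds the mantissa as a float m in
// [1,2), as their names suggest, the sequence above evaluates
//
//   ln(x) = e * ln(2) + LogOfMantissa(m)
//
// where LogOfMantissa is one of the minimax polynomial approximations of
// ln(m) over [1,2) selected by LimitFloatPrecision; the getF32Constant()
// literals are the f32 bit patterns of its coefficients.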
3365 /// visitLog2 - Lower a log2 intrinsic. Handles the special sequences for
3366 /// limited-precision mode.
3368 SelectionDAGLowering::visitLog2(CallInst &I) {
3370 DebugLoc dl = getCurDebugLoc();
3372 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3373 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3374 SDValue Op = getValue(I.getOperand(1));
3375 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3377 // Get the exponent.
3378 SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
3380 // Get the significand and build it into a floating-point number with value in [1,2).
3382 SDValue X = GetSignificand(DAG, Op1, dl);
3384 // Different possible minimax approximations of significand in
3385 // floating-point for various degrees of accuracy over [1,2].
3386 if (LimitFloatPrecision <= 6) {
3387 // For floating-point precision of 6:
3389 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
3391 // error 0.0049451742, which is more than 7 bits
3392 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3393 getF32Constant(DAG, 0xbeb08fe0));
3394 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3395 getF32Constant(DAG, 0x40019463));
3396 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3397 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3398 getF32Constant(DAG, 0x3fd6633d));
3400 result = DAG.getNode(ISD::FADD, dl,
3401 MVT::f32, LogOfExponent, Log2ofMantissa);
3402 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3403 // For floating-point precision of 12:
3409 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
3411 // error 0.0000876136000, which is better than 13 bits
3412 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3413 getF32Constant(DAG, 0xbda7262e));
3414 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3415 getF32Constant(DAG, 0x3f25280b));
3416 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3417 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3418 getF32Constant(DAG, 0x4007b923));
3419 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3420 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3421 getF32Constant(DAG, 0x40823e2f));
3422 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3423 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3424 getF32Constant(DAG, 0x4020d29c));
3426 result = DAG.getNode(ISD::FADD, dl,
3427 MVT::f32, LogOfExponent, Log2ofMantissa);
3428 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3429 // For floating-point precision of 18:
3438 // 0.25691327e-1f * x) * x) * x) * x) * x) * x;
3440 // error 0.0000018516, which is better than 18 bits
3441 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3442 getF32Constant(DAG, 0xbcd2769e));
3443 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3444 getF32Constant(DAG, 0x3e8ce0b9));
3445 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3446 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3447 getF32Constant(DAG, 0x3fa22ae7));
3448 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3449 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3450 getF32Constant(DAG, 0x40525723));
3451 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3452 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3453 getF32Constant(DAG, 0x40aaf200));
3454 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3455 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3456 getF32Constant(DAG, 0x40c39dad));
3457 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3458 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3459 getF32Constant(DAG, 0x4042902c));
3461 result = DAG.getNode(ISD::FADD, dl,
3462 MVT::f32, LogOfExponent, Log2ofMantissa);
3465 // No special expansion.
3466 result = DAG.getNode(ISD::FLOG2, dl,
3467 getValue(I.getOperand(1)).getValueType(),
3468 getValue(I.getOperand(1)));
3471 setValue(&I, result);
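// Illustrative note: this mirrors visitLog but needs no scaling constant for
// the exponent, since log2(x) = e + Log2ofMantissa(m) with m in [1,2); only
// the polynomial coefficients differ.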
3474 /// visitLog10 - Lower a log10 intrinsic. Handles the special sequences for
3475 /// limited-precision mode.
3477 SelectionDAGLowering::visitLog10(CallInst &I) {
3479 DebugLoc dl = getCurDebugLoc();
3481 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3482 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3483 SDValue Op = getValue(I.getOperand(1));
3484 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3486 // Scale the exponent by log10(2) [0.30102999f].
3487 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
3488 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3489 getF32Constant(DAG, 0x3e9a209a));
3491 // Get the significand and build it into a floating-point number with value in [1,2).
3493 SDValue X = GetSignificand(DAG, Op1, dl);
3495 if (LimitFloatPrecision <= 6) {
3496 // For floating-point precision of 6:
3498 // Log10ofMantissa =
3500 // (0.60948995f - 0.10380950f * x) * x;
3502 // error 0.0014886165, which is 6 bits
3503 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3504 getF32Constant(DAG, 0xbdd49a13));
3505 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3506 getF32Constant(DAG, 0x3f1c0789));
3507 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3508 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3509 getF32Constant(DAG, 0x3f011300));
3511 result = DAG.getNode(ISD::FADD, dl,
3512 MVT::f32, LogOfExponent, Log10ofMantissa);
3513 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3514 // For floating-point precision of 12:
3516 // Log10ofMantissa =
3519 // (-0.31664806f + 0.47637168e-1f * x) * x) * x;
3521 // error 0.00019228036, which is better than 12 bits
3522 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3523 getF32Constant(DAG, 0x3d431f31));
3524 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3525 getF32Constant(DAG, 0x3ea21fb2));
3526 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3527 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3528 getF32Constant(DAG, 0x3f6ae232));
3529 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3530 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3531 getF32Constant(DAG, 0x3f25f7c3));
3533 result = DAG.getNode(ISD::FADD, dl,
3534 MVT::f32, LogOfExponent, Log10ofMantissa);
3535 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3536 // For floating-point precision of 18:
3538 // Log10ofMantissa =
3543 // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
3545 // error 0.0000037995730, which is better than 18 bits
3546 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3547 getF32Constant(DAG, 0x3c5d51ce));
3548 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3549 getF32Constant(DAG, 0x3e00685a));
3550 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3551 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3552 getF32Constant(DAG, 0x3efb6798));
3553 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3554 SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3555 getF32Constant(DAG, 0x3f88d192));
3556 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3557 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3558 getF32Constant(DAG, 0x3fc4316c));
3559 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3560 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
3561 getF32Constant(DAG, 0x3f57ce70));
3563 result = DAG.getNode(ISD::FADD, dl,
3564 MVT::f32, LogOfExponent, Log10ofMantissa);
3567 // No special expansion.
3568 result = DAG.getNode(ISD::FLOG10, dl,
3569 getValue(I.getOperand(1)).getValueType(),
3570 getValue(I.getOperand(1)));
3573 setValue(&I, result);
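// Illustrative note: same structure again, with the exponent scaled by
// log10(2) ~= 0.30103 (the 0x3e9a209a constant above), i.e.
//   log10(x) = e * log10(2) + Log10ofMantissa(m), with m in [1,2).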
3576 /// visitExp2 - Lower an exp2 intrinsic. Handles the special sequences for
3577 /// limited-precision mode.
3579 SelectionDAGLowering::visitExp2(CallInst &I) {
3581 DebugLoc dl = getCurDebugLoc();
3583 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3584 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3585 SDValue Op = getValue(I.getOperand(1));
3587 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op);
3589 // FractionalPartOfX = x - (float)IntegerPartOfX;
3590 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3591 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, Op, t1);
3593 // IntegerPartOfX <<= 23;
3594 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3595 DAG.getConstant(23, TLI.getPointerTy()));
3597 if (LimitFloatPrecision <= 6) {
3598 // For floating-point precision of 6:
3600 // TwoToFractionalPartOfX =
3602 // (0.735607626f + 0.252464424f * x) * x;
3604 // error 0.0144103317, which is 6 bits
3605 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3606 getF32Constant(DAG, 0x3e814304));
3607 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3608 getF32Constant(DAG, 0x3f3c50c8));
3609 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3610 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3611 getF32Constant(DAG, 0x3f7f5e7e));
3612 SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
3613 SDValue TwoToFractionalPartOfX =
3614 DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
3616 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3617 MVT::f32, TwoToFractionalPartOfX);
3618 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3619 // For floating-point precision of 12:
3621 // TwoToFractionalPartOfX =
3624 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3626 // error 0.000107046256, which is 13 to 14 bits
3627 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3628 getF32Constant(DAG, 0x3da235e3));
3629 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3630 getF32Constant(DAG, 0x3e65b8f3));
3631 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3632 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3633 getF32Constant(DAG, 0x3f324b07));
3634 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3635 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3636 getF32Constant(DAG, 0x3f7ff8fd));
3637 SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
3638 SDValue TwoToFractionalPartOfX =
3639 DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
3641 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3642 MVT::f32, TwoToFractionalPartOfX);
3643 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3644 // For floating-point precision of 18:
3646 // TwoToFractionalPartOfX =
3650 // (0.554906021e-1f +
3651 // (0.961591928e-2f +
3652 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3653 // error 2.47208000*10^(-7), which is better than 18 bits
3654 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3655 getF32Constant(DAG, 0x3924b03e));
3656 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3657 getF32Constant(DAG, 0x3ab24b87));
3658 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3659 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3660 getF32Constant(DAG, 0x3c1d8c17));
3661 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3662 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3663 getF32Constant(DAG, 0x3d634a1d));
3664 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3665 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3666 getF32Constant(DAG, 0x3e75fe14));
3667 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3668 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3669 getF32Constant(DAG, 0x3f317234));
3670 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3671 SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3672 getF32Constant(DAG, 0x3f800000));
3673 SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
3674 SDValue TwoToFractionalPartOfX =
3675 DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
3677 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3678 MVT::f32, TwoToFractionalPartOfX);
3681 // No special expansion.
3682 result = DAG.getNode(ISD::FEXP2, dl,
3683 getValue(I.getOperand(1)).getValueType(),
3684 getValue(I.getOperand(1)));
3687 setValue(&I, result);
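// Illustrative note: 2^x is computed as 2^n * 2^f with n = (int)x and
// f = x - n; the polynomials above approximate 2^f, and 2^n is folded in by
// adding n << 23 directly to the exponent field of the f32 bit pattern.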
3690 /// visitPow - Lower a pow intrinsic. Handles the special sequences for
3691 /// limited-precision mode when the base is 10.0f.
3693 SelectionDAGLowering::visitPow(CallInst &I) {
3695 Value *Val = I.getOperand(1);
3696 DebugLoc dl = getCurDebugLoc();
3697 bool IsExp10 = false;
3699 if (getValue(Val).getValueType() == MVT::f32 &&
3700 getValue(I.getOperand(2)).getValueType() == MVT::f32 &&
3701 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3702 if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(Val))) {
3703 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3705 IsExp10 = CFP->getValueAPF().bitwiseIsEqual(Ten);
3710 if (IsExp10 && LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3711 SDValue Op = getValue(I.getOperand(2));
3713 // Put the exponent in the right bit position for later addition to the final result:
3716 // #define LOG2OF10 3.3219281f
3717 // IntegerPartOfX = (int32_t)(x * LOG2OF10);
3718 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3719 getF32Constant(DAG, 0x40549a78));
3720 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3722 // FractionalPartOfX = x - (float)IntegerPartOfX;
3723 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3724 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3726 // IntegerPartOfX <<= 23;
3727 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3728 DAG.getConstant(23, TLI.getPointerTy()));
3730 if (LimitFloatPrecision <= 6) {
3731 // For floating-point precision of 6:
3733 // twoToFractionalPartOfX =
3735 // (0.735607626f + 0.252464424f * x) * x;
3737 // error 0.0144103317, which is 6 bits
3738 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3739 getF32Constant(DAG, 0x3e814304));
3740 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3741 getF32Constant(DAG, 0x3f3c50c8));
3742 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3743 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3744 getF32Constant(DAG, 0x3f7f5e7e));
3745 SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
3746 SDValue TwoToFractionalPartOfX =
3747 DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
3749 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3750 MVT::f32, TwoToFractionalPartOfX);
3751 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3752 // For floating-point precision of 12:
3754 // TwoToFractionalPartOfX =
3757 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3759 // error 0.000107046256, which is 13 to 14 bits
3760 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3761 getF32Constant(DAG, 0x3da235e3));
3762 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3763 getF32Constant(DAG, 0x3e65b8f3));
3764 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3765 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3766 getF32Constant(DAG, 0x3f324b07));
3767 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3768 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3769 getF32Constant(DAG, 0x3f7ff8fd));
3770 SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
3771 SDValue TwoToFractionalPartOfX =
3772 DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
3774 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3775 MVT::f32, TwoToFractionalPartOfX);
3776 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3777 // For floating-point precision of 18:
3779 // TwoToFractionalPartOfX =
3783 // (0.554906021e-1f +
3784 // (0.961591928e-2f +
3785 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3786 // error 2.47208000*10^(-7), which is better than 18 bits
3787 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3788 getF32Constant(DAG, 0x3924b03e));
3789 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3790 getF32Constant(DAG, 0x3ab24b87));
3791 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3792 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3793 getF32Constant(DAG, 0x3c1d8c17));
3794 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3795 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3796 getF32Constant(DAG, 0x3d634a1d));
3797 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3798 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3799 getF32Constant(DAG, 0x3e75fe14));
3800 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3801 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3802 getF32Constant(DAG, 0x3f317234));
3803 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3804 SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3805 getF32Constant(DAG, 0x3f800000));
3806 SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
3807 SDValue TwoToFractionalPartOfX =
3808 DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
3810 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3811 MVT::f32, TwoToFractionalPartOfX);
3814 // No special expansion.
3815 result = DAG.getNode(ISD::FPOW, dl,
3816 getValue(I.getOperand(1)).getValueType(),
3817 getValue(I.getOperand(1)),
3818 getValue(I.getOperand(2)));
3821 setValue(&I, result);
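// Illustrative note: the special case handled above is pow(10.0f, x),
// rewritten as 2^(x * log2(10)) and then expanded like exp2;
// 0x40549a78 is the f32 bit pattern of log2(10) ~= 3.3219281 (LOG2OF10).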
3824 /// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
3825 /// we want to emit this as a call to a named external function, return the name;
3826 /// otherwise, lower it and return null.
3828 SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
3829 DebugLoc dl = getCurDebugLoc();
3830 switch (Intrinsic) {
3832 // By default, turn this into a target intrinsic node.
3833 visitTargetIntrinsic(I, Intrinsic);
3835 case Intrinsic::vastart: visitVAStart(I); return 0;
3836 case Intrinsic::vaend: visitVAEnd(I); return 0;
3837 case Intrinsic::vacopy: visitVACopy(I); return 0;
3838 case Intrinsic::returnaddress:
3839 setValue(&I, DAG.getNode(ISD::RETURNADDR, dl, TLI.getPointerTy(),
3840 getValue(I.getOperand(1))));
3842 case Intrinsic::frameaddress:
3843 setValue(&I, DAG.getNode(ISD::FRAMEADDR, dl, TLI.getPointerTy(),
3844 getValue(I.getOperand(1))));
3846 case Intrinsic::setjmp:
3847 return "_setjmp"+!TLI.usesUnderscoreSetJmp();
3849 case Intrinsic::longjmp:
3850 return "_longjmp"+!TLI.usesUnderscoreLongJmp();
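// Note on the two returns above: the pointer arithmetic intentionally skips
// the leading underscore when the target does not use the underscored form,
// e.g. "_setjmp"+0 == "_setjmp" but "_setjmp"+1 == "setjmp".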
3852 case Intrinsic::memcpy: {
3853 SDValue Op1 = getValue(I.getOperand(1));
3854 SDValue Op2 = getValue(I.getOperand(2));
3855 SDValue Op3 = getValue(I.getOperand(3));
3856 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3857 DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
3858 I.getOperand(1), 0, I.getOperand(2), 0));
3861 case Intrinsic::memset: {
3862 SDValue Op1 = getValue(I.getOperand(1));
3863 SDValue Op2 = getValue(I.getOperand(2));
3864 SDValue Op3 = getValue(I.getOperand(3));
3865 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3866 DAG.setRoot(DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align,
3867 I.getOperand(1), 0));
3870 case Intrinsic::memmove: {
3871 SDValue Op1 = getValue(I.getOperand(1));
3872 SDValue Op2 = getValue(I.getOperand(2));
3873 SDValue Op3 = getValue(I.getOperand(3));
3874 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3876 // If the source and destination are known to not be aliases, we can
3877 // lower memmove as memcpy.
3878 uint64_t Size = -1ULL;
3879 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op3))
3880 Size = C->getZExtValue();
3881 if (AA->alias(I.getOperand(1), Size, I.getOperand(2), Size) ==
3882 AliasAnalysis::NoAlias) {
3883 DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
3884 I.getOperand(1), 0, I.getOperand(2), 0));
3888 DAG.setRoot(DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align,
3889 I.getOperand(1), 0, I.getOperand(2), 0));
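// Illustrative example: for a memmove of 64 bytes between two distinct local
// allocations, alias analysis reports NoAlias, so the call is emitted as the
// overlap-free memcpy node above; otherwise the conservative memmove node is
// used.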
3892 case Intrinsic::dbg_stoppoint: {
3893 DbgStopPointInst &SPI = cast<DbgStopPointInst>(I);
3894 if (DIDescriptor::ValidDebugInfo(SPI.getContext(), OptLevel)) {
3895 MachineFunction &MF = DAG.getMachineFunction();
3896 DICompileUnit CU(cast<GlobalVariable>(SPI.getContext()));
3897 DebugLoc Loc = DebugLoc::get(MF.getOrCreateDebugLocID(CU.getGV(),
3898 SPI.getLine(), SPI.getColumn()));
3899 setCurDebugLoc(Loc);
3901 if (OptLevel == CodeGenOpt::None)
3902 DAG.setRoot(DAG.getDbgStopPoint(Loc, getRoot(),
3909 case Intrinsic::dbg_region_start: {
3910 DwarfWriter *DW = DAG.getDwarfWriter();
3911 DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I);
3913 if (DIDescriptor::ValidDebugInfo(RSI.getContext(), OptLevel) &&
3914 DW && DW->ShouldEmitDwarfDebug()) {
3916 DW->RecordRegionStart(cast<GlobalVariable>(RSI.getContext()));
3917 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3918 getRoot(), LabelID));
3923 case Intrinsic::dbg_region_end: {
3924 DwarfWriter *DW = DAG.getDwarfWriter();
3925 DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I);
3927 if (DIDescriptor::ValidDebugInfo(REI.getContext(), OptLevel) &&
3928 DW && DW->ShouldEmitDwarfDebug()) {
3929 MachineFunction &MF = DAG.getMachineFunction();
3930 DISubprogram Subprogram(cast<GlobalVariable>(REI.getContext()));
3932 Subprogram.getLinkageName(SPName);
3934 && strcmp(SPName.c_str(), MF.getFunction()->getNameStart())) {
3935 // This is the end of an inlined function. Debugging information for
3936 // inlined functions is not handled yet (it is only supported by FastISel).
3937 if (OptLevel == CodeGenOpt::None) {
3938 unsigned ID = DW->RecordInlinedFnEnd(Subprogram);
3940 // The returned ID is 0 if this is an unbalanced "end of inlined
3941 // scope". This can happen if the optimizer eats dbg intrinsics
3942 // or the "beginning of inlined scope" is not recognized due to
3943 // missing location info. In such cases, ignore this region.end.
3944 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3951 DW->RecordRegionEnd(cast<GlobalVariable>(REI.getContext()));
3952 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3953 getRoot(), LabelID));
3958 case Intrinsic::dbg_func_start: {
3959 DwarfWriter *DW = DAG.getDwarfWriter();
3960 DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I);
3961 Value *SP = FSI.getSubprogram();
3962 if (!DIDescriptor::ValidDebugInfo(SP, OptLevel))
3965 MachineFunction &MF = DAG.getMachineFunction();
3966 if (OptLevel == CodeGenOpt::None) {
3967 // llvm.dbg.func.start implicitly defines a dbg_stoppoint which is what
3968 // (most?) gdb expects.
3969 DebugLoc PrevLoc = CurDebugLoc;
3970 DISubprogram Subprogram(cast<GlobalVariable>(SP));
3971 DICompileUnit CompileUnit = Subprogram.getCompileUnit();
3973 if (!Subprogram.describes(MF.getFunction())) {
3974 // This is the beginning of an inlined function.
3976 // If llvm.dbg.func.start is seen in a new block before any
3977 // llvm.dbg.stoppoint intrinsic, then the location info is unknown.
3978 // FIXME: Why is DebugLoc reset at the beginning of each block?
3979 if (PrevLoc.isUnknown())
3982 // Record the source line.
3983 unsigned Line = Subprogram.getLineNumber();
3984 setCurDebugLoc(DebugLoc::get(
3985 MF.getOrCreateDebugLocID(CompileUnit.getGV(), Line, 0)));
3987 if (DW && DW->ShouldEmitDwarfDebug()) {
3988 DebugLocTuple PrevLocTpl = MF.getDebugLocTuple(PrevLoc);
3989 unsigned LabelID = DW->RecordInlinedFnStart(Subprogram,
3990 DICompileUnit(PrevLocTpl.CompileUnit),
3993 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3994 getRoot(), LabelID));
3997 // Record the source line.
3998 unsigned Line = Subprogram.getLineNumber();
3999 MF.setDefaultDebugLoc(DebugLoc::get(
4000 MF.getOrCreateDebugLocID(CompileUnit.getGV(), Line, 0)));
4001 if (DW && DW->ShouldEmitDwarfDebug()) {
4002 // llvm.dbg.func_start also defines the beginning of the function scope.
4003 DW->RecordRegionStart(cast<GlobalVariable>(FSI.getSubprogram()));
4007 DISubprogram Subprogram(cast<GlobalVariable>(SP));
4010 Subprogram.getLinkageName(SPName);
4012 && strcmp(SPName.c_str(), MF.getFunction()->getNameStart())) {
4013 // This is the beginning of an inlined function. Debugging information for
4014 // inlined functions is not handled yet (it is only supported by FastISel).
4018 // llvm.dbg.func.start implicitly defines a dbg_stoppoint which is
4019 // what (most?) gdb expects.
4020 DICompileUnit CompileUnit = Subprogram.getCompileUnit();
4022 // Record the source line, but do not create a label for the normal
4023 // function start; it will be emitted at asm emission time. However,
4024 // create a label if this is the beginning of an inlined function.
4025 unsigned Line = Subprogram.getLineNumber();
4026 setCurDebugLoc(DebugLoc::get(
4027 MF.getOrCreateDebugLocID(CompileUnit.getGV(), Line, 0)));
4028 // FIXME - Start a new region because llvm.dbg.func_start also defines
4029 // the beginning of the function scope.
4034 case Intrinsic::dbg_declare: {
4035 if (OptLevel == CodeGenOpt::None) {
4036 DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
4037 Value *Variable = DI.getVariable();
4038 if (DIDescriptor::ValidDebugInfo(Variable, OptLevel))
4039 DAG.setRoot(DAG.getNode(ISD::DECLARE, dl, MVT::Other, getRoot(),
4040 getValue(DI.getAddress()), getValue(Variable)));
4042 // FIXME: Do something sensible here when we support debug declare.
4046 case Intrinsic::eh_exception: {
4047 if (!CurMBB->isLandingPad()) {
4048 // FIXME: Mark exception register as live in. Hack for PR1508.
4049 unsigned Reg = TLI.getExceptionAddressRegister();
4050 if (Reg) CurMBB->addLiveIn(Reg);
4052 // Insert the EXCEPTIONADDR instruction.
4053 SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
4055 Ops[0] = DAG.getRoot();
4056 SDValue Op = DAG.getNode(ISD::EXCEPTIONADDR, dl, VTs, Ops, 1);
4058 DAG.setRoot(Op.getValue(1));
4062 case Intrinsic::eh_selector_i32:
4063 case Intrinsic::eh_selector_i64: {
4064 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4065 MVT VT = (Intrinsic == Intrinsic::eh_selector_i32 ?
4066 MVT::i32 : MVT::i64);
4069 if (CurMBB->isLandingPad())
4070 AddCatchInfo(I, MMI, CurMBB);
4073 FuncInfo.CatchInfoLost.insert(&I);
4075 // FIXME: Mark exception selector register as live in. Hack for PR1508.
4076 unsigned Reg = TLI.getExceptionSelectorRegister();
4077 if (Reg) CurMBB->addLiveIn(Reg);
4080 // Insert the EHSELECTION instruction.
4081 SDVTList VTs = DAG.getVTList(VT, MVT::Other);
4083 Ops[0] = getValue(I.getOperand(1));
4085 SDValue Op = DAG.getNode(ISD::EHSELECTION, dl, VTs, Ops, 2);
4087 DAG.setRoot(Op.getValue(1));
4089 setValue(&I, DAG.getConstant(0, VT));
4095 case Intrinsic::eh_typeid_for_i32:
4096 case Intrinsic::eh_typeid_for_i64: {
4097 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4098 MVT VT = (Intrinsic == Intrinsic::eh_typeid_for_i32 ?
4099 MVT::i32 : MVT::i64);
4102 // Find the type id for the given typeinfo.
4103 GlobalVariable *GV = ExtractTypeInfo(I.getOperand(1));
4105 unsigned TypeID = MMI->getTypeIDFor(GV);
4106 setValue(&I, DAG.getConstant(TypeID, VT));
4108 // Return something different from eh_selector.
4109 setValue(&I, DAG.getConstant(1, VT));
4115 case Intrinsic::eh_return_i32:
4116 case Intrinsic::eh_return_i64:
4117 if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
4118 MMI->setCallsEHReturn(true);
4119 DAG.setRoot(DAG.getNode(ISD::EH_RETURN, dl,
4122 getValue(I.getOperand(1)),
4123 getValue(I.getOperand(2))));
4125 setValue(&I, DAG.getConstant(0, TLI.getPointerTy()));
4129 case Intrinsic::eh_unwind_init:
4130 if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
4131 MMI->setCallsUnwindInit(true);
4136 case Intrinsic::eh_dwarf_cfa: {
4137 MVT VT = getValue(I.getOperand(1)).getValueType();
4139 if (VT.bitsGT(TLI.getPointerTy()))
4140 CfaArg = DAG.getNode(ISD::TRUNCATE, dl,
4141 TLI.getPointerTy(), getValue(I.getOperand(1)));
4143 CfaArg = DAG.getNode(ISD::SIGN_EXTEND, dl,
4144 TLI.getPointerTy(), getValue(I.getOperand(1)));
4146 SDValue Offset = DAG.getNode(ISD::ADD, dl,
4148 DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, dl,
4149 TLI.getPointerTy()),
4151 setValue(&I, DAG.getNode(ISD::ADD, dl,
4153 DAG.getNode(ISD::FRAMEADDR, dl,
4156 TLI.getPointerTy())),
4161 case Intrinsic::convertff:
4162 case Intrinsic::convertfsi:
4163 case Intrinsic::convertfui:
4164 case Intrinsic::convertsif:
4165 case Intrinsic::convertuif:
4166 case Intrinsic::convertss:
4167 case Intrinsic::convertsu:
4168 case Intrinsic::convertus:
4169 case Intrinsic::convertuu: {
4170 ISD::CvtCode Code = ISD::CVT_INVALID;
4171 switch (Intrinsic) {
4172 case Intrinsic::convertff: Code = ISD::CVT_FF; break;
4173 case Intrinsic::convertfsi: Code = ISD::CVT_FS; break;
4174 case Intrinsic::convertfui: Code = ISD::CVT_FU; break;
4175 case Intrinsic::convertsif: Code = ISD::CVT_SF; break;
4176 case Intrinsic::convertuif: Code = ISD::CVT_UF; break;
4177 case Intrinsic::convertss: Code = ISD::CVT_SS; break;
4178 case Intrinsic::convertsu: Code = ISD::CVT_SU; break;
4179 case Intrinsic::convertus: Code = ISD::CVT_US; break;
4180 case Intrinsic::convertuu: Code = ISD::CVT_UU; break;
4182 MVT DestVT = TLI.getValueType(I.getType());
4183 Value* Op1 = I.getOperand(1);
4184 setValue(&I, DAG.getConvertRndSat(DestVT, getCurDebugLoc(), getValue(Op1),
4185 DAG.getValueType(DestVT),
4186 DAG.getValueType(getValue(Op1).getValueType()),
4187 getValue(I.getOperand(2)),
4188 getValue(I.getOperand(3)),
4193 case Intrinsic::sqrt:
4194 setValue(&I, DAG.getNode(ISD::FSQRT, dl,
4195 getValue(I.getOperand(1)).getValueType(),
4196 getValue(I.getOperand(1))));
4198 case Intrinsic::powi:
4199 setValue(&I, DAG.getNode(ISD::FPOWI, dl,
4200 getValue(I.getOperand(1)).getValueType(),
4201 getValue(I.getOperand(1)),
4202 getValue(I.getOperand(2))));
4204 case Intrinsic::sin:
4205 setValue(&I, DAG.getNode(ISD::FSIN, dl,
4206 getValue(I.getOperand(1)).getValueType(),
4207 getValue(I.getOperand(1))));
4209 case Intrinsic::cos:
4210 setValue(&I, DAG.getNode(ISD::FCOS, dl,
4211 getValue(I.getOperand(1)).getValueType(),
4212 getValue(I.getOperand(1))));
4214 case Intrinsic::log:
4217 case Intrinsic::log2:
4220 case Intrinsic::log10:
4223 case Intrinsic::exp:
4226 case Intrinsic::exp2:
4229 case Intrinsic::pow:
4232 case Intrinsic::pcmarker: {
4233 SDValue Tmp = getValue(I.getOperand(1));
4234 DAG.setRoot(DAG.getNode(ISD::PCMARKER, dl, MVT::Other, getRoot(), Tmp));
4237 case Intrinsic::readcyclecounter: {
4238 SDValue Op = getRoot();
4239 SDValue Tmp = DAG.getNode(ISD::READCYCLECOUNTER, dl,
4240 DAG.getVTList(MVT::i64, MVT::Other),
4243 DAG.setRoot(Tmp.getValue(1));
4246 case Intrinsic::part_select: {
4247 // Currently not implemented: just abort
4248 assert(0 && "part_select intrinsic not implemented");
4251 case Intrinsic::part_set: {
4252 // Currently not implemented: just abort
4253 assert(0 && "part_set intrinsic not implemented");
4256 case Intrinsic::bswap:
4257 setValue(&I, DAG.getNode(ISD::BSWAP, dl,
4258 getValue(I.getOperand(1)).getValueType(),
4259 getValue(I.getOperand(1))));
4261 case Intrinsic::cttz: {
4262 SDValue Arg = getValue(I.getOperand(1));
4263 MVT Ty = Arg.getValueType();
4264 SDValue result = DAG.getNode(ISD::CTTZ, dl, Ty, Arg);
4265 setValue(&I, result);
4268 case Intrinsic::ctlz: {
4269 SDValue Arg = getValue(I.getOperand(1));
4270 MVT Ty = Arg.getValueType();
4271 SDValue result = DAG.getNode(ISD::CTLZ, dl, Ty, Arg);
4272 setValue(&I, result);
4275 case Intrinsic::ctpop: {
4276 SDValue Arg = getValue(I.getOperand(1));
4277 MVT Ty = Arg.getValueType();
4278 SDValue result = DAG.getNode(ISD::CTPOP, dl, Ty, Arg);
4279 setValue(&I, result);
4282 case Intrinsic::stacksave: {
4283 SDValue Op = getRoot();
4284 SDValue Tmp = DAG.getNode(ISD::STACKSAVE, dl,
4285 DAG.getVTList(TLI.getPointerTy(), MVT::Other), &Op, 1);
4287 DAG.setRoot(Tmp.getValue(1));
4290 case Intrinsic::stackrestore: {
4291 SDValue Tmp = getValue(I.getOperand(1));
4292 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, dl, MVT::Other, getRoot(), Tmp));
4295 case Intrinsic::stackprotector: {
4296 // Emit code into the DAG to store the stack guard onto the stack.
4297 MachineFunction &MF = DAG.getMachineFunction();
4298 MachineFrameInfo *MFI = MF.getFrameInfo();
4299 MVT PtrTy = TLI.getPointerTy();
4301 SDValue Src = getValue(I.getOperand(1)); // The guard's value.
4302 AllocaInst *Slot = cast<AllocaInst>(I.getOperand(2));
4304 int FI = FuncInfo.StaticAllocaMap[Slot];
4305 MFI->setStackProtectorIndex(FI);
4307 SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
4309 // Store the stack protector onto the stack.
4310 SDValue Result = DAG.getStore(getRoot(), getCurDebugLoc(), Src, FIN,
4311 PseudoSourceValue::getFixedStack(FI),
4313 setValue(&I, Result);
4314 DAG.setRoot(Result);
4317 case Intrinsic::var_annotation:
4318 // Discard annotate attributes
4321 case Intrinsic::init_trampoline: {
4322 const Function *F = cast<Function>(I.getOperand(2)->stripPointerCasts());
4326 Ops[1] = getValue(I.getOperand(1));
4327 Ops[2] = getValue(I.getOperand(2));
4328 Ops[3] = getValue(I.getOperand(3));
4329 Ops[4] = DAG.getSrcValue(I.getOperand(1));
4330 Ops[5] = DAG.getSrcValue(F);
4332 SDValue Tmp = DAG.getNode(ISD::TRAMPOLINE, dl,
4333 DAG.getVTList(TLI.getPointerTy(), MVT::Other),
4337 DAG.setRoot(Tmp.getValue(1));
4341 case Intrinsic::gcroot:
4343 Value *Alloca = I.getOperand(1);
4344 Constant *TypeMap = cast<Constant>(I.getOperand(2));
4346 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
4347 GFI->addStackRoot(FI->getIndex(), TypeMap);
4351 case Intrinsic::gcread:
4352 case Intrinsic::gcwrite:
4353 assert(0 && "GC failed to lower gcread/gcwrite intrinsics!");
4356 case Intrinsic::flt_rounds: {
4357 setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, dl, MVT::i32));
4361 case Intrinsic::trap: {
4362 DAG.setRoot(DAG.getNode(ISD::TRAP, dl,MVT::Other, getRoot()));
4366 case Intrinsic::uadd_with_overflow:
4367 return implVisitAluOverflow(I, ISD::UADDO);
4368 case Intrinsic::sadd_with_overflow:
4369 return implVisitAluOverflow(I, ISD::SADDO);
4370 case Intrinsic::usub_with_overflow:
4371 return implVisitAluOverflow(I, ISD::USUBO);
4372 case Intrinsic::ssub_with_overflow:
4373 return implVisitAluOverflow(I, ISD::SSUBO);
4374 case Intrinsic::umul_with_overflow:
4375 return implVisitAluOverflow(I, ISD::UMULO);
4376 case Intrinsic::smul_with_overflow:
4377 return implVisitAluOverflow(I, ISD::SMULO);
4379 case Intrinsic::prefetch: {
4382 Ops[1] = getValue(I.getOperand(1));
4383 Ops[2] = getValue(I.getOperand(2));
4384 Ops[3] = getValue(I.getOperand(3));
4385 DAG.setRoot(DAG.getNode(ISD::PREFETCH, dl, MVT::Other, &Ops[0], 4));
4389 case Intrinsic::memory_barrier: {
4392 for (int x = 1; x < 6; ++x)
4393 Ops[x] = getValue(I.getOperand(x));
4395 DAG.setRoot(DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, &Ops[0], 6));
4398 case Intrinsic::atomic_cmp_swap: {
4399 SDValue Root = getRoot();
4401 DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, getCurDebugLoc(),
4402 getValue(I.getOperand(2)).getValueType().getSimpleVT(),
4404 getValue(I.getOperand(1)),
4405 getValue(I.getOperand(2)),
4406 getValue(I.getOperand(3)),
4409 DAG.setRoot(L.getValue(1));
4412 case Intrinsic::atomic_load_add:
4413 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD);
4414 case Intrinsic::atomic_load_sub:
4415 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB);
4416 case Intrinsic::atomic_load_or:
4417 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR);
4418 case Intrinsic::atomic_load_xor:
4419 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR);
4420 case Intrinsic::atomic_load_and:
4421 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND);
4422 case Intrinsic::atomic_load_nand:
4423 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND);
4424 case Intrinsic::atomic_load_max:
4425 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX);
4426 case Intrinsic::atomic_load_min:
4427 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN);
4428 case Intrinsic::atomic_load_umin:
4429 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN);
4430 case Intrinsic::atomic_load_umax:
4431 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX);
4432 case Intrinsic::atomic_swap:
4433 return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP);
4438 void SelectionDAGLowering::LowerCallTo(CallSite CS, SDValue Callee,
4440 MachineBasicBlock *LandingPad) {
4441 const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
4442 const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
4443 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4444 unsigned BeginLabel = 0, EndLabel = 0;
4446 TargetLowering::ArgListTy Args;
4447 TargetLowering::ArgListEntry Entry;
4448 Args.reserve(CS.arg_size());
4449 for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
4451 SDValue ArgNode = getValue(*i);
4452 Entry.Node = ArgNode; Entry.Ty = (*i)->getType();
4454 unsigned attrInd = i - CS.arg_begin() + 1;
4455 Entry.isSExt = CS.paramHasAttr(attrInd, Attribute::SExt);
4456 Entry.isZExt = CS.paramHasAttr(attrInd, Attribute::ZExt);
4457 Entry.isInReg = CS.paramHasAttr(attrInd, Attribute::InReg);
4458 Entry.isSRet = CS.paramHasAttr(attrInd, Attribute::StructRet);
4459 Entry.isNest = CS.paramHasAttr(attrInd, Attribute::Nest);
4460 Entry.isByVal = CS.paramHasAttr(attrInd, Attribute::ByVal);
4461 Entry.Alignment = CS.getParamAlignment(attrInd);
4462 Args.push_back(Entry);
4465 if (LandingPad && MMI) {
4466 // Insert a label before the invoke call to mark the try range. This can be
4467 // used to detect deletion of the invoke via the MachineModuleInfo.
4468 BeginLabel = MMI->NextLabelID();
4469 // Both PendingLoads and PendingExports must be flushed here;
4470 // this call might not return.
4472 DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
4473 getControlRoot(), BeginLabel));
4476 std::pair<SDValue,SDValue> Result =
4477 TLI.LowerCallTo(getRoot(), CS.getType(),
4478 CS.paramHasAttr(0, Attribute::SExt),
4479 CS.paramHasAttr(0, Attribute::ZExt), FTy->isVarArg(),
4480 CS.paramHasAttr(0, Attribute::InReg),
4481 CS.getCallingConv(),
4482 IsTailCall && PerformTailCallOpt,
4483 Callee, Args, DAG, getCurDebugLoc());
4484 if (CS.getType() != Type::VoidTy)
4485 setValue(CS.getInstruction(), Result.first);
4486 DAG.setRoot(Result.second);
4488 if (LandingPad && MMI) {
4489 // Insert a label at the end of the invoke call to mark the try range. This
4490 // can be used to detect deletion of the invoke via the MachineModuleInfo.
4491 EndLabel = MMI->NextLabelID();
4492 DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
4493 getRoot(), EndLabel));
4495 // Inform MachineModuleInfo of range.
4496 MMI->addInvoke(LandingPad, BeginLabel, EndLabel);
4501 void SelectionDAGLowering::visitCall(CallInst &I) {
4502 const char *RenameFn = 0;
4503 if (Function *F = I.getCalledFunction()) {
4504 if (F->isDeclaration()) {
4505 const TargetIntrinsicInfo *II = TLI.getTargetMachine().getIntrinsicInfo();
4507 if (unsigned IID = II->getIntrinsicID(F)) {
4508 RenameFn = visitIntrinsicCall(I, IID);
4513 if (unsigned IID = F->getIntrinsicID()) {
4514 RenameFn = visitIntrinsicCall(I, IID);
4520 // Check for well-known libc/libm calls. If the function is internal, it
4521 // can't be a library call.
4522 unsigned NameLen = F->getNameLen();
4523 if (!F->hasLocalLinkage() && NameLen) {
4524 const char *NameStr = F->getNameStart();
4525 if (NameStr[0] == 'c' &&
4526 ((NameLen == 8 && !strcmp(NameStr, "copysign")) ||
4527 (NameLen == 9 && !strcmp(NameStr, "copysignf")))) {
4528 if (I.getNumOperands() == 3 && // Basic sanity checks.
4529 I.getOperand(1)->getType()->isFloatingPoint() &&
4530 I.getType() == I.getOperand(1)->getType() &&
4531 I.getType() == I.getOperand(2)->getType()) {
4532 SDValue LHS = getValue(I.getOperand(1));
4533 SDValue RHS = getValue(I.getOperand(2));
4534 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurDebugLoc(),
4535 LHS.getValueType(), LHS, RHS));
4538 } else if (NameStr[0] == 'f' &&
4539 ((NameLen == 4 && !strcmp(NameStr, "fabs")) ||
4540 (NameLen == 5 && !strcmp(NameStr, "fabsf")) ||
4541 (NameLen == 5 && !strcmp(NameStr, "fabsl")))) {
4542 if (I.getNumOperands() == 2 && // Basic sanity checks.
4543 I.getOperand(1)->getType()->isFloatingPoint() &&
4544 I.getType() == I.getOperand(1)->getType()) {
4545 SDValue Tmp = getValue(I.getOperand(1));
4546 setValue(&I, DAG.getNode(ISD::FABS, getCurDebugLoc(),
4547 Tmp.getValueType(), Tmp));
4550 } else if (NameStr[0] == 's' &&
4551 ((NameLen == 3 && !strcmp(NameStr, "sin")) ||
4552 (NameLen == 4 && !strcmp(NameStr, "sinf")) ||
4553 (NameLen == 4 && !strcmp(NameStr, "sinl")))) {
4554 if (I.getNumOperands() == 2 && // Basic sanity checks.
4555 I.getOperand(1)->getType()->isFloatingPoint() &&
4556 I.getType() == I.getOperand(1)->getType()) {
4557 SDValue Tmp = getValue(I.getOperand(1));
4558 setValue(&I, DAG.getNode(ISD::FSIN, getCurDebugLoc(),
4559 Tmp.getValueType(), Tmp));
4562 } else if (NameStr[0] == 'c' &&
4563 ((NameLen == 3 && !strcmp(NameStr, "cos")) ||
4564 (NameLen == 4 && !strcmp(NameStr, "cosf")) ||
4565 (NameLen == 4 && !strcmp(NameStr, "cosl")))) {
4566 if (I.getNumOperands() == 2 && // Basic sanity checks.
4567 I.getOperand(1)->getType()->isFloatingPoint() &&
4568 I.getType() == I.getOperand(1)->getType()) {
4569 SDValue Tmp = getValue(I.getOperand(1));
4570 setValue(&I, DAG.getNode(ISD::FCOS, getCurDebugLoc(),
4571 Tmp.getValueType(), Tmp));
4576 } else if (isa<InlineAsm>(I.getOperand(0))) {
4583 Callee = getValue(I.getOperand(0));
4585 Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
4587 LowerCallTo(&I, Callee, I.isTailCall());
4591 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copy from
4592 /// this value and return the result as a ValueVT value. This uses
4593 /// Chain/Flag as the input and updates them for the output Chain/Flag.
4594 /// If the Flag pointer is NULL, no flag is used.
4595 SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
4597 SDValue *Flag) const {
4598 // Assemble the legal parts into the final values.
4599 SmallVector<SDValue, 4> Values(ValueVTs.size());
4600 SmallVector<SDValue, 8> Parts;
4601 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
4602 // Copy the legal parts from the registers.
4603 MVT ValueVT = ValueVTs[Value];
4604 unsigned NumRegs = TLI->getNumRegisters(ValueVT);
4605 MVT RegisterVT = RegVTs[Value];
4607 Parts.resize(NumRegs);
4608 for (unsigned i = 0; i != NumRegs; ++i) {
4611 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
4613 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
4614 *Flag = P.getValue(2);
4616 Chain = P.getValue(1);
4618 // If the source register was virtual and if we know something about it,
4619 // add an assert node.
4620 if (TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) &&
4621 RegisterVT.isInteger() && !RegisterVT.isVector()) {
4622 unsigned SlotNo = Regs[Part+i]-TargetRegisterInfo::FirstVirtualRegister;
4623 FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
4624 if (FLI.LiveOutRegInfo.size() > SlotNo) {
4625 FunctionLoweringInfo::LiveOutInfo &LOI = FLI.LiveOutRegInfo[SlotNo];
4627 unsigned RegSize = RegisterVT.getSizeInBits();
4628 unsigned NumSignBits = LOI.NumSignBits;
4629 unsigned NumZeroBits = LOI.KnownZero.countLeadingOnes();
4631 // FIXME: We capture more information than the dag can represent. For
4632 // now, just use the tightest assertzext/assertsext possible.
4634 MVT FromVT(MVT::Other);
4635 if (NumSignBits == RegSize)
4636 isSExt = true, FromVT = MVT::i1; // ASSERT SEXT 1
4637 else if (NumZeroBits >= RegSize-1)
4638 isSExt = false, FromVT = MVT::i1; // ASSERT ZEXT 1
4639 else if (NumSignBits > RegSize-8)
4640 isSExt = true, FromVT = MVT::i8; // ASSERT SEXT 8
4641 else if (NumZeroBits >= RegSize-8)
4642 isSExt = false, FromVT = MVT::i8; // ASSERT ZEXT 8
4643 else if (NumSignBits > RegSize-16)
4644 isSExt = true, FromVT = MVT::i16; // ASSERT SEXT 16
4645 else if (NumZeroBits >= RegSize-16)
4646 isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
4647 else if (NumSignBits > RegSize-32)
4648 isSExt = true, FromVT = MVT::i32; // ASSERT SEXT 32
4649 else if (NumZeroBits >= RegSize-32)
4650 isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
4652 if (FromVT != MVT::Other) {
4653 P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
4654 RegisterVT, P, DAG.getValueType(FromVT));
4663 Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
4664 NumRegs, RegisterVT, ValueVT);
4669 return DAG.getNode(ISD::MERGE_VALUES, dl,
4670 DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
4671 &Values[0], ValueVTs.size());
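// Illustrative example: if an i32 virtual register is known, via
// LiveOutRegInfo, to hold a value that fits in a signed 8-bit range
// (NumSignBits > 24), the CopyFromReg is typically wrapped in AssertSext
// with an i8 value type, letting a later sign_extend from i8 be folded away.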
4674 /// getCopyToRegs - Emit a series of CopyToReg nodes that copy the
4675 /// specified value into the registers specified by this object. This uses
4676 /// Chain/Flag as the input and updates them for the output Chain/Flag.
4677 /// If the Flag pointer is NULL, no flag is used.
4678 void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
4679 SDValue &Chain, SDValue *Flag) const {
4680 // Get the list of the value's legal parts.
4681 unsigned NumRegs = Regs.size();
4682 SmallVector<SDValue, 8> Parts(NumRegs);
4683 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
4684 MVT ValueVT = ValueVTs[Value];
4685 unsigned NumParts = TLI->getNumRegisters(ValueVT);
4686 MVT RegisterVT = RegVTs[Value];
4688 getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
4689 &Parts[Part], NumParts, RegisterVT);
4693 // Copy the parts into the registers.
4694 SmallVector<SDValue, 8> Chains(NumRegs);
4695 for (unsigned i = 0; i != NumRegs; ++i) {
4698 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
4700 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
4701 *Flag = Part.getValue(1);
4703 Chains[i] = Part.getValue(0);
4706 if (NumRegs == 1 || Flag)
4707 // If NumRegs > 1 and Flag is used, then the use of the last CopyToReg is
4708 // flagged to it. That is, the CopyToReg nodes and the user are considered
4709 // a single scheduling unit. If we create a TokenFactor and return it as
4710 // chain, then the TokenFactor is both a predecessor (operand) of the
4711 // user as well as a successor (the TF operands are flagged to the user).
4712 // c1, f1 = CopyToReg
4713 // c2, f2 = CopyToReg
4714 // c3 = TokenFactor c1, c2
4717 Chain = Chains[NumRegs-1];
4719 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs);
4722 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
4723 /// operand list. This adds the code marker and includes the number of
4724 /// values added into it.
4725 void RegsForValue::AddInlineAsmOperands(unsigned Code,
4726 bool HasMatching,unsigned MatchingIdx,
4728 std::vector<SDValue> &Ops) const {
4729 MVT IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy();
4730 assert(Regs.size() < (1 << 13) && "Too many inline asm outputs!");
4731 unsigned Flag = Code | (Regs.size() << 3);
4733 Flag |= 0x80000000 | (MatchingIdx << 16);
4734 Ops.push_back(DAG.getTargetConstant(Flag, IntPtrTy));
4735 for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
4736 unsigned NumRegs = TLI->getNumRegisters(ValueVTs[Value]);
4737 MVT RegisterVT = RegVTs[Value];
4738 for (unsigned i = 0; i != NumRegs; ++i) {
4739 assert(Reg < Regs.size() && "Mismatch in # registers expected");
4740 Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
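// Illustrative example of the flag word built above: for an operand with
// class code C spanning two registers, the flag is C | (2 << 3); if it must
// match inline asm operand #3, bit 31 is set and the index 3 is placed at
// bit 16, giving 0x80030000 | C | (2 << 3).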
4745 /// isAllocatableRegister - If the specified register is safe to allocate,
4746 /// i.e. it isn't a stack pointer or some other special register, return the
4747 /// register class for the register. Otherwise, return null.
4748 static const TargetRegisterClass *
4749 isAllocatableRegister(unsigned Reg, MachineFunction &MF,
4750 const TargetLowering &TLI,
4751 const TargetRegisterInfo *TRI) {
4752 MVT FoundVT = MVT::Other;
4753 const TargetRegisterClass *FoundRC = 0;
4754 for (TargetRegisterInfo::regclass_iterator RCI = TRI->regclass_begin(),
4755 E = TRI->regclass_end(); RCI != E; ++RCI) {
4756 MVT ThisVT = MVT::Other;
4758 const TargetRegisterClass *RC = *RCI;
4759 // If none of the value types for this register class are valid, we
4760 // can't use it. For example, 64-bit reg classes on 32-bit targets.
4761 for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
4763 if (TLI.isTypeLegal(*I)) {
4764 // If we have already found this register in a different register class,
4765 // choose the one with the largest VT specified. For example, on
4766 // PowerPC, we favor f64 register classes over f32.
4767 if (FoundVT == MVT::Other || FoundVT.bitsLT(*I)) {
4774 if (ThisVT == MVT::Other) continue;
4776 // NOTE: This isn't ideal. In particular, this might allocate the
4777 // frame pointer in functions that need it (it has not been taken out of
4778 // allocation yet because a variable-sized allocation hasn't been seen).
4779 // This is a slight code pessimization, but should still work.
4780 for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
4781 E = RC->allocation_order_end(MF); I != E; ++I)
4783 // We found a matching register class. Keep looking at others in case
4784 // this physreg is also in one with larger registers.
4795 /// AsmOperandInfo - This contains information for each constraint that we are lowering.
4797 class VISIBILITY_HIDDEN SDISelAsmOperandInfo :
4798 public TargetLowering::AsmOperandInfo {
4800 /// CallOperand - If this is the result output operand or a clobber,
4801 /// this is null; otherwise it is the incoming operand to the CallInst.
4802 /// This gets modified as the asm is processed.
4803 SDValue CallOperand;
4805 /// AssignedRegs - If this is a register or register class operand, this
4806 /// contains the set of registers corresponding to the operand.
4807 RegsForValue AssignedRegs;
4809 explicit SDISelAsmOperandInfo(const InlineAsm::ConstraintInfo &info)
4810 : TargetLowering::AsmOperandInfo(info), CallOperand(0,0) {
4813 /// MarkAllocatedRegs - Once AssignedRegs is set, mark the assigned registers
4814 /// busy in OutputRegs/InputRegs.
4815 void MarkAllocatedRegs(bool isOutReg, bool isInReg,
4816 std::set<unsigned> &OutputRegs,
4817 std::set<unsigned> &InputRegs,
4818 const TargetRegisterInfo &TRI) const {
4820 for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
4821 MarkRegAndAliases(AssignedRegs.Regs[i], OutputRegs, TRI);
4824 for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
4825 MarkRegAndAliases(AssignedRegs.Regs[i], InputRegs, TRI);
4829 /// getCallOperandValMVT - Return the MVT of the Value* that this operand
4830 /// corresponds to. If there is no Value* for this operand, it returns MVT::Other.
4832 MVT getCallOperandValMVT(const TargetLowering &TLI,
4833 const TargetData *TD) const {
4834 if (CallOperandVal == 0) return MVT::Other;
4836 if (isa<BasicBlock>(CallOperandVal))
4837 return TLI.getPointerTy();
4839 const llvm::Type *OpTy = CallOperandVal->getType();
4841 // If this is an indirect operand, the operand is a pointer to the accessed type.
4844 OpTy = cast<PointerType>(OpTy)->getElementType();
4846 // If OpTy is not a single value, it may be a struct/union that we
4847 // can tile with integers.
4848 if (!OpTy->isSingleValueType() && OpTy->isSized()) {
4849 unsigned BitSize = TD->getTypeSizeInBits(OpTy);
4858 OpTy = IntegerType::get(BitSize);
4863 return TLI.getValueType(OpTy, true);
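// Illustrative example (assuming the usual integer-tiling restrictions on
// sizes): an indirect operand whose Value* points to an 8-byte struct is not
// a single-value type, so it is tiled as a 64-bit integer and that integer
// type is what gets returned here.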
4867 /// MarkRegAndAliases - Mark the specified register and all of its aliases in the specified set.
4869 static void MarkRegAndAliases(unsigned Reg, std::set<unsigned> &Regs,
4870 const TargetRegisterInfo &TRI) {
4871 assert(TargetRegisterInfo::isPhysicalRegister(Reg) && "Isn't a physreg");
4873 if (const unsigned *Aliases = TRI.getAliasSet(Reg))
4874 for (; *Aliases; ++Aliases)
4875 Regs.insert(*Aliases);
4878 } // end llvm namespace.
4881 /// GetRegistersForValue - Assign registers (virtual or physical) for the
4882 /// specified operand. We prefer to assign virtual registers, to allow the
4883 /// register allocator to handle the assignment process. However, if the asm uses
4884 /// features that we can't model on machineinstrs, we have SDISel do the
4885 /// allocation. This produces generally horrible, but correct, code.
4887 /// OpInfo describes the operand.
4888 /// Input and OutputRegs are the set of already allocated physical registers.
4890 void SelectionDAGLowering::
4891 GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
4892 std::set<unsigned> &OutputRegs,
4893 std::set<unsigned> &InputRegs) {
4894 // Compute whether this value requires an input register, an output register, or both.
4896 bool isOutReg = false;
4897 bool isInReg = false;
4898 switch (OpInfo.Type) {
4899 case InlineAsm::isOutput:
4902 // If there is an input constraint that matches this, we need to reserve
4903 // the input register so no other inputs allocate to it.
4904 isInReg = OpInfo.hasMatchingInput();
4906 case InlineAsm::isInput:
4910 case InlineAsm::isClobber:
4917 MachineFunction &MF = DAG.getMachineFunction();
4918 SmallVector<unsigned, 4> Regs;
4920 // If this is a constraint for a single physreg, or a constraint for a
4921 // register class, find it.
4922 std::pair<unsigned, const TargetRegisterClass*> PhysReg =
4923 TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
4924 OpInfo.ConstraintVT);
4926 unsigned NumRegs = 1;
4927 if (OpInfo.ConstraintVT != MVT::Other) {
4928 // If this is a FP input in an integer register (or vice versa) insert a bit
4929 // cast of the input value. More generally, handle any case where the input
4930 // value disagrees with the register class we plan to stick this in.
4931 if (OpInfo.Type == InlineAsm::isInput &&
4932 PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) {
4933 // Try to convert to the first MVT that the reg class contains. If the
4934 // types are of identical size, use a bitcast to convert (e.g. two differing vector types).
4936 MVT RegVT = *PhysReg.second->vt_begin();
4937 if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
4938 OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
4939 RegVT, OpInfo.CallOperand);
4940 OpInfo.ConstraintVT = RegVT;
4941 } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
4942 // If the input is a FP value and we want it in integer registers, do a
4943 // bitcast to the corresponding integer type. This turns an f64 value
4944 // into i64, which can be passed with two i32 values on a 32-bit target.
4946 RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
4947 OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
4948 RegVT, OpInfo.CallOperand);
4949 OpInfo.ConstraintVT = RegVT;
4953 NumRegs = TLI.getNumRegisters(OpInfo.ConstraintVT);
4957 MVT ValueVT = OpInfo.ConstraintVT;
4959 // If this is a constraint for a specific physical register, like {r17},
4961 if (unsigned AssignedReg = PhysReg.first) {
4962 const TargetRegisterClass *RC = PhysReg.second;
4963 if (OpInfo.ConstraintVT == MVT::Other)
4964 ValueVT = *RC->vt_begin();
4966 // Get the actual register value type. This is important, because the user
4967 // may have asked for (e.g.) the AX register in i32 type. We need to
4968 // remember that AX is actually i16 to get the right extension.
4969 RegVT = *RC->vt_begin();
4971 // This is an explicit reference to a physical register.
4972 Regs.push_back(AssignedReg);
4974 // If this is an expanded reference, add the rest of the regs to Regs.
4976 TargetRegisterClass::iterator I = RC->begin();
4977 for (; *I != AssignedReg; ++I)
4978 assert(I != RC->end() && "Didn't find reg!");
4980 // Already added the first reg.
4982 for (; NumRegs; --NumRegs, ++I) {
4983 assert(I != RC->end() && "Ran out of registers to allocate!");
4987 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
4988 const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
4989 OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
4993 // Otherwise, if this was a reference to an LLVM register class, create vregs
4994 // for this reference.
4995 if (const TargetRegisterClass *RC = PhysReg.second) {
4996 RegVT = *RC->vt_begin();
    if (OpInfo.ConstraintVT == MVT::Other)
      ValueVT = RegVT;
5000 // Create the appropriate number of virtual registers.
5001 MachineRegisterInfo &RegInfo = MF.getRegInfo();
5002 for (; NumRegs; --NumRegs)
5003 Regs.push_back(RegInfo.createVirtualRegister(RC));
5005 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
5009 // This is a reference to a register class that doesn't directly correspond
5010 // to an LLVM register class. Allocate NumRegs consecutive, available,
5011 // registers from the class.
5012 std::vector<unsigned> RegClassRegs
5013 = TLI.getRegClassForInlineAsmConstraint(OpInfo.ConstraintCode,
5014 OpInfo.ConstraintVT);
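  // RegClassRegs is an ordered list of candidate physical registers; the loop
  // below scans it for NumRegs consecutive entries that are allocatable and
  // not already claimed by another constraint.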
5016 const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
5017 unsigned NumAllocated = 0;
5018 for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
5019 unsigned Reg = RegClassRegs[i];
5020 // See if this register is available.
5021 if ((isOutReg && OutputRegs.count(Reg)) || // Already used.
5022 (isInReg && InputRegs.count(Reg))) { // Already used.
5023 // Make sure we find consecutive registers.
    // Check to see if this register is allocatable (i.e. don't give out the
    // stack pointer).
5030 const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, TRI);
5031 if (!RC) { // Couldn't allocate this register.
5032 // Reset NumAllocated to make sure we return consecutive registers.
5037 // Okay, this register is good, we can use it.
5040 // If we allocated enough consecutive registers, succeed.
5041 if (NumAllocated == NumRegs) {
5042 unsigned RegStart = (i-NumAllocated)+1;
5043 unsigned RegEnd = i+1;
5044 // Mark all of the allocated registers used.
5045 for (unsigned i = RegStart; i != RegEnd; ++i)
5046 Regs.push_back(RegClassRegs[i]);
5048 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, *RC->vt_begin(),
5049 OpInfo.ConstraintVT);
5050 OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
5055 // Otherwise, we couldn't allocate enough registers for this.
5058 /// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
5059 /// processed uses a memory 'm' constraint.
static bool
hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
5062 const TargetLowering &TLI) {
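  // For example (illustrative only), an asm whose constraint string contains
  // an 'm' code such as "=*m" has a memory operand, so this returns true and
  // visitInlineAsm will order the asm after any pending loads.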
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }
    // Indirect operands access memory too.
    if (CI.isIndirect)
      return true;
  }
  return false;
5079 /// visitInlineAsm - Handle a call to an InlineAsm object.
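/// An illustrative call site this handles (hypothetical IR, not from this file):
///   %r = call i32 asm "incl $0", "=r,0"(i32 %x)
/// where "=r" is a register output and "0" is an input tied to output 0.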
5081 void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
5082 InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
5084 /// ConstraintOperands - Information about all of the constraints.
5085 std::vector<SDISelAsmOperandInfo> ConstraintOperands;
5087 std::set<unsigned> OutputRegs, InputRegs;
5089 // Do a prepass over the constraints, canonicalizing them, and building up the
5090 // ConstraintOperands list.
5091 std::vector<InlineAsm::ConstraintInfo>
5092 ConstraintInfos = IA->ParseConstraints();
5094 bool hasMemory = hasInlineAsmMemConstraint(ConstraintInfos, TLI);
5096 SDValue Chain, Flag;
5098 // We won't need to flush pending loads if this asm doesn't touch
5099 // memory and is nonvolatile.
  if (hasMemory || IA->hasSideEffects())
    Chain = getRoot();      // Flush pending loads so the asm is ordered after them.
  else
    Chain = DAG.getRoot();  // No need to flush pending loads.
  unsigned ArgNo = 0;   // ArgNo - The index of the next call argument to consume.
5106 unsigned ResNo = 0; // ResNo - The result number of the next output.
5107 for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
5108 ConstraintOperands.push_back(SDISelAsmOperandInfo(ConstraintInfos[i]));
5109 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
5111 MVT OpVT = MVT::Other;
5113 // Compute the value type for each operand.
5114 switch (OpInfo.Type) {
5115 case InlineAsm::isOutput:
5116 // Indirect outputs just consume an argument.
5117 if (OpInfo.isIndirect) {
5118 OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
5122 // The return value of the call is this value. As such, there is no
5123 // corresponding argument.
5124 assert(CS.getType() != Type::VoidTy && "Bad inline asm!");
5125 if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
5126 OpVT = TLI.getValueType(STy->getElementType(ResNo));
5128 assert(ResNo == 0 && "Asm only has one result!");
5129 OpVT = TLI.getValueType(CS.getType());
5133 case InlineAsm::isInput:
5134 OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
5136 case InlineAsm::isClobber:
5141 // If this is an input or an indirect output, process the call argument.
    // BasicBlocks are labels, currently appearing only in inline asm.
5143 if (OpInfo.CallOperandVal) {
5144 if (BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
5145 OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
5147 OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
5150 OpVT = OpInfo.getCallOperandValMVT(TLI, TD);
5153 OpInfo.ConstraintVT = OpVT;
5156 // Second pass over the constraints: compute which constraint option to use
5157 // and assign registers to constraints that want a specific physreg.
5158 for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
5159 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5161 // If this is an output operand with a matching input operand, look up the
5162 // matching input. If their types mismatch, e.g. one is an integer, the
    // other is floating point, or their sizes are different, flag it as an
    // error.
5165 if (OpInfo.hasMatchingInput()) {
5166 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
5167 if (OpInfo.ConstraintVT != Input.ConstraintVT) {
5168 if ((OpInfo.ConstraintVT.isInteger() !=
5169 Input.ConstraintVT.isInteger()) ||
5170 (OpInfo.ConstraintVT.getSizeInBits() !=
5171 Input.ConstraintVT.getSizeInBits())) {
5172 cerr << "llvm: error: Unsupported asm: input constraint with a "
5173 << "matching output constraint of incompatible type!\n";
5176 Input.ConstraintVT = OpInfo.ConstraintVT;
5180 // Compute the constraint code and ConstraintType to use.
5181 TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, hasMemory, &DAG);
5183 // If this is a memory input, and if the operand is not indirect, do what we
    // need to in order to provide an address for the memory input.
5185 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
5186 !OpInfo.isIndirect) {
5187 assert(OpInfo.Type == InlineAsm::isInput &&
5188 "Can only indirectify direct input operands!");
5190 // Memory operands really want the address of the value. If we don't have
      // an indirect input, put it in the constant pool if we can; otherwise spill
5192 // it to a stack slot.
5194 // If the operand is a float, integer, or vector constant, spill to a
5195 // constant pool entry to get its address.
5196 Value *OpVal = OpInfo.CallOperandVal;
5197 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
5198 isa<ConstantVector>(OpVal)) {
5199 OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
5200 TLI.getPointerTy());
        // Otherwise, create a stack slot and emit a store to it before the asm.
5204 const Type *Ty = OpVal->getType();
5205 uint64_t TySize = TLI.getTargetData()->getTypePaddedSize(Ty);
5206 unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(Ty);
5207 MachineFunction &MF = DAG.getMachineFunction();
5208 int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align);
5209 SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
5210 Chain = DAG.getStore(Chain, getCurDebugLoc(),
5211 OpInfo.CallOperand, StackSlot, NULL, 0);
5212 OpInfo.CallOperand = StackSlot;
5215 // There is no longer a Value* corresponding to this operand.
5216 OpInfo.CallOperandVal = 0;
5217 // It is now an indirect operand.
5218 OpInfo.isIndirect = true;
    // If this constraint is for a specific register, allocate it before
    // anything else.
5223 if (OpInfo.ConstraintType == TargetLowering::C_Register)
5224 GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
5226 ConstraintInfos.clear();
  // Third pass - Loop over all of the operands, assigning virtual or physregs
5230 // to register class operands.
5231 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
5232 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
    // C_Register operands have already been allocated; Other/Memory operands
    // don't need registers.
5236 if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
5237 GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
5240 // AsmNodeOperands - The operands for the ISD::INLINEASM node.
5241 std::vector<SDValue> AsmNodeOperands;
5242 AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
5243 AsmNodeOperands.push_back(
5244 DAG.getTargetExternalSymbol(IA->getAsmString().c_str(), MVT::Other));
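  // From here on AsmNodeOperands holds: [0] the input chain (patched in at the
  // end), [1] the asm string, then one descriptor constant per operand followed
  // by that operand's values, and finally an optional flag/glue value.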
5247 // Loop over all of the inputs, copying the operand values into the
5248 // appropriate registers and processing the output regs.
5249 RegsForValue RetValRegs;
5251 // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
5252 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
5254 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
5255 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5257 switch (OpInfo.Type) {
5258 case InlineAsm::isOutput: {
5259 if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
5260 OpInfo.ConstraintType != TargetLowering::C_Register) {
5261 // Memory output, or 'other' output (e.g. 'X' constraint).
5262 assert(OpInfo.isIndirect && "Memory output must be indirect operand");
5264 // Add information to the INLINEASM node to know about this output.
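      // Descriptor encoding used throughout: the low 3 bits hold the operand
      // kind (1 = REGUSE, 2 = REGDEF, 3 = IMM, 4 = MEM, 6 = EARLYCLOBBER
      // REGDEF) and the count of following registers/operands is stored
      // shifted left by 3.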
5265 unsigned ResOpType = 4/*MEM*/ | (1<<3);
5266 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5267 TLI.getPointerTy()));
5268 AsmNodeOperands.push_back(OpInfo.CallOperand);
5272 // Otherwise, this is a register or register class output.
      // Copy the output from the appropriate register. Find a register that
      // we can use.
5276 if (OpInfo.AssignedRegs.Regs.empty()) {
5277 cerr << "llvm: error: Couldn't allocate output reg for constraint '"
5278 << OpInfo.ConstraintCode << "'!\n";
      // If this is an indirect operand, store through the pointer after the asm.
5284 if (OpInfo.isIndirect) {
5285 IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
5286 OpInfo.CallOperandVal));
5288 // This is the result value of the call.
5289 assert(CS.getType() != Type::VoidTy && "Bad inline asm!");
5290 // Concatenate this output onto the outputs list.
5291 RetValRegs.append(OpInfo.AssignedRegs);
      // Add information to the INLINEASM node to know that this register is
      // set.
5296 OpInfo.AssignedRegs.AddInlineAsmOperands(OpInfo.isEarlyClobber ?
                                               6 /* EARLYCLOBBER REGDEF */ :
                                               2 /* REGDEF */,
                                               false, 0, DAG, AsmNodeOperands);
5304 case InlineAsm::isInput: {
5305 SDValue InOperandVal = OpInfo.CallOperand;
5307 if (OpInfo.isMatchingInputConstraint()) { // Matching constraint?
5308 // If this is required to match an output register we have already set,
5309 // just use its register.
5310 unsigned OperandNo = OpInfo.getMatchedOperand();
5312 // Scan until we find the definition we already emitted of this operand.
5313 // When we find it, create a RegsForValue operand.
5314 unsigned CurOp = 2; // The first operand.
5315 for (; OperandNo; --OperandNo) {
5316 // Advance to the next operand.
        unsigned OpFlag =
          cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
5319 assert(((OpFlag & 7) == 2 /*REGDEF*/ ||
5320 (OpFlag & 7) == 6 /*EARLYCLOBBER REGDEF*/ ||
5321 (OpFlag & 7) == 4 /*MEM*/) &&
5322 "Skipped past definitions?");
5323 CurOp += InlineAsm::getNumOperandRegisters(OpFlag)+1;
      unsigned OpFlag =
        cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
5328 if ((OpFlag & 7) == 2 /*REGDEF*/
5329 || (OpFlag & 7) == 6 /* EARLYCLOBBER REGDEF */) {
5330 // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
5331 RegsForValue MatchedRegs;
5332 MatchedRegs.TLI = &TLI;
5333 MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
5334 MVT RegVT = AsmNodeOperands[CurOp+1].getValueType();
5335 MatchedRegs.RegVTs.push_back(RegVT);
5336 MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
        for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag);
             i != e; ++i)
          MatchedRegs.Regs.push_back(RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT)));
        // Use the produced MatchedRegs object to copy the operand value into
        // the newly created virtual registers.
        MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
                                  Chain, &Flag);
5345 MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/,
5346 true, OpInfo.getMatchedOperand(),
5347 DAG, AsmNodeOperands);
5350 assert(((OpFlag & 7) == 4) && "Unknown matching constraint!");
5351 assert((InlineAsm::getNumOperandRegisters(OpFlag)) == 1 &&
5352 "Unexpected number of operands");
5353 // Add information to the INLINEASM node to know about this input.
5354 // See InlineAsm.h isUseOperandTiedToDef.
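      // Bit 31 marks this use as tied to a def, and the matched operand number
      // is stored starting at bit 16.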
5355 OpFlag |= 0x80000000 | (OpInfo.getMatchedOperand() << 16);
5356 AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag,
5357 TLI.getPointerTy()));
5358 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
5363 if (OpInfo.ConstraintType == TargetLowering::C_Other) {
5364 assert(!OpInfo.isIndirect &&
5365 "Don't know how to handle indirect other inputs yet!");
5367 std::vector<SDValue> Ops;
5368 TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode[0],
                                         hasMemory, Ops, DAG);
        if (Ops.empty()) {
          cerr << "llvm: error: Invalid operand for inline asm constraint '"
5372 << OpInfo.ConstraintCode << "'!\n";
5376 // Add information to the INLINEASM node to know about this input.
5377 unsigned ResOpType = 3 /*IMM*/ | (Ops.size() << 3);
5378 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5379 TLI.getPointerTy()));
5380 AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
5382 } else if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
5383 assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
5384 assert(InOperandVal.getValueType() == TLI.getPointerTy() &&
5385 "Memory operands expect pointer values");
5387 // Add information to the INLINEASM node to know about this input.
5388 unsigned ResOpType = 4/*MEM*/ | (1<<3);
5389 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5390 TLI.getPointerTy()));
5391 AsmNodeOperands.push_back(InOperandVal);
5395 assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
5396 OpInfo.ConstraintType == TargetLowering::C_Register) &&
5397 "Unknown constraint type!");
5398 assert(!OpInfo.isIndirect &&
5399 "Don't know how to handle indirect register inputs yet!");
5401 // Copy the input into the appropriate registers.
5402 if (OpInfo.AssignedRegs.Regs.empty()) {
        cerr << "llvm: error: Couldn't allocate input reg for constraint '"
5404 << OpInfo.ConstraintCode << "'!\n";
      OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
                                        Chain, &Flag);
5411 OpInfo.AssignedRegs.AddInlineAsmOperands(1/*REGUSE*/, false, 0,
5412 DAG, AsmNodeOperands);
5415 case InlineAsm::isClobber: {
5416 // Add the clobbered value to the operand list, so that the register
5417 // allocator is aware that the physreg got clobbered.
5418 if (!OpInfo.AssignedRegs.Regs.empty())
5419 OpInfo.AssignedRegs.AddInlineAsmOperands(6 /* EARLYCLOBBER REGDEF */,
5420 false, 0, DAG,AsmNodeOperands);
5426 // Finish up input operands.
5427 AsmNodeOperands[0] = Chain;
5428 if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
5430 Chain = DAG.getNode(ISD::INLINEASM, getCurDebugLoc(),
5431 DAG.getVTList(MVT::Other, MVT::Flag),
5432 &AsmNodeOperands[0], AsmNodeOperands.size());
5433 Flag = Chain.getValue(1);
5435 // If this asm returns a register value, copy the result from that register
5436 // and set it as the value of the call.
5437 if (!RetValRegs.Regs.empty()) {
    SDValue Val = RetValRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
                                             Chain, &Flag);
5441 // FIXME: Why don't we do this for inline asms with MRVs?
5442 if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
5443 MVT ResultType = TLI.getValueType(CS.getType());
5445 // If any of the results of the inline asm is a vector, it may have the
5446 // wrong width/num elts. This can happen for register classes that can
5447 // contain multiple different value types. The preg or vreg allocated may
5448 // not have the same VT as was expected. Convert it to the right type
5449 // with bit_convert.
5450 if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
        Val = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
                          ResultType, Val);
5454 } else if (ResultType != Val.getValueType() &&
5455 ResultType.isInteger() && Val.getValueType().isInteger()) {
5456 // If a result value was tied to an input value, the computed result may
        // have a wider width than the expected result. Extract the relevant
        // portion.
5459 Val = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), ResultType, Val);
5462 assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
5465 setValue(CS.getInstruction(), Val);
5466 // Don't need to use this as a chain in this case.
5467 if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
5471 std::vector<std::pair<SDValue, Value*> > StoresToEmit;
  // Process indirect outputs, first output all of the flagged copies out of
  // the physregs.
5475 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
5476 RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
5477 Value *Ptr = IndirectStoresToEmit[i].second;
    SDValue OutVal = OutRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
                                             Chain, &Flag);
5480 StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
5484 // Emit the non-flagged stores from the physregs.
5485 SmallVector<SDValue, 8> OutChains;
5486 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i)
5487 OutChains.push_back(DAG.getStore(Chain, getCurDebugLoc(),
5488 StoresToEmit[i].first,
5489 getValue(StoresToEmit[i].second),
5490 StoresToEmit[i].second, 0));
5491 if (!OutChains.empty())
5492 Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
                        &OutChains[0], OutChains.size());
  DAG.setRoot(Chain);
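/// visitMalloc - Lower the legacy 'malloc' instruction, e.g. (illustrative IR)
/// "%p = malloc double, i32 %n", by scaling the element count to a byte size
/// and emitting a call to the C library's malloc.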
5498 void SelectionDAGLowering::visitMalloc(MallocInst &I) {
5499 SDValue Src = getValue(I.getOperand(0));
5501 // Scale up by the type size in the original i32 type width. Various
5502 // mid-level optimizers may make assumptions about demanded bits etc from the
  // i32-ness of the size operand: we do not want to promote to i64 and then
5504 // multiply on 64-bit targets.
5505 // FIXME: Malloc inst should go away: PR715.
5506 uint64_t ElementSize = TD->getTypePaddedSize(I.getType()->getElementType());
5507 if (ElementSize != 1)
5508 Src = DAG.getNode(ISD::MUL, getCurDebugLoc(), Src.getValueType(),
5509 Src, DAG.getConstant(ElementSize, Src.getValueType()));
5511 MVT IntPtr = TLI.getPointerTy();
5513 if (IntPtr.bitsLT(Src.getValueType()))
5514 Src = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), IntPtr, Src);
5515 else if (IntPtr.bitsGT(Src.getValueType()))
5516 Src = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), IntPtr, Src);
5518 TargetLowering::ArgListTy Args;
5519 TargetLowering::ArgListEntry Entry;
  Entry.Node = Src;
  Entry.Ty = TLI.getTargetData()->getIntPtrType();
5522 Args.push_back(Entry);
5524 std::pair<SDValue,SDValue> Result =
5525 TLI.LowerCallTo(getRoot(), I.getType(), false, false, false, false,
5526 CallingConv::C, PerformTailCallOpt,
5527 DAG.getExternalSymbol("malloc", IntPtr),
5528 Args, DAG, getCurDebugLoc());
5529 setValue(&I, Result.first); // Pointers always fit in registers
5530 DAG.setRoot(Result.second);
5533 void SelectionDAGLowering::visitFree(FreeInst &I) {
5534 TargetLowering::ArgListTy Args;
5535 TargetLowering::ArgListEntry Entry;
5536 Entry.Node = getValue(I.getOperand(0));
5537 Entry.Ty = TLI.getTargetData()->getIntPtrType();
5538 Args.push_back(Entry);
5539 MVT IntPtr = TLI.getPointerTy();
5540 std::pair<SDValue,SDValue> Result =
5541 TLI.LowerCallTo(getRoot(), Type::VoidTy, false, false, false, false,
5542 CallingConv::C, PerformTailCallOpt,
                    DAG.getExternalSymbol("free", IntPtr), Args, DAG,
                    getCurDebugLoc());
5545 DAG.setRoot(Result.second);
5548 void SelectionDAGLowering::visitVAStart(CallInst &I) {
5549 DAG.setRoot(DAG.getNode(ISD::VASTART, getCurDebugLoc(),
5550 MVT::Other, getRoot(),
5551 getValue(I.getOperand(1)),
5552 DAG.getSrcValue(I.getOperand(1))));
5555 void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
5556 SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurDebugLoc(),
5557 getRoot(), getValue(I.getOperand(0)),
5558 DAG.getSrcValue(I.getOperand(0)));
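  // The VAARG node produces two results: value 0 is the argument that was
  // read and value 1 is the updated chain.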
  setValue(&I, V);
  DAG.setRoot(V.getValue(1));
5563 void SelectionDAGLowering::visitVAEnd(CallInst &I) {
5564 DAG.setRoot(DAG.getNode(ISD::VAEND, getCurDebugLoc(),
5565 MVT::Other, getRoot(),
5566 getValue(I.getOperand(1)),
5567 DAG.getSrcValue(I.getOperand(1))));
5570 void SelectionDAGLowering::visitVACopy(CallInst &I) {
5571 DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurDebugLoc(),
5572 MVT::Other, getRoot(),
5573 getValue(I.getOperand(1)),
5574 getValue(I.getOperand(2)),
5575 DAG.getSrcValue(I.getOperand(1)),
5576 DAG.getSrcValue(I.getOperand(2))));
5579 /// TargetLowering::LowerArguments - This is the default LowerArguments
5580 /// implementation, which just inserts a FORMAL_ARGUMENTS node. FIXME: When all
5581 /// targets are migrated to using FORMAL_ARGUMENTS, this hook should be
5582 /// integrated into SDISel.
5583 void TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG,
5584 SmallVectorImpl<SDValue> &ArgValues,
5586 // Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node.
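  // The FORMAL_ARGUMENTS node built below takes [chain, calling convention,
  // isVarArg, then one ArgFlags operand per legal register piece] and produces
  // one value per register piece followed by a chain.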
5587 SmallVector<SDValue, 3+16> Ops;
5588 Ops.push_back(DAG.getRoot());
5589 Ops.push_back(DAG.getConstant(F.getCallingConv(), getPointerTy()));
5590 Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy()));
5592 // Add one result value for each formal argument.
  SmallVector<MVT, 16> RetVals;
  unsigned j = 1;  // Attribute index; index 0 is the return value, parameters start at 1.
  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
       I != E; ++I, ++j) {
5597 SmallVector<MVT, 4> ValueVTs;
5598 ComputeValueVTs(*this, I->getType(), ValueVTs);
5599 for (unsigned Value = 0, NumValues = ValueVTs.size();
5600 Value != NumValues; ++Value) {
5601 MVT VT = ValueVTs[Value];
5602 const Type *ArgTy = VT.getTypeForMVT();
5603 ISD::ArgFlagsTy Flags;
5604 unsigned OriginalAlignment =
5605 getTargetData()->getABITypeAlignment(ArgTy);
      if (F.paramHasAttr(j, Attribute::ZExt))
        Flags.setZExt();
      if (F.paramHasAttr(j, Attribute::SExt))
        Flags.setSExt();
      if (F.paramHasAttr(j, Attribute::InReg))
        Flags.setInReg();
      if (F.paramHasAttr(j, Attribute::StructRet))
        Flags.setSRet();
      if (F.paramHasAttr(j, Attribute::ByVal)) {
        Flags.setByVal();
5617 const PointerType *Ty = cast<PointerType>(I->getType());
5618 const Type *ElementTy = Ty->getElementType();
5619 unsigned FrameAlign = getByValTypeAlignment(ElementTy);
5620 unsigned FrameSize = getTargetData()->getTypePaddedSize(ElementTy);
        // For ByVal, the alignment should be passed from the frontend. The backend
        // will guess one if this info is missing, but there are cases it cannot get right.
5623 if (F.getParamAlignment(j))
5624 FrameAlign = F.getParamAlignment(j);
5625 Flags.setByValAlign(FrameAlign);
5626 Flags.setByValSize(FrameSize);
      if (F.paramHasAttr(j, Attribute::Nest))
        Flags.setNest();
5630 Flags.setOrigAlign(OriginalAlignment);
5632 MVT RegisterVT = getRegisterType(VT);
5633 unsigned NumRegs = getNumRegisters(VT);
5634 for (unsigned i = 0; i != NumRegs; ++i) {
5635 RetVals.push_back(RegisterVT);
5636 ISD::ArgFlagsTy MyFlags = Flags;
        if (NumRegs > 1 && i == 0)
          MyFlags.setSplit();
        // If it isn't the first piece, the alignment must be 1.
        else if (i > 0)
          MyFlags.setOrigAlign(1);
5642 Ops.push_back(DAG.getArgFlags(MyFlags));
5647 RetVals.push_back(MVT::Other);
5650 SDNode *Result = DAG.getNode(ISD::FORMAL_ARGUMENTS, dl,
5651 DAG.getVTList(&RetVals[0], RetVals.size()),
5652 &Ops[0], Ops.size()).getNode();
5654 // Prelower FORMAL_ARGUMENTS. This isn't required for functionality, but
5655 // allows exposing the loads that may be part of the argument access to the
5656 // first DAGCombiner pass.
5657 SDValue TmpRes = LowerOperation(SDValue(Result, 0), DAG);
5659 // The number of results should match up, except that the lowered one may have
5660 // an extra flag result.
5661 assert((Result->getNumValues() == TmpRes.getNode()->getNumValues() ||
5662 (Result->getNumValues()+1 == TmpRes.getNode()->getNumValues() &&
5663 TmpRes.getValue(Result->getNumValues()).getValueType() == MVT::Flag))
5664 && "Lowering produced unexpected number of results!");
5666 // The FORMAL_ARGUMENTS node itself is likely no longer needed.
5667 if (Result != TmpRes.getNode() && Result->use_empty()) {
5668 HandleSDNode Dummy(DAG.getRoot());
5669 DAG.RemoveDeadNode(Result);
5672 Result = TmpRes.getNode();
5674 unsigned NumArgRegs = Result->getNumValues() - 1;
5675 DAG.setRoot(SDValue(Result, NumArgRegs));
  // Set up the return result vector.
  unsigned i = 0, Idx = 1;  // i indexes FORMAL_ARGUMENTS results; Idx is the attribute index.
  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
       ++I, ++Idx) {
5682 SmallVector<MVT, 4> ValueVTs;
5683 ComputeValueVTs(*this, I->getType(), ValueVTs);
5684 for (unsigned Value = 0, NumValues = ValueVTs.size();
5685 Value != NumValues; ++Value) {
5686 MVT VT = ValueVTs[Value];
5687 MVT PartVT = getRegisterType(VT);
5689 unsigned NumParts = getNumRegisters(VT);
5690 SmallVector<SDValue, 4> Parts(NumParts);
5691 for (unsigned j = 0; j != NumParts; ++j)
5692 Parts[j] = SDValue(Result, i++);
5694 ISD::NodeType AssertOp = ISD::DELETED_NODE;
5695 if (F.paramHasAttr(Idx, Attribute::SExt))
5696 AssertOp = ISD::AssertSext;
5697 else if (F.paramHasAttr(Idx, Attribute::ZExt))
5698 AssertOp = ISD::AssertZext;
5700 ArgValues.push_back(getCopyFromParts(DAG, dl, &Parts[0], NumParts,
5701 PartVT, VT, AssertOp));
5704 assert(i == NumArgRegs && "Argument register count mismatch!");
5708 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
5709 /// implementation, which just inserts an ISD::CALL node, which is later custom
5710 /// lowered by the target to something concrete. FIXME: When all targets are
5711 /// migrated to using ISD::CALL, this hook should be integrated into SDISel.
5712 std::pair<SDValue, SDValue>
5713 TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
                            bool RetSExt, bool RetZExt, bool isVarArg,
                            bool isInreg, unsigned CallingConv, bool isTailCall,
                            SDValue Callee, ArgListTy &Args,
                            SelectionDAG &DAG, DebugLoc dl) {
5719 assert((!isTailCall || PerformTailCallOpt) &&
5720 "isTailCall set when tail-call optimizations are disabled!");
5722 SmallVector<SDValue, 32> Ops;
5723 Ops.push_back(Chain); // Op#0 - Chain
5724 Ops.push_back(Callee);
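  // The remaining operands come in pairs: one (argument part, ArgFlags) pair
  // per legal register piece of each argument.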
5726 // Handle all of the outgoing arguments.
5727 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
5728 SmallVector<MVT, 4> ValueVTs;
5729 ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
5730 for (unsigned Value = 0, NumValues = ValueVTs.size();
5731 Value != NumValues; ++Value) {
5732 MVT VT = ValueVTs[Value];
5733 const Type *ArgTy = VT.getTypeForMVT();
5734 SDValue Op = SDValue(Args[i].Node.getNode(),
5735 Args[i].Node.getResNo() + Value);
5736 ISD::ArgFlagsTy Flags;
5737 unsigned OriginalAlignment =
5738 getTargetData()->getABITypeAlignment(ArgTy);
      if (Args[i].isInReg)
        Flags.setInReg();
      if (Args[i].isByVal) {
        Flags.setByVal();
5750 const PointerType *Ty = cast<PointerType>(Args[i].Ty);
5751 const Type *ElementTy = Ty->getElementType();
5752 unsigned FrameAlign = getByValTypeAlignment(ElementTy);
5753 unsigned FrameSize = getTargetData()->getTypePaddedSize(ElementTy);
        // For ByVal, the alignment should come from the frontend. The backend
        // will guess one if this info is missing, but there are cases it cannot get right.
5756 if (Args[i].Alignment)
5757 FrameAlign = Args[i].Alignment;
5758 Flags.setByValAlign(FrameAlign);
5759 Flags.setByValSize(FrameSize);
5763 Flags.setOrigAlign(OriginalAlignment);
5765 MVT PartVT = getRegisterType(VT);
5766 unsigned NumParts = getNumRegisters(VT);
5767 SmallVector<SDValue, 4> Parts(NumParts);
5768 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
      if (Args[i].isSExt)
        ExtendKind = ISD::SIGN_EXTEND;
5772 else if (Args[i].isZExt)
5773 ExtendKind = ISD::ZERO_EXTEND;
5775 getCopyToParts(DAG, dl, Op, &Parts[0], NumParts, PartVT, ExtendKind);
5777 for (unsigned i = 0; i != NumParts; ++i) {
        ISD::ArgFlagsTy MyFlags = Flags;
        if (NumParts > 1 && i == 0)
          MyFlags.setSplit();
        // If it isn't the first piece, the alignment must be 1.
        else if (i != 0)
          MyFlags.setOrigAlign(1);
5785 Ops.push_back(Parts[i]);
5786 Ops.push_back(DAG.getArgFlags(MyFlags));
5791 // Figure out the result value types. We start by making a list of
5792 // the potentially illegal return value types.
5793 SmallVector<MVT, 4> LoweredRetTys;
5794 SmallVector<MVT, 4> RetTys;
5795 ComputeValueVTs(*this, RetTy, RetTys);
5797 // Then we translate that to a list of legal types.
5798 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    MVT VT = RetTys[I];
    MVT RegisterVT = getRegisterType(VT);
5801 unsigned NumRegs = getNumRegisters(VT);
5802 for (unsigned i = 0; i != NumRegs; ++i)
5803 LoweredRetTys.push_back(RegisterVT);
5806 LoweredRetTys.push_back(MVT::Other); // Always has a chain.
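  // For example, if RetTy is i64 on a target where i64 is not legal, RetTys
  // holds a single i64 entry while LoweredRetTys ends up with two i32 entries
  // plus the trailing chain type.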
5808 // Create the CALL node.
5809 SDValue Res = DAG.getCall(CallingConv, dl,
5810 isVarArg, isTailCall, isInreg,
5811 DAG.getVTList(&LoweredRetTys[0],
                                          LoweredRetTys.size()),
                            &Ops[0], Ops.size());
  Chain = Res.getValue(LoweredRetTys.size() - 1);
5817 // Gather up the call result into a single value.
5818 if (RetTy != Type::VoidTy && !RetTys.empty()) {
5819 ISD::NodeType AssertOp = ISD::DELETED_NODE;
    if (RetSExt)
      AssertOp = ISD::AssertSext;
    else if (RetZExt)
      AssertOp = ISD::AssertZext;
5826 SmallVector<SDValue, 4> ReturnValues;
    unsigned RegNo = 0;
    for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
      MVT VT = RetTys[I];
      MVT RegisterVT = getRegisterType(VT);
5831 unsigned NumRegs = getNumRegisters(VT);
5832 unsigned RegNoEnd = NumRegs + RegNo;
5833 SmallVector<SDValue, 4> Results;
5834 for (; RegNo != RegNoEnd; ++RegNo)
5835 Results.push_back(Res.getValue(RegNo));
5836 SDValue ReturnValue =
        getCopyFromParts(DAG, dl, &Results[0], NumRegs, RegisterVT, VT,
                         AssertOp);
5839 ReturnValues.push_back(ReturnValue);
5841 Res = DAG.getNode(ISD::MERGE_VALUES, dl,
5842 DAG.getVTList(&RetTys[0], RetTys.size()),
5843 &ReturnValues[0], ReturnValues.size());
5846 return std::make_pair(Res, Chain);
5849 void TargetLowering::LowerOperationWrapper(SDNode *N,
5850 SmallVectorImpl<SDValue> &Results,
5851 SelectionDAG &DAG) {
5852 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
  if (Res.getNode())
    Results.push_back(Res);
5857 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
  assert(0 && "LowerOperation not implemented for this target!");
  return SDValue();
5864 void SelectionDAGLowering::CopyValueToVirtualRegister(Value *V, unsigned Reg) {
5865 SDValue Op = getValue(V);
5866 assert((Op.getOpcode() != ISD::CopyFromReg ||
5867 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
5868 "Copy from a reg to the same reg!");
5869 assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
5871 RegsForValue RFV(TLI, Reg, V->getType());
5872 SDValue Chain = DAG.getEntryNode();
5873 RFV.getCopyToRegs(Op, DAG, getCurDebugLoc(), Chain, 0);
5874 PendingExports.push_back(Chain);
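  // The copy is queued rather than rooted immediately: PendingExports is folded
  // back into the DAG root before the block's terminator is lowered, so the
  // export happens after all other work in the block.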
5877 #include "llvm/CodeGen/SelectionDAGISel.h"
5879 void SelectionDAGISel::
5880 LowerArguments(BasicBlock *LLVMBB) {
5881 // If this is the entry block, emit arguments.
5882 Function &F = *LLVMBB->getParent();
5883 SDValue OldRoot = SDL->DAG.getRoot();
5884 SmallVector<SDValue, 16> Args;
  TLI.LowerArguments(F, SDL->DAG, Args, SDL->getCurDebugLoc());
  unsigned a = 0;  // Index into Args of the current argument's first value.
  for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
       AI != E; ++AI) {
5890 SmallVector<MVT, 4> ValueVTs;
5891 ComputeValueVTs(TLI, AI->getType(), ValueVTs);
5892 unsigned NumValues = ValueVTs.size();
5893 if (!AI->use_empty()) {
5894 SDL->setValue(AI, SDL->DAG.getMergeValues(&Args[a], NumValues,
5895 SDL->getCurDebugLoc()));
      // If this argument is live outside of the entry block, insert a copy from
      // wherever we got it to the vreg that other BBs will reference it as.
5898 SDL->CopyToExportRegsIfNeeded(AI);
5903 // Finally, if the target has anything special to do, allow it to do so.
5904 // FIXME: this should insert code into the DAG!
5905 EmitFunctionEntryCode(F, SDL->DAG.getMachineFunction());
5908 /// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
5909 /// ensure constants are generated when needed. Remember the virtual registers
5910 /// that need to be added to the Machine PHI nodes as input. We cannot just
/// directly add them, because expansion might result in multiple MBBs for one
/// BB. As such, the start of the BB might correspond to a different MBB than
/// the end.
5916 SelectionDAGISel::HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB) {
5917 TerminatorInst *TI = LLVMBB->getTerminator();
5919 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
5923 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
5924 BasicBlock *SuccBB = TI->getSuccessor(succ);
5925 if (!isa<PHINode>(SuccBB->begin())) continue;
5926 MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
5928 // If this terminator has multiple identical successors (common for
5929 // switches), only handle each succ once.
5930 if (!SuccsHandled.insert(SuccMBB)) continue;
    MachineBasicBlock::iterator MBBI = SuccMBB->begin();
    PHINode *PN;
5935 // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
5938 for (BasicBlock::iterator I = SuccBB->begin();
5939 (PN = dyn_cast<PHINode>(I)); ++I) {
      // Ignore dead PHIs.
5941 if (PN->use_empty()) continue;
5944 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
5946 if (Constant *C = dyn_cast<Constant>(PHIOp)) {
5947 unsigned &RegOut = SDL->ConstantsOut[C];
5949 RegOut = FuncInfo->CreateRegForValue(C);
5950 SDL->CopyValueToVirtualRegister(C, RegOut);
5954 Reg = FuncInfo->ValueMap[PHIOp];
5956 assert(isa<AllocaInst>(PHIOp) &&
5957 FuncInfo->StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
5958 "Didn't codegen value into a register!??");
5959 Reg = FuncInfo->CreateRegForValue(PHIOp);
5960 SDL->CopyValueToVirtualRegister(PHIOp, Reg);
      // Remember that this register needs to be added to the machine PHI node as
5965 // the input for this MBB.
5966 SmallVector<MVT, 4> ValueVTs;
5967 ComputeValueVTs(TLI, PN->getType(), ValueVTs);
5968 for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
5969 MVT VT = ValueVTs[vti];
5970 unsigned NumRegisters = TLI.getNumRegisters(VT);
5971 for (unsigned i = 0, e = NumRegisters; i != e; ++i)
5972 SDL->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
5973 Reg += NumRegisters;
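        // Each legal register piece of the PHI value gets its own machine PHI
        // operand, which is why MBBI is advanced once per register above.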
5977 SDL->ConstantsOut.clear();
5980 /// This is the Fast-ISel version of HandlePHINodesInSuccessorBlocks. It only
5981 /// supports legal types, and it emits MachineInstrs directly instead of
5982 /// creating SelectionDAG nodes.
bool SelectionDAGISel::HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB,
                                                           FastISel *F) {
5987 TerminatorInst *TI = LLVMBB->getTerminator();
5989 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
5990 unsigned OrigNumPHINodesToUpdate = SDL->PHINodesToUpdate.size();
  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
5994 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
5995 BasicBlock *SuccBB = TI->getSuccessor(succ);
5996 if (!isa<PHINode>(SuccBB->begin())) continue;
5997 MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
5999 // If this terminator has multiple identical successors (common for
6000 // switches), only handle each succ once.
6001 if (!SuccsHandled.insert(SuccMBB)) continue;
    MachineBasicBlock::iterator MBBI = SuccMBB->begin();
    PHINode *PN;
6006 // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
6009 for (BasicBlock::iterator I = SuccBB->begin();
6010 (PN = dyn_cast<PHINode>(I)); ++I) {
6011 // Ignore dead phi's.
6012 if (PN->use_empty()) continue;
6014 // Only handle legal types. Two interesting things to note here. First,
6015 // by bailing out early, we may leave behind some dead instructions,
6016 // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
6018 // use CreateRegForValue to create registers, so it always creates
6019 // exactly one register for each non-void instruction.
6020 MVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
6021 if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
6024 VT = TLI.getTypeToTransformTo(VT);
6026 SDL->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
6031 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
6033 unsigned Reg = F->getRegForValue(PHIOp);
      if (Reg == 0) {
        SDL->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
6038 SDL->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));