//===-- SelectionDAGBuild.cpp - Selection-DAG building --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "isel"
#include "SelectionDAGBuild.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Constants.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/DwarfWriter.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<unsigned, true>
LimitFPPrecision("limit-float-precision",
                 cl::desc("Generate low-precision inline sequences "
                          "for some float libcalls"),
                 cl::location(LimitFloatPrecision),
                 cl::init(0));
/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
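///
/// For example, given the aggregate type { i32, { float, double }, i8 },
/// whose members flatten to i32, float, double, i8, the index path {1, 1}
/// (the inner double) maps to linearized index 2.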
static unsigned ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
                                   const unsigned *Indices,
                                   const unsigned *IndicesEnd,
                                   unsigned CurIndex = 0) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(TLI, *EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(TLI, *EI, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    const Type *EltTy = ATy->getElementType();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
      if (Indices && *Indices == i)
        return ComputeLinearIndex(TLI, EltTy, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(TLI, EltTy, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}
/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// MVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
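///
/// For example, { i32, [2 x float] } yields ValueVTs of i32, f32, f32; on a
/// typical target where i32 and float are both 4 bytes, the corresponding
/// Offsets are 0, 4 and 8.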
static void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
                            SmallVectorImpl<MVT> &ValueVTs,
                            SmallVectorImpl<uint64_t> *Offsets = 0,
                            uint64_t StartingOffset = 0) {
  // Given a struct type, recursively traverse the elements.
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    const Type *EltTy = ATy->getElementType();
    uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty == Type::VoidTy)
    return;
  // Base case: we can get an MVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}
/// RegsForValue - This struct represents the registers (physical or virtual)
/// that a particular set of values is assigned, and the type information about
/// the value. The most common situation is to represent one value at a time,
/// but struct or array values are handled element-wise as multiple values.
/// The splitting of aggregates is performed recursively, so that we never
/// have aggregate-typed registers. The values at this point do not necessarily
/// have legal types, so each value may require one or more registers of some
/// legal type.
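///
/// For example, on a target whose widest legal integer register is i32, a
/// single i64 value is described by ValueVTs = { i64 }, RegVTs = { i32 },
/// and two entries in Regs, one for each i32 half.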
struct VISIBILITY_HIDDEN RegsForValue {
  /// TLI - The TargetLowering object.
  ///
  const TargetLowering *TLI;

  /// ValueVTs - The value types of the values, which may not be legal, and
  /// may need to be promoted or synthesized from one or more registers.
  ///
  SmallVector<MVT, 4> ValueVTs;

  /// RegVTs - The value types of the registers. This is the same size as
  /// ValueVTs and it records, for each value, what the type of the assigned
  /// register or registers are. (Individual values are never synthesized
  /// from more than one type of register.)
  ///
  /// With virtual registers, the contents of RegVTs is redundant with TLI's
  /// getRegisterType member function, however with physical registers
  /// it is necessary to have a separate record of the types.
  ///
  SmallVector<MVT, 4> RegVTs;

  /// Regs - This list holds the registers assigned to the values.
  /// Each legal or promoted value requires one register, and each
  /// expanded value requires multiple registers.
  ///
  SmallVector<unsigned, 4> Regs;

  RegsForValue() : TLI(0) {}

  RegsForValue(const TargetLowering &tli,
               const SmallVector<unsigned, 4> &regs,
               MVT regvt, MVT valuevt)
    : TLI(&tli), ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
  RegsForValue(const TargetLowering &tli,
               const SmallVector<unsigned, 4> &regs,
               const SmallVector<MVT, 4> &regvts,
               const SmallVector<MVT, 4> &valuevts)
    : TLI(&tli), ValueVTs(valuevts), RegVTs(regvts), Regs(regs) {}
  RegsForValue(const TargetLowering &tli,
               unsigned Reg, const Type *Ty) : TLI(&tli) {
    ComputeValueVTs(tli, Ty, ValueVTs);

    for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
      MVT ValueVT = ValueVTs[Value];
      unsigned NumRegs = TLI->getNumRegisters(ValueVT);
      MVT RegisterVT = TLI->getRegisterType(ValueVT);
      for (unsigned i = 0; i != NumRegs; ++i)
        Regs.push_back(Reg + i);
      RegVTs.push_back(RegisterVT);
      Reg += NumRegs;
    }
  }

  /// append - Add the specified values to this one.
  void append(const RegsForValue &RHS) {
    TLI = RHS.TLI;
    ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
    RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
    Regs.append(RHS.Regs.begin(), RHS.Regs.end());
  }

  /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
  /// this value and returns the result as a ValueVTs value. This uses
  /// Chain/Flag as the input and updates them for the output Chain/Flag.
  /// If the Flag pointer is NULL, no flag is used.
  SDValue getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
                          SDValue &Chain, SDValue *Flag) const;

  /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
  /// specified value into the registers specified by this object. This uses
  /// Chain/Flag as the input and updates them for the output Chain/Flag.
  /// If the Flag pointer is NULL, no flag is used.
  void getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
                     SDValue &Chain, SDValue *Flag) const;

  /// AddInlineAsmOperands - Add this value to the specified inlineasm node
  /// operand list. This adds the code marker, matching input operand index
  /// (if applicable), and includes the number of values added into it.
  void AddInlineAsmOperands(unsigned Code,
                            bool HasMatching, unsigned MatchingIdx,
                            SelectionDAG &DAG, std::vector<SDValue> &Ops) const;
};

/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it, or used by a
/// switch or atomic instruction, which may expand to multiple basic blocks.
static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
  if (isa<PHINode>(I)) return true;
  BasicBlock *BB = I->getParent();
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))
      return true;
  return false;
}

/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true. This includes arguments used by switches, since
/// the switch may expand into multiple basic blocks.
static bool isOnlyUsedInEntryBlock(Argument *A, bool EnableFastISel) {
  // With FastISel active, we may be splitting blocks, so force creation
  // of virtual registers for all non-dead arguments.
  // Don't force virtual registers for byval arguments though, because
  // fast-isel can't handle those in all cases.
  if (EnableFastISel && !A->hasByValAttr())
    return A->use_empty();

  BasicBlock *Entry = A->getParent()->begin();
  for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
      return false;  // Use not in entry block.
  return true;
}

FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli)
  : TLI(tli) {
}

void FunctionLoweringInfo::set(Function &fn, MachineFunction &mf,
                               SelectionDAG &DAG,
                               bool EnableFastISel) {
  Fn = &fn;
  MF = &mf;
  RegInfo = &MF->getRegInfo();

  // Create a vreg for each argument register that is not dead and is used
  // outside of the entry block for the function.
  for (Function::arg_iterator AI = Fn->arg_begin(), E = Fn->arg_end();
       AI != E; ++AI)
    if (!isOnlyUsedInEntryBlock(AI, EnableFastISel))
      InitializeRegForValue(AI);

  // Initialize the mapping of values to registers. This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  Function::iterator BB = Fn->begin(), EB = Fn->end();
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        const Type *Ty = AI->getAllocatedType();
        uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
        unsigned Align =
          std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
                   AI->getAlignment());

        TySize *= CUI->getZExtValue();   // Get total allocated size.
        if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
        StaticAllocaMap[AI] =
          MF->getFrameInfo()->CreateStackObject(TySize, Align);
      }

  for (; BB != EB; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
      if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
        if (!isa<AllocaInst>(I) ||
            !StaticAllocaMap.count(cast<AllocaInst>(I)))
          InitializeRegForValue(I);

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (BB = Fn->begin(), EB = Fn->end(); BB != EB; ++BB) {
    MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(BB);
    MBBMap[BB] = MBB;
    MF->push_back(MBB);

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    PHINode *PN;
    DebugLoc DL;
    for (BasicBlock::iterator
           I = BB->begin(), E = BB->end(); I != E; ++I) {
      if (CallInst *CI = dyn_cast<CallInst>(I)) {
        if (Function *F = CI->getCalledFunction()) {
          switch (F->getIntrinsicID()) {
          default: break;
          case Intrinsic::dbg_stoppoint: {
            DbgStopPointInst *SPI = cast<DbgStopPointInst>(I);

            if (DIDescriptor::ValidDebugInfo(SPI->getContext(),
                                             CodeGenOpt::Default)) {
              DICompileUnit CU(cast<GlobalVariable>(SPI->getContext()));
              unsigned idx = MF->getOrCreateDebugLocID(CU.getGV(),
                                                       SPI->getLine(),
                                                       SPI->getColumn());
              DL = DebugLoc::get(idx);
            }
            break;
          }
          case Intrinsic::dbg_func_start: {
            DbgFuncStartInst *FSI = cast<DbgFuncStartInst>(I);
            Value *SP = FSI->getSubprogram();

            if (DIDescriptor::ValidDebugInfo(SP, CodeGenOpt::Default)) {
              DISubprogram Subprogram(cast<GlobalVariable>(SP));
              DICompileUnit CU(Subprogram.getCompileUnit());
              unsigned Line = Subprogram.getLineNumber();
              DL = DebugLoc::get(MF->getOrCreateDebugLocID(CU.getGV(),
                                                           Line, 0));
            }
            break;
          }
          }
        }
      }

      PN = dyn_cast<PHINode>(I);
      if (!PN || PN->use_empty()) continue;

      unsigned PHIReg = ValueMap[PN];
      assert(PHIReg && "PHI node does not have an assigned virtual register!");

      SmallVector<MVT, 4> ValueVTs;
      ComputeValueVTs(TLI, PN->getType(), ValueVTs);
      for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
        MVT VT = ValueVTs[vti];
        unsigned NumRegisters = TLI.getNumRegisters(VT);
        const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
        for (unsigned i = 0; i != NumRegisters; ++i)
          BuildMI(MBB, DL, TII->get(TargetInstrInfo::PHI), PHIReg + i);
        PHIReg += NumRegisters;
      }
    }
  }
}

unsigned FunctionLoweringInfo::MakeReg(MVT VT) {
  return RegInfo->createVirtualRegister(TLI.getRegClassFor(VT));
}

/// CreateRegForValue - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types. Assign these registers
/// consecutive vreg numbers and return the first assigned number.
///
/// In the case that the given value has struct or array type, this function
/// will assign registers for each member or element.
///
unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
  SmallVector<MVT, 4> ValueVTs;
  ComputeValueVTs(TLI, V->getType(), ValueVTs);

  unsigned FirstReg = 0;
  for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
    MVT ValueVT = ValueVTs[Value];
    MVT RegisterVT = TLI.getRegisterType(ValueVT);

    unsigned NumRegs = TLI.getNumRegisters(ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      unsigned R = MakeReg(RegisterVT);
      if (!FirstReg) FirstReg = R;
    }
  }
  return FirstReg;
}

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent. If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
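///
/// For example, reassembling an i64 from two i32 parts on a little-endian
/// target produces roughly:
///   Lo  = Parts[0]; Hi = Parts[1];
///   Val = BUILD_PAIR i64, Lo, Hi
/// with Lo and Hi swapped first on big-endian targets.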
static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
                                const SDValue *Parts,
                                unsigned NumParts, MVT PartVT, MVT ValueVT,
                                ISD::NodeType AssertOp = ISD::DELETED_NODE) {
  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (!ValueVT.isVector() && ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.
      unsigned RoundParts = NumParts & (NumParts - 1) ?
        1 << Log2_32(NumParts) : NumParts;
      unsigned RoundBits = PartBits * RoundParts;
      MVT RoundVT = RoundBits == ValueBits ?
        ValueVT : MVT::getIntegerVT(RoundBits);
      SDValue Lo, Hi;

      MVT HalfVT = MVT::getIntegerVT(RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, dl, Parts, RoundParts/2, PartVT, HalfVT);
        Hi = getCopyFromParts(DAG, dl, Parts+RoundParts/2, RoundParts/2,
                              PartVT, HalfVT);
      } else {
        Lo = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[1]);
      }
      if (TLI.isBigEndian())
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, dl, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        MVT OddVT = MVT::getIntegerVT(OddParts * PartBits);
        Hi = getCopyFromParts(DAG, dl,
                              Parts+RoundParts, OddParts, PartVT, OddVT);

        // Combine the round and odd parts.
        Lo = Val;
        if (TLI.isBigEndian())
          std::swap(Lo, Hi);
        MVT TotalVT = MVT::getIntegerVT(NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, dl, TotalVT, Hi);
        Hi = DAG.getNode(ISD::SHL, dl, TotalVT, Hi,
                         DAG.getConstant(Lo.getValueType().getSizeInBits(),
                                         TLI.getPointerTy()));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, dl, TotalVT, Lo, Hi);
      }
    } else if (ValueVT.isVector()) {
      // Handle a multi-element vector.
      MVT IntermediateVT, RegisterVT;
      unsigned NumIntermediates;
      unsigned NumRegs =
        TLI.getVectorTypeBreakdown(ValueVT, IntermediateVT, NumIntermediates,
                                   RegisterVT);
      assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
      NumParts = NumRegs; // Silence a compiler warning.
      assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
      assert(RegisterVT == Parts[0].getValueType() &&
             "Part type doesn't match part!");

      // Assemble the parts into intermediate operands.
      SmallVector<SDValue, 8> Ops(NumIntermediates);
      if (NumIntermediates == NumParts) {
        // If the register was not expanded, truncate or copy the value,
        // as appropriate.
        for (unsigned i = 0; i != NumParts; ++i)
          Ops[i] = getCopyFromParts(DAG, dl, &Parts[i], 1,
                                    PartVT, IntermediateVT);
      } else if (NumParts > 0) {
        // If the intermediate type was expanded, build the intermediate operands
        // from the parts.
        assert(NumParts % NumIntermediates == 0 &&
               "Must expand into a divisible number of parts!");
        unsigned Factor = NumParts / NumIntermediates;
        for (unsigned i = 0; i != NumIntermediates; ++i)
          Ops[i] = getCopyFromParts(DAG, dl, &Parts[i * Factor], Factor,
                                    PartVT, IntermediateVT);
      }

      // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the intermediate
      // operands.
      Val = DAG.getNode(IntermediateVT.isVector() ?
                        ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, dl,
                        ValueVT, &Ops[0], NumIntermediates);
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == MVT(MVT::ppcf128) && PartVT == MVT(MVT::f64) &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BIT_CONVERT, dl, MVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BIT_CONVERT, dl, MVT(MVT::f64), Parts[1]);
      if (TLI.isBigEndian())
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, dl, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      MVT IntVT = MVT::getIntegerVT(ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, dl, Parts, NumParts, PartVT, IntVT);
    }
  }

  // There is now one part, held in Val. Correct it to match ValueVT.
  PartVT = Val.getValueType();

  if (PartVT == ValueVT)
    return Val;

  if (PartVT.isVector()) {
    assert(ValueVT.isVector() && "Unknown vector conversion!");
    return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);
  }

  if (ValueVT.isVector()) {
    assert(ValueVT.getVectorElementType() == PartVT &&
           ValueVT.getVectorNumElements() == 1 &&
           "Only trivial scalar-to-vector conversions should get here!");
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ValueVT, Val);
  }

  if (PartVT.isInteger() &&
      ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp != ISD::DELETED_NODE)
        Val = DAG.getNode(AssertOp, dl, PartVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
    } else {
      return DAG.getNode(ISD::ANY_EXTEND, dl, ValueVT, Val);
    }
  }

  if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    if (ValueVT.bitsLT(Val.getValueType()))
      // FP_ROUND's are always exact here.
      return DAG.getNode(ISD::FP_ROUND, dl, ValueVT, Val,
                         DAG.getIntPtrConstant(1));
    return DAG.getNode(ISD::FP_EXTEND, dl, ValueVT, Val);
  }

  if (PartVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);

  assert(0 && "Unknown mismatch!");
  return SDValue();
}

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts. If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
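///
/// For example, splitting an i64 value into two i32 parts uses
/// EXTRACT_ELEMENT, roughly:
///   Parts[0] = EXTRACT_ELEMENT i32, Val, 0   ; low half
///   Parts[1] = EXTRACT_ELEMENT i32, Val, 1   ; high half
/// and the array of parts is reversed afterwards on big-endian targets.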
static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val,
                           SDValue *Parts, unsigned NumParts, MVT PartVT,
                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  MVT PtrVT = TLI.getPointerTy();
  MVT ValueVT = Val.getValueType();
  unsigned PartBits = PartVT.getSizeInBits();
  unsigned OrigNumParts = NumParts;
  assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");

  if (!NumParts)
    return;

  if (!ValueVT.isVector()) {
    if (PartVT == ValueVT) {
      assert(NumParts == 1 && "No-op copy with multiple parts!");
      Parts[0] = Val;
      return;
    }

    if (NumParts * PartBits > ValueVT.getSizeInBits()) {
      // If the parts cover more bits than the value has, promote the value.
      if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
        assert(NumParts == 1 && "Do not know what to promote to!");
        Val = DAG.getNode(ISD::FP_EXTEND, dl, PartVT, Val);
      } else if (PartVT.isInteger() && ValueVT.isInteger()) {
        ValueVT = MVT::getIntegerVT(NumParts * PartBits);
        Val = DAG.getNode(ExtendKind, dl, ValueVT, Val);
      } else {
        assert(0 && "Unknown mismatch!");
      }
    } else if (PartBits == ValueVT.getSizeInBits()) {
      // Different types of the same size.
      assert(NumParts == 1 && PartVT != ValueVT);
      Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
    } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
      // If the parts cover fewer bits than the value has, truncate the value.
      if (PartVT.isInteger() && ValueVT.isInteger()) {
        ValueVT = MVT::getIntegerVT(NumParts * PartBits);
        Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
      } else {
        assert(0 && "Unknown mismatch!");
      }
    }

    // The value may have changed - recompute ValueVT.
    ValueVT = Val.getValueType();
    assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
           "Failed to tile the value with PartVT!");

    if (NumParts == 1) {
      assert(PartVT == ValueVT && "Type conversion failed!");
      Parts[0] = Val;
      return;
    }

    // Expand the value into multiple parts.
    if (NumParts & (NumParts - 1)) {
      // The number of parts is not a power of 2. Split off and copy the tail.
      assert(PartVT.isInteger() && ValueVT.isInteger() &&
             "Do not know what to expand to!");
      unsigned RoundParts = 1 << Log2_32(NumParts);
      unsigned RoundBits = RoundParts * PartBits;
      unsigned OddParts = NumParts - RoundParts;
      SDValue OddVal = DAG.getNode(ISD::SRL, dl, ValueVT, Val,
                                   DAG.getConstant(RoundBits,
                                                   TLI.getPointerTy()));
      getCopyToParts(DAG, dl, OddVal, Parts + RoundParts, OddParts, PartVT);
      if (TLI.isBigEndian())
        // The odd parts were reversed by getCopyToParts - unreverse them.
        std::reverse(Parts + RoundParts, Parts + NumParts);
      NumParts = RoundParts;
      ValueVT = MVT::getIntegerVT(NumParts * PartBits);
      Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
    }

    // The number of parts is a power of 2. Repeatedly bisect the value using
    // EXTRACT_ELEMENT.
    Parts[0] = DAG.getNode(ISD::BIT_CONVERT, dl,
                           MVT::getIntegerVT(ValueVT.getSizeInBits()),
                           Val);
    for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
      for (unsigned i = 0; i < NumParts; i += StepSize) {
        unsigned ThisBits = StepSize * PartBits / 2;
        MVT ThisVT = MVT::getIntegerVT(ThisBits);
        SDValue &Part0 = Parts[i];
        SDValue &Part1 = Parts[i+StepSize/2];

        Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                            ThisVT, Part0,
                            DAG.getConstant(1, PtrVT));
        Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                            ThisVT, Part0,
                            DAG.getConstant(0, PtrVT));

        if (ThisBits == PartBits && ThisVT != PartVT) {
          Part0 = DAG.getNode(ISD::BIT_CONVERT, dl,
                              PartVT, Part0);
          Part1 = DAG.getNode(ISD::BIT_CONVERT, dl,
                              PartVT, Part1);
        }
      }
    }

    if (TLI.isBigEndian())
      std::reverse(Parts, Parts + OrigNumParts);

    return;
  }

  // Vector ValueVT.
  if (NumParts == 1) {
    if (PartVT != ValueVT) {
      if (PartVT.isVector()) {
        Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
      } else {
        assert(ValueVT.getVectorElementType() == PartVT &&
               ValueVT.getVectorNumElements() == 1 &&
               "Only trivial vector-to-scalar conversions should get here!");
        Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                          PartVT, Val,
                          DAG.getConstant(0, PtrVT));
      }
    }

    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  MVT IntermediateVT, RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs = TLI
      .getVectorTypeBreakdown(ValueVT, IntermediateVT, NumIntermediates,
                              RegisterVT);
  unsigned NumElements = ValueVT.getVectorNumElements();

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i)
    if (IntermediateVT.isVector())
      Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
                           IntermediateVT, Val,
                           DAG.getConstant(i * (NumElements / NumIntermediates),
                                           PtrVT));
    else
      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                           IntermediateVT, Val,
                           DAG.getConstant(i, PtrVT));

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, dl, Ops[i], &Parts[i], 1, PartVT);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split the value into
    // legal parts.
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, dl, Ops[i], &Parts[i * Factor], Factor, PartVT);
  }
}

void SelectionDAGLowering::init(GCFunctionInfo *gfi, AliasAnalysis &aa) {
  AA = &aa;
  GFI = gfi;
  TD = DAG.getTarget().getTargetData();
}

/// clear - Clear out the current SelectionDAG and the associated
/// state and prepare this SelectionDAGLowering object to be used
/// for a new block. This doesn't clear out information about
/// additional blocks that are needed to complete switch lowering
/// or PHI node updating; that information is cleared out as it is
/// consumed.
void SelectionDAGLowering::clear() {
  NodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  DAG.clear();
  CurDebugLoc = DebugLoc::getUnknownLoc();
}

/// getRoot - Return the current virtual root of the Selection DAG,
/// flushing any PendingLoad items. This must be done before emitting
/// a store or any other node that may need to be ordered after any
/// prior load instructions.
///
SDValue SelectionDAGLowering::getRoot() {
  if (PendingLoads.empty())
    return DAG.getRoot();

  if (PendingLoads.size() == 1) {
    SDValue Root = PendingLoads[0];
    DAG.setRoot(Root);
    PendingLoads.clear();
    return Root;
  }

  // Otherwise, we have to make a token factor node.
  SDValue Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
                             &PendingLoads[0], PendingLoads.size());
  PendingLoads.clear();
  DAG.setRoot(Root);
  return Root;
}

/// getControlRoot - Similar to getRoot, but instead of flushing all the
/// PendingLoad items, flush all the PendingExports items. It is necessary
/// to do this before emitting a terminator instruction.
///
SDValue SelectionDAGLowering::getControlRoot() {
  SDValue Root = DAG.getRoot();

  if (PendingExports.empty())
    return Root;

  // Turn all of the CopyToReg chains into one factored node.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = PendingExports.size();
    for (; i != e; ++i) {
      assert(PendingExports[i].getNode()->getNumOperands() > 1);
      if (PendingExports[i].getNode()->getOperand(0) == Root)
        break;  // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      PendingExports.push_back(Root);
  }

  Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
                     &PendingExports[0],
                     PendingExports.size());
  PendingExports.clear();
  DAG.setRoot(Root);
  return Root;
}

void SelectionDAGLowering::visit(Instruction &I) {
  visit(I.getOpcode(), I);
}

void SelectionDAGLowering::visit(unsigned Opcode, User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: assert(0 && "Unknown instruction type encountered!");
           abort();
  // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
  case Instruction::OPCODE: return visit##OPCODE((CLASS&)I);
#include "llvm/Instruction.def"
  }
}

void SelectionDAGLowering::visitAdd(User &I) {
  if (I.getType()->isFPOrFPVector())
    visitBinary(I, ISD::FADD);
  else
    visitBinary(I, ISD::ADD);
}

void SelectionDAGLowering::visitMul(User &I) {
  if (I.getType()->isFPOrFPVector())
    visitBinary(I, ISD::FMUL);
  else
    visitBinary(I, ISD::MUL);
}

SDValue SelectionDAGLowering::getValue(const Value *V) {
  SDValue &N = NodeMap[V];
  if (N.getNode()) return N;

  if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
    MVT VT = TLI.getValueType(V->getType(), true);

    if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
      return N = DAG.getConstant(*CI, VT);

    if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
      return N = DAG.getGlobalAddress(GV, VT);

    if (isa<ConstantPointerNull>(C))
      return N = DAG.getConstant(0, TLI.getPointerTy());

    if (ConstantFP *CFP = dyn_cast<ConstantFP>(C))
      return N = DAG.getConstantFP(*CFP, VT);

    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
      return N = DAG.getUNDEF(VT);

    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      visit(CE->getOpcode(), *CE);
      SDValue N1 = NodeMap[V];
      assert(N1.getNode() && "visit didn't populate the ValueMap!");
      return N1;
    }

    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
      SmallVector<SDValue, 4> Constants;
      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
           OI != OE; ++OI) {
        SDNode *Val = getValue(*OI).getNode();
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Constants.push_back(SDValue(Val, i));
      }
      return DAG.getMergeValues(&Constants[0], Constants.size(),
                                getCurDebugLoc());
    }

    if (isa<StructType>(C->getType()) || isa<ArrayType>(C->getType())) {
      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
             "Unknown struct or array constant!");

      SmallVector<MVT, 4> ValueVTs;
      ComputeValueVTs(TLI, C->getType(), ValueVTs);
      unsigned NumElts = ValueVTs.size();
      if (NumElts == 0)
        return SDValue(); // empty struct
      SmallVector<SDValue, 4> Constants(NumElts);
      for (unsigned i = 0; i != NumElts; ++i) {
        MVT EltVT = ValueVTs[i];
        if (isa<UndefValue>(C))
          Constants[i] = DAG.getUNDEF(EltVT);
        else if (EltVT.isFloatingPoint())
          Constants[i] = DAG.getConstantFP(0, EltVT);
        else
          Constants[i] = DAG.getConstant(0, EltVT);
      }
      return DAG.getMergeValues(&Constants[0], NumElts, getCurDebugLoc());
    }

    const VectorType *VecTy = cast<VectorType>(V->getType());
    unsigned NumElements = VecTy->getNumElements();

    // Now that we know the number and type of the elements, get that number of
    // elements into the Ops array based on what kind of constant it is.
    SmallVector<SDValue, 16> Ops;
    if (ConstantVector *CP = dyn_cast<ConstantVector>(C)) {
      for (unsigned i = 0; i != NumElements; ++i)
        Ops.push_back(getValue(CP->getOperand(i)));
    } else {
      assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
      MVT EltVT = TLI.getValueType(VecTy->getElementType());

      SDValue Op;
      if (EltVT.isFloatingPoint())
        Op = DAG.getConstantFP(0, EltVT);
      else
        Op = DAG.getConstant(0, EltVT);
      Ops.assign(NumElements, Op);
    }

    // Create a BUILD_VECTOR node.
    return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
                                    VT, &Ops[0], Ops.size());
  }

  // If this is a static alloca, generate it as the frameindex instead of
  // computation.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end())
      return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
  }

  unsigned InReg = FuncInfo.ValueMap[V];
  assert(InReg && "Value not in map!");

  RegsForValue RFV(TLI, InReg, V->getType());
  SDValue Chain = DAG.getEntryNode();
  return RFV.getCopyFromRegs(DAG, getCurDebugLoc(), Chain, NULL);
}

void SelectionDAGLowering::visitRet(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    DAG.setRoot(DAG.getNode(ISD::RET, getCurDebugLoc(),
                            MVT::Other, getControlRoot()));
    return;
  }

  SmallVector<SDValue, 8> NewValues;
  NewValues.push_back(getControlRoot());
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    SmallVector<MVT, 4> ValueVTs;
    ComputeValueVTs(TLI, I.getOperand(i)->getType(), ValueVTs);
    unsigned NumValues = ValueVTs.size();
    if (NumValues == 0) continue;

    SDValue RetOp = getValue(I.getOperand(i));
    for (unsigned j = 0, f = NumValues; j != f; ++j) {
      MVT VT = ValueVTs[j];

      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

      const Function *F = I.getParent()->getParent();
      if (F->paramHasAttr(0, Attribute::SExt))
        ExtendKind = ISD::SIGN_EXTEND;
      else if (F->paramHasAttr(0, Attribute::ZExt))
        ExtendKind = ISD::ZERO_EXTEND;

      // FIXME: C calling convention requires the return type to be promoted to
      // at least 32-bit. But this is not necessary for non-C calling
      // conventions. The frontend should mark functions whose return values
      // require promoting with signext or zeroext attributes.
      if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
        MVT MinVT = TLI.getRegisterType(MVT::i32);
        if (VT.bitsLT(MinVT))
          VT = MinVT;
      }

      unsigned NumParts = TLI.getNumRegisters(VT);
      MVT PartVT = TLI.getRegisterType(VT);
      SmallVector<SDValue, 4> Parts(NumParts);
      getCopyToParts(DAG, getCurDebugLoc(),
                     SDValue(RetOp.getNode(), RetOp.getResNo() + j),
                     &Parts[0], NumParts, PartVT, ExtendKind);

      // 'inreg' on function refers to return value
      ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
      if (F->paramHasAttr(0, Attribute::InReg))
        Flags.setInReg();
      for (unsigned i = 0; i < NumParts; ++i) {
        NewValues.push_back(Parts[i]);
        NewValues.push_back(DAG.getArgFlags(Flags));
      }
    }
  }
  DAG.setRoot(DAG.getNode(ISD::RET, getCurDebugLoc(), MVT::Other,
                          &NewValues[0], NewValues.size()));
}

/// CopyToExportRegsIfNeeded - If the given value has virtual registers
/// created for it, emit nodes to copy the value into the virtual
/// registers.
void SelectionDAGLowering::CopyToExportRegsIfNeeded(Value *V) {
  if (!V->use_empty()) {
    DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
    if (VMI != FuncInfo.ValueMap.end())
      CopyValueToVirtualRegister(V, VMI->second);
  }
}

/// ExportFromCurrentBlock - If this condition isn't known to be exported from
/// the current basic block, add it to ValueMap now so that we'll get a
/// CopyToReg at the end of this block.
void SelectionDAGLowering::ExportFromCurrentBlock(Value *V) {
  // No need to export constants.
  if (!isa<Instruction>(V) && !isa<Argument>(V)) return;

  // Already exported?
  if (FuncInfo.isExportedInst(V)) return;

  unsigned Reg = FuncInfo.InitializeRegForValue(V);
  CopyValueToVirtualRegister(V, Reg);
}

bool SelectionDAGLowering::isExportableFromCurrentBlock(Value *V,
                                                        const BasicBlock *FromBB) {
  // The operands of the setcc have to be in this block. We don't know
  // how to export them from some other block.
  if (Instruction *VI = dyn_cast<Instruction>(V)) {
    // Can export from current BB.
    if (VI->getParent() == FromBB)
      return true;

    // Is already exported, noop.
    return FuncInfo.isExportedInst(V);
  }

  // If this is an argument, we can export it if the BB is the entry block or
  // if it is already exported.
  if (isa<Argument>(V)) {
    if (FromBB == &FromBB->getParent()->getEntryBlock())
      return true;

    // Otherwise, can only export this if it is already exported.
    return FuncInfo.isExportedInst(V);
  }

  // Otherwise, constants can always be exported.
  return true;
}

static bool InBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
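///
/// For example, FCMP_OLT normally maps to the ordered ISD::SETOLT, but when
/// FiniteOnlyFPMath() is in effect (no NaNs to worry about) the relaxed
/// ISD::SETLT is returned instead.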
static ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred) {
  ISD::CondCode FPC, FOC;
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
  case FCmpInst::FCMP_OEQ:   FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
  case FCmpInst::FCMP_OGT:   FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
  case FCmpInst::FCMP_OGE:   FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
  case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
  case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
  case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
  case FCmpInst::FCMP_ORD:   FOC = FPC = ISD::SETO;   break;
  case FCmpInst::FCMP_UNO:   FOC = FPC = ISD::SETUO;  break;
  case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
  case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
  case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
  case FCmpInst::FCMP_ULT:   FOC = ISD::SETLT; FPC = ISD::SETULT; break;
  case FCmpInst::FCMP_ULE:   FOC = ISD::SETLE; FPC = ISD::SETULE; break;
  case FCmpInst::FCMP_UNE:   FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
  case FCmpInst::FCMP_TRUE:  FOC = FPC = ISD::SETTRUE; break;
  default:
    assert(0 && "Invalid FCmp predicate opcode!");
    FOC = FPC = ISD::SETFALSE;
    break;
  }
  if (FiniteOnlyFPMath())
    return FOC;
  else
    return FPC;
}

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
static ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    assert(0 && "Invalid ICmp predicate opcode!");
    return ISD::SETNE;
  }
}

/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
/// This function emits a branch and is used at the leaves of an OR or an
/// AND operator tree.
///
void
SelectionDAGLowering::EmitBranchForMergedCondition(Value *Cond,
                                                   MachineBasicBlock *TBB,
                                                   MachineBasicBlock *FBB,
                                                   MachineBasicBlock *CurBB) {
  const BasicBlock *BB = CurBB->getBasicBlock();

  // If the leaf of the tree is a comparison, merge the condition into
  // the caseblock.
  if (CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    // The operands of the cmp have to be in this block. We don't know
    // how to export them from some other block. If this is the first block
    // of the sequence, no exporting is needed.
    if (CurBB == CurMBB ||
        (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
         isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
      ISD::CondCode Condition;
      if (ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
        Condition = getICmpCondCode(IC->getPredicate());
      } else if (FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
        Condition = getFCmpCondCode(FC->getPredicate());
      } else {
        Condition = ISD::SETEQ; // silence warning.
        assert(0 && "Unknown compare instruction");
      }

      CaseBlock CB(Condition, BOp->getOperand(0),
                   BOp->getOperand(1), NULL, TBB, FBB, CurBB);
      SwitchCases.push_back(CB);
      return;
    }
  }

  // Create a CaseBlock record representing this branch.
  CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(),
               NULL, TBB, FBB, CurBB);
  SwitchCases.push_back(CB);
}

/// FindMergedConditions - If Cond is an expression like (X && Y) or (X || Y),
/// emit a branch tree for the subexpressions instead of computing the
/// condition as a boolean value.
void SelectionDAGLowering::FindMergedConditions(Value *Cond,
                                                MachineBasicBlock *TBB,
                                                MachineBasicBlock *FBB,
                                                MachineBasicBlock *CurBB,
                                                unsigned Opc) {
  // If this node is not part of the or/and tree, emit it as a branch.
  Instruction *BOp = dyn_cast<Instruction>(Cond);
  if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
      (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
      BOp->getParent() != CurBB->getBasicBlock() ||
      !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
      !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
    EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB);
    return;
  }

  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI = CurBB;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //

    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, Opc);

    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    //   jmp_if_X TmpBB
    //   jmp FBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    //  This requires creation of TmpBB after CurBB.

    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, Opc);

    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
  }
}

/// If the set of cases should be emitted as a series of branches, return true.
/// If we should emit this as a bunch of and/or'd together conditions, return
/// false.
bool
SelectionDAGLowering::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
  if (Cases.size() != 2) return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  return true;
}

void SelectionDAGLowering::visitBr(BranchInst &I) {
  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];

  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  if (I.isUnconditional()) {
    // Update machine-CFG edges.
    CurMBB->addSuccessor(Succ0MBB);

    // If this is not a fall-through branch, emit the branch.
    if (Succ0MBB != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
                              MVT::Other, getControlRoot(),
                              DAG.getBasicBlock(Succ0MBB)));
    return;
  }

  // If this condition is one of the special cases we handle, do special stuff
  // now.
  Value *CondVal = I.getCondition();
  MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  // For example, instead of something like:
  //     cmp A, B
  //     C = seteq
  //     cmp D, E
  //     F = setle
  //     or C, F
  //     jnz foo
  // Emit:
  //     cmp A, B
  //     je foo
  //     cmp D, E
  //     jle foo
  //
  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
    if (BOp->hasOneUse() &&
        (BOp->getOpcode() == Instruction::And ||
         BOp->getOpcode() == Instruction::Or)) {
      FindMergedConditions(BOp, Succ0MBB, Succ1MBB, CurMBB, BOp->getOpcode());
      // If the compares in later blocks need to use values not currently
      // exported from this block, export them now. This block should always
      // be the first entry.
      assert(SwitchCases[0].ThisBB == CurMBB && "Unexpected lowering!");

      // Allow some cases to be rejected.
      if (ShouldEmitAsBranches(SwitchCases)) {
        for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
          ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
          ExportFromCurrentBlock(SwitchCases[i].CmpRHS);
        }

        // Emit the branch for this block.
        visitSwitchCase(SwitchCases[0]);
        SwitchCases.erase(SwitchCases.begin());
        return;
      }

      // Okay, we decided not to do this, remove any inserted MBB's and clear
      // SwitchCases.
      for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
        CurMBB->getParent()->erase(SwitchCases[i].ThisBB);

      SwitchCases.clear();
    }
  }

  // Create a CaseBlock record representing this branch.
  CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(),
               NULL, Succ0MBB, Succ1MBB, CurMBB);
  // Use visitSwitchCase to actually insert the fast branch sequence for this
  // branch.
  visitSwitchCase(CB);
}

/// visitSwitchCase - Emits the necessary code to represent a single node in
/// the binary search tree resulting from lowering a switch instruction.
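///
/// For a case range [Low, High], the emitted test is the usual unsigned
/// subtraction trick:
///   (X - Low) <=u (High - Low)
/// which the CmpMHS path below implements with a SUB and a SETULE.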
void SelectionDAGLowering::visitSwitchCase(CaseBlock &CB) {
  SDValue Cond;
  SDValue CondLHS = getValue(CB.CmpLHS);
  DebugLoc dl = getCurDebugLoc();

  // Build the setcc now.
  if (CB.CmpMHS == NULL) {
    // Fold "(X == true)" to X and "(X == false)" to !X to
    // handle common cases produced by branch lowering.
    if (CB.CmpRHS == ConstantInt::getTrue() && CB.CC == ISD::SETEQ)
      Cond = CondLHS;
    else if (CB.CmpRHS == ConstantInt::getFalse() && CB.CC == ISD::SETEQ) {
      SDValue True = DAG.getConstant(1, CondLHS.getValueType());
      Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
    } else
      Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
  } else {
    assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");

    const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    SDValue CmpOp = getValue(CB.CmpMHS);
    MVT VT = CmpOp.getValueType();

    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, VT),
                          ISD::SETLE);
    } else {
      SDValue SUB = DAG.getNode(ISD::SUB, dl,
                                VT, CmpOp, DAG.getConstant(Low, VT));
      Cond = DAG.getSetCC(dl, MVT::i1, SUB,
                          DAG.getConstant(High-Low, VT), ISD::SETULE);
    }
  }

  // Update successor info
  CurMBB->addSuccessor(CB.TrueBB);
  CurMBB->addSuccessor(CB.FalseBB);

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  // If the lhs block is the next block, invert the condition so that we can
  // fall through to the lhs instead of the rhs block.
  if (CB.TrueBB == NextBlock) {
    std::swap(CB.TrueBB, CB.FalseBB);
    SDValue True = DAG.getConstant(1, Cond.getValueType());
    Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
  }
  SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
                               MVT::Other, getControlRoot(), Cond,
                               DAG.getBasicBlock(CB.TrueBB));

  // If the branch was constant folded, fix up the CFG.
  if (BrCond.getOpcode() == ISD::BR) {
    CurMBB->removeSuccessor(CB.FalseBB);
    DAG.setRoot(BrCond);
  } else {
    // Otherwise, go ahead and insert the false branch.
    if (BrCond == getControlRoot())
      CurMBB->removeSuccessor(CB.TrueBB);

    if (CB.FalseBB == NextBlock)
      DAG.setRoot(BrCond);
    else
      DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
                              DAG.getBasicBlock(CB.FalseBB)));
  }
}

/// visitJumpTable - Emit JumpTable node in the current MBB
void SelectionDAGLowering::visitJumpTable(JumpTable &JT) {
  // Emit the code for the jump table
  assert(JT.Reg != -1U && "Should lower JT Header first!");
  MVT PTy = TLI.getPointerTy();
  SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(),
                                     JT.Reg, PTy);
  SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
  DAG.setRoot(DAG.getNode(ISD::BR_JT, getCurDebugLoc(),
                          MVT::Other, Index.getValue(1),
                          Table, Index));
}

/// visitJumpTableHeader - This function emits the necessary code to produce
/// an index into the jump table from the value being switched on.
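///
/// Roughly, for "switch (X)" with cases spanning First..Last this emits:
///   Index = X - First;
///   if (Index >u Last - First) goto Default;
/// and copies Index into a virtual register for use by the jump in a later
/// block.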
void SelectionDAGLowering::visitJumpTableHeader(JumpTable &JT,
                                                JumpTableHeader &JTH) {
  // Subtract the lowest switch case value from the value being switched on and
  // conditional branch to default mbb if the result is greater than the
  // difference between smallest and largest cases.
  SDValue SwitchOp = getValue(JTH.SValue);
  MVT VT = SwitchOp.getValueType();
  SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
                            DAG.getConstant(JTH.First, VT));

  // The SDNode we just created, which holds the value being switched on minus
  // the smallest case value, needs to be copied to a virtual register so it
  // can be used as an index into the jump table in a subsequent basic block.
  // This value may be smaller or larger than the target's pointer type, and
  // therefore require extension or truncating.
  if (VT.bitsGT(TLI.getPointerTy()))
    SwitchOp = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
                           TLI.getPointerTy(), SUB);
  else
    SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
                           TLI.getPointerTy(), SUB);

  unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
                                    JumpTableReg, SwitchOp);
  JT.Reg = JumpTableReg;

  // Emit the range check for the jump table, and branch to the default block
  // for the switch statement if the value being switched on exceeds the largest
  // case in the switch.
  SDValue CMP = DAG.getSetCC(getCurDebugLoc(),
                             TLI.getSetCCResultType(SUB.getValueType()), SUB,
                             DAG.getConstant(JTH.Last-JTH.First,VT),
                             ISD::SETUGT);

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
                               MVT::Other, CopyTo, CMP,
                               DAG.getBasicBlock(JT.Default));

  if (JT.MBB == NextBlock)
    DAG.setRoot(BrCond);
  else
    DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrCond,
                            DAG.getBasicBlock(JT.MBB)));
}

/// visitBitTestHeader - This function emits the necessary code to produce a
/// value suitable for "bit tests".
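///
/// The idea: after subtracting the minimum case value and range-checking,
/// each cluster of cases is tested in a later block with an expression of
/// the form ((1 << (X - First)) & Mask) != 0, as emitted by
/// visitBitTestCase below.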
void SelectionDAGLowering::visitBitTestHeader(BitTestBlock &B) {
  // Subtract the minimum value
  SDValue SwitchOp = getValue(B.SValue);
  MVT VT = SwitchOp.getValueType();
  SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
                            DAG.getConstant(B.First, VT));

  // Check range
  SDValue RangeCmp = DAG.getSetCC(getCurDebugLoc(),
                                  TLI.getSetCCResultType(SUB.getValueType()),
                                  SUB, DAG.getConstant(B.Range, VT),
                                  ISD::SETUGT);

  SDValue ShiftOp;
  if (VT.bitsGT(TLI.getPointerTy()))
    ShiftOp = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
                          TLI.getPointerTy(), SUB);
  else
    ShiftOp = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
                          TLI.getPointerTy(), SUB);

  B.Reg = FuncInfo.MakeReg(TLI.getPointerTy());
  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
                                    B.Reg, ShiftOp);

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  MachineBasicBlock* MBB = B.Cases[0].ThisBB;

  CurMBB->addSuccessor(B.Default);
  CurMBB->addSuccessor(MBB);

  SDValue BrRange = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
                                MVT::Other, CopyTo, RangeCmp,
                                DAG.getBasicBlock(B.Default));

  if (MBB == NextBlock)
    DAG.setRoot(BrRange);
  else
    DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, CopyTo,
                            DAG.getBasicBlock(MBB)));
}

/// visitBitTestCase - this function produces one "bit test"
void SelectionDAGLowering::visitBitTestCase(MachineBasicBlock* NextMBB,
                                            unsigned Reg,
                                            BitTestCase &B) {
  // Make desired shift
  SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(), Reg,
                                       TLI.getPointerTy());
  SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(),
                                  TLI.getPointerTy(),
                                  DAG.getConstant(1, TLI.getPointerTy()),
                                  ShiftOp);

  // Emit bit tests and jumps
  SDValue AndOp = DAG.getNode(ISD::AND, getCurDebugLoc(),
                              TLI.getPointerTy(), SwitchVal,
                              DAG.getConstant(B.Mask, TLI.getPointerTy()));
  SDValue AndCmp = DAG.getSetCC(getCurDebugLoc(),
                                TLI.getSetCCResultType(AndOp.getValueType()),
                                AndOp, DAG.getConstant(0, TLI.getPointerTy()),
                                ISD::SETNE);

  CurMBB->addSuccessor(B.TargetBB);
  CurMBB->addSuccessor(NextMBB);

  SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
                              MVT::Other, getControlRoot(),
                              AndCmp, DAG.getBasicBlock(B.TargetBB));

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  if (NextMBB == NextBlock)
    DAG.setRoot(BrAnd);
  else
    DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrAnd,
                            DAG.getBasicBlock(NextMBB)));
}

void SelectionDAGLowering::visitInvoke(InvokeInst &I) {
  // Retrieve successors.
  MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
  MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];

  const Value *Callee(I.getCalledValue());
  if (isa<InlineAsm>(Callee))
    visitInlineAsm(&I);
  else
    LowerCallTo(&I, getValue(Callee), false, LandingPad);

  // If the value of the invoke is used outside of its defining block, make it
  // available as a virtual register.
  CopyToExportRegsIfNeeded(&I);

  // Update successor info
  CurMBB->addSuccessor(Return);
  CurMBB->addSuccessor(LandingPad);

  // Drop into normal successor.
  DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
                          MVT::Other, getControlRoot(),
                          DAG.getBasicBlock(Return)));
}

void SelectionDAGLowering::visitUnwind(UnwindInst &I) {
}

/// handleSmallSwitchRange - Emit a series of specific tests (suitable for
/// small case ranges).
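///
/// For example, "switch (X) { case 1: ...; case 10: ...; }" with three or
/// fewer case ranges becomes a short chain of compare-and-branch blocks,
/// each falling through to the next test and finally to the default block.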
1604 bool SelectionDAGLowering::handleSmallSwitchRange(CaseRec& CR,
1605 CaseRecVector& WorkList,
1607 MachineBasicBlock* Default) {
1608 Case& BackCase = *(CR.Range.second-1);
1610 // Size is the number of Cases represented by this range.
1611 size_t Size = CR.Range.second - CR.Range.first;
1615 // Get the MachineFunction which holds the current MBB. This is used when
1616 // inserting any additional MBBs necessary to represent the switch.
1617 MachineFunction *CurMF = CurMBB->getParent();
1619 // Figure out which block is immediately after the current one.
1620 MachineBasicBlock *NextBlock = 0;
1621 MachineFunction::iterator BBI = CR.CaseBB;
1623 if (++BBI != CurMBB->getParent()->end())
1626 // TODO: If any two of the cases has the same destination, and if one value
1627 // is the same as the other, but has one bit unset that the other has set,
1628 // use bit manipulation to do two compares at once. For example:
1629 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
1631 // Rearrange the case blocks so that the last one falls through if possible.
1632 if (NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
1633 // The last case block won't fall through into 'NextBlock' if we emit the
1634 // branches in this order. See if rearranging a case value would help.
1635 for (CaseItr I = CR.Range.first, E = CR.Range.second-1; I != E; ++I) {
1636 if (I->BB == NextBlock) {
1637 std::swap(*I, BackCase);
1643 // Create a CaseBlock record representing a conditional branch to
1644 // the Case's target mbb if the value being switched on SV is equal
1646 MachineBasicBlock *CurBlock = CR.CaseBB;
1647 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
MachineBasicBlock *FallThrough;
if (I != E-1) {
  FallThrough = CurMF->CreateMachineBasicBlock(CurBlock->getBasicBlock());
  CurMF->insert(BBI, FallThrough);

  // Put SV in a virtual register to make it available from the new blocks.
  ExportFromCurrentBlock(SV);
} else {
  // If the last case doesn't match, go to the default block.
  FallThrough = Default;
}

Value *RHS, *LHS, *MHS;
ISD::CondCode CC;
if (I->High == I->Low) {
  // This is just a small case range :) containing exactly 1 case
  CC = ISD::SETEQ;
  LHS = SV; RHS = I->High; MHS = NULL;
} else {
  CC = ISD::SETLE;
  LHS = I->Low; MHS = SV; RHS = I->High;
}
1670 CaseBlock CB(CC, LHS, RHS, MHS, I->BB, FallThrough, CurBlock);
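// For a single value the test is SV == High; for a cluster [Low, High] the
// MHS slot carries SV and the emitted test is Low <= SV <= High.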
1672 // If emitting the first comparison, just call visitSwitchCase to emit the
1673 // code into the current block. Otherwise, push the CaseBlock onto the
1674 // vector to be later processed by SDISel, and insert the node's MBB
1675 // before the next MBB.
if (CurBlock == CurMBB)
  visitSwitchCase(CB);
else
  SwitchCases.push_back(CB);

CurBlock = FallThrough;
}

return true;
}
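// Jump tables are emitted only when the target can lower either a native
// BR_JT node or an indirect branch (BRIND) for the table dispatch.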
1687 static inline bool areJTsAllowed(const TargetLowering &TLI) {
1688 return !DisableJumpTables &&
1689 (TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
}
1693 static APInt ComputeRange(const APInt &First, const APInt &Last) {
1694 APInt LastExt(Last), FirstExt(First);
1695 uint32_t BitWidth = std::max(Last.getBitWidth(), First.getBitWidth()) + 1;
1696 LastExt.sext(BitWidth); FirstExt.sext(BitWidth);
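// The extra bit of width keeps the subtraction from overflowing, e.g.
// First = -2, Last = 5 gives Range = 8.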
return (LastExt - FirstExt + 1ULL);
}
/// handleJTSwitchCase - Emit a jump table for the current switch case range.
1701 bool SelectionDAGLowering::handleJTSwitchCase(CaseRec& CR,
CaseRecVector& WorkList,
Value* SV,
MachineBasicBlock* Default) {
1705 Case& FrontCase = *CR.Range.first;
1706 Case& BackCase = *(CR.Range.second-1);
1708 const APInt& First = cast<ConstantInt>(FrontCase.Low)->getValue();
1709 const APInt& Last = cast<ConstantInt>(BackCase.High)->getValue();
size_t TSize = 0;
for (CaseItr I = CR.Range.first, E = CR.Range.second;
     I != E; ++I)
  TSize += I->size();

if (!areJTsAllowed(TLI) || TSize <= 3)
  return false;
1719 APInt Range = ComputeRange(First, Last);
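// Density is cases per value in the span; e.g. 5 cases covering the values
// 0..9 give Density 0.5 and qualify, while 5 cases spread over 0..99 do not.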
double Density = (double)TSize / Range.roundToDouble();
if (Density < 0.4)
  return false;
1724 DEBUG(errs() << "Lowering jump table\n"
1725 << "First entry: " << First << ". Last entry: " << Last << '\n'
1726 << "Range: " << Range
1727 << "Size: " << TSize << ". Density: " << Density << "\n\n");
1729 // Get the MachineFunction which holds the current MBB. This is used when
1730 // inserting any additional MBBs necessary to represent the switch.
1731 MachineFunction *CurMF = CurMBB->getParent();
1733 // Figure out which block is immediately after the current one.
1734 MachineBasicBlock *NextBlock = 0;
1735 MachineFunction::iterator BBI = CR.CaseBB;
1737 if (++BBI != CurMBB->getParent()->end())
1740 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1742 // Create a new basic block to hold the code for loading the address
1743 // of the jump table, and jumping to it. Update successor information;
1744 // we will either branch to the default case for the switch, or the jump
1746 MachineBasicBlock *JumpTableBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1747 CurMF->insert(BBI, JumpTableBB);
1748 CR.CaseBB->addSuccessor(Default);
1749 CR.CaseBB->addSuccessor(JumpTableBB);
// Build a vector of destination BBs, corresponding to each target
// of the jump table. If the value of the jump table slot corresponds to
// a case statement, push the case's BB onto the vector, otherwise, push
// the default BB.
std::vector<MachineBasicBlock*> DestBBs;
APInt TEI = First;
for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++TEI) {
  const APInt& Low = cast<ConstantInt>(I->Low)->getValue();
  const APInt& High = cast<ConstantInt>(I->High)->getValue();

  if (Low.sle(TEI) && TEI.sle(High)) {
    DestBBs.push_back(I->BB);
    if (TEI == High)
      ++I;
  } else {
    DestBBs.push_back(Default);
  }
}
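// Slots with no matching case fall through to the default; e.g. cases
// {0, 1, 4} over the range 0..4 yield [BB0, BB1, Default, Default, BB4].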
1770 // Update successor info. Add one edge to each unique successor.
1771 BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs());
1772 for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
1773 E = DestBBs.end(); I != E; ++I) {
1774 if (!SuccsHandled[(*I)->getNumber()]) {
1775 SuccsHandled[(*I)->getNumber()] = true;
1776 JumpTableBB->addSuccessor(*I);
// Create a jump table index for this jump table, or return an existing
// one.
1782 unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);
1784 // Set the jump table information so that we can codegen it as a second
1785 // MachineBasicBlock
1786 JumpTable JT(-1U, JTI, JumpTableBB, Default);
1787 JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == CurMBB));
1788 if (CR.CaseBB == CurMBB)
1789 visitJumpTableHeader(JT, JTH);
JTCases.push_back(JumpTableBlock(JTH, JT));

return true;
}
/// handleBTSplitSwitchCase - emit comparison and split binary search tree into
/// two subtrees.
1798 bool SelectionDAGLowering::handleBTSplitSwitchCase(CaseRec& CR,
CaseRecVector& WorkList,
Value* SV,
MachineBasicBlock* Default) {
1802 // Get the MachineFunction which holds the current MBB. This is used when
1803 // inserting any additional MBBs necessary to represent the switch.
1804 MachineFunction *CurMF = CurMBB->getParent();
1806 // Figure out which block is immediately after the current one.
1807 MachineBasicBlock *NextBlock = 0;
1808 MachineFunction::iterator BBI = CR.CaseBB;
if (++BBI != CurMBB->getParent()->end())
  NextBlock = BBI;
1813 Case& FrontCase = *CR.Range.first;
1814 Case& BackCase = *(CR.Range.second-1);
1815 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1817 // Size is the number of Cases represented by this range.
1818 unsigned Size = CR.Range.second - CR.Range.first;
1820 const APInt& First = cast<ConstantInt>(FrontCase.Low)->getValue();
1821 const APInt& Last = cast<ConstantInt>(BackCase.High)->getValue();
1823 CaseItr Pivot = CR.Range.first + Size/2;
1825 // Select optimal pivot, maximizing sum density of LHS and RHS. This will
// (heuristically) allow us to emit jump tables later.
size_t TSize = 0;
for (CaseItr I = CR.Range.first, E = CR.Range.second;
     I != E; ++I)
  TSize += I->size();
1832 size_t LSize = FrontCase.size();
1833 size_t RSize = TSize-LSize;
1834 DEBUG(errs() << "Selecting best pivot: \n"
1835 << "First: " << First << ", Last: " << Last <<'\n'
1836 << "LSize: " << LSize << ", RSize: " << RSize << '\n');
double FMetric = 0;
for (CaseItr I = CR.Range.first, J = I+1, E = CR.Range.second;
     J != E; ++I, ++J) {
1839 const APInt& LEnd = cast<ConstantInt>(I->High)->getValue();
1840 const APInt& RBegin = cast<ConstantInt>(J->Low)->getValue();
1841 APInt Range = ComputeRange(LEnd, RBegin);
1842 assert((Range - 2ULL).isNonNegative() &&
1843 "Invalid case distance");
1844 double LDensity = (double)LSize / (LEnd - First + 1ULL).roundToDouble();
1845 double RDensity = (double)RSize / (Last - RBegin + 1ULL).roundToDouble();
1846 double Metric = Range.logBase2()*(LDensity+RDensity);
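// The metric favors splitting at a wide gap between neighboring clusters
// (large Range) while both halves stay dense, which keeps each subtree a
// plausible jump-table candidate.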
1847 // Should always split in some non-trivial place
1848 DEBUG(errs() <<"=>Step\n"
1849 << "LEnd: " << LEnd << ", RBegin: " << RBegin << '\n'
1850 << "LDensity: " << LDensity
1851 << ", RDensity: " << RDensity << '\n'
1852 << "Metric: " << Metric << '\n');
if (FMetric < Metric) {
  Pivot = J;
  FMetric = Metric;
  DEBUG(errs() << "Current metric set to: " << FMetric << '\n');
}
// Iterate over the rest of cases.
LSize += J->size();
RSize -= J->size();
}
1862 if (areJTsAllowed(TLI)) {
1863 // If our case is dense we *really* should handle it earlier!
assert((FMetric > 0) && "Should handle dense range earlier!");
} else {
  Pivot = CR.Range.first + Size/2;
}
1869 CaseRange LHSR(CR.Range.first, Pivot);
1870 CaseRange RHSR(Pivot, CR.Range.second);
1871 Constant *C = Pivot->Low;
1872 MachineBasicBlock *FalseBB = 0, *TrueBB = 0;
1874 // We know that we branch to the LHS if the Value being switched on is
1875 // less than the Pivot value, C. We use this to optimize our binary
1876 // tree a bit, by recognizing that if SV is greater than or equal to the
1877 // LHS's Case Value, and that Case Value is exactly one less than the
1878 // Pivot's Value, then we can branch directly to the LHS's Target,
1879 // rather than creating a leaf node for it.
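// Illustration: with cases {3} and {4,5}, pivot C == 4 and CR.GE == 3, the
// branch "SV < 4" already pins SV to 3, so jump straight to case 3's block.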
1880 if ((LHSR.second - LHSR.first) == 1 &&
1881 LHSR.first->High == CR.GE &&
1882 cast<ConstantInt>(C)->getValue() ==
1883 (cast<ConstantInt>(CR.GE)->getValue() + 1LL)) {
TrueBB = LHSR.first->BB;
} else {
1886 TrueBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1887 CurMF->insert(BBI, TrueBB);
1888 WorkList.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
1890 // Put SV in a virtual register to make it available from the new blocks.
ExportFromCurrentBlock(SV);
}
1894 // Similar to the optimization above, if the Value being switched on is
1895 // known to be less than the Constant CR.LT, and the current Case Value
1896 // is CR.LT - 1, then we can branch directly to the target block for
1897 // the current Case Value, rather than emitting a RHS leaf node for it.
1898 if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
1899 cast<ConstantInt>(RHSR.first->Low)->getValue() ==
1900 (cast<ConstantInt>(CR.LT)->getValue() - 1LL)) {
FalseBB = RHSR.first->BB;
} else {
1903 FalseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1904 CurMF->insert(BBI, FalseBB);
WorkList.push_back(CaseRec(FalseBB, CR.LT, C, RHSR));
1907 // Put SV in a virtual register to make it available from the new blocks.
ExportFromCurrentBlock(SV);
}
// Create a CaseBlock record representing a conditional branch to
// the LHS node if the value being switched on SV is less than C.
// Otherwise, branch to RHS.
1914 CaseBlock CB(ISD::SETLT, SV, C, NULL, TrueBB, FalseBB, CR.CaseBB);
if (CR.CaseBB == CurMBB)
  visitSwitchCase(CB);
else
  SwitchCases.push_back(CB);

return true;
}
/// handleBitTestsSwitchCase - if the current case range has few destinations
/// and the range spans less than the machine word bitwidth, encode the case
/// range into a series of masks and emit bit tests with these masks.
1927 bool SelectionDAGLowering::handleBitTestsSwitchCase(CaseRec& CR,
CaseRecVector& WorkList,
Value* SV,
MachineBasicBlock* Default){
1931 unsigned IntPtrBits = TLI.getPointerTy().getSizeInBits();
1933 Case& FrontCase = *CR.Range.first;
1934 Case& BackCase = *(CR.Range.second-1);
1936 // Get the MachineFunction which holds the current MBB. This is used when
1937 // inserting any additional MBBs necessary to represent the switch.
1938 MachineFunction *CurMF = CurMBB->getParent();
1940 // If target does not have legal shift left, do not emit bit tests at all.
if (!TLI.isOperationLegal(ISD::SHL, TLI.getPointerTy()))
  return false;
size_t numCmps = 0;
for (CaseItr I = CR.Range.first, E = CR.Range.second;
     I != E; ++I) {
  // A single case counts as one comparison, a case range as two.
  numCmps += (I->Low == I->High ? 1 : 2);
}
1951 // Count unique destinations
1952 SmallSet<MachineBasicBlock*, 4> Dests;
1953 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1954 Dests.insert(I->BB);
if (Dests.size() > 3)
  // Don't bother with the code below if there are too many unique
  // destinations.
  return false;
}
1959 DEBUG(errs() << "Total number of unique destinations: " << Dests.size() << '\n'
1960 << "Total number of comparisons: " << numCmps << '\n');
1962 // Compute span of values.
1963 const APInt& minValue = cast<ConstantInt>(FrontCase.Low)->getValue();
1964 const APInt& maxValue = cast<ConstantInt>(BackCase.High)->getValue();
1965 APInt cmpRange = maxValue - minValue;
1967 DEBUG(errs() << "Compare range: " << cmpRange << '\n'
1968 << "Low bound: " << minValue << '\n'
1969 << "High bound: " << maxValue << '\n');
1971 if (cmpRange.uge(APInt(cmpRange.getBitWidth(), IntPtrBits)) ||
1972 (!(Dests.size() == 1 && numCmps >= 3) &&
1973 !(Dests.size() == 2 && numCmps >= 5) &&
!(Dests.size() >= 3 && numCmps >= 6)))
  return false;
1977 DEBUG(errs() << "Emitting bit tests\n");
1978 APInt lowBound = APInt::getNullValue(cmpRange.getBitWidth());
1980 // Optimize the case where all the case values fit in a
1981 // word without having to subtract minValue. In this case,
1982 // we can optimize away the subtraction.
1983 if (minValue.isNonNegative() &&
1984 maxValue.slt(APInt(maxValue.getBitWidth(), IntPtrBits))) {
cmpRange = maxValue;
} else {
  lowBound = minValue;
}
1990 CaseBitsVector CasesBits;
1991 unsigned i, count = 0;
1993 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1994 MachineBasicBlock* Dest = I->BB;
for (i = 0; i < count; ++i)
  if (Dest == CasesBits[i].BB)
    break;

if (i == count) {
  assert((count < 3) && "Too many destinations to test!");
  CasesBits.push_back(CaseBits(0, Dest, 0));
  count++;
}
2005 const APInt& lowValue = cast<ConstantInt>(I->Low)->getValue();
2006 const APInt& highValue = cast<ConstantInt>(I->High)->getValue();
2008 uint64_t lo = (lowValue - lowBound).getZExtValue();
2009 uint64_t hi = (highValue - lowBound).getZExtValue();
2011 for (uint64_t j = lo; j <= hi; j++) {
2012 CasesBits[i].Mask |= 1ULL << j;
CasesBits[i].Bits++;
}
}
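// e.g. with lowBound 0, cases 1, 3 and 5 going to one block produce
// Mask = 0b101010 and Bits = 3 for that destination.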
std::sort(CasesBits.begin(), CasesBits.end(), CaseBitsCmp());

BitTestInfo BTC;
2021 // Figure out which block is immediately after the current one.
MachineFunction::iterator BBI = CR.CaseBB;
++BBI;
2025 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
2027 DEBUG(errs() << "Cases:\n");
2028 for (unsigned i = 0, e = CasesBits.size(); i!=e; ++i) {
2029 DEBUG(errs() << "Mask: " << CasesBits[i].Mask
2030 << ", Bits: " << CasesBits[i].Bits
2031 << ", BB: " << CasesBits[i].BB << '\n');
2033 MachineBasicBlock *CaseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
2034 CurMF->insert(BBI, CaseBB);
BTC.push_back(BitTestCase(CasesBits[i].Mask,
                          CaseBB,
                          CasesBits[i].BB));
}
2039 // Put SV in a virtual register to make it available from the new blocks.
2040 ExportFromCurrentBlock(SV);
2043 BitTestBlock BTB(lowBound, cmpRange, SV,
2044 -1U, (CR.CaseBB == CurMBB),
2045 CR.CaseBB, Default, BTC);
2047 if (CR.CaseBB == CurMBB)
2048 visitBitTestHeader(BTB);
BitTestCases.push_back(BTB);

return true;
}
2056 /// Clusterify - Transform simple list of Cases into list of CaseRange's
2057 size_t SelectionDAGLowering::Clusterify(CaseVector& Cases,
const SwitchInst& SI) {
size_t numCmps = 0;
2061 // Start with "simple" cases
2062 for (size_t i = 1; i < SI.getNumSuccessors(); ++i) {
2063 MachineBasicBlock *SMBB = FuncInfo.MBBMap[SI.getSuccessor(i)];
2064 Cases.push_back(Case(SI.getSuccessorValue(i),
SI.getSuccessorValue(i),
SMBB));
}
2068 std::sort(Cases.begin(), Cases.end(), CaseCmp());
2070 // Merge case into clusters
2071 if (Cases.size() >= 2)
2072 // Must recompute end() each iteration because it may be
2073 // invalidated by erase if we hold on to it
2074 for (CaseItr I = Cases.begin(), J = ++(Cases.begin()); J != Cases.end(); ) {
2075 const APInt& nextValue = cast<ConstantInt>(J->Low)->getValue();
2076 const APInt& currentValue = cast<ConstantInt>(I->High)->getValue();
2077 MachineBasicBlock* nextBB = J->BB;
2078 MachineBasicBlock* currentBB = I->BB;
2080 // If the two neighboring cases go to the same destination, merge them
2081 // into a single case.
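// e.g. values 1, 2 and 3 all branching to A become the single cluster
// [1, 3] -> A.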
if ((nextValue - currentValue == 1) && (currentBB == nextBB)) {
  I->High = J->High;
  J = Cases.erase(J);
} else {
  I = J++;
}
}
2090 for (CaseItr I=Cases.begin(), E=Cases.end(); I!=E; ++I, ++numCmps) {
if (I->Low != I->High)
  // A range counts double, since it requires two compares.
  ++numCmps;
}

return numCmps;
}
2099 void SelectionDAGLowering::visitSwitch(SwitchInst &SI) {
2100 // Figure out which block is immediately after the current one.
2101 MachineBasicBlock *NextBlock = 0;
MachineFunction::iterator BBI = CurMBB;
if (++BBI != CurMBB->getParent()->end())
  NextBlock = BBI;
2104 MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
2106 // If there is only the default destination, branch to it if it is not the
2107 // next basic block. Otherwise, just fall through.
2108 if (SI.getNumOperands() == 2) {
2109 // Update machine-CFG edges.
2111 // If this is not a fall-through branch, emit the branch.
2112 CurMBB->addSuccessor(Default);
2113 if (Default != NextBlock)
2114 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
2115 MVT::Other, getControlRoot(),
DAG.getBasicBlock(Default)));
return;
}
2120 // If there are any non-default case statements, create a vector of Cases
2121 // representing each one, and sort the vector so that we can efficiently
2122 // create a binary search tree from them.
CaseVector Cases;
size_t numCmps = Clusterify(Cases, SI);
2125 DEBUG(errs() << "Clusterify finished. Total clusters: " << Cases.size()
2126 << ". Total compares: " << numCmps << '\n');
2129 // Get the Value to be switched on and default basic blocks, which will be
// inserted into CaseBlock records, representing basic blocks in the binary
// search tree.
2132 Value *SV = SI.getOperand(0);
2134 // Push the initial CaseRec onto the worklist
2135 CaseRecVector WorkList;
2136 WorkList.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));
2138 while (!WorkList.empty()) {
2139 // Grab a record representing a case range to process off the worklist
2140 CaseRec CR = WorkList.back();
2141 WorkList.pop_back();
if (handleBitTestsSwitchCase(CR, WorkList, SV, Default))
  continue;
// If the range has few cases (three or fewer) emit a series of specific
// tests.
if (handleSmallSwitchRange(CR, WorkList, SV, Default))
  continue;
2151 // If the switch has more than 5 blocks, and at least 40% dense, and the
2152 // target supports indirect branches, then emit a jump table rather than
2153 // lowering the switch to a binary tree of conditional branches.
if (handleJTSwitchCase(CR, WorkList, SV, Default))
  continue;
// Emit binary tree. We need to pick a pivot, and push left and right ranges
// onto the worklist. Leaves are handled via handleSmallSwitchRange() call.
handleBTSplitSwitchCase(CR, WorkList, SV, Default);
}
}
2164 void SelectionDAGLowering::visitSub(User &I) {
2165 // -0.0 - X --> fneg
2166 const Type *Ty = I.getType();
2167 if (isa<VectorType>(Ty)) {
2168 if (ConstantVector *CV = dyn_cast<ConstantVector>(I.getOperand(0))) {
2169 const VectorType *DestTy = cast<VectorType>(I.getType());
2170 const Type *ElTy = DestTy->getElementType();
2171 if (ElTy->isFloatingPoint()) {
2172 unsigned VL = DestTy->getNumElements();
2173 std::vector<Constant*> NZ(VL, ConstantFP::getNegativeZero(ElTy));
2174 Constant *CNZ = ConstantVector::get(&NZ[0], NZ.size());
if (CV == CNZ) {
  SDValue Op2 = getValue(I.getOperand(1));
  setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
                           Op2.getValueType(), Op2));
  return;
}
}
}
}
2184 if (Ty->isFloatingPoint()) {
2185 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
2186 if (CFP->isExactlyValue(ConstantFP::getNegativeZero(Ty)->getValueAPF())) {
2187 SDValue Op2 = getValue(I.getOperand(1));
2188 setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
Op2.getValueType(), Op2));
return;
}
}

visitBinary(I, Ty->isFPOrFPVector() ? ISD::FSUB : ISD::SUB);
}
2197 void SelectionDAGLowering::visitBinary(User &I, unsigned OpCode) {
2198 SDValue Op1 = getValue(I.getOperand(0));
2199 SDValue Op2 = getValue(I.getOperand(1));
2201 setValue(&I, DAG.getNode(OpCode, getCurDebugLoc(),
2202 Op1.getValueType(), Op1, Op2));
2205 void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
2206 SDValue Op1 = getValue(I.getOperand(0));
2207 SDValue Op2 = getValue(I.getOperand(1));
2208 if (!isa<VectorType>(I.getType()) &&
2209 Op2.getValueType() != TLI.getShiftAmountTy()) {
2210 // If the operand is smaller than the shift count type, promote it.
2211 if (TLI.getShiftAmountTy().bitsGT(Op2.getValueType()))
2212 Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2213 TLI.getShiftAmountTy(), Op2);
2214 // If the operand is larger than the shift count type but the shift
2215 // count type has enough bits to represent any shift value, truncate
2216 // it now. This is a common case and it exposes the truncate to
2217 // optimization early.
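// e.g. truncating an i32 shift count to x86's i8 shift amount type is safe
// here because every meaningful count is smaller than the operand bitwidth
// and therefore fits in the narrow count type.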
2218 else if (TLI.getShiftAmountTy().getSizeInBits() >=
2219 Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
2220 Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2221 TLI.getShiftAmountTy(), Op2);
2222 // Otherwise we'll need to temporarily settle for some other
// convenient type; type legalization will make adjustments as
// needed.
2225 else if (TLI.getPointerTy().bitsLT(Op2.getValueType()))
2226 Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2227 TLI.getPointerTy(), Op2);
2228 else if (TLI.getPointerTy().bitsGT(Op2.getValueType()))
2229 Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2230 TLI.getPointerTy(), Op2);
2233 setValue(&I, DAG.getNode(Opcode, getCurDebugLoc(),
2234 Op1.getValueType(), Op1, Op2));
2237 void SelectionDAGLowering::visitICmp(User &I) {
2238 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2239 if (ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2240 predicate = IC->getPredicate();
2241 else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2242 predicate = ICmpInst::Predicate(IC->getPredicate());
2243 SDValue Op1 = getValue(I.getOperand(0));
2244 SDValue Op2 = getValue(I.getOperand(1));
2245 ISD::CondCode Opcode = getICmpCondCode(predicate);
setValue(&I, DAG.getSetCC(getCurDebugLoc(), MVT::i1, Op1, Op2, Opcode));
}
2249 void SelectionDAGLowering::visitFCmp(User &I) {
2250 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2251 if (FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2252 predicate = FC->getPredicate();
2253 else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2254 predicate = FCmpInst::Predicate(FC->getPredicate());
2255 SDValue Op1 = getValue(I.getOperand(0));
2256 SDValue Op2 = getValue(I.getOperand(1));
2257 ISD::CondCode Condition = getFCmpCondCode(predicate);
2258 setValue(&I, DAG.getSetCC(getCurDebugLoc(), MVT::i1, Op1, Op2, Condition));
2261 void SelectionDAGLowering::visitVICmp(User &I) {
2262 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2263 if (VICmpInst *IC = dyn_cast<VICmpInst>(&I))
2264 predicate = IC->getPredicate();
2265 else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2266 predicate = ICmpInst::Predicate(IC->getPredicate());
2267 SDValue Op1 = getValue(I.getOperand(0));
2268 SDValue Op2 = getValue(I.getOperand(1));
2269 ISD::CondCode Opcode = getICmpCondCode(predicate);
setValue(&I, DAG.getVSetCC(getCurDebugLoc(), Op1.getValueType(),
                           Op1, Op2, Opcode));
}
2274 void SelectionDAGLowering::visitVFCmp(User &I) {
2275 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2276 if (VFCmpInst *FC = dyn_cast<VFCmpInst>(&I))
2277 predicate = FC->getPredicate();
2278 else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2279 predicate = FCmpInst::Predicate(FC->getPredicate());
2280 SDValue Op1 = getValue(I.getOperand(0));
2281 SDValue Op2 = getValue(I.getOperand(1));
2282 ISD::CondCode Condition = getFCmpCondCode(predicate);
2283 MVT DestVT = TLI.getValueType(I.getType());
2285 setValue(&I, DAG.getVSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Condition));
2288 void SelectionDAGLowering::visitSelect(User &I) {
2289 SmallVector<MVT, 4> ValueVTs;
2290 ComputeValueVTs(TLI, I.getType(), ValueVTs);
2291 unsigned NumValues = ValueVTs.size();
2292 if (NumValues != 0) {
2293 SmallVector<SDValue, 4> Values(NumValues);
2294 SDValue Cond = getValue(I.getOperand(0));
2295 SDValue TrueVal = getValue(I.getOperand(1));
2296 SDValue FalseVal = getValue(I.getOperand(2));
2298 for (unsigned i = 0; i != NumValues; ++i)
2299 Values[i] = DAG.getNode(ISD::SELECT, getCurDebugLoc(),
2300 TrueVal.getValueType(), Cond,
2301 SDValue(TrueVal.getNode(), TrueVal.getResNo() + i),
2302 SDValue(FalseVal.getNode(), FalseVal.getResNo() + i));
2304 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2305 DAG.getVTList(&ValueVTs[0], NumValues),
&Values[0], NumValues));
}
}
2311 void SelectionDAGLowering::visitTrunc(User &I) {
2312 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
2313 SDValue N = getValue(I.getOperand(0));
2314 MVT DestVT = TLI.getValueType(I.getType());
2315 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
2318 void SelectionDAGLowering::visitZExt(User &I) {
2319 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
// ZExt also can't be a cast to bool for the same reason. So, nothing much to do.
2321 SDValue N = getValue(I.getOperand(0));
2322 MVT DestVT = TLI.getValueType(I.getType());
2323 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N));
2326 void SelectionDAGLowering::visitSExt(User &I) {
2327 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
// SExt also can't be a cast to bool for the same reason. So, nothing much to do.
2329 SDValue N = getValue(I.getOperand(0));
2330 MVT DestVT = TLI.getValueType(I.getType());
2331 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(), DestVT, N));
2334 void SelectionDAGLowering::visitFPTrunc(User &I) {
2335 // FPTrunc is never a no-op cast, no need to check
2336 SDValue N = getValue(I.getOperand(0));
2337 MVT DestVT = TLI.getValueType(I.getType());
2338 setValue(&I, DAG.getNode(ISD::FP_ROUND, getCurDebugLoc(),
2339 DestVT, N, DAG.getIntPtrConstant(0)));
2342 void SelectionDAGLowering::visitFPExt(User &I){
// FPExt is never a no-op cast, no need to check
2344 SDValue N = getValue(I.getOperand(0));
2345 MVT DestVT = TLI.getValueType(I.getType());
2346 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurDebugLoc(), DestVT, N));
2349 void SelectionDAGLowering::visitFPToUI(User &I) {
2350 // FPToUI is never a no-op cast, no need to check
2351 SDValue N = getValue(I.getOperand(0));
2352 MVT DestVT = TLI.getValueType(I.getType());
2353 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurDebugLoc(), DestVT, N));
2356 void SelectionDAGLowering::visitFPToSI(User &I) {
2357 // FPToSI is never a no-op cast, no need to check
2358 SDValue N = getValue(I.getOperand(0));
2359 MVT DestVT = TLI.getValueType(I.getType());
2360 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurDebugLoc(), DestVT, N));
2363 void SelectionDAGLowering::visitUIToFP(User &I) {
2364 // UIToFP is never a no-op cast, no need to check
2365 SDValue N = getValue(I.getOperand(0));
2366 MVT DestVT = TLI.getValueType(I.getType());
2367 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurDebugLoc(), DestVT, N));
2370 void SelectionDAGLowering::visitSIToFP(User &I){
2371 // SIToFP is never a no-op cast, no need to check
2372 SDValue N = getValue(I.getOperand(0));
2373 MVT DestVT = TLI.getValueType(I.getType());
2374 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurDebugLoc(), DestVT, N));
2377 void SelectionDAGLowering::visitPtrToInt(User &I) {
2378 // What to do depends on the size of the integer and the size of the pointer.
2379 // We can either truncate, zero extend, or no-op, accordingly.
2380 SDValue N = getValue(I.getOperand(0));
2381 MVT SrcVT = N.getValueType();
2382 MVT DestVT = TLI.getValueType(I.getType());
SDValue Result;
if (DestVT.bitsLT(SrcVT))
2385 Result = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N);
2387 // Note: ZERO_EXTEND can handle cases where the sizes are equal too
2388 Result = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N);
setValue(&I, Result);
}
2392 void SelectionDAGLowering::visitIntToPtr(User &I) {
2393 // What to do depends on the size of the integer and the size of the pointer.
2394 // We can either truncate, zero extend, or no-op, accordingly.
2395 SDValue N = getValue(I.getOperand(0));
2396 MVT SrcVT = N.getValueType();
2397 MVT DestVT = TLI.getValueType(I.getType());
2398 if (DestVT.bitsLT(SrcVT))
2399 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
2401 // Note: ZERO_EXTEND can handle cases where the sizes are equal too
setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
                         DestVT, N));
}
2406 void SelectionDAGLowering::visitBitCast(User &I) {
2407 SDValue N = getValue(I.getOperand(0));
2408 MVT DestVT = TLI.getValueType(I.getType());
2410 // BitCast assures us that source and destination are the same size so this
2411 // is either a BIT_CONVERT or a no-op.
2412 if (DestVT != N.getValueType())
2413 setValue(&I, DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
2414 DestVT, N)); // convert types
2416 setValue(&I, N); // noop cast.
2419 void SelectionDAGLowering::visitInsertElement(User &I) {
2420 SDValue InVec = getValue(I.getOperand(0));
2421 SDValue InVal = getValue(I.getOperand(1));
2422 SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
TLI.getPointerTy(),
getValue(I.getOperand(2)));
2426 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurDebugLoc(),
2427 TLI.getValueType(I.getType()),
2428 InVec, InVal, InIdx));
2431 void SelectionDAGLowering::visitExtractElement(User &I) {
2432 SDValue InVec = getValue(I.getOperand(0));
2433 SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
TLI.getPointerTy(),
getValue(I.getOperand(1)));
2436 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2437 TLI.getValueType(I.getType()), InVec, InIdx));
// Utility for visitShuffleVector - Returns true if the mask selects
// sequential elements starting from SIndx (undefs are allowed).
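// e.g. <0, 1, -1, 3> is sequential for SIndx 0; <4, 5, 6, 7> is sequential
// for SIndx 4 (the upper half of an 8-element pair).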
2443 static bool SequentialMask(SmallVectorImpl<int> &Mask, unsigned SIndx) {
2444 unsigned MaskNumElts = Mask.size();
2445 for (unsigned i = 0; i != MaskNumElts; ++i)
if ((Mask[i] >= 0) && (Mask[i] != (int)(i + SIndx)))
  return false;
return true;
}
2451 void SelectionDAGLowering::visitShuffleVector(User &I) {
2452 SmallVector<int, 8> Mask;
2453 SDValue Src1 = getValue(I.getOperand(0));
2454 SDValue Src2 = getValue(I.getOperand(1));
2456 // Convert the ConstantVector mask operand into an array of ints, with -1
2457 // representing undef values.
2458 SmallVector<Constant*, 8> MaskElts;
2459 cast<Constant>(I.getOperand(2))->getVectorElements(MaskElts);
2460 unsigned MaskNumElts = MaskElts.size();
2461 for (unsigned i = 0; i != MaskNumElts; ++i) {
if (isa<UndefValue>(MaskElts[i]))
  Mask.push_back(-1);
else
  Mask.push_back(cast<ConstantInt>(MaskElts[i])->getSExtValue());
}
2468 MVT VT = TLI.getValueType(I.getType());
2469 MVT SrcVT = Src1.getValueType();
2470 unsigned SrcNumElts = SrcVT.getVectorNumElements();
2472 if (SrcNumElts == MaskNumElts) {
setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
                                  &Mask[0]));
return;
}
2478 // Normalize the shuffle vector since mask and vector length don't match.
2479 if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) {
2480 // Mask is longer than the source vectors and is a multiple of the source
// vectors. We can use concatenate vector to make the mask and vectors
// lengths match.
2483 if (SrcNumElts*2 == MaskNumElts && SequentialMask(Mask, 0)) {
2484 // The shuffle is concatenating two vectors together.
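// e.g. shufflevector <2 x i32> %a, <2 x i32> %b, <4 x i32> <0, 1, 2, 3>
// is exactly concat(%a, %b).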
setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(),
                         VT, Src1, Src2));
return;
}
2490 // Pad both vectors with undefs to make them the same length as the mask.
2491 unsigned NumConcat = MaskNumElts / SrcNumElts;
2492 bool Src1U = Src1.getOpcode() == ISD::UNDEF;
2493 bool Src2U = Src2.getOpcode() == ISD::UNDEF;
2494 SDValue UndefVal = DAG.getUNDEF(SrcVT);
2496 SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
MOps1[0] = Src1;
MOps2[0] = Src2;
2501 Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2502 getCurDebugLoc(), VT,
2503 &MOps1[0], NumConcat);
2504 Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2505 getCurDebugLoc(), VT,
2506 &MOps2[0], NumConcat);
2508 // Readjust mask for new input vector length.
2509 SmallVector<int, 8> MappedOps;
2510 for (unsigned i = 0; i != MaskNumElts; ++i) {
int Idx = Mask[i];
if (Idx < (int)SrcNumElts)
  MappedOps.push_back(Idx);
else
  MappedOps.push_back(Idx + MaskNumElts - SrcNumElts);
}
setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
                                  &MappedOps[0]));
return;
}
2522 if (SrcNumElts > MaskNumElts) {
2523 // Analyze the access pattern of the vector to see if we can extract
2524 // two subvectors and do the shuffle. The analysis is done by calculating
2525 // the range of elements the mask access on both vectors.
2526 int MinRange[2] = { SrcNumElts+1, SrcNumElts+1};
2527 int MaxRange[2] = {-1, -1};
2529 for (unsigned i = 0; i != MaskNumElts; ++i) {
int Idx = Mask[i];
int Input = 0;
if (Idx < 0)
  continue;

if (Idx >= (int)SrcNumElts) {
  Input = 1;
  Idx -= SrcNumElts;
}
2539 if (Idx > MaxRange[Input])
2540 MaxRange[Input] = Idx;
2541 if (Idx < MinRange[Input])
2542 MinRange[Input] = Idx;
2545 // Check if the access is smaller than the vector size and can we find
2546 // a reasonable extract index.
2547 int RangeUse[2] = { 2, 2 }; // 0 = Unused, 1 = Extract, 2 = Can not Extract.
2548 int StartIdx[2]; // StartIdx to extract from
2549 for (int Input=0; Input < 2; ++Input) {
2550 if (MinRange[Input] == (int)(SrcNumElts+1) && MaxRange[Input] == -1) {
2551 RangeUse[Input] = 0; // Unused
2552 StartIdx[Input] = 0;
2553 } else if (MaxRange[Input] - MinRange[Input] < (int)MaskNumElts) {
2554 // Fits within range but we should see if we can find a good
2555 // start index that is a multiple of the mask length.
2556 if (MaxRange[Input] < (int)MaskNumElts) {
2557 RangeUse[Input] = 1; // Extract from beginning of the vector
2558 StartIdx[Input] = 0;
} else {
  StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
2561 if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
2562 StartIdx[Input] + MaskNumElts < SrcNumElts)
RangeUse[Input] = 1; // Extract from a multiple of the mask length.
}
}
}
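// e.g. a v16 source shuffled by a v4 mask that touches only elements 4..7
// of input 0 gets RangeUse[0] = 1 with StartIdx[0] = 4.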
if (RangeUse[0] == 0 && RangeUse[1] == 0) {
  setValue(&I, DAG.getUNDEF(VT));  // Vectors are not used.
  return;
}
else if (RangeUse[0] < 2 && RangeUse[1] < 2) {
2573 // Extract appropriate subvector and generate a vector shuffle
2574 for (int Input=0; Input < 2; ++Input) {
2575 SDValue& Src = Input == 0 ? Src1 : Src2;
2576 if (RangeUse[Input] == 0) {
Src = DAG.getUNDEF(VT);
} else {
2579 Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, getCurDebugLoc(), VT,
Src, DAG.getIntPtrConstant(StartIdx[Input]));
}
}
2583 // Calculate new mask.
2584 SmallVector<int, 8> MappedOps;
2585 for (unsigned i = 0; i != MaskNumElts; ++i) {
int Idx = Mask[i];
if (Idx < 0)
  MappedOps.push_back(Idx);
else if (Idx < (int)SrcNumElts)
  MappedOps.push_back(Idx - StartIdx[0]);
else
  MappedOps.push_back(Idx - SrcNumElts - StartIdx[1] + MaskNumElts);
}
setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
                                  &MappedOps[0]));
return;
}
}
// We can't use either concat vectors or extract subvectors so fall back to
// replacing the shuffle with extract and build vector.
2603 MVT EltVT = VT.getVectorElementType();
2604 MVT PtrVT = TLI.getPointerTy();
2605 SmallVector<SDValue,8> Ops;
2606 for (unsigned i = 0; i != MaskNumElts; ++i) {
if (Mask[i] < 0) {
  Ops.push_back(DAG.getUNDEF(EltVT));
} else {
  int Idx = Mask[i];
  if (Idx < (int)SrcNumElts)
2612 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2613 EltVT, Src1, DAG.getConstant(Idx, PtrVT)));
2615 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
EltVT, Src2,
DAG.getConstant(Idx - SrcNumElts, PtrVT)));
}
}
2620 setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
VT, &Ops[0], Ops.size()));
}
2624 void SelectionDAGLowering::visitInsertValue(InsertValueInst &I) {
2625 const Value *Op0 = I.getOperand(0);
2626 const Value *Op1 = I.getOperand(1);
2627 const Type *AggTy = I.getType();
2628 const Type *ValTy = Op1->getType();
2629 bool IntoUndef = isa<UndefValue>(Op0);
2630 bool FromUndef = isa<UndefValue>(Op1);
2632 unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2633 I.idx_begin(), I.idx_end());
2635 SmallVector<MVT, 4> AggValueVTs;
2636 ComputeValueVTs(TLI, AggTy, AggValueVTs);
2637 SmallVector<MVT, 4> ValValueVTs;
2638 ComputeValueVTs(TLI, ValTy, ValValueVTs);
2640 unsigned NumAggValues = AggValueVTs.size();
2641 unsigned NumValValues = ValValueVTs.size();
2642 SmallVector<SDValue, 4> Values(NumAggValues);
2644 SDValue Agg = getValue(Op0);
SDValue Val = getValue(Op1);

unsigned i = 0;
2647 // Copy the beginning value(s) from the original aggregate.
2648 for (; i != LinearIndex; ++i)
2649 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2650 SDValue(Agg.getNode(), Agg.getResNo() + i);
2651 // Copy values from the inserted value(s).
2652 for (; i != LinearIndex + NumValValues; ++i)
2653 Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2654 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
2655 // Copy remaining value(s) from the original aggregate.
2656 for (; i != NumAggValues; ++i)
2657 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2658 SDValue(Agg.getNode(), Agg.getResNo() + i);
2660 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2661 DAG.getVTList(&AggValueVTs[0], NumAggValues),
&Values[0], NumAggValues));
}
2665 void SelectionDAGLowering::visitExtractValue(ExtractValueInst &I) {
2666 const Value *Op0 = I.getOperand(0);
2667 const Type *AggTy = Op0->getType();
2668 const Type *ValTy = I.getType();
2669 bool OutOfUndef = isa<UndefValue>(Op0);
2671 unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2672 I.idx_begin(), I.idx_end());
2674 SmallVector<MVT, 4> ValValueVTs;
2675 ComputeValueVTs(TLI, ValTy, ValValueVTs);
2677 unsigned NumValValues = ValValueVTs.size();
2678 SmallVector<SDValue, 4> Values(NumValValues);
2680 SDValue Agg = getValue(Op0);
2681 // Copy out the selected value(s).
2682 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
Values[i - LinearIndex] =
  OutOfUndef ?
2685 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
2686 SDValue(Agg.getNode(), Agg.getResNo() + i);
2688 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2689 DAG.getVTList(&ValValueVTs[0], NumValValues),
&Values[0], NumValValues));
}
2694 void SelectionDAGLowering::visitGetElementPtr(User &I) {
2695 SDValue N = getValue(I.getOperand(0));
2696 const Type *Ty = I.getOperand(0)->getType();
for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
     OI != E; ++OI) {
  Value *Idx = *OI;
2701 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
2702 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
if (Field) {
  // N = N + Offset
  uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field);
  N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
                  DAG.getIntPtrConstant(Offset));
}

Ty = StTy->getElementType(Field);
} else {
Ty = cast<SequentialType>(Ty)->getElementType();
2713 // If this is a constant subscript, handle it quickly.
2714 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->getZExtValue() == 0) continue;
uint64_t Offs =
    TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
SDValue OffsVal;
unsigned PtrBits = TLI.getPointerTy().getSizeInBits();
if (PtrBits < 64) {
  OffsVal = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
                        TLI.getPointerTy(),
                        DAG.getConstant(Offs, MVT::i64));
} else
  OffsVal = DAG.getIntPtrConstant(Offs);
N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
                OffsVal);
continue;
}
2731 // N = N + Idx * ElementSize;
2732 uint64_t ElementSize = TD->getTypeAllocSize(Ty);
2733 SDValue IdxN = getValue(Idx);
// If the index is smaller or larger than intptr_t, truncate or extend
// it.
2737 if (IdxN.getValueType().bitsLT(N.getValueType()))
2738 IdxN = DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(),
2739 N.getValueType(), IdxN);
2740 else if (IdxN.getValueType().bitsGT(N.getValueType()))
2741 IdxN = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2742 N.getValueType(), IdxN);
2744 // If this is a multiply by a power of two, turn it into a shl
2745 // immediately. This is a very common case.
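// e.g. indexing an array of i32 scales the index by 4, i.e. IdxN << 2.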
2746 if (ElementSize != 1) {
2747 if (isPowerOf2_64(ElementSize)) {
2748 unsigned Amt = Log2_64(ElementSize);
2749 IdxN = DAG.getNode(ISD::SHL, getCurDebugLoc(),
2750 N.getValueType(), IdxN,
2751 DAG.getConstant(Amt, TLI.getPointerTy()));
} else {
  SDValue Scale = DAG.getIntPtrConstant(ElementSize);
  IdxN = DAG.getNode(ISD::MUL, getCurDebugLoc(),
                     N.getValueType(), IdxN, Scale);
}
}
2759 N = DAG.getNode(ISD::ADD, getCurDebugLoc(),
N.getValueType(), N, IdxN);
}
}

setValue(&I, N);
}
2766 void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
2767 // If this is a fixed sized alloca in the entry block of the function,
2768 // allocate it statically on the stack.
2769 if (FuncInfo.StaticAllocaMap.count(&I))
2770 return; // getValue will auto-populate this.
2772 const Type *Ty = I.getAllocatedType();
2773 uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
unsigned Align =
  std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
           I.getAlignment());
2778 SDValue AllocSize = getValue(I.getArraySize());
2780 AllocSize = DAG.getNode(ISD::MUL, getCurDebugLoc(), AllocSize.getValueType(),
AllocSize,
DAG.getConstant(TySize, AllocSize.getValueType()));
2786 MVT IntPtr = TLI.getPointerTy();
2787 if (IntPtr.bitsLT(AllocSize.getValueType()))
AllocSize = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
                        IntPtr, AllocSize);
2790 else if (IntPtr.bitsGT(AllocSize.getValueType()))
AllocSize = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
                        IntPtr, AllocSize);
2794 // Handle alignment. If the requested alignment is less than or equal to
2795 // the stack alignment, ignore it. If the size is greater than or equal to
2796 // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
2797 unsigned StackAlign =
2798 TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
if (Align <= StackAlign)
  Align = 0;
2802 // Round the size of the allocation up to the stack alignment size
// by adding SA-1 to the size.
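// e.g. with StackAlign 16, a request of 40 bytes becomes (40+15) & ~15 = 48.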
2804 AllocSize = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2805 AllocSize.getValueType(), AllocSize,
2806 DAG.getIntPtrConstant(StackAlign-1));
2807 // Mask out the low bits for alignment purposes.
2808 AllocSize = DAG.getNode(ISD::AND, getCurDebugLoc(),
2809 AllocSize.getValueType(), AllocSize,
2810 DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1)));
2812 SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
2813 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, getCurDebugLoc(),
                          VTs, Ops, 3);
setValue(&I, DSA);
DAG.setRoot(DSA.getValue(1));
// Inform the Frame Information that we have just allocated a variable-sized
// object.
CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
}
2824 void SelectionDAGLowering::visitLoad(LoadInst &I) {
2825 const Value *SV = I.getOperand(0);
2826 SDValue Ptr = getValue(SV);
2828 const Type *Ty = I.getType();
2829 bool isVolatile = I.isVolatile();
2830 unsigned Alignment = I.getAlignment();
2832 SmallVector<MVT, 4> ValueVTs;
2833 SmallVector<uint64_t, 4> Offsets;
2834 ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
unsigned NumValues = ValueVTs.size();
if (NumValues == 0)
  return;

SDValue Root;
bool ConstantMemory = false;
if (I.isVolatile())
  // Serialize volatile loads with other side effects.
  Root = getRoot();
else if (AA->pointsToConstantMemory(SV)) {
2845 // Do not serialize (non-volatile) loads of constant memory with anything.
2846 Root = DAG.getEntryNode();
2847 ConstantMemory = true;
} else {
  // Do not serialize non-volatile loads against each other.
  Root = DAG.getRoot();
}
2853 SmallVector<SDValue, 4> Values(NumValues);
2854 SmallVector<SDValue, 4> Chains(NumValues);
2855 MVT PtrVT = Ptr.getValueType();
2856 for (unsigned i = 0; i != NumValues; ++i) {
2857 SDValue L = DAG.getLoad(ValueVTs[i], getCurDebugLoc(), Root,
DAG.getNode(ISD::ADD, getCurDebugLoc(),
            PtrVT, Ptr,
            DAG.getConstant(Offsets[i], PtrVT)),
SV, Offsets[i],
isVolatile, Alignment);
Values[i] = L;
Chains[i] = L.getValue(1);
}
2867 if (!ConstantMemory) {
SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
                            MVT::Other, &Chains[0], NumValues);
if (isVolatile)
  DAG.setRoot(Chain);
else
  PendingLoads.push_back(Chain);
}
2877 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2878 DAG.getVTList(&ValueVTs[0], NumValues),
&Values[0], NumValues));
}
2883 void SelectionDAGLowering::visitStore(StoreInst &I) {
2884 Value *SrcV = I.getOperand(0);
2885 Value *PtrV = I.getOperand(1);
2887 SmallVector<MVT, 4> ValueVTs;
2888 SmallVector<uint64_t, 4> Offsets;
2889 ComputeValueVTs(TLI, SrcV->getType(), ValueVTs, &Offsets);
unsigned NumValues = ValueVTs.size();
if (NumValues == 0)
  return;
2894 // Get the lowered operands. Note that we do this after
2895 // checking if NumResults is zero, because with zero results
2896 // the operands won't have values in the map.
2897 SDValue Src = getValue(SrcV);
2898 SDValue Ptr = getValue(PtrV);
2900 SDValue Root = getRoot();
2901 SmallVector<SDValue, 4> Chains(NumValues);
2902 MVT PtrVT = Ptr.getValueType();
2903 bool isVolatile = I.isVolatile();
2904 unsigned Alignment = I.getAlignment();
2905 for (unsigned i = 0; i != NumValues; ++i)
2906 Chains[i] = DAG.getStore(Root, getCurDebugLoc(),
2907 SDValue(Src.getNode(), Src.getResNo() + i),
DAG.getNode(ISD::ADD, getCurDebugLoc(),
            PtrVT, Ptr,
            DAG.getConstant(Offsets[i], PtrVT)),
PtrV, Offsets[i],
isVolatile, Alignment);
2914 DAG.setRoot(DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
MVT::Other, &Chains[0], NumValues));
}
/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
/// node.
2920 void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
2921 unsigned Intrinsic) {
2922 bool HasChain = !I.doesNotAccessMemory();
2923 bool OnlyLoad = HasChain && I.onlyReadsMemory();
2925 // Build the operand list.
2926 SmallVector<SDValue, 8> Ops;
2927 if (HasChain) { // If this intrinsic has side-effects, chainify it.
if (OnlyLoad) {
  // We don't need to serialize loads against other loads.
  Ops.push_back(DAG.getRoot());
} else {
  Ops.push_back(getRoot());
}
}
// Info is set by getTgtMemIntrinsic.
2937 TargetLowering::IntrinsicInfo Info;
2938 bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);
2940 // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
2941 if (!IsTgtIntrinsic)
2942 Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
2944 // Add all operands of the call to the operand list.
2945 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
2946 SDValue Op = getValue(I.getOperand(i));
2947 assert(TLI.isTypeLegal(Op.getValueType()) &&
2948 "Intrinsic uses a non-legal type?");
2952 std::vector<MVT> VTArray;
2953 if (I.getType() != Type::VoidTy) {
2954 MVT VT = TLI.getValueType(I.getType());
2955 if (VT.isVector()) {
2956 const VectorType *DestTy = cast<VectorType>(I.getType());
2957 MVT EltVT = TLI.getValueType(DestTy->getElementType());
2959 VT = MVT::getVectorVT(EltVT, DestTy->getNumElements());
assert(VT != MVT::Other && "Intrinsic uses a non-legal type?");
}

assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?");
VTArray.push_back(VT);
}

if (HasChain)
  VTArray.push_back(MVT::Other);
2969 SDVTList VTs = DAG.getVTList(&VTArray[0], VTArray.size());
// Create the node.
SDValue Result;
if (IsTgtIntrinsic) {
2974 // This is target intrinsic that touches memory
2975 Result = DAG.getMemIntrinsicNode(Info.opc, getCurDebugLoc(),
2976 VTs, &Ops[0], Ops.size(),
2977 Info.memVT, Info.ptrVal, Info.offset,
2978 Info.align, Info.vol,
2979 Info.readMem, Info.writeMem);
} else if (!HasChain)
  Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurDebugLoc(),
                       VTs, &Ops[0], Ops.size());
2983 VTs, &Ops[0], Ops.size());
2984 else if (I.getType() != Type::VoidTy)
2985 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurDebugLoc(),
2986 VTs, &Ops[0], Ops.size());
2988 Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurDebugLoc(),
2989 VTs, &Ops[0], Ops.size());
if (HasChain) {
  SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
  if (OnlyLoad)
    PendingLoads.push_back(Chain);
  else
    DAG.setRoot(Chain);
}
2998 if (I.getType() != Type::VoidTy) {
2999 if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
3000 MVT VT = TLI.getValueType(PTy);
Result = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), VT, Result);
}
setValue(&I, Result);
}
}
3007 /// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
3008 static GlobalVariable *ExtractTypeInfo(Value *V) {
3009 V = V->stripPointerCasts();
3010 GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3011 assert ((GV || isa<ConstantPointerNull>(V)) &&
3012 "TypeInfo must be a global variable or NULL");
3018 /// AddCatchInfo - Extract the personality and type infos from an eh.selector
3019 /// call, and add them to the specified machine basic block.
3020 void AddCatchInfo(CallInst &I, MachineModuleInfo *MMI,
3021 MachineBasicBlock *MBB) {
3022 // Inform the MachineModuleInfo of the personality for this landing pad.
3023 ConstantExpr *CE = cast<ConstantExpr>(I.getOperand(2));
3024 assert(CE->getOpcode() == Instruction::BitCast &&
3025 isa<Function>(CE->getOperand(0)) &&
3026 "Personality should be a function");
3027 MMI->addPersonality(MBB, cast<Function>(CE->getOperand(0)));
3029 // Gather all the type infos for this landing pad and pass them along to
3030 // MachineModuleInfo.
3031 std::vector<GlobalVariable *> TyInfo;
3032 unsigned N = I.getNumOperands();
3034 for (unsigned i = N - 1; i > 2; --i) {
3035 if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(i))) {
3036 unsigned FilterLength = CI->getZExtValue();
3037 unsigned FirstCatch = i + FilterLength + !FilterLength;
3038 assert (FirstCatch <= N && "Invalid filter length");
3040 if (FirstCatch < N) {
3041 TyInfo.reserve(N - FirstCatch);
3042 for (unsigned j = FirstCatch; j < N; ++j)
3043 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
MMI->addCatchTypeInfo(MBB, TyInfo);
TyInfo.clear();
}
if (!FilterLength) {
  // Cleanup.
  MMI->addCleanup(MBB);
} else {
  // Filter.
  TyInfo.reserve(FilterLength - 1);
3054 for (unsigned j = i + 1; j < FirstCatch; ++j)
3055 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
MMI->addFilterTypeInfo(MBB, TyInfo);
TyInfo.clear();
}

N = i;
}
}

if (N > 3) {
// Catch.
3065 TyInfo.reserve(N - 3);
3066 for (unsigned j = 3; j < N; ++j)
3067 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
MMI->addCatchTypeInfo(MBB, TyInfo);
}
}
3074 /// GetSignificand - Get the significand and build it into a floating-point
3075 /// number with exponent of 1:
3077 /// Op = (Op & 0x007fffff) | 0x3f800000;
/// where Op is the i32 bit representation of the floating-point value.
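///
/// e.g. Op = 0x40490fdb (3.14159f) yields 0x3fc90fdb (1.5707964f): same
/// mantissa, exponent forced to 0 (biased 127).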
static SDValue
GetSignificand(SelectionDAG &DAG, SDValue Op, DebugLoc dl) {
3082 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3083 DAG.getConstant(0x007fffff, MVT::i32));
3084 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
3085 DAG.getConstant(0x3f800000, MVT::i32));
return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t2);
}
3089 /// GetExponent - Get the exponent:
3091 /// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
/// where Op is the i32 bit representation of the floating-point value.
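///
/// e.g. Op = 0x40490fdb (3.14159f): ((0x40490fdb & 0x7f800000) >> 23) - 127
/// = 128 - 127 = 1, giving 1.0f.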
static SDValue
GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
            DebugLoc dl) {
3097 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3098 DAG.getConstant(0x7f800000, MVT::i32));
3099 SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0,
3100 DAG.getConstant(23, TLI.getPointerTy()));
3101 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
3102 DAG.getConstant(127, MVT::i32));
return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
}
3106 /// getF32Constant - Get 32-bit floating point constant.
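/// e.g. getF32Constant(DAG, 0x3f800000) yields 1.0f.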
static SDValue
getF32Constant(SelectionDAG &DAG, unsigned Flt) {
return DAG.getConstantFP(APFloat(APInt(32, Flt)), MVT::f32);
}
3112 /// Inlined utility function to implement binary input atomic intrinsics for
3113 /// visitIntrinsicCall: I is a call instruction
3114 /// Op is the associated NodeType for I
const char *
SelectionDAGLowering::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) {
  SDValue Root = getRoot();
  SDValue L =
    DAG.getAtomic(Op, getCurDebugLoc(),
                  getValue(I.getOperand(2)).getValueType().getSimpleVT(),
                  Root,
                  getValue(I.getOperand(1)),
                  getValue(I.getOperand(2)),
                  I.getOperand(1));
  setValue(&I, L);
  DAG.setRoot(L.getValue(1));
  return 0;
}
// implVisitAluOverflow - Lower arithmetic overflow intrinsics.
const char *
SelectionDAGLowering::implVisitAluOverflow(CallInst &I, ISD::NodeType Op) {
3133 SDValue Op1 = getValue(I.getOperand(1));
3134 SDValue Op2 = getValue(I.getOperand(2));
3136 SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
3137 SDValue Result = DAG.getNode(Op, getCurDebugLoc(), VTs, Op1, Op2);
setValue(&I, Result);
return 0;
}
3143 /// visitExp - Lower an exp intrinsic. Handles the special sequences for
3144 /// limited-precision mode.
void
SelectionDAGLowering::visitExp(CallInst &I) {
  SDValue result;
  DebugLoc dl = getCurDebugLoc();
3150 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3151 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3152 SDValue Op = getValue(I.getOperand(1));
// Put the exponent in the right bit position for later addition to the
// final result:
//
3157 // #define LOG2OFe 1.4426950f
3158 // IntegerPartOfX = ((int32_t)(X * LOG2OFe));
3159 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3160 getF32Constant(DAG, 0x3fb8aa3b));
3161 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3163 // FractionalPartOfX = (X * LOG2OFe) - (float)IntegerPartOfX;
3164 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3165 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3167 // IntegerPartOfX <<= 23;
3168 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3169 DAG.getConstant(23, TLI.getPointerTy()));
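// Adding n << 23 to the bit pattern of a float multiplies it by 2^n (as long
// as the exponent stays in range), so 2^frac can be scaled by
// 2^IntegerPartOfX with a single integer add below.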
3171 if (LimitFloatPrecision <= 6) {
3172 // For floating-point precision of 6:
3174 // TwoToFractionalPartOfX =
//   0.997535578f +
//     (0.735607626f + 0.252464424f * x) * x;
3178 // error 0.0144103317, which is 6 bits
3179 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3180 getF32Constant(DAG, 0x3e814304));
3181 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3182 getF32Constant(DAG, 0x3f3c50c8));
3183 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3184 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3185 getF32Constant(DAG, 0x3f7f5e7e));
3186 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t5);
3188 // Add the exponent into the result in integer domain.
3189 SDValue t6 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3190 TwoToFracPartOfX, IntegerPartOfX);
3192 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t6);
3193 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3194 // For floating-point precision of 12:
3196 // TwoToFractionalPartOfX =
//   0.999892986f +
//     (0.696457318f +
//       (0.224338339f + 0.792043434e-1f * x) * x) * x;
3201 // 0.000107046256 error, which is 13 to 14 bits
3202 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3203 getF32Constant(DAG, 0x3da235e3));
3204 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3205 getF32Constant(DAG, 0x3e65b8f3));
3206 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3207 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3208 getF32Constant(DAG, 0x3f324b07));
3209 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3210 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3211 getF32Constant(DAG, 0x3f7ff8fd));
3212 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t7);
3214 // Add the exponent into the result in integer domain.
3215 SDValue t8 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3216 TwoToFracPartOfX, IntegerPartOfX);
3218 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t8);
3219 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3220 // For floating-point precision of 18:
3222 // TwoToFractionalPartOfX =
//   0.999999982f +
//     (0.693148872f +
//       (0.240227044f +
//         (0.554906021e-1f +
3227 // (0.961591928e-2f +
3228 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3230 // error 2.47208000*10^(-7), which is better than 18 bits
3231 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3232 getF32Constant(DAG, 0x3924b03e));
3233 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3234 getF32Constant(DAG, 0x3ab24b87));
3235 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3236 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3237 getF32Constant(DAG, 0x3c1d8c17));
3238 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3239 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3240 getF32Constant(DAG, 0x3d634a1d));
3241 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3242 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3243 getF32Constant(DAG, 0x3e75fe14));
3244 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3245 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3246 getF32Constant(DAG, 0x3f317234));
3247 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3248 SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3249 getF32Constant(DAG, 0x3f800000));
SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,
                                       MVT::i32, t13);
3253 // Add the exponent into the result in integer domain.
3254 SDValue t14 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3255 TwoToFracPartOfX, IntegerPartOfX);
3257 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t14);
}
} else {
  // No special expansion.
3261 result = DAG.getNode(ISD::FEXP, dl,
3262 getValue(I.getOperand(1)).getValueType(),
getValue(I.getOperand(1)));
}

setValue(&I, result);
}
3269 /// visitLog - Lower a log intrinsic. Handles the special sequences for
3270 /// limited-precision mode.
void
SelectionDAGLowering::visitLog(CallInst &I) {
  SDValue result;
  DebugLoc dl = getCurDebugLoc();
3276 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3277 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3278 SDValue Op = getValue(I.getOperand(1));
3279 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3281 // Scale the exponent by log(2) [0.69314718f].
3282 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
3283 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3284 getF32Constant(DAG, 0x3f317218));
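// log(2^e * m) = e * ln(2) + log(m), so the scaled exponent and the log of
// the mantissa (computed below) just add; 0x3f317218 is ln(2) = 0.69314718f.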
// Get the significand and build it into a floating-point number with
// exponent of 1.
3288 SDValue X = GetSignificand(DAG, Op1, dl);
3290 if (LimitFloatPrecision <= 6) {
3291 // For floating-point precision of 6:
//
// LogofMantissa =
//   -1.1609546f +
//     (1.4034025f - 0.23903021f * x) * x;
3297 // error 0.0034276066, which is better than 8 bits
3298 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3299 getF32Constant(DAG, 0xbe74c456));
3300 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3301 getF32Constant(DAG, 0x3fb3a2b1));
3302 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3303 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3304 getF32Constant(DAG, 0x3f949a29));
3306 result = DAG.getNode(ISD::FADD, dl,
3307 MVT::f32, LogOfExponent, LogOfMantissa);
3308 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3309 // For floating-point precision of 12:
3315 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
3317 // error 0.000061011436, which is 14 bits
3318 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3319 getF32Constant(DAG, 0xbd67b6d6));
3320 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3321 getF32Constant(DAG, 0x3ee4f4b8));
3322 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3323 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3324 getF32Constant(DAG, 0x3fbc278b));
3325 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3326 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3327 getF32Constant(DAG, 0x40348e95));
3328 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3329 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3330 getF32Constant(DAG, 0x3fdef31a));
3332 result = DAG.getNode(ISD::FADD, dl,
3333 MVT::f32, LogOfExponent, LogOfMantissa);
3334 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3335 // For floating-point precision of 18:
3343 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
3345 // error 0.0000023660568, which is better than 18 bits
3346 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3347 getF32Constant(DAG, 0xbc91e5ac));
3348 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3349 getF32Constant(DAG, 0x3e4350aa));
3350 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3351 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3352 getF32Constant(DAG, 0x3f60d3e3));
3353 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3354 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3355 getF32Constant(DAG, 0x4011cdf0));
3356 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3357 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3358 getF32Constant(DAG, 0x406cfd1c));
3359 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3360 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3361 getF32Constant(DAG, 0x408797cb));
3362 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3363 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3364 getF32Constant(DAG, 0x4006dcab));
3366 result = DAG.getNode(ISD::FADD, dl,
3367 MVT::f32, LogOfExponent, LogOfMantissa);
3370 // No special expansion.
3371 result = DAG.getNode(ISD::FLOG, dl,
3372 getValue(I.getOperand(1)).getValueType(),
3373 getValue(I.getOperand(1)));
3376 setValue(&I, result);
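// Note: visitLog above and visitLog2/visitLog10 below share one decomposition.
// Writing the input as x = 2^e * m with m in [1,2) gives
//
//   log_b(x) = e * log_b(2) + log_b(m)
//
// GetExponent extracts e and GetSignificand rebuilds m; the FMUL scales e by
// log_b(2) (ln 2 here, log10(2) in visitLog10, and exactly 1 in visitLog2,
// which therefore skips the multiply), and log_b(m) is evaluated with a
// minimax polynomial over [1,2) matched to the requested precision.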
/// visitLog2 - Lower a log2 intrinsic. Handles the special sequences for
/// limited-precision mode.
void
SelectionDAGLowering::visitLog2(CallInst &I) {
  SDValue result;
  DebugLoc dl = getCurDebugLoc();

  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
    SDValue Op = getValue(I.getOperand(1));
    SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);

    // Get the exponent.
    SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);

    // Get the significand and build it into a floating-point number with
    // exponent of 1.
    SDValue X = GetSignificand(DAG, Op1, dl);

    // Different possible minimax approximations of significand in
    // floating-point for various degrees of accuracy over [1,2].
    if (LimitFloatPrecision <= 6) {
      // For floating-point precision of 6:
      //
      //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
      //
      //   error 0.0049451742, which is more than 7 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbeb08fe0));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x40019463));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                                           getF32Constant(DAG, 0x3fd6633d));

      result = DAG.getNode(ISD::FADD, dl,
                           MVT::f32, LogOfExponent, Log2ofMantissa);
    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
      // For floating-point precision of 12:
      //
      //   Log2ofMantissa =
      //     -2.51285454f +
      //       (4.07009056f +
      //         (-2.12067489f +
      //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
      //
      //   error 0.0000876136000, which is better than 13 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbda7262e));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3f25280b));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x4007b923));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x40823e2f));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
                                           getF32Constant(DAG, 0x4020d29c));

      result = DAG.getNode(ISD::FADD, dl,
                           MVT::f32, LogOfExponent, Log2ofMantissa);
    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
      // For floating-point precision of 18:
      //
      //   Log2ofMantissa =
      //     -3.0400495f +
      //       (6.1129976f +
      //         (-5.3420409f +
      //           (3.2865683f +
      //             (-1.2669343f +
      //               (0.27515199f -
      //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
      //
      //   error 0.0000018516, which is better than 18 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbcd2769e));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3e8ce0b9));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3fa22ae7));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x40525723));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
                               getF32Constant(DAG, 0x40aaf200));
      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
                               getF32Constant(DAG, 0x40c39dad));
      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
      SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
                                           getF32Constant(DAG, 0x4042902c));

      result = DAG.getNode(ISD::FADD, dl,
                           MVT::f32, LogOfExponent, Log2ofMantissa);
    }
  } else {
    // No special expansion.
    result = DAG.getNode(ISD::FLOG2, dl,
                         getValue(I.getOperand(1)).getValueType(),
                         getValue(I.getOperand(1)));
  }

  setValue(&I, result);
}
/// visitLog10 - Lower a log10 intrinsic. Handles the special sequences for
/// limited-precision mode.
void
SelectionDAGLowering::visitLog10(CallInst &I) {
  SDValue result;
  DebugLoc dl = getCurDebugLoc();

  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
    SDValue Op = getValue(I.getOperand(1));
    SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);

    // Scale the exponent by log10(2) [0.30102999f].
    SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
    SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
                                        getF32Constant(DAG, 0x3e9a209a));

    // Get the significand and build it into a floating-point number with
    // exponent of 1.
    SDValue X = GetSignificand(DAG, Op1, dl);

    if (LimitFloatPrecision <= 6) {
      // For floating-point precision of 6:
      //
      //   Log10ofMantissa =
      //     -0.50419619f +
      //       (0.60948995f - 0.10380950f * x) * x;
      //
      //   error 0.0014886165, which is 6 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbdd49a13));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3f1c0789));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                                            getF32Constant(DAG, 0x3f011300));

      result = DAG.getNode(ISD::FADD, dl,
                           MVT::f32, LogOfExponent, Log10ofMantissa);
    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
      // For floating-point precision of 12:
      //
      //   Log10ofMantissa =
      //     -0.64831180f +
      //       (0.91751397f +
      //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
      //
      //   error 0.00019228036, which is better than 12 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3d431f31));
      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3ea21fb2));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3f6ae232));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
                                            getF32Constant(DAG, 0x3f25f7c3));

      result = DAG.getNode(ISD::FADD, dl,
                           MVT::f32, LogOfExponent, Log10ofMantissa);
    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
      // For floating-point precision of 18:
      //
      //   Log10ofMantissa =
      //     -0.84299375f +
      //       (1.5327582f +
      //         (-1.0688956f +
      //           (0.49102474f +
      //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
      //
      //   error 0.0000037995730, which is better than 18 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3c5d51ce));
      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3e00685a));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3efb6798));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x3f88d192));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
                               getF32Constant(DAG, 0x3fc4316c));
      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
      SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
                                            getF32Constant(DAG, 0x3f57ce70));

      result = DAG.getNode(ISD::FADD, dl,
                           MVT::f32, LogOfExponent, Log10ofMantissa);
    }
  } else {
    // No special expansion.
    result = DAG.getNode(ISD::FLOG10, dl,
                         getValue(I.getOperand(1)).getValueType(),
                         getValue(I.getOperand(1)));
  }

  setValue(&I, result);
}
/// visitExp2 - Lower an exp2 intrinsic. Handles the special sequences for
/// limited-precision mode.
void
SelectionDAGLowering::visitExp2(CallInst &I) {
  SDValue result;
  DebugLoc dl = getCurDebugLoc();

  if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
    SDValue Op = getValue(I.getOperand(1));

    SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op);

    //   FractionalPartOfX = x - (float)IntegerPartOfX;
    SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
    SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, Op, t1);

    //   IntegerPartOfX <<= 23;
    IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
                                 DAG.getConstant(23, TLI.getPointerTy()));

    if (LimitFloatPrecision <= 6) {
      // For floating-point precision of 6:
      //
      //   TwoToFractionalPartOfX =
      //     0.997535578f +
      //       (0.735607626f + 0.252464424f * x) * x;
      //
      //   error 0.0144103317, which is 6 bits
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3e814304));
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3f3c50c8));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x3f7f5e7e));
      SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
      SDValue TwoToFractionalPartOfX =
        DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);

      result = DAG.getNode(ISD::BIT_CONVERT, dl,
                           MVT::f32, TwoToFractionalPartOfX);
    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
      // For floating-point precision of 12:
      //
      //   TwoToFractionalPartOfX =
      //     0.999892986f +
      //       (0.696457318f +
      //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
      //
      //   error 0.000107046256, which is 13 to 14 bits
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3da235e3));
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3e65b8f3));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x3f324b07));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
                               getF32Constant(DAG, 0x3f7ff8fd));
      SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
      SDValue TwoToFractionalPartOfX =
        DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);

      result = DAG.getNode(ISD::BIT_CONVERT, dl,
                           MVT::f32, TwoToFractionalPartOfX);
    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
      // For floating-point precision of 18:
      //
      //   TwoToFractionalPartOfX =
      //     0.999999982f +
      //       (0.693148872f +
      //         (0.240227044f +
      //           (0.554906021e-1f +
      //             (0.961591928e-2f +
      //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
      //
      //   error 2.47208000*10^(-7), which is better than 18 bits
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3924b03e));
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3ab24b87));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x3c1d8c17));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
                               getF32Constant(DAG, 0x3d634a1d));
      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
                               getF32Constant(DAG, 0x3e75fe14));
      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
      SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
                                getF32Constant(DAG, 0x3f317234));
      SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
      SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
                                getF32Constant(DAG, 0x3f800000));
      SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
      SDValue TwoToFractionalPartOfX =
        DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);

      result = DAG.getNode(ISD::BIT_CONVERT, dl,
                           MVT::f32, TwoToFractionalPartOfX);
    }
  } else {
    // No special expansion.
    result = DAG.getNode(ISD::FEXP2, dl,
                         getValue(I.getOperand(1)).getValueType(),
                         getValue(I.getOperand(1)));
  }

  setValue(&I, result);
}
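// Note: unlike the other exponential lowerings, visitExp2 needs no scaling
// step before the split: its operand is already a base-2 exponent, so
// 2^x = 2^IntegerPartOfX * 2^FractionalPartOfX directly. The first factor is
// applied in the integer domain (the SHL by 23 followed by the integer ADD)
// and the second is approximated by the precision-selected polynomials.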
/// visitPow - Lower a pow intrinsic. Handles the special sequences for
/// limited-precision mode with x == 10.0f.
void
SelectionDAGLowering::visitPow(CallInst &I) {
  SDValue result;
  Value *Val = I.getOperand(1);
  DebugLoc dl = getCurDebugLoc();
  bool IsExp10 = false;

  if (getValue(Val).getValueType() == MVT::f32 &&
      getValue(I.getOperand(2)).getValueType() == MVT::f32 &&
      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
    if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(Val))) {
      if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
        APFloat Ten(10.0f);
        IsExp10 = CFP->getValueAPF().bitwiseIsEqual(Ten);
      }
    }
  }

  if (IsExp10 && LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
    SDValue Op = getValue(I.getOperand(2));

    // Put the exponent in the right bit position for later addition to the
    // final result:
    //
    //   #define LOG2OF10 3.3219281f
    //   IntegerPartOfX = (int32_t)(x * LOG2OF10);
    SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
                             getF32Constant(DAG, 0x40549a78));
    SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);

    //   FractionalPartOfX = x - (float)IntegerPartOfX;
    SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
    SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);

    //   IntegerPartOfX <<= 23;
    IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
                                 DAG.getConstant(23, TLI.getPointerTy()));

    if (LimitFloatPrecision <= 6) {
      // For floating-point precision of 6:
      //
      //   TwoToFractionalPartOfX =
      //     0.997535578f +
      //       (0.735607626f + 0.252464424f * x) * x;
      //
      //   error 0.0144103317, which is 6 bits
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3e814304));
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3f3c50c8));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x3f7f5e7e));
      SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
      SDValue TwoToFractionalPartOfX =
        DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);

      result = DAG.getNode(ISD::BIT_CONVERT, dl,
                           MVT::f32, TwoToFractionalPartOfX);
    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
      // For floating-point precision of 12:
      //
      //   TwoToFractionalPartOfX =
      //     0.999892986f +
      //       (0.696457318f +
      //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
      //
      //   error 0.000107046256, which is 13 to 14 bits
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3da235e3));
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3e65b8f3));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x3f324b07));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
                               getF32Constant(DAG, 0x3f7ff8fd));
      SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
      SDValue TwoToFractionalPartOfX =
        DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);

      result = DAG.getNode(ISD::BIT_CONVERT, dl,
                           MVT::f32, TwoToFractionalPartOfX);
    } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
      // For floating-point precision of 18:
      //
      //   TwoToFractionalPartOfX =
      //     0.999999982f +
      //       (0.693148872f +
      //         (0.240227044f +
      //           (0.554906021e-1f +
      //             (0.961591928e-2f +
      //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
      //
      //   error 2.47208000*10^(-7), which is better than 18 bits
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3924b03e));
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3ab24b87));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x3c1d8c17));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
                               getF32Constant(DAG, 0x3d634a1d));
      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
                               getF32Constant(DAG, 0x3e75fe14));
      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
      SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
                                getF32Constant(DAG, 0x3f317234));
      SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
      SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
                                getF32Constant(DAG, 0x3f800000));
      SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
      SDValue TwoToFractionalPartOfX =
        DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);

      result = DAG.getNode(ISD::BIT_CONVERT, dl,
                           MVT::f32, TwoToFractionalPartOfX);
    }
  } else {
    // No special expansion.
    result = DAG.getNode(ISD::FPOW, dl,
                         getValue(I.getOperand(1)).getValueType(),
                         getValue(I.getOperand(1)),
                         getValue(I.getOperand(2)));
  }

  setValue(&I, result);
}
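// Note: the special case above fires only for the literal base 10.0f. It
// rewrites pow(10.0f, x) as 2^(x * log2(10)), with log2(10) = 3.3219281f
// materialized as the 0x40549a78 constant, and then reuses the same
// integer/fractional split and polynomials as visitExp2. Any other base
// falls through to the generic ISD::FPOW node.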
/// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
/// we want to emit this as a call to a named external function, return the
/// name; otherwise, lower it and return null.
const char *
SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
  DebugLoc dl = getCurDebugLoc();
  switch (Intrinsic) {
  default:
    // By default, turn this into a target intrinsic node.
    visitTargetIntrinsic(I, Intrinsic);
    return 0;
  case Intrinsic::vastart:  visitVAStart(I); return 0;
  case Intrinsic::vaend:    visitVAEnd(I); return 0;
  case Intrinsic::vacopy:   visitVACopy(I); return 0;
  case Intrinsic::returnaddress:
    setValue(&I, DAG.getNode(ISD::RETURNADDR, dl, TLI.getPointerTy(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::frameaddress:
    setValue(&I, DAG.getNode(ISD::FRAMEADDR, dl, TLI.getPointerTy(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::setjmp:
    return "_setjmp"+!TLI.usesUnderscoreSetJmp();
  case Intrinsic::longjmp:
    return "_longjmp"+!TLI.usesUnderscoreLongJmp();
  case Intrinsic::memcpy: {
    SDValue Op1 = getValue(I.getOperand(1));
    SDValue Op2 = getValue(I.getOperand(2));
    SDValue Op3 = getValue(I.getOperand(3));
    unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
    DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
                              I.getOperand(1), 0, I.getOperand(2), 0));
    return 0;
  }
  case Intrinsic::memset: {
    SDValue Op1 = getValue(I.getOperand(1));
    SDValue Op2 = getValue(I.getOperand(2));
    SDValue Op3 = getValue(I.getOperand(3));
    unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
    DAG.setRoot(DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align,
                              I.getOperand(1), 0));
    return 0;
  }
  case Intrinsic::memmove: {
    SDValue Op1 = getValue(I.getOperand(1));
    SDValue Op2 = getValue(I.getOperand(2));
    SDValue Op3 = getValue(I.getOperand(3));
    unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();

    // If the source and destination are known to not be aliases, we can
    // lower memmove as memcpy.
    uint64_t Size = -1ULL;
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op3))
      Size = C->getZExtValue();
    if (AA->alias(I.getOperand(1), Size, I.getOperand(2), Size) ==
        AliasAnalysis::NoAlias) {
      DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
                                I.getOperand(1), 0, I.getOperand(2), 0));
      return 0;
    }

    DAG.setRoot(DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align,
                               I.getOperand(1), 0, I.getOperand(2), 0));
    return 0;
  }
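  // Note: rewriting memmove as memcpy when alias analysis proves the source
  // and destination disjoint (as above) is safe because overlap handling is
  // the only semantic difference between the two, and targets generally emit
  // cheaper code for memcpy.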
  case Intrinsic::dbg_stoppoint: {
    DbgStopPointInst &SPI = cast<DbgStopPointInst>(I);
    if (DIDescriptor::ValidDebugInfo(SPI.getContext(), OptLevel)) {
      MachineFunction &MF = DAG.getMachineFunction();
      DICompileUnit CU(cast<GlobalVariable>(SPI.getContext()));
      DebugLoc Loc = DebugLoc::get(MF.getOrCreateDebugLocID(CU.getGV(),
                                                            SPI.getLine(),
                                                            SPI.getColumn()));
      setCurDebugLoc(Loc);

      if (OptLevel == CodeGenOpt::None)
        DAG.setRoot(DAG.getDbgStopPoint(Loc, getRoot(),
                                        SPI.getLine(),
                                        SPI.getColumn(),
                                        SPI.getContext()));
    }
    return 0;
  }
  case Intrinsic::dbg_region_start: {
    DwarfWriter *DW = DAG.getDwarfWriter();
    DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I);

    if (DIDescriptor::ValidDebugInfo(RSI.getContext(), OptLevel) &&
        DW && DW->ShouldEmitDwarfDebug()) {
      unsigned LabelID =
        DW->RecordRegionStart(cast<GlobalVariable>(RSI.getContext()));
      DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
                               getRoot(), LabelID));
    }
    return 0;
  }
  case Intrinsic::dbg_region_end: {
    DwarfWriter *DW = DAG.getDwarfWriter();
    DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I);

    if (DIDescriptor::ValidDebugInfo(REI.getContext(), OptLevel) &&
        DW && DW->ShouldEmitDwarfDebug()) {
      MachineFunction &MF = DAG.getMachineFunction();
      DISubprogram Subprogram(cast<GlobalVariable>(REI.getContext()));

      if (Subprogram.isNull() || Subprogram.describes(MF.getFunction())) {
        unsigned LabelID =
          DW->RecordRegionEnd(cast<GlobalVariable>(REI.getContext()));
        DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
                                 getRoot(), LabelID));
      } else {
        // This is the end of an inlined function. Debugging information for
        // inlined functions is not handled yet (only supported by FastISel).
        if (OptLevel == CodeGenOpt::None) {
          unsigned ID = DW->RecordInlinedFnEnd(Subprogram);
          if (ID != 0)
            // Returned ID is 0 if this is an unbalanced "end of inlined
            // scope". This could happen if the optimizer eats dbg intrinsics
            // or "beginning of inlined scope" is not recognized due to
            // missing location info. In such cases, ignore this region.end.
            DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
                                     getRoot(), ID));
        }
      }
    }
    return 0;
  }
  case Intrinsic::dbg_func_start: {
    DwarfWriter *DW = DAG.getDwarfWriter();
    DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I);
    Value *SP = FSI.getSubprogram();
    if (!DIDescriptor::ValidDebugInfo(SP, OptLevel))
      return 0;

    MachineFunction &MF = DAG.getMachineFunction();
    if (OptLevel == CodeGenOpt::None) {
      // llvm.dbg.func.start implicitly defines a dbg_stoppoint which is what
      // (most?) gdb expects.
      DebugLoc PrevLoc = CurDebugLoc;
      DISubprogram Subprogram(cast<GlobalVariable>(SP));
      DICompileUnit CompileUnit = Subprogram.getCompileUnit();

      if (!Subprogram.describes(MF.getFunction())) {
        // This is the beginning of an inlined function.

        // If llvm.dbg.func.start is seen in a new block before any
        // llvm.dbg.stoppoint intrinsic then the location info is unknown.
        // FIXME: Why is DebugLoc reset at the beginning of each block?
        if (PrevLoc.isUnknown())
          return 0;

        // Record the source line.
        unsigned Line = Subprogram.getLineNumber();
        setCurDebugLoc(DebugLoc::get(
                     MF.getOrCreateDebugLocID(CompileUnit.getGV(), Line, 0)));

        if (DW && DW->ShouldEmitDwarfDebug()) {
          DebugLocTuple PrevLocTpl = MF.getDebugLocTuple(PrevLoc);
          unsigned LabelID = DW->RecordInlinedFnStart(Subprogram,
                                          DICompileUnit(PrevLocTpl.CompileUnit),
                                          PrevLocTpl.Line,
                                          PrevLocTpl.Col);
          DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
                                   getRoot(), LabelID));
        }
      } else {
        // Record the source line.
        unsigned Line = Subprogram.getLineNumber();
        MF.setDefaultDebugLoc(DebugLoc::get(
                     MF.getOrCreateDebugLocID(CompileUnit.getGV(), Line, 0)));
        if (DW && DW->ShouldEmitDwarfDebug()) {
          // llvm.dbg.func_start also defines the beginning of function scope.
          DW->RecordRegionStart(cast<GlobalVariable>(FSI.getSubprogram()));
        }
      }
    } else {
      DISubprogram Subprogram(cast<GlobalVariable>(SP));

      std::string SPName;
      Subprogram.getLinkageName(SPName);
      if (!SPName.empty()
          && strcmp(SPName.c_str(), MF.getFunction()->getNameStart())) {
        // This is the beginning of an inlined function. Debugging information
        // for inlined functions is not handled yet (only supported by
        // FastISel).
        return 0;
      }

      // llvm.dbg.func.start implicitly defines a dbg_stoppoint which is
      // what (most?) gdb expects.
      DICompileUnit CompileUnit = Subprogram.getCompileUnit();

      // Record the source line, but do not create a label for the normal
      // function start. It will be emitted at asm emission time. However,
      // create a label if this is the beginning of an inlined function.
      unsigned Line = Subprogram.getLineNumber();
      setCurDebugLoc(DebugLoc::get(
                     MF.getOrCreateDebugLocID(CompileUnit.getGV(), Line, 0)));
      // FIXME - Start new region because llvm.dbg.func_start also defines the
      // beginning of function scope.
    }

    return 0;
  }
  case Intrinsic::dbg_declare: {
    if (OptLevel == CodeGenOpt::None) {
      DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
      Value *Variable = DI.getVariable();
      if (DIDescriptor::ValidDebugInfo(Variable, OptLevel))
        DAG.setRoot(DAG.getNode(ISD::DECLARE, dl, MVT::Other, getRoot(),
                                getValue(DI.getAddress()), getValue(Variable)));
    } else {
      // FIXME: Do something sensible here when we support debug declare.
    }
    return 0;
  }
  case Intrinsic::eh_exception: {
    // Insert the EXCEPTIONADDR instruction.
    assert(CurMBB->isLandingPad() &&
           "Call to eh.exception not in landing pad!");
    SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
    SDValue Ops[1];
    Ops[0] = DAG.getRoot();
    SDValue Op = DAG.getNode(ISD::EXCEPTIONADDR, dl, VTs, Ops, 1);
    setValue(&I, Op);
    DAG.setRoot(Op.getValue(1));
    return 0;
  }

  case Intrinsic::eh_selector_i32:
  case Intrinsic::eh_selector_i64: {
    MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
    MVT VT = (Intrinsic == Intrinsic::eh_selector_i32 ?
              MVT::i32 : MVT::i64);

    if (MMI) {
      if (CurMBB->isLandingPad())
        AddCatchInfo(I, MMI, CurMBB);
      else {
#ifndef NDEBUG
        FuncInfo.CatchInfoLost.insert(&I);
#endif
        // FIXME: Mark exception selector register as live in. Hack for PR1508.
        unsigned Reg = TLI.getExceptionSelectorRegister();
        if (Reg) CurMBB->addLiveIn(Reg);
      }

      // Insert the EHSELECTION instruction.
      SDVTList VTs = DAG.getVTList(VT, MVT::Other);
      SDValue Ops[2];
      Ops[0] = getValue(I.getOperand(1));
      Ops[1] = getRoot();
      SDValue Op = DAG.getNode(ISD::EHSELECTION, dl, VTs, Ops, 2);
      setValue(&I, Op);
      DAG.setRoot(Op.getValue(1));
    } else {
      setValue(&I, DAG.getConstant(0, VT));
    }

    return 0;
  }

  case Intrinsic::eh_typeid_for_i32:
  case Intrinsic::eh_typeid_for_i64: {
    MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
    MVT VT = (Intrinsic == Intrinsic::eh_typeid_for_i32 ?
              MVT::i32 : MVT::i64);

    if (MMI) {
      // Find the type id for the given typeinfo.
      GlobalVariable *GV = ExtractTypeInfo(I.getOperand(1));

      unsigned TypeID = MMI->getTypeIDFor(GV);
      setValue(&I, DAG.getConstant(TypeID, VT));
    } else {
      // Return something different to eh_selector.
      setValue(&I, DAG.getConstant(1, VT));
    }

    return 0;
  }

  case Intrinsic::eh_return_i32:
  case Intrinsic::eh_return_i64:
    if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
      MMI->setCallsEHReturn(true);
      DAG.setRoot(DAG.getNode(ISD::EH_RETURN, dl,
                              MVT::Other,
                              getControlRoot(),
                              getValue(I.getOperand(1)),
                              getValue(I.getOperand(2))));
    } else {
      setValue(&I, DAG.getConstant(0, TLI.getPointerTy()));
    }

    return 0;
  case Intrinsic::eh_unwind_init:
    if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
      MMI->setCallsUnwindInit(true);
    }
    return 0;
  case Intrinsic::eh_dwarf_cfa: {
    MVT VT = getValue(I.getOperand(1)).getValueType();
    SDValue CfaArg;
    if (VT.bitsGT(TLI.getPointerTy()))
      CfaArg = DAG.getNode(ISD::TRUNCATE, dl,
                           TLI.getPointerTy(), getValue(I.getOperand(1)));
    else
      CfaArg = DAG.getNode(ISD::SIGN_EXTEND, dl,
                           TLI.getPointerTy(), getValue(I.getOperand(1)));

    SDValue Offset = DAG.getNode(ISD::ADD, dl,
                                 TLI.getPointerTy(),
                                 DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, dl,
                                             TLI.getPointerTy()),
                                 CfaArg);
    setValue(&I, DAG.getNode(ISD::ADD, dl,
                             TLI.getPointerTy(),
                             DAG.getNode(ISD::FRAMEADDR, dl,
                                         TLI.getPointerTy(),
                                         DAG.getConstant(0,
                                                         TLI.getPointerTy())),
                             Offset));
    return 0;
  }
  case Intrinsic::convertff:
  case Intrinsic::convertfsi:
  case Intrinsic::convertfui:
  case Intrinsic::convertsif:
  case Intrinsic::convertuif:
  case Intrinsic::convertss:
  case Intrinsic::convertsu:
  case Intrinsic::convertus:
  case Intrinsic::convertuu: {
    ISD::CvtCode Code = ISD::CVT_INVALID;
    switch (Intrinsic) {
    case Intrinsic::convertff:  Code = ISD::CVT_FF; break;
    case Intrinsic::convertfsi: Code = ISD::CVT_FS; break;
    case Intrinsic::convertfui: Code = ISD::CVT_FU; break;
    case Intrinsic::convertsif: Code = ISD::CVT_SF; break;
    case Intrinsic::convertuif: Code = ISD::CVT_UF; break;
    case Intrinsic::convertss:  Code = ISD::CVT_SS; break;
    case Intrinsic::convertsu:  Code = ISD::CVT_SU; break;
    case Intrinsic::convertus:  Code = ISD::CVT_US; break;
    case Intrinsic::convertuu:  Code = ISD::CVT_UU; break;
    }
    MVT DestVT = TLI.getValueType(I.getType());
    Value* Op1 = I.getOperand(1);
    setValue(&I, DAG.getConvertRndSat(DestVT, getCurDebugLoc(), getValue(Op1),
                                      DAG.getValueType(DestVT),
                                      DAG.getValueType(getValue(Op1).getValueType()),
                                      getValue(I.getOperand(2)),
                                      getValue(I.getOperand(3)),
                                      Code));
    return 0;
  }

  case Intrinsic::sqrt:
    setValue(&I, DAG.getNode(ISD::FSQRT, dl,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::powi:
    setValue(&I, DAG.getNode(ISD::FPOWI, dl,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1)),
                             getValue(I.getOperand(2))));
    return 0;
  case Intrinsic::sin:
    setValue(&I, DAG.getNode(ISD::FSIN, dl,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::cos:
    setValue(&I, DAG.getNode(ISD::FCOS, dl,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::log:
    visitLog(I);
    return 0;
  case Intrinsic::log2:
    visitLog2(I);
    return 0;
  case Intrinsic::log10:
    visitLog10(I);
    return 0;
  case Intrinsic::exp:
    visitExp(I);
    return 0;
  case Intrinsic::exp2:
    visitExp2(I);
    return 0;
  case Intrinsic::pow:
    visitPow(I);
    return 0;
  case Intrinsic::pcmarker: {
    SDValue Tmp = getValue(I.getOperand(1));
    DAG.setRoot(DAG.getNode(ISD::PCMARKER, dl, MVT::Other, getRoot(), Tmp));
    return 0;
  }
  case Intrinsic::readcyclecounter: {
    SDValue Op = getRoot();
    SDValue Tmp = DAG.getNode(ISD::READCYCLECOUNTER, dl,
                              DAG.getVTList(MVT::i64, MVT::Other),
                              &Op, 1);
    setValue(&I, Tmp);
    DAG.setRoot(Tmp.getValue(1));
    return 0;
  }
  case Intrinsic::part_select: {
    // Currently not implemented: just abort.
    assert(0 && "part_select intrinsic not implemented");
    abort();
  }
  case Intrinsic::part_set: {
    // Currently not implemented: just abort.
    assert(0 && "part_set intrinsic not implemented");
    abort();
  }
  case Intrinsic::bswap:
    setValue(&I, DAG.getNode(ISD::BSWAP, dl,
                             getValue(I.getOperand(1)).getValueType(),
                             getValue(I.getOperand(1))));
    return 0;
  case Intrinsic::cttz: {
    SDValue Arg = getValue(I.getOperand(1));
    MVT Ty = Arg.getValueType();
    SDValue result = DAG.getNode(ISD::CTTZ, dl, Ty, Arg);
    setValue(&I, result);
    return 0;
  }
  case Intrinsic::ctlz: {
    SDValue Arg = getValue(I.getOperand(1));
    MVT Ty = Arg.getValueType();
    SDValue result = DAG.getNode(ISD::CTLZ, dl, Ty, Arg);
    setValue(&I, result);
    return 0;
  }
  case Intrinsic::ctpop: {
    SDValue Arg = getValue(I.getOperand(1));
    MVT Ty = Arg.getValueType();
    SDValue result = DAG.getNode(ISD::CTPOP, dl, Ty, Arg);
    setValue(&I, result);
    return 0;
  }
  case Intrinsic::stacksave: {
    SDValue Op = getRoot();
    SDValue Tmp = DAG.getNode(ISD::STACKSAVE, dl,
                              DAG.getVTList(TLI.getPointerTy(), MVT::Other),
                              &Op, 1);
    setValue(&I, Tmp);
    DAG.setRoot(Tmp.getValue(1));
    return 0;
  }
  case Intrinsic::stackrestore: {
    SDValue Tmp = getValue(I.getOperand(1));
    DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, dl, MVT::Other, getRoot(), Tmp));
    return 0;
  }
  case Intrinsic::stackprotector: {
    // Emit code into the DAG to store the stack guard onto the stack.
    MachineFunction &MF = DAG.getMachineFunction();
    MachineFrameInfo *MFI = MF.getFrameInfo();
    MVT PtrTy = TLI.getPointerTy();

    SDValue Src = getValue(I.getOperand(1));   // The guard's value.
    AllocaInst *Slot = cast<AllocaInst>(I.getOperand(2));

    int FI = FuncInfo.StaticAllocaMap[Slot];
    MFI->setStackProtectorIndex(FI);

    SDValue FIN = DAG.getFrameIndex(FI, PtrTy);

    // Store the stack protector onto the stack.
    SDValue Result = DAG.getStore(getRoot(), getCurDebugLoc(), Src, FIN,
                                  PseudoSourceValue::getFixedStack(FI),
                                  0, true);
    setValue(&I, Result);
    DAG.setRoot(Result);
    return 0;
  }
  case Intrinsic::var_annotation:
    // Discard annotate attributes.
    return 0;

  case Intrinsic::init_trampoline: {
    const Function *F = cast<Function>(I.getOperand(2)->stripPointerCasts());

    SDValue Ops[6];
    Ops[0] = getRoot();
    Ops[1] = getValue(I.getOperand(1));
    Ops[2] = getValue(I.getOperand(2));
    Ops[3] = getValue(I.getOperand(3));
    Ops[4] = DAG.getSrcValue(I.getOperand(1));
    Ops[5] = DAG.getSrcValue(F);

    SDValue Tmp = DAG.getNode(ISD::TRAMPOLINE, dl,
                              DAG.getVTList(TLI.getPointerTy(), MVT::Other),
                              Ops, 6);

    setValue(&I, Tmp);
    DAG.setRoot(Tmp.getValue(1));
    return 0;
  }
  case Intrinsic::gcroot:
    if (GFI) {
      Value *Alloca = I.getOperand(1);
      Constant *TypeMap = cast<Constant>(I.getOperand(2));

      FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
      GFI->addStackRoot(FI->getIndex(), TypeMap);
    }
    return 0;
  case Intrinsic::gcread:
  case Intrinsic::gcwrite:
    assert(0 && "GC failed to lower gcread/gcwrite intrinsics!");
    return 0;
  case Intrinsic::flt_rounds: {
    setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, dl, MVT::i32));
    return 0;
  }
  case Intrinsic::trap: {
    DAG.setRoot(DAG.getNode(ISD::TRAP, dl, MVT::Other, getRoot()));
    return 0;
  }

  case Intrinsic::uadd_with_overflow:
    return implVisitAluOverflow(I, ISD::UADDO);
  case Intrinsic::sadd_with_overflow:
    return implVisitAluOverflow(I, ISD::SADDO);
  case Intrinsic::usub_with_overflow:
    return implVisitAluOverflow(I, ISD::USUBO);
  case Intrinsic::ssub_with_overflow:
    return implVisitAluOverflow(I, ISD::SSUBO);
  case Intrinsic::umul_with_overflow:
    return implVisitAluOverflow(I, ISD::UMULO);
  case Intrinsic::smul_with_overflow:
    return implVisitAluOverflow(I, ISD::SMULO);

  case Intrinsic::prefetch: {
    SDValue Ops[4];
    Ops[0] = getRoot();
    Ops[1] = getValue(I.getOperand(1));
    Ops[2] = getValue(I.getOperand(2));
    Ops[3] = getValue(I.getOperand(3));
    DAG.setRoot(DAG.getNode(ISD::PREFETCH, dl, MVT::Other, &Ops[0], 4));
    return 0;
  }

  case Intrinsic::memory_barrier: {
    SDValue Ops[6];
    Ops[0] = getRoot();
    for (int x = 1; x < 6; ++x)
      Ops[x] = getValue(I.getOperand(x));

    DAG.setRoot(DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, &Ops[0], 6));
    return 0;
  }
  case Intrinsic::atomic_cmp_swap: {
    SDValue Root = getRoot();
    SDValue L =
      DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, getCurDebugLoc(),
                    getValue(I.getOperand(2)).getValueType().getSimpleVT(),
                    Root,
                    getValue(I.getOperand(1)),
                    getValue(I.getOperand(2)),
                    getValue(I.getOperand(3)),
                    I.getOperand(1));
    setValue(&I, L);
    DAG.setRoot(L.getValue(1));
    return 0;
  }
  case Intrinsic::atomic_load_add:
    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD);
  case Intrinsic::atomic_load_sub:
    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB);
  case Intrinsic::atomic_load_or:
    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR);
  case Intrinsic::atomic_load_xor:
    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR);
  case Intrinsic::atomic_load_and:
    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND);
  case Intrinsic::atomic_load_nand:
    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND);
  case Intrinsic::atomic_load_max:
    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX);
  case Intrinsic::atomic_load_min:
    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN);
  case Intrinsic::atomic_load_umin:
    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN);
  case Intrinsic::atomic_load_umax:
    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX);
  case Intrinsic::atomic_swap:
    return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP);
  }
}
void SelectionDAGLowering::LowerCallTo(CallSite CS, SDValue Callee,
                                       bool IsTailCall,
                                       MachineBasicBlock *LandingPad) {
  const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
  unsigned BeginLabel = 0, EndLabel = 0;

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Args.reserve(CS.arg_size());
  for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    SDValue ArgNode = getValue(*i);
    Entry.Node = ArgNode; Entry.Ty = (*i)->getType();

    unsigned attrInd = i - CS.arg_begin() + 1;
    Entry.isSExt  = CS.paramHasAttr(attrInd, Attribute::SExt);
    Entry.isZExt  = CS.paramHasAttr(attrInd, Attribute::ZExt);
    Entry.isInReg = CS.paramHasAttr(attrInd, Attribute::InReg);
    Entry.isSRet  = CS.paramHasAttr(attrInd, Attribute::StructRet);
    Entry.isNest  = CS.paramHasAttr(attrInd, Attribute::Nest);
    Entry.isByVal = CS.paramHasAttr(attrInd, Attribute::ByVal);
    Entry.Alignment = CS.getParamAlignment(attrInd);
    Args.push_back(Entry);
  }

  if (LandingPad && MMI) {
    // Insert a label before the invoke call to mark the try range. This can be
    // used to detect deletion of the invoke via the MachineModuleInfo.
    BeginLabel = MMI->NextLabelID();

    // Both PendingLoads and PendingExports must be flushed here;
    // this call might not return.
    (void)getRoot();
    DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
                             getControlRoot(), BeginLabel));
  }

  std::pair<SDValue,SDValue> Result =
    TLI.LowerCallTo(getRoot(), CS.getType(),
                    CS.paramHasAttr(0, Attribute::SExt),
                    CS.paramHasAttr(0, Attribute::ZExt), FTy->isVarArg(),
                    CS.paramHasAttr(0, Attribute::InReg),
                    CS.getCallingConv(),
                    IsTailCall && PerformTailCallOpt,
                    Callee, Args, DAG, getCurDebugLoc());
  if (CS.getType() != Type::VoidTy)
    setValue(CS.getInstruction(), Result.first);
  DAG.setRoot(Result.second);

  if (LandingPad && MMI) {
    // Insert a label at the end of the invoke call to mark the try range. This
    // can be used to detect deletion of the invoke via the MachineModuleInfo.
    EndLabel = MMI->NextLabelID();
    DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
                             getRoot(), EndLabel));

    // Inform MachineModuleInfo of the range.
    MMI->addInvoke(LandingPad, BeginLabel, EndLabel);
  }
}
void SelectionDAGLowering::visitCall(CallInst &I) {
  const char *RenameFn = 0;
  if (Function *F = I.getCalledFunction()) {
    if (F->isDeclaration()) {
      const TargetIntrinsicInfo *II = TLI.getTargetMachine().getIntrinsicInfo();
      if (II) {
        if (unsigned IID = II->getIntrinsicID(F)) {
          RenameFn = visitIntrinsicCall(I, IID);
          if (!RenameFn)
            return;
        }
      }
      if (unsigned IID = F->getIntrinsicID()) {
        RenameFn = visitIntrinsicCall(I, IID);
        if (!RenameFn)
          return;
      }
    }

    // Check for well-known libc/libm calls. If the function is internal, it
    // can't be a library call.
    unsigned NameLen = F->getNameLen();
    if (!F->hasLocalLinkage() && NameLen) {
      const char *NameStr = F->getNameStart();
      if (NameStr[0] == 'c' &&
          ((NameLen == 8 && !strcmp(NameStr, "copysign")) ||
           (NameLen == 9 && !strcmp(NameStr, "copysignf")))) {
        if (I.getNumOperands() == 3 &&   // Basic sanity checks.
            I.getOperand(1)->getType()->isFloatingPoint() &&
            I.getType() == I.getOperand(1)->getType() &&
            I.getType() == I.getOperand(2)->getType()) {
          SDValue LHS = getValue(I.getOperand(1));
          SDValue RHS = getValue(I.getOperand(2));
          setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurDebugLoc(),
                                   LHS.getValueType(), LHS, RHS));
          return;
        }
      } else if (NameStr[0] == 'f' &&
                 ((NameLen == 4 && !strcmp(NameStr, "fabs")) ||
                  (NameLen == 5 && !strcmp(NameStr, "fabsf")) ||
                  (NameLen == 5 && !strcmp(NameStr, "fabsl")))) {
        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
            I.getOperand(1)->getType()->isFloatingPoint() &&
            I.getType() == I.getOperand(1)->getType()) {
          SDValue Tmp = getValue(I.getOperand(1));
          setValue(&I, DAG.getNode(ISD::FABS, getCurDebugLoc(),
                                   Tmp.getValueType(), Tmp));
          return;
        }
      } else if (NameStr[0] == 's' &&
                 ((NameLen == 3 && !strcmp(NameStr, "sin")) ||
                  (NameLen == 4 && !strcmp(NameStr, "sinf")) ||
                  (NameLen == 4 && !strcmp(NameStr, "sinl")))) {
        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
            I.getOperand(1)->getType()->isFloatingPoint() &&
            I.getType() == I.getOperand(1)->getType()) {
          SDValue Tmp = getValue(I.getOperand(1));
          setValue(&I, DAG.getNode(ISD::FSIN, getCurDebugLoc(),
                                   Tmp.getValueType(), Tmp));
          return;
        }
      } else if (NameStr[0] == 'c' &&
                 ((NameLen == 3 && !strcmp(NameStr, "cos")) ||
                  (NameLen == 4 && !strcmp(NameStr, "cosf")) ||
                  (NameLen == 4 && !strcmp(NameStr, "cosl")))) {
        if (I.getNumOperands() == 2 &&   // Basic sanity checks.
            I.getOperand(1)->getType()->isFloatingPoint() &&
            I.getType() == I.getOperand(1)->getType()) {
          SDValue Tmp = getValue(I.getOperand(1));
          setValue(&I, DAG.getNode(ISD::FCOS, getCurDebugLoc(),
                                   Tmp.getValueType(), Tmp));
          return;
        }
      }
    }
  } else if (isa<InlineAsm>(I.getOperand(0))) {
    visitInlineAsm(&I);
    return;
  }

  SDValue Callee;
  if (!RenameFn)
    Callee = getValue(I.getOperand(0));
  else
    Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());

  LowerCallTo(&I, Callee, I.isTailCall());
}
/// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
/// this value and returns the result as a ValueVT value. This uses
/// Chain/Flag as the input and updates them for the output Chain/Flag.
/// If the Flag pointer is NULL, no flag is used.
SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
                                      SDValue &Chain,
                                      SDValue *Flag) const {
  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    MVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = TLI->getNumRegisters(ValueVT);
    MVT RegisterVT = RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (Flag == 0)
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
        *Flag = P.getValue(2);
      }
      Chain = P.getValue(1);

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) &&
          RegisterVT.isInteger() && !RegisterVT.isVector()) {
        unsigned SlotNo = Regs[Part+i]-TargetRegisterInfo::FirstVirtualRegister;
        FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
        if (FLI.LiveOutRegInfo.size() > SlotNo) {
          FunctionLoweringInfo::LiveOutInfo &LOI = FLI.LiveOutRegInfo[SlotNo];

          unsigned RegSize = RegisterVT.getSizeInBits();
          unsigned NumSignBits = LOI.NumSignBits;
          unsigned NumZeroBits = LOI.KnownZero.countLeadingOnes();

          // FIXME: We capture more information than the dag can represent. For
          // now, just use the tightest assertzext/assertsext possible.
          bool isSExt = true;
          MVT FromVT(MVT::Other);
          if (NumSignBits == RegSize)
            isSExt = true, FromVT = MVT::i1;    // ASSERT SEXT 1
          else if (NumZeroBits >= RegSize-1)
            isSExt = false, FromVT = MVT::i1;   // ASSERT ZEXT 1
          else if (NumSignBits > RegSize-8)
            isSExt = true, FromVT = MVT::i8;    // ASSERT SEXT 8
          else if (NumZeroBits >= RegSize-8)
            isSExt = false, FromVT = MVT::i8;   // ASSERT ZEXT 8
          else if (NumSignBits > RegSize-16)
            isSExt = true, FromVT = MVT::i16;   // ASSERT SEXT 16
          else if (NumZeroBits >= RegSize-16)
            isSExt = false, FromVT = MVT::i16;  // ASSERT ZEXT 16
          else if (NumSignBits > RegSize-32)
            isSExt = true, FromVT = MVT::i32;   // ASSERT SEXT 32
          else if (NumZeroBits >= RegSize-32)
            isSExt = false, FromVT = MVT::i32;  // ASSERT ZEXT 32

          if (FromVT != MVT::Other) {
            P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                            RegisterVT, P, DAG.getValueType(FromVT));
          }
        }
      }

      Parts[i] = P;
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
                                     NumRegs, RegisterVT, ValueVT);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl,
                     DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
                     &Values[0], ValueVTs.size());
}
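// Note: the AssertSext/AssertZext annotation above narrows known-bits facts
// into the DAG. For example, if a 32-bit vreg is known to have more than 24
// sign bits (NumSignBits > RegSize-8), every value it carries is a
// sign-extended i8, so AssertSext from i8 is the tightest assertion the DAG
// can express for it; later combines can then drop redundant extensions.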
/// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
/// specified value into the registers specified by this object. This uses
/// Chain/Flag as the input and updates them for the output Chain/Flag.
/// If the Flag pointer is NULL, no flag is used.
void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
                                 SDValue &Chain, SDValue *Flag) const {
  // Get the list of the value's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    MVT ValueVT = ValueVTs[Value];
    unsigned NumParts = TLI->getNumRegisters(ValueVT);
    MVT RegisterVT = RegVTs[Value];

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
                   &Parts[Part], NumParts, RegisterVT);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (Flag == 0)
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
      *Flag = Part.getValue(1);
    }
    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Flag)
    // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
    // flagged to it. That is, the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user):
    //   c1, f1 = CopyToReg
    //   c2, f2 = CopyToReg
    //   c3     = TokenFactor c1, c2
    //   ...
    //          = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs);
}
/// AddInlineAsmOperands - Add this value to the specified inlineasm node
/// operand list. This adds the code marker and includes the number of
/// values added into it.
void RegsForValue::AddInlineAsmOperands(unsigned Code,
                                        bool HasMatching, unsigned MatchingIdx,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  MVT IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy();
  assert(Regs.size() < (1 << 13) && "Too many inline asm outputs!");
  unsigned Flag = Code | (Regs.size() << 3);
  if (HasMatching)
    Flag |= 0x80000000 | (MatchingIdx << 16);
  Ops.push_back(DAG.getTargetConstant(Flag, IntPtrTy));
  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumRegs = TLI->getNumRegisters(ValueVTs[Value]);
    MVT RegisterVT = RegVTs[Value];
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
    }
  }
}
/// isAllocatableRegister - If the specified register is safe to allocate,
/// i.e. it isn't a stack pointer or some other special register, return the
/// register class for the register. Otherwise, return null.
static const TargetRegisterClass *
isAllocatableRegister(unsigned Reg, MachineFunction &MF,
                      const TargetLowering &TLI,
                      const TargetRegisterInfo *TRI) {
  MVT FoundVT = MVT::Other;
  const TargetRegisterClass *FoundRC = 0;
  for (TargetRegisterInfo::regclass_iterator RCI = TRI->regclass_begin(),
       E = TRI->regclass_end(); RCI != E; ++RCI) {
    MVT ThisVT = MVT::Other;

    const TargetRegisterClass *RC = *RCI;
    // If none of the value types for this register class are valid, we
    // can't use it. For example, 64-bit reg classes on 32-bit targets.
    for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
         I != E; ++I) {
      if (TLI.isTypeLegal(*I)) {
        // If we have already found this register in a different register class,
        // choose the one with the largest VT specified. For example, on
        // PowerPC, we favor f64 register classes over f32.
        if (FoundVT == MVT::Other || FoundVT.bitsLT(*I)) {
          ThisVT = *I;
          break;
        }
      }
    }

    if (ThisVT == MVT::Other) continue;

    // NOTE: This isn't ideal. In particular, this might allocate the
    // frame pointer in functions that need it (due to them not being taken
    // out of allocation, because a variable sized allocation hasn't been seen
    // yet). This is a slight code pessimization, but should still work.
    for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
         E = RC->allocation_order_end(MF); I != E; ++I)
      if (*I == Reg) {
        // We found a matching register class. Keep looking at others in case
        // we find one with larger registers that this physreg is also in.
        FoundRC = RC;
        FoundVT = ThisVT;
        break;
      }
  }
  return FoundRC;
}
namespace llvm {

/// AsmOperandInfo - This contains information for each constraint that we are
/// lowering.
class VISIBILITY_HIDDEN SDISelAsmOperandInfo :
    public TargetLowering::AsmOperandInfo {
public:
  /// CallOperand - If this is the result output operand or a clobber
  /// this is null, otherwise it is the incoming operand to the CallInst.
  /// This gets modified as the asm is processed.
  SDValue CallOperand;

  /// AssignedRegs - If this is a register or register class operand, this
  /// contains the set of registers corresponding to the operand.
  RegsForValue AssignedRegs;

  explicit SDISelAsmOperandInfo(const InlineAsm::ConstraintInfo &info)
    : TargetLowering::AsmOperandInfo(info), CallOperand(0,0) {
  }

  /// MarkAllocatedRegs - Once AssignedRegs is set, mark the assigned registers
  /// busy in OutputRegs/InputRegs.
  void MarkAllocatedRegs(bool isOutReg, bool isInReg,
                         std::set<unsigned> &OutputRegs,
                         std::set<unsigned> &InputRegs,
                         const TargetRegisterInfo &TRI) const {
    if (isOutReg) {
      for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
        MarkRegAndAliases(AssignedRegs.Regs[i], OutputRegs, TRI);
    }
    if (isInReg) {
      for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
        MarkRegAndAliases(AssignedRegs.Regs[i], InputRegs, TRI);
    }
  }

  /// getCallOperandValMVT - Return the MVT of the Value* that this operand
  /// corresponds to. If there is no Value* for this operand, it returns
  /// MVT::Other.
  MVT getCallOperandValMVT(const TargetLowering &TLI,
                           const TargetData *TD) const {
    if (CallOperandVal == 0) return MVT::Other;

    if (isa<BasicBlock>(CallOperandVal))
      return TLI.getPointerTy();

    const llvm::Type *OpTy = CallOperandVal->getType();

    // If this is an indirect operand, the operand is a pointer to the
    // accessed type.
    if (isIndirect)
      OpTy = cast<PointerType>(OpTy)->getElementType();

    // If OpTy is not a single value, it may be a struct/union that we
    // can tile with integers.
    if (!OpTy->isSingleValueType() && OpTy->isSized()) {
      unsigned BitSize = TD->getTypeSizeInBits(OpTy);
      switch (BitSize) {
      default: break;
      case 1:
      case 8:
      case 16:
      case 32:
      case 64:
      case 128:
        OpTy = IntegerType::get(BitSize);
        break;
      }
    }

    return TLI.getValueType(OpTy, true);
  }

private:
  /// MarkRegAndAliases - Mark the specified register and all aliases in the
  /// specified set.
  static void MarkRegAndAliases(unsigned Reg, std::set<unsigned> &Regs,
                                const TargetRegisterInfo &TRI) {
    assert(TargetRegisterInfo::isPhysicalRegister(Reg) && "Isn't a physreg");
    Regs.insert(Reg);
    if (const unsigned *Aliases = TRI.getAliasSet(Reg))
      for (; *Aliases; ++Aliases)
        Regs.insert(*Aliases);
  }
};

} // end llvm namespace.
/// GetRegistersForValue - Assign registers (virtual or physical) for the
/// specified operand. We prefer to assign virtual registers, to allow the
/// register allocator to handle the assignment process. However, if the asm
/// uses features that we can't model on machineinstrs, we have SDISel do the
/// allocation. This produces generally horrible, but correct, code.
///
///   OpInfo describes the operand.
///   Input and OutputRegs are the set of already allocated physical registers.
///
void SelectionDAGLowering::
GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
                     std::set<unsigned> &OutputRegs,
                     std::set<unsigned> &InputRegs) {
  // Compute whether this value requires an input register, an output register,
  // or neither.
  bool isOutReg = false;
  bool isInReg = false;
  switch (OpInfo.Type) {
  case InlineAsm::isOutput:
    isOutReg = true;

    // If there is an input constraint that matches this, we need to reserve
    // the input register so no other inputs allocate to it.
    isInReg = OpInfo.hasMatchingInput();
    break;
  case InlineAsm::isInput:
    isInReg = true;
    isOutReg = false;
    break;
  case InlineAsm::isClobber:
    isOutReg = true;
    isInReg = true;
    break;
  }

  MachineFunction &MF = DAG.getMachineFunction();
  SmallVector<unsigned, 4> Regs;

  // If this is a constraint for a single physreg, or a constraint for a
  // register class, find it.
  std::pair<unsigned, const TargetRegisterClass*> PhysReg =
    TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
                                     OpInfo.ConstraintVT);

  unsigned NumRegs = 1;
  if (OpInfo.ConstraintVT != MVT::Other) {
    // If this is a FP input in an integer register (or vice versa) insert a bit
    // cast of the input value. More generally, handle any case where the input
    // value disagrees with the register class we plan to stick this in.
    if (OpInfo.Type == InlineAsm::isInput &&
        PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) {
      // Try to convert to the first MVT that the reg class contains. If the
      // types are identical size, use a bitcast to convert (e.g. two differing
      // pointer types).
      MVT RegVT = *PhysReg.second->vt_begin();
      if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
        OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
                                         RegVT, OpInfo.CallOperand);
        OpInfo.ConstraintVT = RegVT;
      } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
        // If the input is a FP value and we want it in FP registers, do a
        // bitcast to the corresponding integer type. This turns an f64 value
        // into i64, which can be passed with two i32 values on a 32-bit
        // machine.
        RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
        OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
                                         RegVT, OpInfo.CallOperand);
        OpInfo.ConstraintVT = RegVT;
      }
    }

    NumRegs = TLI.getNumRegisters(OpInfo.ConstraintVT);
  }

  MVT RegVT;
  MVT ValueVT = OpInfo.ConstraintVT;

  // If this is a constraint for a specific physical register, like {r17},
  // assign it now.
  if (unsigned AssignedReg = PhysReg.first) {
    const TargetRegisterClass *RC = PhysReg.second;
    if (OpInfo.ConstraintVT == MVT::Other)
      ValueVT = *RC->vt_begin();

    // Get the actual register value type. This is important, because the user
    // may have asked for (e.g.) the AX register in i32 type. We need to
    // remember that AX is actually i16 to get the right extension.
    RegVT = *RC->vt_begin();

    // This is an explicit reference to a physical register.
    Regs.push_back(AssignedReg);

    // If this is an expanded reference, add the rest of the regs to Regs.
    if (NumRegs != 1) {
      TargetRegisterClass::iterator I = RC->begin();
      for (; *I != AssignedReg; ++I)
        assert(I != RC->end() && "Didn't find reg!");

      // Already added the first reg.
      --NumRegs; ++I;
      for (; NumRegs; --NumRegs, ++I) {
        assert(I != RC->end() && "Ran out of registers to allocate!");
        Regs.push_back(*I);
      }
    }
    OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
    const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
    OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
    return;
  }
  // Otherwise, if this was a reference to an LLVM register class, create vregs
  // for this reference.
  if (const TargetRegisterClass *RC = PhysReg.second) {
    RegVT = *RC->vt_begin();
    if (OpInfo.ConstraintVT == MVT::Other)
      ValueVT = RegVT;

    // Create the appropriate number of virtual registers.
    MachineRegisterInfo &RegInfo = MF.getRegInfo();
    for (; NumRegs; --NumRegs)
      Regs.push_back(RegInfo.createVirtualRegister(RC));

    OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
    return;
  }
  // This is a reference to a register class that doesn't directly correspond
  // to an LLVM register class. Allocate NumRegs consecutive, available,
  // registers from the class.
  std::vector<unsigned> RegClassRegs
    = TLI.getRegClassForInlineAsmConstraint(OpInfo.ConstraintCode,
                                            OpInfo.ConstraintVT);
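
  // For example (illustrative): a two-register operand scanned against the
  // candidate list {R0, R1, R2, R3} must land in an adjacent pair such as
  // R1,R2; any candidate that is already used or unallocatable resets the run
  // of consecutive registers found so far.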
  const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
  unsigned NumAllocated = 0;
  for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
    unsigned Reg = RegClassRegs[i];
    // See if this register is available.
    if ((isOutReg && OutputRegs.count(Reg)) ||  // Already used.
        (isInReg  && InputRegs.count(Reg))) {   // Already used.
      // Make sure we find consecutive registers.
      NumAllocated = 0;
      continue;
    }

    // Check to see if this register is allocatable (i.e. don't give out the
    // stack pointer).
    const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, TRI);
    if (!RC) {  // Couldn't allocate this register.
      // Reset NumAllocated to make sure we return consecutive registers.
      NumAllocated = 0;
      continue;
    }

    // Okay, this register is good, we can use it.
    ++NumAllocated;

    // If we allocated enough consecutive registers, succeed.
    if (NumAllocated == NumRegs) {
      unsigned RegStart = (i-NumAllocated)+1;
      unsigned RegEnd   = i+1;
      // Mark all of the allocated registers used.
      for (unsigned i = RegStart; i != RegEnd; ++i)
        Regs.push_back(RegClassRegs[i]);

      OpInfo.AssignedRegs = RegsForValue(TLI, Regs, *RC->vt_begin(),
                                         OpInfo.ConstraintVT);
      OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
      return;
    }
  }

  // Otherwise, we couldn't allocate enough registers for this.
}

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
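///
/// For example (illustrative), this is true for
///   asm("movl %0, %%eax" : : "m"(x))
/// and also for any indirect operand, since those access memory as well.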
static bool
hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
                          const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operands access memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}

/// visitInlineAsm - Handle a call to an InlineAsm object.
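///
/// For example (illustrative IR), a call such as
///   %res = call i32 asm "bswap $0", "=r,0"(i32 %x)
/// is lowered here into a single ISD::INLINEASM node whose operands encode
/// the constraint string's register and memory requirements.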
void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
  InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());

  /// ConstraintOperands - Information about all of the constraints.
  std::vector<SDISelAsmOperandInfo> ConstraintOperands;

  std::set<unsigned> OutputRegs, InputRegs;

  // Do a prepass over the constraints, canonicalizing them, and building up
  // the ConstraintOperands list.
  std::vector<InlineAsm::ConstraintInfo>
    ConstraintInfos = IA->ParseConstraints();

  bool hasMemory = hasInlineAsmMemConstraint(ConstraintInfos, TLI);

  SDValue Chain, Flag;

  // We won't need to flush pending loads if this asm doesn't touch
  // memory and is nonvolatile.
  if (hasMemory || IA->hasSideEffects())
    Chain = getRoot();
  else
    Chain = DAG.getRoot();

  unsigned ArgNo = 0;   // ArgNo - The argument of the CallInst.
  unsigned ResNo = 0;   // ResNo - The result number of the next output.
  for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
    ConstraintOperands.push_back(SDISelAsmOperandInfo(ConstraintInfos[i]));
    SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();

    MVT OpVT = MVT::Other;

    // Compute the value type for each operand.
    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      // Indirect outputs just consume an argument.
      if (OpInfo.isIndirect) {
        OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
        break;
      }

      // The return value of the call is this value. As such, there is no
      // corresponding argument.
      assert(CS.getType() != Type::VoidTy && "Bad inline asm!");
      if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
        OpVT = TLI.getValueType(STy->getElementType(ResNo));
      } else {
        assert(ResNo == 0 && "Asm only has one result!");
        OpVT = TLI.getValueType(CS.getType());
      }
      ++ResNo;
      break;
    case InlineAsm::isInput:
      OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
      break;
    case InlineAsm::isClobber:
      // Nothing to do.
      break;
    }

    // If this is an input or an indirect output, process the call argument.
    // BasicBlocks are labels, currently appearing only in asm's.
    if (OpInfo.CallOperandVal) {
      if (BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
        OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
      } else {
        OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
      }

      OpVT = OpInfo.getCallOperandValMVT(TLI, TD);
    }

    OpInfo.ConstraintVT = OpVT;
  }
  // Second pass over the constraints: compute which constraint option to use
  // and assign registers to constraints that want a specific physreg.
  for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];

    // If this is an output operand with a matching input operand, look up the
    // matching input. If their types mismatch, e.g. one is an integer, the
    // other is floating point, or their sizes are different, flag it as an
    // error.
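    // For example (illustrative): tying an i32 output to an f32 or i64 input
    // is rejected below, while tying it to another 32-bit integer type simply
    // unifies the input's ConstraintVT with the output's.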
    if (OpInfo.hasMatchingInput()) {
      SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
      if (OpInfo.ConstraintVT != Input.ConstraintVT) {
        if ((OpInfo.ConstraintVT.isInteger() !=
             Input.ConstraintVT.isInteger()) ||
            (OpInfo.ConstraintVT.getSizeInBits() !=
             Input.ConstraintVT.getSizeInBits())) {
          cerr << "llvm: error: Unsupported asm: input constraint with a "
               << "matching output constraint of incompatible type!\n";
          exit(1);
        }
        Input.ConstraintVT = OpInfo.ConstraintVT;
      }
    }

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, hasMemory, &DAG);

    // If this is a memory input, and if the operand is not indirect, do what
    // we need to do to provide an address for the memory input.
    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        !OpInfo.isIndirect) {
      assert(OpInfo.Type == InlineAsm::isInput &&
             "Can only indirectify direct input operands!");

      // Memory operands really want the address of the value. If we don't
      // have an indirect input, put it in the constant pool if we can,
      // otherwise spill it to a stack slot.

      // If the operand is a float, integer, or vector constant, spill to a
      // constant pool entry to get its address.
      Value *OpVal = OpInfo.CallOperandVal;
      if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
          isa<ConstantVector>(OpVal)) {
        OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
                                                 TLI.getPointerTy());
      } else {
        // Otherwise, create a stack slot and emit a store to it before the
        // asm.
        const Type *Ty = OpVal->getType();
        uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
        unsigned Align  = TLI.getTargetData()->getPrefTypeAlignment(Ty);
        MachineFunction &MF = DAG.getMachineFunction();
        int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align);
        SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
        Chain = DAG.getStore(Chain, getCurDebugLoc(),
                             OpInfo.CallOperand, StackSlot, NULL, 0);
        OpInfo.CallOperand = StackSlot;
      }

      // There is no longer a Value* corresponding to this operand.
      OpInfo.CallOperandVal = 0;
      // It is now an indirect operand.
      OpInfo.isIndirect = true;
    }

    // If this constraint is for a specific register, allocate it before
    // anything else.
    if (OpInfo.ConstraintType == TargetLowering::C_Register)
      GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
  }
  ConstraintInfos.clear();

  // Third pass - Loop over all of the operands, assigning virtual or physregs
  // to register class operands.
  for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];

    // C_Register operands have already been allocated; Other/Memory don't
    // need to be.
    if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
      GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
  }
  // AsmNodeOperands - The operands for the ISD::INLINEASM node.
  std::vector<SDValue> AsmNodeOperands;
  AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
  AsmNodeOperands.push_back(
          DAG.getTargetExternalSymbol(IA->getAsmString().c_str(), MVT::Other));

  // Loop over all of the inputs, copying the operand values into the
  // appropriate registers and processing the output regs.
  RegsForValue RetValRegs;

  // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
  std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;

  for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
    SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];

    switch (OpInfo.Type) {
    case InlineAsm::isOutput: {
      if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
          OpInfo.ConstraintType != TargetLowering::C_Register) {
        // Memory output, or 'other' output (e.g. 'X' constraint).
        assert(OpInfo.isIndirect && "Memory output must be indirect operand");

        // Add information to the INLINEASM node to know about this output.
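        // Operand flag encoding (see InlineAsm.h; illustrative summary): the
        // low 3 bits hold the operand kind (1 = REGUSE, 2 = REGDEF, 3 = IMM,
        // 4 = MEM, 6 = EARLYCLOBBER REGDEF) and the bits above them hold the
        // operand count, so 4 | (1<<3) is a single memory operand.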
        unsigned ResOpType = 4/*MEM*/ | (1<<3);
        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
                                                        TLI.getPointerTy()));
        AsmNodeOperands.push_back(OpInfo.CallOperand);
        break;
      }

      // Otherwise, this is a register or register class output.

      // Copy the output from the appropriate register. Find a register that
      // we can use.
      if (OpInfo.AssignedRegs.Regs.empty()) {
        cerr << "llvm: error: Couldn't allocate output reg for constraint '"
             << OpInfo.ConstraintCode << "'!\n";
        exit(1);
      }

      // If this is an indirect operand, store through the pointer after the
      // asm.
      if (OpInfo.isIndirect) {
        IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
                                                      OpInfo.CallOperandVal));
      } else {
        // This is the result value of the call.
        assert(CS.getType() != Type::VoidTy && "Bad inline asm!");
        // Concatenate this output onto the outputs list.
        RetValRegs.append(OpInfo.AssignedRegs);
      }

      // Add information to the INLINEASM node to know that this register is
      // set.
      OpInfo.AssignedRegs.AddInlineAsmOperands(OpInfo.isEarlyClobber ?
                                               6 /* EARLYCLOBBER REGDEF */ :
                                               2 /* REGDEF */ ,
                                               false,
                                               0,
                                               DAG, AsmNodeOperands);
      break;
    }
    case InlineAsm::isInput: {
      SDValue InOperandVal = OpInfo.CallOperand;

      if (OpInfo.isMatchingInputConstraint()) {    // Matching constraint?
        // If this is required to match an output register we have already
        // set, just use its register.
        unsigned OperandNo = OpInfo.getMatchedOperand();

        // Scan until we find the definition we already emitted of this
        // operand. When we find it, create a RegsForValue operand.
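        // (AsmNodeOperands[0] is the chain and [1] the asm string, so the
        // scan starts at operand 2; each operand group is one flag constant
        // followed by InlineAsm::getNumOperandRegisters(flag) values.)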
        unsigned CurOp = 2;  // The first operand.
        for (; OperandNo; --OperandNo) {
          // Advance to the next operand.
          unsigned OpFlag =
            cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
          assert(((OpFlag & 7) == 2 /*REGDEF*/ ||
                  (OpFlag & 7) == 6 /*EARLYCLOBBER REGDEF*/ ||
                  (OpFlag & 7) == 4 /*MEM*/) &&
                 "Skipped past definitions?");
          CurOp += InlineAsm::getNumOperandRegisters(OpFlag)+1;
        }

        unsigned OpFlag =
          cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
        if ((OpFlag & 7) == 2 /*REGDEF*/
            || (OpFlag & 7) == 6 /* EARLYCLOBBER REGDEF */) {
          // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
          assert(!OpInfo.isIndirect &&
                 "Don't know how to handle tied indirect register inputs yet!");
          RegsForValue MatchedRegs;
          MatchedRegs.TLI = &TLI;
          MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
          MVT RegVT = AsmNodeOperands[CurOp+1].getValueType();
          MatchedRegs.RegVTs.push_back(RegVT);
          MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
          for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag);
               i != e; ++i)
            MatchedRegs.Regs.
              push_back(RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT)));

          // Use the produced MatchedRegs object to copy the input value into
          // the newly created virtual registers.
          MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
                                    Chain, &Flag);
          MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/,
                                           true, OpInfo.getMatchedOperand(),
                                           DAG, AsmNodeOperands);
          break;
        } else {
          assert(((OpFlag & 7) == 4) && "Unknown matching constraint!");
          assert((InlineAsm::getNumOperandRegisters(OpFlag)) == 1 &&
                 "Unexpected number of operands");
          // Add information to the INLINEASM node to know about this input.
          // See InlineAsm.h isUseOperandTiedToDef.
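          // Tied-use encoding (illustrative summary): bit 31 marks this use
          // as tied to a def, and bits 16-30 hold the matched operand number;
          // InlineAsm::isUseOperandTiedToDef decodes both.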
          OpFlag |= 0x80000000 | (OpInfo.getMatchedOperand() << 16);
          AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag,
                                                          TLI.getPointerTy()));
          AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
          break;
        }
      }
      if (OpInfo.ConstraintType == TargetLowering::C_Other) {
        assert(!OpInfo.isIndirect &&
               "Don't know how to handle indirect other inputs yet!");

        std::vector<SDValue> Ops;
        TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode[0],
                                         hasMemory, Ops, DAG);
        if (Ops.empty()) {
          cerr << "llvm: error: Invalid operand for inline asm constraint '"
               << OpInfo.ConstraintCode << "'!\n";
          exit(1);
        }

        // Add information to the INLINEASM node to know about this input.
        unsigned ResOpType = 3 /*IMM*/ | (Ops.size() << 3);
        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
                                                        TLI.getPointerTy()));
        AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
        break;
      } else if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
        assert(InOperandVal.getValueType() == TLI.getPointerTy() &&
               "Memory operands expect pointer values");

        // Add information to the INLINEASM node to know about this input.
        unsigned ResOpType = 4/*MEM*/ | (1<<3);
        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
                                                        TLI.getPointerTy()));
        AsmNodeOperands.push_back(InOperandVal);
        break;
      }

      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
              OpInfo.ConstraintType == TargetLowering::C_Register) &&
             "Unknown constraint type!");
      assert(!OpInfo.isIndirect &&
             "Don't know how to handle indirect register inputs yet!");

      // Copy the input into the appropriate registers.
      if (OpInfo.AssignedRegs.Regs.empty()) {
        cerr << "llvm: error: Couldn't allocate input reg for constraint '"
             << OpInfo.ConstraintCode << "'!\n";
        exit(1);
      }

      OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
                                        Chain, &Flag);

      OpInfo.AssignedRegs.AddInlineAsmOperands(1/*REGUSE*/, false, 0,
                                               DAG, AsmNodeOperands);
      break;
    }
    case InlineAsm::isClobber: {
      // Add the clobbered value to the operand list, so that the register
      // allocator is aware that the physreg got clobbered.
      if (!OpInfo.AssignedRegs.Regs.empty())
        OpInfo.AssignedRegs.AddInlineAsmOperands(6 /* EARLYCLOBBER REGDEF */,
                                                 false, 0, DAG, AsmNodeOperands);
      break;
    }
    }
  }
  // Finish up input operands.
  AsmNodeOperands[0] = Chain;
  if (Flag.getNode()) AsmNodeOperands.push_back(Flag);

  Chain = DAG.getNode(ISD::INLINEASM, getCurDebugLoc(),
                      DAG.getVTList(MVT::Other, MVT::Flag),
                      &AsmNodeOperands[0], AsmNodeOperands.size());
  Flag = Chain.getValue(1);

  // If this asm returns a register value, copy the result from that register
  // and set it as the value of the call.
  if (!RetValRegs.Regs.empty()) {
    SDValue Val = RetValRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
                                             Chain, &Flag);

    // FIXME: Why don't we do this for inline asms with MRVs?
    if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
      MVT ResultType = TLI.getValueType(CS.getType());

      // If any of the results of the inline asm is a vector, it may have the
      // wrong width/number of elements. This can happen for register classes
      // that can contain multiple different value types. The preg or vreg
      // allocated may not have the same VT as was expected. Convert it to the
      // right type with bit_convert.
      if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
        Val = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
                          ResultType, Val);
      } else if (ResultType != Val.getValueType() &&
                 ResultType.isInteger() && Val.getValueType().isInteger()) {
        // If a result value was tied to an input value, the computed result
        // may have a wider width than the expected result. Extract the
        // relevant portion.
        Val = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), ResultType, Val);
      }

      assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
    }

    setValue(CS.getInstruction(), Val);
    // Don't need to use this as a chain in this case.
    if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
      return;
  }

  std::vector<std::pair<SDValue, Value*> > StoresToEmit;

  // Process indirect outputs, first output all of the flagged copies out of
  // the physregs.
  for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
    RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
    Value *Ptr = IndirectStoresToEmit[i].second;
    SDValue OutVal = OutRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
                                             Chain, &Flag);
    StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
  }

  // Emit the non-flagged stores from the physregs.
  SmallVector<SDValue, 8> OutChains;
  for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i)
    OutChains.push_back(DAG.getStore(Chain, getCurDebugLoc(),
                                     StoresToEmit[i].first,
                                     getValue(StoresToEmit[i].second),
                                     StoresToEmit[i].second, 0));

  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
                        &OutChains[0], OutChains.size());

  DAG.setRoot(Chain);
}

void SelectionDAGLowering::visitMalloc(MallocInst &I) {
  SDValue Src = getValue(I.getOperand(0));

  // Scale up by the type size in the original i32 type width. Various
  // mid-level optimizers may make assumptions about demanded bits etc from
  // the i32-ness of the operand: we do not want to promote to i64 and then
  // multiply on 64-bit targets.
  // FIXME: Malloc inst should go away: PR715.
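  // For example (illustrative): "%p = malloc i32, i32 %n" computes %n * 4
  // here in %n's original type, and only afterwards truncates or extends the
  // product to the target's pointer width below.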
  uint64_t ElementSize = TD->getTypeAllocSize(I.getType()->getElementType());
  if (ElementSize != 1)
    Src = DAG.getNode(ISD::MUL, getCurDebugLoc(), Src.getValueType(),
                      Src, DAG.getConstant(ElementSize, Src.getValueType()));

  MVT IntPtr = TLI.getPointerTy();

  if (IntPtr.bitsLT(Src.getValueType()))
    Src = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), IntPtr, Src);
  else if (IntPtr.bitsGT(Src.getValueType()))
    Src = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), IntPtr, Src);

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Node = Src;
  Entry.Ty = TLI.getTargetData()->getIntPtrType();
  Args.push_back(Entry);

  std::pair<SDValue,SDValue> Result =
    TLI.LowerCallTo(getRoot(), I.getType(), false, false, false, false,
                    CallingConv::C, PerformTailCallOpt,
                    DAG.getExternalSymbol("malloc", IntPtr),
                    Args, DAG, getCurDebugLoc());
  setValue(&I, Result.first);  // Pointers always fit in registers
  DAG.setRoot(Result.second);
}

void SelectionDAGLowering::visitFree(FreeInst &I) {
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Node = getValue(I.getOperand(0));
  Entry.Ty = TLI.getTargetData()->getIntPtrType();
  Args.push_back(Entry);
  MVT IntPtr = TLI.getPointerTy();
  std::pair<SDValue,SDValue> Result =
    TLI.LowerCallTo(getRoot(), Type::VoidTy, false, false, false, false,
                    CallingConv::C, PerformTailCallOpt,
                    DAG.getExternalSymbol("free", IntPtr), Args, DAG,
                    getCurDebugLoc());
  DAG.setRoot(Result.second);
}

void SelectionDAGLowering::visitVAStart(CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VASTART, getCurDebugLoc(),
                          MVT::Other, getRoot(),
                          getValue(I.getOperand(1)),
                          DAG.getSrcValue(I.getOperand(1))));
}

void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
  SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurDebugLoc(),
                           getRoot(), getValue(I.getOperand(0)),
                           DAG.getSrcValue(I.getOperand(0)));
  setValue(&I, V);
  DAG.setRoot(V.getValue(1));
}

void SelectionDAGLowering::visitVAEnd(CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VAEND, getCurDebugLoc(),
                          MVT::Other, getRoot(),
                          getValue(I.getOperand(1)),
                          DAG.getSrcValue(I.getOperand(1))));
}

void SelectionDAGLowering::visitVACopy(CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurDebugLoc(),
                          MVT::Other, getRoot(),
                          getValue(I.getOperand(1)),
                          getValue(I.getOperand(2)),
                          DAG.getSrcValue(I.getOperand(1)),
                          DAG.getSrcValue(I.getOperand(2))));
}

/// TargetLowering::LowerArguments - This is the default LowerArguments
/// implementation, which just inserts a FORMAL_ARGUMENTS node. FIXME: When all
/// targets are migrated to using FORMAL_ARGUMENTS, this hook should be
/// integrated into SDISel.
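///
/// The node built here has the rough shape (illustrative sketch):
///   operands: chain, CC#, isVarArg, then one flags word per argument register
///   results:  one value per argument register, plus an output chain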
void TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &ArgValues,
                                    DebugLoc dl) {
  // Add CC# and isVarArg as operands to the FORMAL_ARGUMENTS node.
  SmallVector<SDValue, 3+16> Ops;
  Ops.push_back(DAG.getRoot());
  Ops.push_back(DAG.getConstant(F.getCallingConv(), getPointerTy()));
  Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy()));

  // Add one result value for each formal argument.
  SmallVector<MVT, 16> RetVals;
  unsigned j = 1;
  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
       I != E; ++I, ++j) {
    SmallVector<MVT, 4> ValueVTs;
    ComputeValueVTs(*this, I->getType(), ValueVTs);
    for (unsigned Value = 0, NumValues = ValueVTs.size();
         Value != NumValues; ++Value) {
      MVT VT = ValueVTs[Value];
      const Type *ArgTy = VT.getTypeForMVT();
      ISD::ArgFlagsTy Flags;
      unsigned OriginalAlignment =
        getTargetData()->getABITypeAlignment(ArgTy);

      if (F.paramHasAttr(j, Attribute::ZExt))
        Flags.setZExt();
      if (F.paramHasAttr(j, Attribute::SExt))
        Flags.setSExt();
      if (F.paramHasAttr(j, Attribute::InReg))
        Flags.setInReg();
      if (F.paramHasAttr(j, Attribute::StructRet))
        Flags.setSRet();
      if (F.paramHasAttr(j, Attribute::ByVal)) {
        Flags.setByVal();
        const PointerType *Ty = cast<PointerType>(I->getType());
        const Type *ElementTy = Ty->getElementType();
        unsigned FrameAlign = getByValTypeAlignment(ElementTy);
        unsigned FrameSize  = getTargetData()->getTypeAllocSize(ElementTy);
        // For ByVal, alignment should be passed from FE. BE will guess if
        // this info is not there but there are cases it cannot get right.
        if (F.getParamAlignment(j))
          FrameAlign = F.getParamAlignment(j);
        Flags.setByValAlign(FrameAlign);
        Flags.setByValSize(FrameSize);
      }
      if (F.paramHasAttr(j, Attribute::Nest))
        Flags.setNest();
      Flags.setOrigAlign(OriginalAlignment);

      MVT RegisterVT = getRegisterType(VT);
      unsigned NumRegs = getNumRegisters(VT);
      for (unsigned i = 0; i != NumRegs; ++i) {
        RetVals.push_back(RegisterVT);
        ISD::ArgFlagsTy MyFlags = Flags;
        if (NumRegs > 1 && i == 0)
          MyFlags.setSplit();
        // if it isn't first piece, alignment must be 1
        else if (i > 0)
          MyFlags.setOrigAlign(1);
        Ops.push_back(DAG.getArgFlags(MyFlags));
      }
    }
  }
  RetVals.push_back(MVT::Other);

  // Create the node.
  SDNode *Result = DAG.getNode(ISD::FORMAL_ARGUMENTS, dl,
                               DAG.getVTList(&RetVals[0], RetVals.size()),
                               &Ops[0], Ops.size()).getNode();

  // Prelower FORMAL_ARGUMENTS. This isn't required for functionality, but
  // allows exposing the loads that may be part of the argument access to the
  // first DAGCombiner pass.
  SDValue TmpRes = LowerOperation(SDValue(Result, 0), DAG);

  // The number of results should match up, except that the lowered one may
  // have an extra flag result.
  assert((Result->getNumValues() == TmpRes.getNode()->getNumValues() ||
          (Result->getNumValues()+1 == TmpRes.getNode()->getNumValues() &&
           TmpRes.getValue(Result->getNumValues()).getValueType() == MVT::Flag))
         && "Lowering produced unexpected number of results!");

  // The FORMAL_ARGUMENTS node itself is likely no longer needed.
  if (Result != TmpRes.getNode() && Result->use_empty()) {
    HandleSDNode Dummy(DAG.getRoot());
    DAG.RemoveDeadNode(Result);
  }
  Result = TmpRes.getNode();

  unsigned NumArgRegs = Result->getNumValues() - 1;
  DAG.setRoot(SDValue(Result, NumArgRegs));
  // Set up the return result vector.
  unsigned i = 0;
  unsigned Idx = 1;
  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
       ++I, ++Idx) {
    SmallVector<MVT, 4> ValueVTs;
    ComputeValueVTs(*this, I->getType(), ValueVTs);
    for (unsigned Value = 0, NumValues = ValueVTs.size();
         Value != NumValues; ++Value) {
      MVT VT = ValueVTs[Value];
      MVT PartVT = getRegisterType(VT);

      unsigned NumParts = getNumRegisters(VT);
      SmallVector<SDValue, 4> Parts(NumParts);
      for (unsigned j = 0; j != NumParts; ++j)
        Parts[j] = SDValue(Result, i++);

      ISD::NodeType AssertOp = ISD::DELETED_NODE;
      if (F.paramHasAttr(Idx, Attribute::SExt))
        AssertOp = ISD::AssertSext;
      else if (F.paramHasAttr(Idx, Attribute::ZExt))
        AssertOp = ISD::AssertZext;

      ArgValues.push_back(getCopyFromParts(DAG, dl, &Parts[0], NumParts,
                                           PartVT, VT, AssertOp));
    }
  }
  assert(i == NumArgRegs && "Argument register count mismatch!");
}

/// TargetLowering::LowerCallTo - This is the default LowerCallTo
/// implementation, which just inserts an ISD::CALL node, which is later custom
/// lowered by the target to something concrete. FIXME: When all targets are
/// migrated to using ISD::CALL, this hook should be integrated into SDISel.
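///
/// The CALL node built here has the rough shape (illustrative sketch):
///   operands: chain, callee, then for each legal part of each argument the
///             part value followed by its argument-flags word
///   results:  one value per legal part of the return type, plus a chain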
std::pair<SDValue, SDValue>
TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
                            bool RetSExt, bool RetZExt, bool isVarArg,
                            bool isInreg,
                            unsigned CallingConv, bool isTailCall,
                            SDValue Callee,
                            ArgListTy &Args, SelectionDAG &DAG, DebugLoc dl) {
  assert((!isTailCall || PerformTailCallOpt) &&
         "isTailCall set when tail-call optimizations are disabled!");

  SmallVector<SDValue, 32> Ops;
  Ops.push_back(Chain);   // Op#0 - Chain
  Ops.push_back(Callee);

  // Handle all of the outgoing arguments.
  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
    SmallVector<MVT, 4> ValueVTs;
    ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
    for (unsigned Value = 0, NumValues = ValueVTs.size();
         Value != NumValues; ++Value) {
      MVT VT = ValueVTs[Value];
      const Type *ArgTy = VT.getTypeForMVT();
      SDValue Op = SDValue(Args[i].Node.getNode(),
                           Args[i].Node.getResNo() + Value);
      ISD::ArgFlagsTy Flags;
      unsigned OriginalAlignment =
        getTargetData()->getABITypeAlignment(ArgTy);

      if (Args[i].isZExt)
        Flags.setZExt();
      if (Args[i].isSExt)
        Flags.setSExt();
      if (Args[i].isInReg)
        Flags.setInReg();
      if (Args[i].isSRet)
        Flags.setSRet();
      if (Args[i].isByVal) {
        Flags.setByVal();
        const PointerType *Ty = cast<PointerType>(Args[i].Ty);
        const Type *ElementTy = Ty->getElementType();
        unsigned FrameAlign = getByValTypeAlignment(ElementTy);
        unsigned FrameSize  = getTargetData()->getTypeAllocSize(ElementTy);
        // For ByVal, alignment should come from FE. BE will guess if this
        // info is not there but there are cases it cannot get right.
        if (Args[i].Alignment)
          FrameAlign = Args[i].Alignment;
        Flags.setByValAlign(FrameAlign);
        Flags.setByValSize(FrameSize);
      }
      if (Args[i].isNest)
        Flags.setNest();
      Flags.setOrigAlign(OriginalAlignment);

      MVT PartVT = getRegisterType(VT);
      unsigned NumParts = getNumRegisters(VT);
      SmallVector<SDValue, 4> Parts(NumParts);
      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

      if (Args[i].isSExt)
        ExtendKind = ISD::SIGN_EXTEND;
      else if (Args[i].isZExt)
        ExtendKind = ISD::ZERO_EXTEND;

      getCopyToParts(DAG, dl, Op, &Parts[0], NumParts, PartVT, ExtendKind);

      for (unsigned i = 0; i != NumParts; ++i) {
        // if it isn't first piece, alignment must be 1
        ISD::ArgFlagsTy MyFlags = Flags;
        if (NumParts > 1 && i == 0)
          MyFlags.setSplit();
        else if (i != 0)
          MyFlags.setOrigAlign(1);

        Ops.push_back(Parts[i]);
        Ops.push_back(DAG.getArgFlags(MyFlags));
      }
    }
  }
  // Figure out the result value types. We start by making a list of
  // the potentially illegal return value types.
  SmallVector<MVT, 4> LoweredRetTys;
  SmallVector<MVT, 4> RetTys;
  ComputeValueVTs(*this, RetTy, RetTys);

  // Then we translate that to a list of legal types.
  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    MVT VT = RetTys[I];
    MVT RegisterVT = getRegisterType(VT);
    unsigned NumRegs = getNumRegisters(VT);
    for (unsigned i = 0; i != NumRegs; ++i)
      LoweredRetTys.push_back(RegisterVT);
  }

  LoweredRetTys.push_back(MVT::Other);  // Always has a chain.

  // Create the CALL node.
  SDValue Res = DAG.getCall(CallingConv, dl,
                            isVarArg, isTailCall, isInreg,
                            DAG.getVTList(&LoweredRetTys[0],
                                          LoweredRetTys.size()),
                            &Ops[0], Ops.size());
  Chain = Res.getValue(LoweredRetTys.size() - 1);
  // Gather up the call result into a single value.
  if (RetTy != Type::VoidTy && !RetTys.empty()) {
    ISD::NodeType AssertOp = ISD::DELETED_NODE;

    if (RetSExt)
      AssertOp = ISD::AssertSext;
    else if (RetZExt)
      AssertOp = ISD::AssertZext;

    SmallVector<SDValue, 4> ReturnValues;
    unsigned RegNo = 0;
    for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
      MVT VT = RetTys[I];
      MVT RegisterVT = getRegisterType(VT);
      unsigned NumRegs = getNumRegisters(VT);
      unsigned RegNoEnd = NumRegs + RegNo;
      SmallVector<SDValue, 4> Results;
      for (; RegNo != RegNoEnd; ++RegNo)
        Results.push_back(Res.getValue(RegNo));
      SDValue ReturnValue =
        getCopyFromParts(DAG, dl, &Results[0], NumRegs, RegisterVT, VT,
                         AssertOp);
      ReturnValues.push_back(ReturnValue);
    }
    Res = DAG.getNode(ISD::MERGE_VALUES, dl,
                      DAG.getVTList(&RetTys[0], RetTys.size()),
                      &ReturnValues[0], ReturnValues.size());
  }

  return std::make_pair(Res, Chain);
}

void TargetLowering::LowerOperationWrapper(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) {
  SDValue Res = LowerOperation(SDValue(N, 0), DAG);
  if (Res.getNode())
    Results.push_back(Res);
}

SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
  assert(0 && "LowerOperation not implemented for this target!");
  abort();
  return SDValue();
}

void SelectionDAGLowering::CopyValueToVirtualRegister(Value *V, unsigned Reg) {
  SDValue Op = getValue(V);
  assert((Op.getOpcode() != ISD::CopyFromReg ||
          cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
         "Copy from a reg to the same reg!");
  assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");

  RegsForValue RFV(TLI, Reg, V->getType());
  SDValue Chain = DAG.getEntryNode();
  RFV.getCopyToRegs(Op, DAG, getCurDebugLoc(), Chain, 0);
  PendingExports.push_back(Chain);
}

#include "llvm/CodeGen/SelectionDAGISel.h"

void SelectionDAGISel::
LowerArguments(BasicBlock *LLVMBB) {
  // If this is the entry block, emit arguments.
  Function &F = *LLVMBB->getParent();
  SDValue OldRoot = SDL->DAG.getRoot();
  SmallVector<SDValue, 16> Args;
  TLI.LowerArguments(F, SDL->DAG, Args, SDL->getCurDebugLoc());

  unsigned a = 0;
  for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
       AI != E; ++AI) {
    SmallVector<MVT, 4> ValueVTs;
    ComputeValueVTs(TLI, AI->getType(), ValueVTs);
    unsigned NumValues = ValueVTs.size();
    if (!AI->use_empty()) {
      SDL->setValue(AI, SDL->DAG.getMergeValues(&Args[a], NumValues,
                                                SDL->getCurDebugLoc()));
      // If this argument is live outside of the entry block, insert a copy
      // from wherever we got it to the vreg that other BBs will reference it
      // as.
      SDL->CopyToExportRegsIfNeeded(AI);
    }
    a += NumValues;
  }

  // Finally, if the target has anything special to do, allow it to do so.
  // FIXME: this should insert code into the DAG!
  EmitFunctionEntryCode(F, SDL->DAG.getMachineFunction());
}

/// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
/// ensure constants are generated when needed. Remember the virtual registers
/// that need to be added to the Machine PHI nodes as input. We cannot just
/// directly add them, because expansion might result in multiple MBBs for one
/// BB. As such, the start of the BB might correspond to a different MBB than
/// the end.
///
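/// For example (illustrative), a switch terminator may be expanded into a
/// tree of compare-and-branch MBBs, so PHI inputs must be recorded against
/// the machine blocks that actually feed each successor, not the original
/// LLVM block boundaries.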
void
SelectionDAGISel::HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB) {
  TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();
    PHINode *PN;

    // At this point we know that there is a 1-1 correspondence between LLVM
    // PHI nodes and Machine PHI nodes, but the incoming operands have not
    // been emitted yet.
    for (BasicBlock::iterator I = SuccBB->begin();
         (PN = dyn_cast<PHINode>(I)); ++I) {
      // Ignore dead PHIs.
      if (PN->use_empty()) continue;

      unsigned Reg;
      Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      if (Constant *C = dyn_cast<Constant>(PHIOp)) {
        unsigned &RegOut = SDL->ConstantsOut[C];
        if (RegOut == 0) {
          RegOut = FuncInfo->CreateRegForValue(C);
          SDL->CopyValueToVirtualRegister(C, RegOut);
        }
        Reg = RegOut;
      } else {
        Reg = FuncInfo->ValueMap[PHIOp];
        if (Reg == 0) {
          assert(isa<AllocaInst>(PHIOp) &&
                 FuncInfo->StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
                 "Didn't codegen value into a register!??");
          Reg = FuncInfo->CreateRegForValue(PHIOp);
          SDL->CopyValueToVirtualRegister(PHIOp, Reg);
        }
      }

      // Remember that this register needs to be added to the machine PHI
      // node as the input for this MBB.
      SmallVector<MVT, 4> ValueVTs;
      ComputeValueVTs(TLI, PN->getType(), ValueVTs);
      for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
        MVT VT = ValueVTs[vti];
        unsigned NumRegisters = TLI.getNumRegisters(VT);
        for (unsigned i = 0, e = NumRegisters; i != e; ++i)
          SDL->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
        Reg += NumRegisters;
      }
    }
  }
  SDL->ConstantsOut.clear();
}

/// This is the Fast-ISel version of HandlePHINodesInSuccessorBlocks. It only
/// supports legal types, and it emits MachineInstrs directly instead of
/// creating SelectionDAG nodes.
///
bool
SelectionDAGISel::HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB,
                                                      FastISel *F) {
  TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  unsigned OrigNumPHINodesToUpdate = SDL->PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();
    PHINode *PN;

    // At this point we know that there is a 1-1 correspondence between LLVM
    // PHI nodes and Machine PHI nodes, but the incoming operands have not
    // been emitted yet.
    for (BasicBlock::iterator I = SuccBB->begin();
         (PN = dyn_cast<PHINode>(I)); ++I) {
      // Ignore dead PHIs.
      if (PN->use_empty()) continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegForValue to create registers, so it always creates
      // exactly one register for each non-void instruction.
      MVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Promote MVT::i1.
        if (VT == MVT::i1)
          VT = TLI.getTypeToTransformTo(VT);
        else {
          SDL->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
          return false;
        }
      }

      Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      unsigned Reg = F->getRegForValue(PHIOp);
      if (Reg == 0) {
        SDL->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
        return false;
      }
      SDL->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));