//===-- SelectionDAGBuild.cpp - Selection-DAG building --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "isel"
#include "SelectionDAGBuild.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Constants.h"
#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/DwarfWriter.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<unsigned, true>
LimitFPPrecision("limit-float-precision",
                 cl::desc("Generate low-precision inline sequences "
                          "for some float libcalls"),
                 cl::location(LimitFloatPrecision),
                 cl::init(0));

/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
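/// For illustration (an assumed example, not part of the original comment):
/// the leaves of the aggregate are numbered depth-first, so for
///   { i32, { float, float }, i8 }
/// the insertvalue/extractvalue indices {1, 1} (the second float) map to
/// linear index 2, the leaves being i32 = 0, float = 1, float = 2, i8 = 3.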
static unsigned ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
                                   const unsigned *Indices,
                                   const unsigned *IndicesEnd,
                                   unsigned CurIndex = 0) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(TLI, *EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(TLI, *EI, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    const Type *EltTy = ATy->getElementType();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
      if (Indices && *Indices == i)
        return ComputeLinearIndex(TLI, EltTy, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(TLI, EltTy, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// MVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
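/// For illustration (an assumed example, not part of the original comment):
/// with a typical 32-bit struct layout, the type { i32, [2 x float] } would
/// produce ValueVTs = { i32, f32, f32 } and, if requested,
/// Offsets = { 0, 4, 8 }.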
static void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
                            SmallVectorImpl<MVT> &ValueVTs,
                            SmallVectorImpl<uint64_t> *Offsets = 0,
                            uint64_t StartingOffset = 0) {
  // Given a struct type, recursively traverse the elements.
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    const Type *EltTy = ATy->getElementType();
    uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty == Type::VoidTy)
    return;
  // Base case: we can get an MVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

/// RegsForValue - This struct represents the registers (physical or virtual)
/// that a particular set of values is assigned, and the type information about
/// the value. The most common situation is to represent one value at a time,
/// but struct or array values are handled element-wise as multiple values.
/// The splitting of aggregates is performed recursively, so that we never
/// have aggregate-typed registers. The values at this point do not necessarily
/// have legal types, so each value may require one or more registers of some
/// register type.
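/// For illustration (an assumed example, not part of the original comment):
/// on a 32-bit target an i64 value would typically be described here with
/// ValueVTs = { i64 }, RegVTs = { i32 }, and two consecutive entries in Regs,
/// one virtual register for each i32 half.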
struct VISIBILITY_HIDDEN RegsForValue {
  /// TLI - The TargetLowering object.
  const TargetLowering *TLI;

  /// ValueVTs - The value types of the values, which may not be legal, and
  /// may need to be promoted or synthesized from one or more registers.
  SmallVector<MVT, 4> ValueVTs;

  /// RegVTs - The value types of the registers. This is the same size as
  /// ValueVTs and it records, for each value, what the type of the assigned
  /// register or registers are. (Individual values are never synthesized
  /// from more than one type of register.)
  ///
  /// With virtual registers, the contents of RegVTs is redundant with TLI's
  /// getRegisterType member function, however with physical registers
  /// it is necessary to have a separate record of the types.
  SmallVector<MVT, 4> RegVTs;

  /// Regs - This list holds the registers assigned to the values.
  /// Each legal or promoted value requires one register, and each
  /// expanded value requires multiple registers.
  SmallVector<unsigned, 4> Regs;

  RegsForValue() : TLI(0) {}

  RegsForValue(const TargetLowering &tli,
               const SmallVector<unsigned, 4> &regs,
               MVT regvt, MVT valuevt)
    : TLI(&tli), ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
  RegsForValue(const TargetLowering &tli,
               const SmallVector<unsigned, 4> &regs,
               const SmallVector<MVT, 4> &regvts,
               const SmallVector<MVT, 4> &valuevts)
    : TLI(&tli), ValueVTs(valuevts), RegVTs(regvts), Regs(regs) {}
  RegsForValue(const TargetLowering &tli,
               unsigned Reg, const Type *Ty) : TLI(&tli) {
    ComputeValueVTs(tli, Ty, ValueVTs);

    for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
      MVT ValueVT = ValueVTs[Value];
      unsigned NumRegs = TLI->getNumRegisters(ValueVT);
      MVT RegisterVT = TLI->getRegisterType(ValueVT);
      for (unsigned i = 0; i != NumRegs; ++i)
        Regs.push_back(Reg + i);
      RegVTs.push_back(RegisterVT);
      Reg += NumRegs;
    }
  }

  /// append - Add the specified values to this one.
  void append(const RegsForValue &RHS) {
    ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
    RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
    Regs.append(RHS.Regs.begin(), RHS.Regs.end());
  }

  /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
  /// this value and returns the result as a ValueVTs value. This uses
  /// Chain/Flag as the input and updates them for the output Chain/Flag.
  /// If the Flag pointer is NULL, no flag is used.
  SDValue getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
                          SDValue &Chain, SDValue *Flag) const;

  /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
  /// specified value into the registers specified by this object. This uses
  /// Chain/Flag as the input and updates them for the output Chain/Flag.
  /// If the Flag pointer is NULL, no flag is used.
  void getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
                     SDValue &Chain, SDValue *Flag) const;

  /// AddInlineAsmOperands - Add this value to the specified inlineasm node
  /// operand list. This adds the code marker, matching input operand index
  /// (if applicable), and includes the number of values added into it.
  void AddInlineAsmOperands(unsigned Code,
                            bool HasMatching, unsigned MatchingIdx,
                            SelectionDAG &DAG, std::vector<SDValue> &Ops) const;
};

/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it, or used by a
/// switch or atomic instruction, which may expand to multiple basic blocks.
static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
  if (isa<PHINode>(I)) return true;
  BasicBlock *BB = I->getParent();
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))
      return true;
  return false;
}

/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true. This includes arguments used by switches, since
/// the switch may expand into multiple basic blocks.
static bool isOnlyUsedInEntryBlock(Argument *A, bool EnableFastISel) {
  // With FastISel active, we may be splitting blocks, so force creation
  // of virtual registers for all non-dead arguments.
  // Don't force virtual registers for byval arguments though, because
  // fast-isel can't handle those in all cases.
  if (EnableFastISel && !A->hasByValAttr())
    return A->use_empty();

  BasicBlock *Entry = A->getParent()->begin();
  for (Value::use_iterator UI = A->use_begin(), E = A->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != Entry || isa<SwitchInst>(*UI))
      return false; // Use not in entry block.
  return true;
}

FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli)

void FunctionLoweringInfo::set(Function &fn, MachineFunction &mf,
                               bool EnableFastISel) {
  RegInfo = &MF->getRegInfo();

  // Create a vreg for each argument register that is not dead and is used
  // outside of the entry block for the function.
  for (Function::arg_iterator AI = Fn->arg_begin(), E = Fn->arg_end();
    if (!isOnlyUsedInEntryBlock(AI, EnableFastISel))
      InitializeRegForValue(AI);

  // Initialize the mapping of values to registers. This is only set up for
  // instruction values that are used outside of the block that defines them.
  Function::iterator BB = Fn->begin(), EB = Fn->end();
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      if (ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        const Type *Ty = AI->getAllocatedType();
        uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
          std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
        TySize *= CUI->getZExtValue();   // Get total allocated size.
        if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
        StaticAllocaMap[AI] =
          MF->getFrameInfo()->CreateStackObject(TySize, Align);

  for (; BB != EB; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
      if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
        if (!isa<AllocaInst>(I) ||
            !StaticAllocaMap.count(cast<AllocaInst>(I)))
          InitializeRegForValue(I);

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (BB = Fn->begin(), EB = Fn->end(); BB != EB; ++BB) {
    MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(BB);

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    for (BasicBlock::iterator
           I = BB->begin(), E = BB->end(); I != E; ++I) {
      if (CallInst *CI = dyn_cast<CallInst>(I)) {
        if (Function *F = CI->getCalledFunction()) {
          switch (F->getIntrinsicID()) {
          case Intrinsic::dbg_stoppoint: {
            DbgStopPointInst *SPI = cast<DbgStopPointInst>(I);
            if (isValidDebugInfoIntrinsic(*SPI, CodeGenOpt::Default))
              DL = ExtractDebugLocation(*SPI, MF->getDebugLocInfo());
          case Intrinsic::dbg_func_start: {
            DbgFuncStartInst *FSI = cast<DbgFuncStartInst>(I);
            if (isValidDebugInfoIntrinsic(*FSI, CodeGenOpt::Default))
              DL = ExtractDebugLocation(*FSI, MF->getDebugLocInfo());

      PN = dyn_cast<PHINode>(I);
      if (!PN || PN->use_empty()) continue;

      unsigned PHIReg = ValueMap[PN];
      assert(PHIReg && "PHI node does not have an assigned virtual register!");

      SmallVector<MVT, 4> ValueVTs;
      ComputeValueVTs(TLI, PN->getType(), ValueVTs);
      for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
        MVT VT = ValueVTs[vti];
        unsigned NumRegisters = TLI.getNumRegisters(VT);
        const TargetInstrInfo *TII = MF->getTarget().getInstrInfo();
        for (unsigned i = 0; i != NumRegisters; ++i)
          BuildMI(MBB, DL, TII->get(TargetInstrInfo::PHI), PHIReg + i);
        PHIReg += NumRegisters;

unsigned FunctionLoweringInfo::MakeReg(MVT VT) {
  return RegInfo->createVirtualRegister(TLI.getRegClassFor(VT));

/// CreateRegForValue - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types. Assign these registers
/// consecutive vreg numbers and return the first assigned number.
///
/// In the case that the given value has struct or array type, this function
/// will assign registers for each member or element.
///
unsigned FunctionLoweringInfo::CreateRegForValue(const Value *V) {
  SmallVector<MVT, 4> ValueVTs;
  ComputeValueVTs(TLI, V->getType(), ValueVTs);

  unsigned FirstReg = 0;
  for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
    MVT ValueVT = ValueVTs[Value];
    MVT RegisterVT = TLI.getRegisterType(ValueVT);

    unsigned NumRegs = TLI.getNumRegisters(ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      unsigned R = MakeReg(RegisterVT);
      if (!FirstReg) FirstReg = R;

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent. If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
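/// For illustration (an assumed example, not part of the original comment):
/// assembling an i64 value from two legal i32 parts conceptually builds
/// BUILD_PAIR(i64, Lo, Hi); when the parts carry more bits than ValueVT, an
/// AssertZext/AssertSext node can record what is known about the extra bits
/// before the final TRUNCATE back down to ValueVT.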
static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc dl,
                                const SDValue *Parts,
                                unsigned NumParts, MVT PartVT, MVT ValueVT,
                                ISD::NodeType AssertOp = ISD::DELETED_NODE) {
  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Assemble the value from multiple parts.
  if (!ValueVT.isVector() && ValueVT.isInteger()) {
    unsigned PartBits = PartVT.getSizeInBits();
    unsigned ValueBits = ValueVT.getSizeInBits();

    // Assemble the power of 2 part.
    unsigned RoundParts = NumParts & (NumParts - 1) ?
      1 << Log2_32(NumParts) : NumParts;
    unsigned RoundBits = PartBits * RoundParts;
    MVT RoundVT = RoundBits == ValueBits ?
      ValueVT : MVT::getIntegerVT(RoundBits);
    MVT HalfVT = MVT::getIntegerVT(RoundBits/2);

    if (RoundParts > 2) {
      Lo = getCopyFromParts(DAG, dl, Parts, RoundParts/2, PartVT, HalfVT);
      Hi = getCopyFromParts(DAG, dl, Parts+RoundParts/2, RoundParts/2,
      Lo = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[0]);
      Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HalfVT, Parts[1]);
    if (TLI.isBigEndian())
    Val = DAG.getNode(ISD::BUILD_PAIR, dl, RoundVT, Lo, Hi);

    if (RoundParts < NumParts) {
      // Assemble the trailing non-power-of-2 part.
      unsigned OddParts = NumParts - RoundParts;
      MVT OddVT = MVT::getIntegerVT(OddParts * PartBits);
      Hi = getCopyFromParts(DAG, dl,
                            Parts+RoundParts, OddParts, PartVT, OddVT);

      // Combine the round and odd parts.
      if (TLI.isBigEndian())
      MVT TotalVT = MVT::getIntegerVT(NumParts * PartBits);
      Hi = DAG.getNode(ISD::ANY_EXTEND, dl, TotalVT, Hi);
      Hi = DAG.getNode(ISD::SHL, dl, TotalVT, Hi,
                       DAG.getConstant(Lo.getValueType().getSizeInBits(),
                                       TLI.getPointerTy()));
      Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, TotalVT, Lo);
      Val = DAG.getNode(ISD::OR, dl, TotalVT, Lo, Hi);
  } else if (ValueVT.isVector()) {
    // Handle a multi-element vector.
    MVT IntermediateVT, RegisterVT;
    unsigned NumIntermediates;
    TLI.getVectorTypeBreakdown(ValueVT, IntermediateVT, NumIntermediates,
    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT == Parts[0].getValueType() &&
           "Part type doesn't match part!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, dl, &Parts[i], 1,
                                  PartVT, IntermediateVT);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate operands
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, dl, &Parts[i * Factor], Factor,
                                  PartVT, IntermediateVT);

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the intermediate
    Val = DAG.getNode(IntermediateVT.isVector() ?
                      ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, dl,
                      ValueVT, &Ops[0], NumIntermediates);
  } else if (PartVT.isFloatingPoint()) {
    // FP split into multiple FP parts (for ppcf128)
    assert(ValueVT == MVT(MVT::ppcf128) && PartVT == MVT(MVT::f64) &&
    Lo = DAG.getNode(ISD::BIT_CONVERT, dl, MVT(MVT::f64), Parts[0]);
    Hi = DAG.getNode(ISD::BIT_CONVERT, dl, MVT(MVT::f64), Parts[1]);
    if (TLI.isBigEndian())
    Val = DAG.getNode(ISD::BUILD_PAIR, dl, ValueVT, Lo, Hi);
    // FP split into integer parts (soft fp)
    assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
           !PartVT.isVector() && "Unexpected split");
    MVT IntVT = MVT::getIntegerVT(ValueVT.getSizeInBits());
    Val = getCopyFromParts(DAG, dl, Parts, NumParts, PartVT, IntVT);

  // There is now one part, held in Val. Correct it to match ValueVT.
  PartVT = Val.getValueType();

  if (PartVT == ValueVT)

  if (PartVT.isVector()) {
    assert(ValueVT.isVector() && "Unknown vector conversion!");
    return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);

  if (ValueVT.isVector()) {
    assert(ValueVT.getVectorElementType() == PartVT &&
           ValueVT.getVectorNumElements() == 1 &&
           "Only trivial scalar-to-vector conversions should get here!");
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ValueVT, Val);

  if (PartVT.isInteger() &&
      ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp != ISD::DELETED_NODE)
        Val = DAG.getNode(AssertOp, dl, PartVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
    return DAG.getNode(ISD::ANY_EXTEND, dl, ValueVT, Val);

  if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    if (ValueVT.bitsLT(Val.getValueType()))
      // FP_ROUND's are always exact here.
      return DAG.getNode(ISD::FP_ROUND, dl, ValueVT, Val,
                         DAG.getIntPtrConstant(1));
    return DAG.getNode(ISD::FP_EXTEND, dl, ValueVT, Val);

  if (PartVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BIT_CONVERT, dl, ValueVT, Val);

  llvm_unreachable("Unknown mismatch!");

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts. If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
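/// For illustration (an assumed example, not part of the original comment):
/// copying an i64 value to two i32 parts on a 32-bit target bisects the value
/// with EXTRACT_ELEMENT, so Parts[0] receives the low half and Parts[1] the
/// high half (reversed afterwards on big-endian targets); an i16 copied into
/// one i32 part is first widened according to ExtendKind.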
static void getCopyToParts(SelectionDAG &DAG, DebugLoc dl, SDValue Val,
                           SDValue *Parts, unsigned NumParts, MVT PartVT,
                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  MVT PtrVT = TLI.getPointerTy();
  MVT ValueVT = Val.getValueType();
  unsigned PartBits = PartVT.getSizeInBits();
  unsigned OrigNumParts = NumParts;
  assert(TLI.isTypeLegal(PartVT) && "Copying to an illegal type!");

  if (!ValueVT.isVector()) {
    if (PartVT == ValueVT) {
      assert(NumParts == 1 && "No-op copy with multiple parts!");

    if (NumParts * PartBits > ValueVT.getSizeInBits()) {
      // If the parts cover more bits than the value has, promote the value.
      if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
        assert(NumParts == 1 && "Do not know what to promote to!");
        Val = DAG.getNode(ISD::FP_EXTEND, dl, PartVT, Val);
      } else if (PartVT.isInteger() && ValueVT.isInteger()) {
        ValueVT = MVT::getIntegerVT(NumParts * PartBits);
        Val = DAG.getNode(ExtendKind, dl, ValueVT, Val);
        llvm_unreachable("Unknown mismatch!");
    } else if (PartBits == ValueVT.getSizeInBits()) {
      // Different types of the same size.
      assert(NumParts == 1 && PartVT != ValueVT);
      Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
    } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
      // If the parts cover fewer bits than the value has, truncate the value.
      if (PartVT.isInteger() && ValueVT.isInteger()) {
        ValueVT = MVT::getIntegerVT(NumParts * PartBits);
        Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);
        llvm_unreachable("Unknown mismatch!");

    // The value may have changed - recompute ValueVT.
    ValueVT = Val.getValueType();
    assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
           "Failed to tile the value with PartVT!");

    assert(PartVT == ValueVT && "Type conversion failed!");

    // Expand the value into multiple parts.
    if (NumParts & (NumParts - 1)) {
      // The number of parts is not a power of 2. Split off and copy the tail.
      assert(PartVT.isInteger() && ValueVT.isInteger() &&
             "Do not know what to expand to!");
      unsigned RoundParts = 1 << Log2_32(NumParts);
      unsigned RoundBits = RoundParts * PartBits;
      unsigned OddParts = NumParts - RoundParts;
      SDValue OddVal = DAG.getNode(ISD::SRL, dl, ValueVT, Val,
                                   DAG.getConstant(RoundBits,
                                                   TLI.getPointerTy()));
      getCopyToParts(DAG, dl, OddVal, Parts + RoundParts, OddParts, PartVT);
      if (TLI.isBigEndian())
        // The odd parts were reversed by getCopyToParts - unreverse them.
        std::reverse(Parts + RoundParts, Parts + NumParts);
      NumParts = RoundParts;
      ValueVT = MVT::getIntegerVT(NumParts * PartBits);
      Val = DAG.getNode(ISD::TRUNCATE, dl, ValueVT, Val);

    // The number of parts is a power of 2. Repeatedly bisect the value using
    Parts[0] = DAG.getNode(ISD::BIT_CONVERT, dl,
                           MVT::getIntegerVT(ValueVT.getSizeInBits()),
    for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
      for (unsigned i = 0; i < NumParts; i += StepSize) {
        unsigned ThisBits = StepSize * PartBits / 2;
        MVT ThisVT = MVT::getIntegerVT(ThisBits);
        SDValue &Part0 = Parts[i];
        SDValue &Part1 = Parts[i+StepSize/2];

        Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                            DAG.getConstant(1, PtrVT));
        Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
                            DAG.getConstant(0, PtrVT));

        if (ThisBits == PartBits && ThisVT != PartVT) {
          Part0 = DAG.getNode(ISD::BIT_CONVERT, dl,
          Part1 = DAG.getNode(ISD::BIT_CONVERT, dl,

  if (TLI.isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);

  if (PartVT != ValueVT) {
    if (PartVT.isVector()) {
      Val = DAG.getNode(ISD::BIT_CONVERT, dl, PartVT, Val);
      assert(ValueVT.getVectorElementType() == PartVT &&
             ValueVT.getVectorNumElements() == 1 &&
             "Only trivial vector-to-scalar conversions should get here!");
      Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                        DAG.getConstant(0, PtrVT));

  // Handle a multi-element vector.
  MVT IntermediateVT, RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs = TLI
    .getVectorTypeBreakdown(ValueVT, IntermediateVT, NumIntermediates,
  unsigned NumElements = ValueVT.getVectorNumElements();

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i)
    if (IntermediateVT.isVector())
      Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl,
                           DAG.getConstant(i * (NumElements / NumIntermediates),
      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                           DAG.getConstant(i, PtrVT));

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, dl, Ops[i], &Parts[i], 1, PartVT);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each value into legal parts.
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, dl, Ops[i], &Parts[i * Factor], Factor, PartVT);

void SelectionDAGLowering::init(GCFunctionInfo *gfi, AliasAnalysis &aa) {
  TD = DAG.getTarget().getTargetData();

/// clear - Clear out the current SelectionDAG and the associated
/// state and prepare this SelectionDAGLowering object to be used
/// for a new block. This doesn't clear out information about
/// additional blocks that are needed to complete switch lowering
/// or PHI node updating; that information is cleared out as it is
/// consumed.
void SelectionDAGLowering::clear() {
  PendingLoads.clear();
  PendingExports.clear();
  CurDebugLoc = DebugLoc::getUnknownLoc();

/// getRoot - Return the current virtual root of the Selection DAG,
/// flushing any PendingLoad items. This must be done before emitting
/// a store or any other node that may need to be ordered after any
/// prior load instructions.
SDValue SelectionDAGLowering::getRoot() {
  if (PendingLoads.empty())
    return DAG.getRoot();

  if (PendingLoads.size() == 1) {
    SDValue Root = PendingLoads[0];
    PendingLoads.clear();

  // Otherwise, we have to make a token factor node.
  SDValue Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
                             &PendingLoads[0], PendingLoads.size());
  PendingLoads.clear();

/// getControlRoot - Similar to getRoot, but instead of flushing all the
/// PendingLoad items, flush all the PendingExports items. It is necessary
/// to do this before emitting a terminator instruction.
SDValue SelectionDAGLowering::getControlRoot() {
  SDValue Root = DAG.getRoot();

  if (PendingExports.empty())

  // Turn all of the CopyToReg chains into one factored node.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = PendingExports.size();
    for (; i != e; ++i) {
      assert(PendingExports[i].getNode()->getNumOperands() > 1);
      if (PendingExports[i].getNode()->getOperand(0) == Root)
        break; // Don't add the root if we already indirectly depend on it.

      PendingExports.push_back(Root);

  Root = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
                     PendingExports.size());
  PendingExports.clear();

void SelectionDAGLowering::visit(Instruction &I) {
  visit(I.getOpcode(), I);

void SelectionDAGLowering::visit(unsigned Opcode, User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  default: llvm_unreachable("Unknown instruction type encountered!");
    // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
  case Instruction::OPCODE: return visit##OPCODE((CLASS&)I);
#include "llvm/Instruction.def"

SDValue SelectionDAGLowering::getValue(const Value *V) {
  SDValue &N = NodeMap[V];
  if (N.getNode()) return N;

  if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V))) {
    MVT VT = TLI.getValueType(V->getType(), true);

    if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
      return N = DAG.getConstant(*CI, VT);

    if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
      return N = DAG.getGlobalAddress(GV, VT);

    if (isa<ConstantPointerNull>(C))
      return N = DAG.getConstant(0, TLI.getPointerTy());

    if (ConstantFP *CFP = dyn_cast<ConstantFP>(C))
      return N = DAG.getConstantFP(*CFP, VT);

    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
      return N = DAG.getUNDEF(VT);

    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      visit(CE->getOpcode(), *CE);
      SDValue N1 = NodeMap[V];
      assert(N1.getNode() && "visit didn't populate the ValueMap!");

    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
      SmallVector<SDValue, 4> Constants;
      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
        SDNode *Val = getValue(*OI).getNode();
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Constants.push_back(SDValue(Val, i));

      return DAG.getMergeValues(&Constants[0], Constants.size(),

    if (isa<StructType>(C->getType()) || isa<ArrayType>(C->getType())) {
      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
             "Unknown struct or array constant!");

      SmallVector<MVT, 4> ValueVTs;
      ComputeValueVTs(TLI, C->getType(), ValueVTs);
      unsigned NumElts = ValueVTs.size();
      if (NumElts == 0)
        return SDValue(); // empty struct
      SmallVector<SDValue, 4> Constants(NumElts);
      for (unsigned i = 0; i != NumElts; ++i) {
        MVT EltVT = ValueVTs[i];
        if (isa<UndefValue>(C))
          Constants[i] = DAG.getUNDEF(EltVT);
        else if (EltVT.isFloatingPoint())
          Constants[i] = DAG.getConstantFP(0, EltVT);
          Constants[i] = DAG.getConstant(0, EltVT);

      return DAG.getMergeValues(&Constants[0], NumElts, getCurDebugLoc());

    const VectorType *VecTy = cast<VectorType>(V->getType());
    unsigned NumElements = VecTy->getNumElements();

    // Now that we know the number and type of the elements, get that number of
    // elements into the Ops array based on what kind of constant it is.
    SmallVector<SDValue, 16> Ops;
    if (ConstantVector *CP = dyn_cast<ConstantVector>(C)) {
      for (unsigned i = 0; i != NumElements; ++i)
        Ops.push_back(getValue(CP->getOperand(i)));
      assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
      MVT EltVT = TLI.getValueType(VecTy->getElementType());

      if (EltVT.isFloatingPoint())
        Op = DAG.getConstantFP(0, EltVT);
        Op = DAG.getConstant(0, EltVT);
      Ops.assign(NumElements, Op);

    // Create a BUILD_VECTOR node.
    return NodeMap[V] = DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
                                    VT, &Ops[0], Ops.size());

  // If this is a static alloca, generate it as the frameindex instead of
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end())
      return DAG.getFrameIndex(SI->second, TLI.getPointerTy());

  unsigned InReg = FuncInfo.ValueMap[V];
  assert(InReg && "Value not in map!");

  RegsForValue RFV(TLI, InReg, V->getType());
  SDValue Chain = DAG.getEntryNode();
  return RFV.getCopyFromRegs(DAG, getCurDebugLoc(), Chain, NULL);

void SelectionDAGLowering::visitRet(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    DAG.setRoot(DAG.getNode(ISD::RET, getCurDebugLoc(),
                            MVT::Other, getControlRoot()));

  SmallVector<SDValue, 8> NewValues;
  NewValues.push_back(getControlRoot());
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    SmallVector<MVT, 4> ValueVTs;
    ComputeValueVTs(TLI, I.getOperand(i)->getType(), ValueVTs);
    unsigned NumValues = ValueVTs.size();
    if (NumValues == 0) continue;

    SDValue RetOp = getValue(I.getOperand(i));
    for (unsigned j = 0, f = NumValues; j != f; ++j) {
      MVT VT = ValueVTs[j];

      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

      const Function *F = I.getParent()->getParent();
      if (F->paramHasAttr(0, Attribute::SExt))
        ExtendKind = ISD::SIGN_EXTEND;
      else if (F->paramHasAttr(0, Attribute::ZExt))
        ExtendKind = ISD::ZERO_EXTEND;

      // FIXME: C calling convention requires the return type to be promoted to
      // at least 32-bit. But this is not necessary for non-C calling
      // conventions. The frontend should mark functions whose return values
      // require promoting with signext or zeroext attributes.
      if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
        MVT MinVT = TLI.getRegisterType(MVT::i32);
        if (VT.bitsLT(MinVT))
          VT = MinVT;
      }

      unsigned NumParts = TLI.getNumRegisters(VT);
      MVT PartVT = TLI.getRegisterType(VT);
      SmallVector<SDValue, 4> Parts(NumParts);
      getCopyToParts(DAG, getCurDebugLoc(),
                     SDValue(RetOp.getNode(), RetOp.getResNo() + j),
                     &Parts[0], NumParts, PartVT, ExtendKind);

      // 'inreg' on function refers to return value
      ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
      if (F->paramHasAttr(0, Attribute::InReg))

      // Propagate extension type if any
      if (F->paramHasAttr(0, Attribute::SExt))
      else if (F->paramHasAttr(0, Attribute::ZExt))

      for (unsigned i = 0; i < NumParts; ++i) {
        NewValues.push_back(Parts[i]);
        NewValues.push_back(DAG.getArgFlags(Flags));

  DAG.setRoot(DAG.getNode(ISD::RET, getCurDebugLoc(), MVT::Other,
                          &NewValues[0], NewValues.size()));

/// CopyToExportRegsIfNeeded - If the given value has virtual registers
/// created for it, emit nodes to copy the value into the virtual
/// registers.
void SelectionDAGLowering::CopyToExportRegsIfNeeded(Value *V) {
  if (!V->use_empty()) {
    DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
    if (VMI != FuncInfo.ValueMap.end())
      CopyValueToVirtualRegister(V, VMI->second);

/// ExportFromCurrentBlock - If this condition isn't known to be exported from
/// the current basic block, add it to ValueMap now so that we'll get a
void SelectionDAGLowering::ExportFromCurrentBlock(Value *V) {
  // No need to export constants.
  if (!isa<Instruction>(V) && !isa<Argument>(V)) return;

  // Already exported?
  if (FuncInfo.isExportedInst(V)) return;

  unsigned Reg = FuncInfo.InitializeRegForValue(V);
  CopyValueToVirtualRegister(V, Reg);

bool SelectionDAGLowering::isExportableFromCurrentBlock(Value *V,
                                                        const BasicBlock *FromBB) {
  // The operands of the setcc have to be in this block. We don't know
  // how to export them from some other block.
  if (Instruction *VI = dyn_cast<Instruction>(V)) {
    // Can export from current BB.
    if (VI->getParent() == FromBB)

    // Is already exported, noop.
    return FuncInfo.isExportedInst(V);

  // If this is an argument, we can export it if the BB is the entry block or
  // if it is already exported.
  if (isa<Argument>(V)) {
    if (FromBB == &FromBB->getParent()->getEntryBlock())

    // Otherwise, can only export this if it is already exported.
    return FuncInfo.isExportedInst(V);

  // Otherwise, constants can always be exported.

static bool InBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
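/// For illustration (an assumed example, not part of the original comment):
/// FCMP_OLT maps to the ordered code SETOLT (FPC); when finite-only FP math
/// is in effect the ordered/unordered distinction is dropped and the plain
/// SETLT form (FOC) is used instead.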
static ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred) {
  ISD::CondCode FPC, FOC;
  case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
  case FCmpInst::FCMP_OEQ:   FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
  case FCmpInst::FCMP_OGT:   FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
  case FCmpInst::FCMP_OGE:   FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
  case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
  case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
  case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
  case FCmpInst::FCMP_ORD:   FOC = FPC = ISD::SETO; break;
  case FCmpInst::FCMP_UNO:   FOC = FPC = ISD::SETUO; break;
  case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
  case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
  case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
  case FCmpInst::FCMP_ULT:   FOC = ISD::SETLT; FPC = ISD::SETULT; break;
  case FCmpInst::FCMP_ULE:   FOC = ISD::SETLE; FPC = ISD::SETULE; break;
  case FCmpInst::FCMP_UNE:   FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
  case FCmpInst::FCMP_TRUE:  FOC = FPC = ISD::SETTRUE; break;
    llvm_unreachable("Invalid FCmp predicate opcode!");
    FOC = FPC = ISD::SETFALSE;
  if (FiniteOnlyFPMath())

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
static ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
    llvm_unreachable("Invalid ICmp predicate opcode!");

/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
/// This function emits a branch and is used at the leaves of an OR or an
/// AND operator tree.
void
SelectionDAGLowering::EmitBranchForMergedCondition(Value *Cond,
                                                   MachineBasicBlock *TBB,
                                                   MachineBasicBlock *FBB,
                                                   MachineBasicBlock *CurBB) {
  const BasicBlock *BB = CurBB->getBasicBlock();

  // If the leaf of the tree is a comparison, merge the condition into
  if (CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    // The operands of the cmp have to be in this block. We don't know
    // how to export them from some other block. If this is the first block
    // of the sequence, no exporting is needed.
    if (CurBB == CurMBB ||
        (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
         isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
      ISD::CondCode Condition;
      if (ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
        Condition = getICmpCondCode(IC->getPredicate());
      } else if (FCmpInst *FC = dyn_cast<FCmpInst>(Cond)) {
        Condition = getFCmpCondCode(FC->getPredicate());
        Condition = ISD::SETEQ; // silence warning.
        llvm_unreachable("Unknown compare instruction");

      CaseBlock CB(Condition, BOp->getOperand(0),
                   BOp->getOperand(1), NULL, TBB, FBB, CurBB);
      SwitchCases.push_back(CB);

  // Create a CaseBlock record representing this branch.
  CaseBlock CB(ISD::SETEQ, Cond, ConstantInt::getTrue(),
               NULL, TBB, FBB, CurBB);
  SwitchCases.push_back(CB);

/// FindMergedConditions - If Cond is an expression like
void SelectionDAGLowering::FindMergedConditions(Value *Cond,
                                                MachineBasicBlock *TBB,
                                                MachineBasicBlock *FBB,
                                                MachineBasicBlock *CurBB,
  // If this node is not part of the or/and tree, emit it as a branch.
  Instruction *BOp = dyn_cast<Instruction>(Cond);
  if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
      (unsigned)BOp->getOpcode() != Opc || !BOp->hasOneUse() ||
      BOp->getParent() != CurBB->getBasicBlock() ||
      !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
      !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
    EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB);

  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI = CurBB;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, Opc);

    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    // This requires creation of TmpBB after CurBB.

    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, Opc);

    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, Opc);

/// If the set of cases should be emitted as a series of branches, return true.
/// If we should emit this as a bunch of and/or'd together conditions, return
/// false.
bool
SelectionDAGLowering::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases){
  if (Cases.size() != 2) return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {

void SelectionDAGLowering::visitBr(BranchInst &I) {
  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];

  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())

  if (I.isUnconditional()) {
    // Update machine-CFG edges.
    CurMBB->addSuccessor(Succ0MBB);

    // If this is not a fall-through branch, emit the branch.
    if (Succ0MBB != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
                              MVT::Other, getControlRoot(),
                              DAG.getBasicBlock(Succ0MBB)));

  // If this condition is one of the special cases we handle, do special stuff
  Value *CondVal = I.getCondition();
  MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  // For example, instead of something like:
  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
    if (BOp->hasOneUse() &&
        (BOp->getOpcode() == Instruction::And ||
         BOp->getOpcode() == Instruction::Or)) {
      FindMergedConditions(BOp, Succ0MBB, Succ1MBB, CurMBB, BOp->getOpcode());
      // If the compares in later blocks need to use values not currently
      // exported from this block, export them now. This block should always
      // be the first entry.
      assert(SwitchCases[0].ThisBB == CurMBB && "Unexpected lowering!");

      // Allow some cases to be rejected.
      if (ShouldEmitAsBranches(SwitchCases)) {
        for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i) {
          ExportFromCurrentBlock(SwitchCases[i].CmpLHS);
          ExportFromCurrentBlock(SwitchCases[i].CmpRHS);

        // Emit the branch for this block.
        visitSwitchCase(SwitchCases[0]);
        SwitchCases.erase(SwitchCases.begin());

      // Okay, we decided not to do this, remove any inserted MBB's and clear
      for (unsigned i = 1, e = SwitchCases.size(); i != e; ++i)
        CurMBB->getParent()->erase(SwitchCases[i].ThisBB);

      SwitchCases.clear();

  // Create a CaseBlock record representing this branch.
  CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(),
               NULL, Succ0MBB, Succ1MBB, CurMBB);
  // Use visitSwitchCase to actually insert the fast branch sequence for this
  visitSwitchCase(CB);

/// visitSwitchCase - Emits the necessary code to represent a single node in
/// the binary search tree resulting from lowering a switch instruction.
void SelectionDAGLowering::visitSwitchCase(CaseBlock &CB) {
  SDValue CondLHS = getValue(CB.CmpLHS);
  DebugLoc dl = getCurDebugLoc();

  // Build the setcc now.
  if (CB.CmpMHS == NULL) {
    // Fold "(X == true)" to X and "(X == false)" to !X to
    // handle common cases produced by branch lowering.
    if (CB.CmpRHS == ConstantInt::getTrue() && CB.CC == ISD::SETEQ)
    else if (CB.CmpRHS == ConstantInt::getFalse() && CB.CC == ISD::SETEQ) {
      SDValue True = DAG.getConstant(1, CondLHS.getValueType());
      Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
      Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, getValue(CB.CmpRHS), CB.CC);
    assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");

    const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    SDValue CmpOp = getValue(CB.CmpMHS);
    MVT VT = CmpOp.getValueType();

    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, VT),
      SDValue SUB = DAG.getNode(ISD::SUB, dl,
                                VT, CmpOp, DAG.getConstant(Low, VT));
      Cond = DAG.getSetCC(dl, MVT::i1, SUB,
                          DAG.getConstant(High-Low, VT), ISD::SETULE);

  // Update successor info
  CurMBB->addSuccessor(CB.TrueBB);
  CurMBB->addSuccessor(CB.FalseBB);

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())

  // If the lhs block is the next block, invert the condition so that we can
  // fall through to the lhs instead of the rhs block.
  if (CB.TrueBB == NextBlock) {
    std::swap(CB.TrueBB, CB.FalseBB);
    SDValue True = DAG.getConstant(1, Cond.getValueType());
    Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);

  SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
                               MVT::Other, getControlRoot(), Cond,
                               DAG.getBasicBlock(CB.TrueBB));

  // If the branch was constant folded, fix up the CFG.
  if (BrCond.getOpcode() == ISD::BR) {
    CurMBB->removeSuccessor(CB.FalseBB);
    DAG.setRoot(BrCond);
    // Otherwise, go ahead and insert the false branch.
    if (BrCond == getControlRoot())
      CurMBB->removeSuccessor(CB.TrueBB);

    if (CB.FalseBB == NextBlock)
      DAG.setRoot(BrCond);
      DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
                              DAG.getBasicBlock(CB.FalseBB)));

/// visitJumpTable - Emit JumpTable node in the current MBB
void SelectionDAGLowering::visitJumpTable(JumpTable &JT) {
  // Emit the code for the jump table
  assert(JT.Reg != -1U && "Should lower JT Header first!");
  MVT PTy = TLI.getPointerTy();
  SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(),
  SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
  DAG.setRoot(DAG.getNode(ISD::BR_JT, getCurDebugLoc(),
                          MVT::Other, Index.getValue(1),

/// visitJumpTableHeader - This function emits the code needed to produce an
/// index into the jump table from the value being switched on.
void SelectionDAGLowering::visitJumpTableHeader(JumpTable &JT,
                                                JumpTableHeader &JTH) {
  // Subtract the lowest switch case value from the value being switched on and
  // conditional branch to default mbb if the result is greater than the
  // difference between smallest and largest cases.
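  // Illustrative example (assumed, not from the original source): for case
  // values 10..14 the switch value X is lowered as (X - 10); a single
  // unsigned comparison (X - 10) > 4 branches to the default block, and the
  // remainder is used to index the jump table.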
  SDValue SwitchOp = getValue(JTH.SValue);
  MVT VT = SwitchOp.getValueType();
  SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
                            DAG.getConstant(JTH.First, VT));

  // The SDNode we just created, which holds the value being switched on minus
  // the smallest case value, needs to be copied to a virtual register so it
  // can be used as an index into the jump table in a subsequent basic block.
  // This value may be smaller or larger than the target's pointer type, and
  // may therefore require extension or truncation.
  if (VT.bitsGT(TLI.getPointerTy()))
    SwitchOp = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
                           TLI.getPointerTy(), SUB);
    SwitchOp = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
                           TLI.getPointerTy(), SUB);

  unsigned JumpTableReg = FuncInfo.MakeReg(TLI.getPointerTy());
  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),
                                    JumpTableReg, SwitchOp);
  JT.Reg = JumpTableReg;

  // Emit the range check for the jump table, and branch to the default block
  // for the switch statement if the value being switched on exceeds the largest
  // case in the switch.
  SDValue CMP = DAG.getSetCC(getCurDebugLoc(),
                             TLI.getSetCCResultType(SUB.getValueType()), SUB,
                             DAG.getConstant(JTH.Last-JTH.First,VT),

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())

  SDValue BrCond = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
                               MVT::Other, CopyTo, CMP,
                               DAG.getBasicBlock(JT.Default));

  if (JT.MBB == NextBlock)
    DAG.setRoot(BrCond);
    DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrCond,
                            DAG.getBasicBlock(JT.MBB)));

/// visitBitTestHeader - This function emits necessary code to produce value
/// suitable for "bit tests"
void SelectionDAGLowering::visitBitTestHeader(BitTestBlock &B) {
  // Subtract the minimum value
  SDValue SwitchOp = getValue(B.SValue);
  MVT VT = SwitchOp.getValueType();
  SDValue SUB = DAG.getNode(ISD::SUB, getCurDebugLoc(), VT, SwitchOp,
                            DAG.getConstant(B.First, VT));

  SDValue RangeCmp = DAG.getSetCC(getCurDebugLoc(),
                                  TLI.getSetCCResultType(SUB.getValueType()),
                                  SUB, DAG.getConstant(B.Range, VT),

  if (VT.bitsGT(TLI.getPointerTy()))
    ShiftOp = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
                          TLI.getPointerTy(), SUB);
    ShiftOp = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
                          TLI.getPointerTy(), SUB);

  B.Reg = FuncInfo.MakeReg(TLI.getPointerTy());
  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), getCurDebugLoc(),

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())

  MachineBasicBlock* MBB = B.Cases[0].ThisBB;

  CurMBB->addSuccessor(B.Default);
  CurMBB->addSuccessor(MBB);

  SDValue BrRange = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
                                MVT::Other, CopyTo, RangeCmp,
                                DAG.getBasicBlock(B.Default));

  if (MBB == NextBlock)
    DAG.setRoot(BrRange);
    DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, CopyTo,
                            DAG.getBasicBlock(MBB)));

/// visitBitTestCase - this function produces one "bit test"
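/// For illustration (an assumed example, not part of the original comment):
/// for a cluster of case values {0, 3, 5} the header block computes
/// Mask = (1 << 0) | (1 << 3) | (1 << 5) = 0x29, and this block then tests
/// ((1 << X) & 0x29) != 0 to decide whether to branch to the target block.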
void SelectionDAGLowering::visitBitTestCase(MachineBasicBlock* NextMBB,
  // Make desired shift
  SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), getCurDebugLoc(), Reg,
                                       TLI.getPointerTy());
  SDValue SwitchVal = DAG.getNode(ISD::SHL, getCurDebugLoc(),
                                  DAG.getConstant(1, TLI.getPointerTy()),

  // Emit bit tests and jumps
  SDValue AndOp = DAG.getNode(ISD::AND, getCurDebugLoc(),
                              TLI.getPointerTy(), SwitchVal,
                              DAG.getConstant(B.Mask, TLI.getPointerTy()));
  SDValue AndCmp = DAG.getSetCC(getCurDebugLoc(),
                                TLI.getSetCCResultType(AndOp.getValueType()),
                                AndOp, DAG.getConstant(0, TLI.getPointerTy()),

  CurMBB->addSuccessor(B.TargetBB);
  CurMBB->addSuccessor(NextMBB);

  SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurDebugLoc(),
                              MVT::Other, getControlRoot(),
                              AndCmp, DAG.getBasicBlock(B.TargetBB));

  // Set NextBlock to be the MBB immediately after the current one, if any.
  // This is used to avoid emitting unnecessary branches to the next block.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())

  if (NextMBB == NextBlock)
    DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(), MVT::Other, BrAnd,
                            DAG.getBasicBlock(NextMBB)));

void SelectionDAGLowering::visitInvoke(InvokeInst &I) {
  // Retrieve successors.
  MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
  MachineBasicBlock *LandingPad = FuncInfo.MBBMap[I.getSuccessor(1)];

  const Value *Callee(I.getCalledValue());
  if (isa<InlineAsm>(Callee))
    LowerCallTo(&I, getValue(Callee), false, LandingPad);

  // If the value of the invoke is used outside of its defining block, make it
  // available as a virtual register.
  CopyToExportRegsIfNeeded(&I);

  // Update successor info
  CurMBB->addSuccessor(Return);
  CurMBB->addSuccessor(LandingPad);

  // Drop into normal successor.
  DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
                          MVT::Other, getControlRoot(),
                          DAG.getBasicBlock(Return)));

void SelectionDAGLowering::visitUnwind(UnwindInst &I) {
1579 /// handleSmallSwitchCaseRange - Emit a series of specific tests (suitable for
1580 /// small case ranges).
1581 bool SelectionDAGLowering::handleSmallSwitchRange(CaseRec& CR,
1582 CaseRecVector& WorkList,
1584 MachineBasicBlock* Default) {
1585 Case& BackCase = *(CR.Range.second-1);
1587 // Size is the number of Cases represented by this range.
1588 size_t Size = CR.Range.second - CR.Range.first;
1592 // Get the MachineFunction which holds the current MBB. This is used when
1593 // inserting any additional MBBs necessary to represent the switch.
1594 MachineFunction *CurMF = CurMBB->getParent();
1596 // Figure out which block is immediately after the current one.
1597 MachineBasicBlock *NextBlock = 0;
1598 MachineFunction::iterator BBI = CR.CaseBB;
1600 if (++BBI != CurMBB->getParent()->end())
1603 // TODO: If any two of the cases has the same destination, and if one value
1604 // is the same as the other, but has one bit unset that the other has set,
1605 // use bit manipulation to do two compares at once. For example:
1606 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
1608 // Rearrange the case blocks so that the last one falls through if possible.
1609 if (NextBlock && Default != NextBlock && BackCase.BB != NextBlock) {
1610 // The last case block won't fall through into 'NextBlock' if we emit the
1611 // branches in this order. See if rearranging a case value would help.
1612 for (CaseItr I = CR.Range.first, E = CR.Range.second-1; I != E; ++I) {
1613 if (I->BB == NextBlock) {
1614 std::swap(*I, BackCase);
1620 // Create a CaseBlock record representing a conditional branch to
1621 // the Case's target mbb if the value being switched on SV is equal
1623 MachineBasicBlock *CurBlock = CR.CaseBB;
1624 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) {
1625 MachineBasicBlock *FallThrough;
1627 FallThrough = CurMF->CreateMachineBasicBlock(CurBlock->getBasicBlock());
1628 CurMF->insert(BBI, FallThrough);
1630 // Put SV in a virtual register to make it available from the new blocks.
1631 ExportFromCurrentBlock(SV);
1633 // If the last case doesn't match, go to the default block.
1634 FallThrough = Default;
1637 Value *RHS, *LHS, *MHS;
1639 if (I->High == I->Low) {
1640 // This is just a small case range containing exactly one case.
1642 LHS = SV; RHS = I->High; MHS = NULL;
1645 LHS = I->Low; MHS = SV; RHS = I->High;
1647 CaseBlock CB(CC, LHS, RHS, MHS, I->BB, FallThrough, CurBlock);
1649 // If emitting the first comparison, just call visitSwitchCase to emit the
1650 // code into the current block. Otherwise, push the CaseBlock onto the
1651 // vector to be later processed by SDISel, and insert the node's MBB
1652 // before the next MBB.
1653 if (CurBlock == CurMBB)
1654 visitSwitchCase(CB);
1656 SwitchCases.push_back(CB);
1658 CurBlock = FallThrough;
1664 static inline bool areJTsAllowed(const TargetLowering &TLI) {
1665 return !DisableJumpTables &&
1666 (TLI.isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
1667 TLI.isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
1670 static APInt ComputeRange(const APInt &First, const APInt &Last) {
1671 APInt LastExt(Last), FirstExt(First);
1672 uint32_t BitWidth = std::max(Last.getBitWidth(), First.getBitWidth()) + 1;
1673 LastExt.sext(BitWidth); FirstExt.sext(BitWidth);
1674 return (LastExt - FirstExt + 1ULL);
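// Worked example for ComputeRange above (illustrative): for 8-bit case values
// First = -3 and Last = 5, both are sign-extended to 9 bits before the
// subtraction, so the result is 5 - (-3) + 1 = 9 and the computation cannot
// wrap even when the original values span the full signed range.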
1677 /// handleJTSwitchCase - Emit a jump table for the current switch case range.
1678 bool SelectionDAGLowering::handleJTSwitchCase(CaseRec& CR,
1679 CaseRecVector& WorkList,
1681 MachineBasicBlock* Default) {
1682 Case& FrontCase = *CR.Range.first;
1683 Case& BackCase = *(CR.Range.second-1);
1685 const APInt& First = cast<ConstantInt>(FrontCase.Low)->getValue();
1686 const APInt& Last = cast<ConstantInt>(BackCase.High)->getValue();
1689 for (CaseItr I = CR.Range.first, E = CR.Range.second;
1693 if (!areJTsAllowed(TLI) || TSize <= 3)
1696 APInt Range = ComputeRange(First, Last);
1697 double Density = (double)TSize / Range.roundToDouble();
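// Illustrative example: 8 case values spanning [0, 15] give Range = 16 and
// Density = 8/16 = 0.5. Only sufficiently dense ranges (roughly 40%, per the
// policy described in visitSwitch below) are worth a jump table, since a
// sparse table wastes most of its slots on the default destination.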
1701 DEBUG(errs() << "Lowering jump table\n"
1702 << "First entry: " << First << ". Last entry: " << Last << '\n'
1703 << "Range: " << Range
1704 << "Size: " << TSize << ". Density: " << Density << "\n\n");
1706 // Get the MachineFunction which holds the current MBB. This is used when
1707 // inserting any additional MBBs necessary to represent the switch.
1708 MachineFunction *CurMF = CurMBB->getParent();
1710 // Figure out which block is immediately after the current one.
1711 MachineBasicBlock *NextBlock = 0;
1712 MachineFunction::iterator BBI = CR.CaseBB;
1714 if (++BBI != CurMBB->getParent()->end())
1717 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1719 // Create a new basic block to hold the code for loading the address
1720 // of the jump table, and jumping to it. Update successor information;
1721 // we will either branch to the default case for the switch, or the jump table.
1723 MachineBasicBlock *JumpTableBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1724 CurMF->insert(BBI, JumpTableBB);
1725 CR.CaseBB->addSuccessor(Default);
1726 CR.CaseBB->addSuccessor(JumpTableBB);
1728 // Build a vector of destination BBs, corresponding to each target
1729 // of the jump table. If the value of the jump table slot corresponds to
1730 // a case statement, push the case's BB onto the vector; otherwise, push the default BB.
1732 std::vector<MachineBasicBlock*> DestBBs;
1734 for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++TEI) {
1735 const APInt& Low = cast<ConstantInt>(I->Low)->getValue();
1736 const APInt& High = cast<ConstantInt>(I->High)->getValue();
1738 if (Low.sle(TEI) && TEI.sle(High)) {
1739 DestBBs.push_back(I->BB);
1743 DestBBs.push_back(Default);
1747 // Update successor info. Add one edge to each unique successor.
1748 BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs());
1749 for (std::vector<MachineBasicBlock*>::iterator I = DestBBs.begin(),
1750 E = DestBBs.end(); I != E; ++I) {
1751 if (!SuccsHandled[(*I)->getNumber()]) {
1752 SuccsHandled[(*I)->getNumber()] = true;
1753 JumpTableBB->addSuccessor(*I);
1757 // Create a jump table index for this jump table, or return an existing one.
1759 unsigned JTI = CurMF->getJumpTableInfo()->getJumpTableIndex(DestBBs);
1761 // Set the jump table information so that we can codegen it as a second
1762 // MachineBasicBlock
1763 JumpTable JT(-1U, JTI, JumpTableBB, Default);
1764 JumpTableHeader JTH(First, Last, SV, CR.CaseBB, (CR.CaseBB == CurMBB));
1765 if (CR.CaseBB == CurMBB)
1766 visitJumpTableHeader(JT, JTH);
1768 JTCases.push_back(JumpTableBlock(JTH, JT));
1773 /// handleBTSplitSwitchCase - emit a comparison and split the binary search tree into two subtrees.
1775 bool SelectionDAGLowering::handleBTSplitSwitchCase(CaseRec& CR,
1776 CaseRecVector& WorkList,
1778 MachineBasicBlock* Default) {
1779 // Get the MachineFunction which holds the current MBB. This is used when
1780 // inserting any additional MBBs necessary to represent the switch.
1781 MachineFunction *CurMF = CurMBB->getParent();
1783 // Figure out which block is immediately after the current one.
1784 MachineBasicBlock *NextBlock = 0;
1785 MachineFunction::iterator BBI = CR.CaseBB;
1787 if (++BBI != CurMBB->getParent()->end())
1790 Case& FrontCase = *CR.Range.first;
1791 Case& BackCase = *(CR.Range.second-1);
1792 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
1794 // Size is the number of Cases represented by this range.
1795 unsigned Size = CR.Range.second - CR.Range.first;
1797 const APInt& First = cast<ConstantInt>(FrontCase.Low)->getValue();
1798 const APInt& Last = cast<ConstantInt>(BackCase.High)->getValue();
1800 CaseItr Pivot = CR.Range.first + Size/2;
1802 // Select the optimal pivot, maximizing the summed density of the LHS and RHS.
1803 // This will (heuristically) allow us to emit jump tables later.
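// The metric computed in the loop below is log2(gap between the two candidate
// halves) scaled by the sum of the halves' densities: a good pivot sits at a
// large hole in the case values while leaving both halves dense enough to
// remain jump-table candidates.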
1805 for (CaseItr I = CR.Range.first, E = CR.Range.second;
1809 size_t LSize = FrontCase.size();
1810 size_t RSize = TSize-LSize;
1811 DEBUG(errs() << "Selecting best pivot: \n"
1812 << "First: " << First << ", Last: " << Last <<'\n'
1813 << "LSize: " << LSize << ", RSize: " << RSize << '\n');
1814 for (CaseItr I = CR.Range.first, J=I+1, E = CR.Range.second;
1816 const APInt& LEnd = cast<ConstantInt>(I->High)->getValue();
1817 const APInt& RBegin = cast<ConstantInt>(J->Low)->getValue();
1818 APInt Range = ComputeRange(LEnd, RBegin);
1819 assert((Range - 2ULL).isNonNegative() &&
1820 "Invalid case distance");
1821 double LDensity = (double)LSize / (LEnd - First + 1ULL).roundToDouble();
1822 double RDensity = (double)RSize / (Last - RBegin + 1ULL).roundToDouble();
1823 double Metric = Range.logBase2()*(LDensity+RDensity);
1824 // Should always split in some non-trivial place
1825 DEBUG(errs() <<"=>Step\n"
1826 << "LEnd: " << LEnd << ", RBegin: " << RBegin << '\n'
1827 << "LDensity: " << LDensity
1828 << ", RDensity: " << RDensity << '\n'
1829 << "Metric: " << Metric << '\n');
1830 if (FMetric < Metric) {
1833 DEBUG(errs() << "Current metric set to: " << FMetric << '\n');
1839 if (areJTsAllowed(TLI)) {
1840 // If our case is dense we *really* should handle it earlier!
1841 assert((FMetric > 0) && "Should handle dense range earlier!");
1843 Pivot = CR.Range.first + Size/2;
1846 CaseRange LHSR(CR.Range.first, Pivot);
1847 CaseRange RHSR(Pivot, CR.Range.second);
1848 Constant *C = Pivot->Low;
1849 MachineBasicBlock *FalseBB = 0, *TrueBB = 0;
1851 // We know that we branch to the LHS if the Value being switched on is
1852 // less than the Pivot value, C. We use this to optimize our binary
1853 // tree a bit, by recognizing that if SV is greater than or equal to the
1854 // LHS's Case Value, and that Case Value is exactly one less than the
1855 // Pivot's Value, then we can branch directly to the LHS's Target,
1856 // rather than creating a leaf node for it.
1857 if ((LHSR.second - LHSR.first) == 1 &&
1858 LHSR.first->High == CR.GE &&
1859 cast<ConstantInt>(C)->getValue() ==
1860 (cast<ConstantInt>(CR.GE)->getValue() + 1LL)) {
1861 TrueBB = LHSR.first->BB;
1863 TrueBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1864 CurMF->insert(BBI, TrueBB);
1865 WorkList.push_back(CaseRec(TrueBB, C, CR.GE, LHSR));
1867 // Put SV in a virtual register to make it available from the new blocks.
1868 ExportFromCurrentBlock(SV);
1871 // Similar to the optimization above, if the Value being switched on is
1872 // known to be less than the Constant CR.LT, and the current Case Value
1873 // is CR.LT - 1, then we can branch directly to the target block for
1874 // the current Case Value, rather than emitting a RHS leaf node for it.
1875 if ((RHSR.second - RHSR.first) == 1 && CR.LT &&
1876 cast<ConstantInt>(RHSR.first->Low)->getValue() ==
1877 (cast<ConstantInt>(CR.LT)->getValue() - 1LL)) {
1878 FalseBB = RHSR.first->BB;
1880 FalseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
1881 CurMF->insert(BBI, FalseBB);
1882 WorkList.push_back(CaseRec(FalseBB,CR.LT,C,RHSR));
1884 // Put SV in a virtual register to make it available from the new blocks.
1885 ExportFromCurrentBlock(SV);
1888 // Create a CaseBlock record representing a conditional branch to
1889 // the LHS node if the value being switched on (SV) is less than C.
1890 // Otherwise, branch to the RHS node.
1891 CaseBlock CB(ISD::SETLT, SV, C, NULL, TrueBB, FalseBB, CR.CaseBB);
1893 if (CR.CaseBB == CurMBB)
1894 visitSwitchCase(CB);
1896 SwitchCases.push_back(CB);
1901 /// handleBitTestsSwitchCase - if the current case range has few destinations and
1902 /// its value range spans less than the machine word bitwidth, encode the case range
1903 /// into a series of masks and emit bit tests against these masks.
1904 bool SelectionDAGLowering::handleBitTestsSwitchCase(CaseRec& CR,
1905 CaseRecVector& WorkList,
1907 MachineBasicBlock* Default){
1908 unsigned IntPtrBits = TLI.getPointerTy().getSizeInBits();
1910 Case& FrontCase = *CR.Range.first;
1911 Case& BackCase = *(CR.Range.second-1);
1913 // Get the MachineFunction which holds the current MBB. This is used when
1914 // inserting any additional MBBs necessary to represent the switch.
1915 MachineFunction *CurMF = CurMBB->getParent();
1917 // If target does not have legal shift left, do not emit bit tests at all.
1918 if (!TLI.isOperationLegal(ISD::SHL, TLI.getPointerTy()))
1922 for (CaseItr I = CR.Range.first, E = CR.Range.second;
1924 // A single case counts as one comparison, a case range as two.
1925 numCmps += (I->Low == I->High ? 1 : 2);
1928 // Count unique destinations
1929 SmallSet<MachineBasicBlock*, 4> Dests;
1930 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1931 Dests.insert(I->BB);
1932 if (Dests.size() > 3)
1933 // Don't bother with the code below if there are too many unique destinations.
1936 DEBUG(errs() << "Total number of unique destinations: " << Dests.size() << '\n'
1937 << "Total number of comparisons: " << numCmps << '\n');
1939 // Compute span of values.
1940 const APInt& minValue = cast<ConstantInt>(FrontCase.Low)->getValue();
1941 const APInt& maxValue = cast<ConstantInt>(BackCase.High)->getValue();
1942 APInt cmpRange = maxValue - minValue;
1944 DEBUG(errs() << "Compare range: " << cmpRange << '\n'
1945 << "Low bound: " << minValue << '\n'
1946 << "High bound: " << maxValue << '\n');
1948 if (cmpRange.uge(APInt(cmpRange.getBitWidth(), IntPtrBits)) ||
1949 (!(Dests.size() == 1 && numCmps >= 3) &&
1950 !(Dests.size() == 2 && numCmps >= 5) &&
1951 !(Dests.size() >= 3 && numCmps >= 6)))
1954 DEBUG(errs() << "Emitting bit tests\n");
1955 APInt lowBound = APInt::getNullValue(cmpRange.getBitWidth());
1957 // Optimize the case where all the case values fit in a
1958 // word without having to subtract minValue; in that case
1959 // we can avoid emitting the subtraction.
1960 if (minValue.isNonNegative() &&
1961 maxValue.slt(APInt(maxValue.getBitWidth(), IntPtrBits))) {
1962 cmpRange = maxValue;
1964 lowBound = minValue;
1967 CaseBitsVector CasesBits;
1968 unsigned i, count = 0;
1970 for (CaseItr I = CR.Range.first, E = CR.Range.second; I!=E; ++I) {
1971 MachineBasicBlock* Dest = I->BB;
1972 for (i = 0; i < count; ++i)
1973 if (Dest == CasesBits[i].BB)
1977 assert((count < 3) && "Too many destinations to test!");
1978 CasesBits.push_back(CaseBits(0, Dest, 0));
1982 const APInt& lowValue = cast<ConstantInt>(I->Low)->getValue();
1983 const APInt& highValue = cast<ConstantInt>(I->High)->getValue();
1985 uint64_t lo = (lowValue - lowBound).getZExtValue();
1986 uint64_t hi = (highValue - lowBound).getZExtValue();
1988 for (uint64_t j = lo; j <= hi; j++) {
1989 CasesBits[i].Mask |= 1ULL << j;
1990 CasesBits[i].Bits++;
1994 std::sort(CasesBits.begin(), CasesBits.end(), CaseBitsCmp());
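// Illustrative example: switching on X with cases 1, 3, 5 -> A and 2, 4 -> B
// produces two CaseBits entries, one mask covering {1, 3, 5} and one covering
// {2, 4}; each destination is then reached with a single test of
// (1 << (X - lowBound)) against its mask instead of up to five compares.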
1998 // Figure out which block is immediately after the current one.
1999 MachineFunction::iterator BBI = CR.CaseBB;
2002 const BasicBlock *LLVMBB = CR.CaseBB->getBasicBlock();
2004 DEBUG(errs() << "Cases:\n");
2005 for (unsigned i = 0, e = CasesBits.size(); i!=e; ++i) {
2006 DEBUG(errs() << "Mask: " << CasesBits[i].Mask
2007 << ", Bits: " << CasesBits[i].Bits
2008 << ", BB: " << CasesBits[i].BB << '\n');
2010 MachineBasicBlock *CaseBB = CurMF->CreateMachineBasicBlock(LLVMBB);
2011 CurMF->insert(BBI, CaseBB);
2012 BTC.push_back(BitTestCase(CasesBits[i].Mask,
2016 // Put SV in a virtual register to make it available from the new blocks.
2017 ExportFromCurrentBlock(SV);
2020 BitTestBlock BTB(lowBound, cmpRange, SV,
2021 -1U, (CR.CaseBB == CurMBB),
2022 CR.CaseBB, Default, BTC);
2024 if (CR.CaseBB == CurMBB)
2025 visitBitTestHeader(BTB);
2027 BitTestCases.push_back(BTB);
2033 /// Clusterify - Transform a simple list of Cases into a list of CaseRanges.
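/// For example, cases 1 -> A, 2 -> A, 3 -> A, 7 -> B cluster into the ranges
/// [1,3] -> A and [7,7] -> B; the returned numCmps is 3 (two compares for the
/// range plus one for the singleton).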
2034 size_t SelectionDAGLowering::Clusterify(CaseVector& Cases,
2035 const SwitchInst& SI) {
2038 // Start with "simple" cases
2039 for (size_t i = 1; i < SI.getNumSuccessors(); ++i) {
2040 MachineBasicBlock *SMBB = FuncInfo.MBBMap[SI.getSuccessor(i)];
2041 Cases.push_back(Case(SI.getSuccessorValue(i),
2042 SI.getSuccessorValue(i),
2045 std::sort(Cases.begin(), Cases.end(), CaseCmp());
2047 // Merge cases into clusters.
2048 if (Cases.size() >= 2)
2049 // Must recompute end() each iteration because it may be
2050 // invalidated by erase if we hold on to it
2051 for (CaseItr I = Cases.begin(), J = ++(Cases.begin()); J != Cases.end(); ) {
2052 const APInt& nextValue = cast<ConstantInt>(J->Low)->getValue();
2053 const APInt& currentValue = cast<ConstantInt>(I->High)->getValue();
2054 MachineBasicBlock* nextBB = J->BB;
2055 MachineBasicBlock* currentBB = I->BB;
2057 // If the two neighboring cases go to the same destination, merge them
2058 // into a single case.
2059 if ((nextValue - currentValue == 1) && (currentBB == nextBB)) {
2067 for (CaseItr I=Cases.begin(), E=Cases.end(); I!=E; ++I, ++numCmps) {
2068 if (I->Low != I->High)
2069 // A range counts double, since it requires two compares.
2076 void SelectionDAGLowering::visitSwitch(SwitchInst &SI) {
2077 // Figure out which block is immediately after the current one.
2078 MachineBasicBlock *NextBlock = 0;
2079 MachineFunction::iterator BBI = CurMBB;
2081 MachineBasicBlock *Default = FuncInfo.MBBMap[SI.getDefaultDest()];
2083 // If there is only the default destination, branch to it if it is not the
2084 // next basic block. Otherwise, just fall through.
2085 if (SI.getNumOperands() == 2) {
2086 // Update machine-CFG edges.
2088 // If this is not a fall-through branch, emit the branch.
2089 CurMBB->addSuccessor(Default);
2090 if (Default != NextBlock)
2091 DAG.setRoot(DAG.getNode(ISD::BR, getCurDebugLoc(),
2092 MVT::Other, getControlRoot(),
2093 DAG.getBasicBlock(Default)));
2097 // If there are any non-default case statements, create a vector of Cases
2098 // representing each one, and sort the vector so that we can efficiently
2099 // create a binary search tree from them.
2101 size_t numCmps = Clusterify(Cases, SI);
2102 DEBUG(errs() << "Clusterify finished. Total clusters: " << Cases.size()
2103 << ". Total compares: " << numCmps << '\n');
2106 // Get the Value to be switched on and default basic blocks, which will be
2107 // inserted into CaseBlock records, representing basic blocks in the binary search tree.
2109 Value *SV = SI.getOperand(0);
2111 // Push the initial CaseRec onto the worklist
2112 CaseRecVector WorkList;
2113 WorkList.push_back(CaseRec(CurMBB,0,0,CaseRange(Cases.begin(),Cases.end())));
2115 while (!WorkList.empty()) {
2116 // Grab a record representing a case range to process off the worklist
2117 CaseRec CR = WorkList.back();
2118 WorkList.pop_back();
2120 if (handleBitTestsSwitchCase(CR, WorkList, SV, Default))
2123 // If the range has few cases (two or fewer), emit a series of specific tests.
2125 if (handleSmallSwitchRange(CR, WorkList, SV, Default))
2128 // If the switch has more than 5 blocks, is at least 40% dense, and the
2129 // target supports indirect branches, then emit a jump table rather than
2130 // lowering the switch to a binary tree of conditional branches.
2131 if (handleJTSwitchCase(CR, WorkList, SV, Default))
2134 // Emit a binary tree. We need to pick a pivot, and push the left and right ranges
2135 // onto the worklist. Leaves are handled via a handleSmallSwitchRange() call.
2136 handleBTSplitSwitchCase(CR, WorkList, SV, Default);
2141 void SelectionDAGLowering::visitFSub(User &I) {
2142 // -0.0 - X --> fneg
2143 const Type *Ty = I.getType();
2144 if (isa<VectorType>(Ty)) {
2145 if (ConstantVector *CV = dyn_cast<ConstantVector>(I.getOperand(0))) {
2146 const VectorType *DestTy = cast<VectorType>(I.getType());
2147 const Type *ElTy = DestTy->getElementType();
2148 unsigned VL = DestTy->getNumElements();
2149 std::vector<Constant*> NZ(VL, Context->getConstantFPNegativeZero(ElTy));
2150 Constant *CNZ = DAG.getContext()->getConstantVector(&NZ[0], NZ.size());
2152 SDValue Op2 = getValue(I.getOperand(1));
2153 setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2154 Op2.getValueType(), Op2));
2159 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0)))
2160 if (CFP->isExactlyValue(
2161 Context->getConstantFPNegativeZero(Ty)->getValueAPF())) {
2162 SDValue Op2 = getValue(I.getOperand(1));
2163 setValue(&I, DAG.getNode(ISD::FNEG, getCurDebugLoc(),
2164 Op2.getValueType(), Op2));
2168 visitBinary(I, ISD::FSUB);
2171 void SelectionDAGLowering::visitBinary(User &I, unsigned OpCode) {
2172 SDValue Op1 = getValue(I.getOperand(0));
2173 SDValue Op2 = getValue(I.getOperand(1));
2175 setValue(&I, DAG.getNode(OpCode, getCurDebugLoc(),
2176 Op1.getValueType(), Op1, Op2));
2179 void SelectionDAGLowering::visitShift(User &I, unsigned Opcode) {
2180 SDValue Op1 = getValue(I.getOperand(0));
2181 SDValue Op2 = getValue(I.getOperand(1));
2182 if (!isa<VectorType>(I.getType()) &&
2183 Op2.getValueType() != TLI.getShiftAmountTy()) {
2184 // If the operand is smaller than the shift count type, promote it.
2185 if (TLI.getShiftAmountTy().bitsGT(Op2.getValueType()))
2186 Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2187 TLI.getShiftAmountTy(), Op2);
2188 // If the operand is larger than the shift count type but the shift
2189 // count type has enough bits to represent any shift value, truncate
2190 // it now. This is a common case and it exposes the truncate to
2191 // optimization early.
2192 else if (TLI.getShiftAmountTy().getSizeInBits() >=
2193 Log2_32_Ceil(Op2.getValueType().getSizeInBits()))
2194 Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2195 TLI.getShiftAmountTy(), Op2);
2196 // Otherwise we'll need to temporarily settle for some other
2197 // convenient type; type legalization will make adjustments as needed.
2199 else if (TLI.getPointerTy().bitsLT(Op2.getValueType()))
2200 Op2 = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2201 TLI.getPointerTy(), Op2);
2202 else if (TLI.getPointerTy().bitsGT(Op2.getValueType()))
2203 Op2 = DAG.getNode(ISD::ANY_EXTEND, getCurDebugLoc(),
2204 TLI.getPointerTy(), Op2);
2207 setValue(&I, DAG.getNode(Opcode, getCurDebugLoc(),
2208 Op1.getValueType(), Op1, Op2));
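// For example, on a (hypothetical) target whose shift-amount type is i16, an
// i32 shift amount is truncated to i16 above, since i16 has enough bits to
// represent any meaningful shift count, while an i8 shift amount would be
// any-extended to i16 instead.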
2211 void SelectionDAGLowering::visitICmp(User &I) {
2212 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
2213 if (ICmpInst *IC = dyn_cast<ICmpInst>(&I))
2214 predicate = IC->getPredicate();
2215 else if (ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
2216 predicate = ICmpInst::Predicate(IC->getPredicate());
2217 SDValue Op1 = getValue(I.getOperand(0));
2218 SDValue Op2 = getValue(I.getOperand(1));
2219 ISD::CondCode Opcode = getICmpCondCode(predicate);
2221 MVT DestVT = TLI.getValueType(I.getType());
2222 setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Opcode));
2225 void SelectionDAGLowering::visitFCmp(User &I) {
2226 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
2227 if (FCmpInst *FC = dyn_cast<FCmpInst>(&I))
2228 predicate = FC->getPredicate();
2229 else if (ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
2230 predicate = FCmpInst::Predicate(FC->getPredicate());
2231 SDValue Op1 = getValue(I.getOperand(0));
2232 SDValue Op2 = getValue(I.getOperand(1));
2233 ISD::CondCode Condition = getFCmpCondCode(predicate);
2234 MVT DestVT = TLI.getValueType(I.getType());
2235 setValue(&I, DAG.getSetCC(getCurDebugLoc(), DestVT, Op1, Op2, Condition));
2238 void SelectionDAGLowering::visitSelect(User &I) {
2239 SmallVector<MVT, 4> ValueVTs;
2240 ComputeValueVTs(TLI, I.getType(), ValueVTs);
2241 unsigned NumValues = ValueVTs.size();
2242 if (NumValues != 0) {
2243 SmallVector<SDValue, 4> Values(NumValues);
2244 SDValue Cond = getValue(I.getOperand(0));
2245 SDValue TrueVal = getValue(I.getOperand(1));
2246 SDValue FalseVal = getValue(I.getOperand(2));
2248 for (unsigned i = 0; i != NumValues; ++i)
2249 Values[i] = DAG.getNode(ISD::SELECT, getCurDebugLoc(),
2250 TrueVal.getValueType(), Cond,
2251 SDValue(TrueVal.getNode(), TrueVal.getResNo() + i),
2252 SDValue(FalseVal.getNode(), FalseVal.getResNo() + i));
2254 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2255 DAG.getVTList(&ValueVTs[0], NumValues),
2256 &Values[0], NumValues));
2261 void SelectionDAGLowering::visitTrunc(User &I) {
2262 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
2263 SDValue N = getValue(I.getOperand(0));
2264 MVT DestVT = TLI.getValueType(I.getType());
2265 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
2268 void SelectionDAGLowering::visitZExt(User &I) {
2269 // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2270 // ZExt also can't be a cast to bool for the same reason, so there's nothing much to do.
2271 SDValue N = getValue(I.getOperand(0));
2272 MVT DestVT = TLI.getValueType(I.getType());
2273 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N));
2276 void SelectionDAGLowering::visitSExt(User &I) {
2277 // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
2278 // SExt also can't be a cast to bool for the same reason, so there's nothing much to do.
2279 SDValue N = getValue(I.getOperand(0));
2280 MVT DestVT = TLI.getValueType(I.getType());
2281 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(), DestVT, N));
2284 void SelectionDAGLowering::visitFPTrunc(User &I) {
2285 // FPTrunc is never a no-op cast, no need to check
2286 SDValue N = getValue(I.getOperand(0));
2287 MVT DestVT = TLI.getValueType(I.getType());
2288 setValue(&I, DAG.getNode(ISD::FP_ROUND, getCurDebugLoc(),
2289 DestVT, N, DAG.getIntPtrConstant(0)));
2292 void SelectionDAGLowering::visitFPExt(User &I){
2293 // FPExt is never a no-op cast, no need to check
2294 SDValue N = getValue(I.getOperand(0));
2295 MVT DestVT = TLI.getValueType(I.getType());
2296 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurDebugLoc(), DestVT, N));
2299 void SelectionDAGLowering::visitFPToUI(User &I) {
2300 // FPToUI is never a no-op cast, no need to check
2301 SDValue N = getValue(I.getOperand(0));
2302 MVT DestVT = TLI.getValueType(I.getType());
2303 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurDebugLoc(), DestVT, N));
2306 void SelectionDAGLowering::visitFPToSI(User &I) {
2307 // FPToSI is never a no-op cast, no need to check
2308 SDValue N = getValue(I.getOperand(0));
2309 MVT DestVT = TLI.getValueType(I.getType());
2310 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurDebugLoc(), DestVT, N));
2313 void SelectionDAGLowering::visitUIToFP(User &I) {
2314 // UIToFP is never a no-op cast, no need to check
2315 SDValue N = getValue(I.getOperand(0));
2316 MVT DestVT = TLI.getValueType(I.getType());
2317 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurDebugLoc(), DestVT, N));
2320 void SelectionDAGLowering::visitSIToFP(User &I){
2321 // SIToFP is never a no-op cast, no need to check
2322 SDValue N = getValue(I.getOperand(0));
2323 MVT DestVT = TLI.getValueType(I.getType());
2324 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurDebugLoc(), DestVT, N));
2327 void SelectionDAGLowering::visitPtrToInt(User &I) {
2328 // What to do depends on the size of the integer and the size of the pointer.
2329 // We can either truncate, zero extend, or no-op, accordingly.
2330 SDValue N = getValue(I.getOperand(0));
2331 MVT SrcVT = N.getValueType();
2332 MVT DestVT = TLI.getValueType(I.getType());
2334 if (DestVT.bitsLT(SrcVT))
2335 Result = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N);
2337 // Note: ZERO_EXTEND can handle cases where the sizes are equal too
2338 Result = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), DestVT, N);
2339 setValue(&I, Result);
2342 void SelectionDAGLowering::visitIntToPtr(User &I) {
2343 // What to do depends on the size of the integer and the size of the pointer.
2344 // We can either truncate, zero extend, or no-op, accordingly.
2345 SDValue N = getValue(I.getOperand(0));
2346 MVT SrcVT = N.getValueType();
2347 MVT DestVT = TLI.getValueType(I.getType());
2348 if (DestVT.bitsLT(SrcVT))
2349 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), DestVT, N));
2351 // Note: ZERO_EXTEND can handle cases where the sizes are equal too
2352 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2356 void SelectionDAGLowering::visitBitCast(User &I) {
2357 SDValue N = getValue(I.getOperand(0));
2358 MVT DestVT = TLI.getValueType(I.getType());
2360 // BitCast assures us that source and destination are the same size so this
2361 // is either a BIT_CONVERT or a no-op.
2362 if (DestVT != N.getValueType())
2363 setValue(&I, DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
2364 DestVT, N)); // convert types
2366 setValue(&I, N); // noop cast.
2369 void SelectionDAGLowering::visitInsertElement(User &I) {
2370 SDValue InVec = getValue(I.getOperand(0));
2371 SDValue InVal = getValue(I.getOperand(1));
2372 SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2374 getValue(I.getOperand(2)));
2376 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurDebugLoc(),
2377 TLI.getValueType(I.getType()),
2378 InVec, InVal, InIdx));
2381 void SelectionDAGLowering::visitExtractElement(User &I) {
2382 SDValue InVec = getValue(I.getOperand(0));
2383 SDValue InIdx = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2385 getValue(I.getOperand(1)));
2386 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2387 TLI.getValueType(I.getType()), InVec, InIdx));
2391 // Utility for visitShuffleVector - Returns true if the mask is a sequential mask
2392 // starting at SIndx and increasing to the vector length (undefs are allowed).
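// For example, the mask {4, -1, 6, 7} is sequential for SIndx = 4 (the -1
// undef entry is ignored), while {0, 2, 1, 3} is not sequential for any SIndx.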
2393 static bool SequentialMask(SmallVectorImpl<int> &Mask, unsigned SIndx) {
2394 unsigned MaskNumElts = Mask.size();
2395 for (unsigned i = 0; i != MaskNumElts; ++i)
2396 if ((Mask[i] >= 0) && (Mask[i] != (int)(i + SIndx)))
2401 void SelectionDAGLowering::visitShuffleVector(User &I) {
2402 SmallVector<int, 8> Mask;
2403 SDValue Src1 = getValue(I.getOperand(0));
2404 SDValue Src2 = getValue(I.getOperand(1));
2406 // Convert the ConstantVector mask operand into an array of ints, with -1
2407 // representing undef values.
2408 SmallVector<Constant*, 8> MaskElts;
2409 cast<Constant>(I.getOperand(2))->getVectorElements(*Context, MaskElts);
2410 unsigned MaskNumElts = MaskElts.size();
2411 for (unsigned i = 0; i != MaskNumElts; ++i) {
2412 if (isa<UndefValue>(MaskElts[i]))
2415 Mask.push_back(cast<ConstantInt>(MaskElts[i])->getSExtValue());
2418 MVT VT = TLI.getValueType(I.getType());
2419 MVT SrcVT = Src1.getValueType();
2420 unsigned SrcNumElts = SrcVT.getVectorNumElements();
2422 if (SrcNumElts == MaskNumElts) {
2423 setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2428 // Normalize the shuffle vector since mask and vector length don't match.
2429 if (SrcNumElts < MaskNumElts && MaskNumElts % SrcNumElts == 0) {
2430 // The mask is longer than the source vectors and its length is a multiple of the
2431 // source vector length. We can use CONCAT_VECTORS to make the mask and vector lengths match.
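// For example, shuffling two <2 x i32> sources with mask <0, 1, 2, 3> is
// exactly the concatenation of the two sources and is emitted as a single
// CONCAT_VECTORS node below.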
2433 if (SrcNumElts*2 == MaskNumElts && SequentialMask(Mask, 0)) {
2434 // The shuffle is concatenating two vectors together.
2435 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, getCurDebugLoc(),
2440 // Pad both vectors with undefs to make them the same length as the mask.
2441 unsigned NumConcat = MaskNumElts / SrcNumElts;
2442 bool Src1U = Src1.getOpcode() == ISD::UNDEF;
2443 bool Src2U = Src2.getOpcode() == ISD::UNDEF;
2444 SDValue UndefVal = DAG.getUNDEF(SrcVT);
2446 SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
2447 SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
2451 Src1 = Src1U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2452 getCurDebugLoc(), VT,
2453 &MOps1[0], NumConcat);
2454 Src2 = Src2U ? DAG.getUNDEF(VT) : DAG.getNode(ISD::CONCAT_VECTORS,
2455 getCurDebugLoc(), VT,
2456 &MOps2[0], NumConcat);
2458 // Readjust mask for new input vector length.
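// For example, with SrcNumElts = 2 and MaskNumElts = 4, a mask index of 2
// (the first element of Src2) becomes 2 + 4 - 2 = 4, because after padding
// each source to four elements Src2's data starts at position 4 of the
// combined operand space.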
2459 SmallVector<int, 8> MappedOps;
2460 for (unsigned i = 0; i != MaskNumElts; ++i) {
2462 if (Idx < (int)SrcNumElts)
2463 MappedOps.push_back(Idx);
2465 MappedOps.push_back(Idx + MaskNumElts - SrcNumElts);
2467 setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2472 if (SrcNumElts > MaskNumElts) {
2473 // Analyze the access pattern of the vector to see if we can extract
2474 // two subvectors and do the shuffle. The analysis is done by calculating
2475 // the range of elements the mask accesses on both vectors.
2476 int MinRange[2] = { SrcNumElts+1, SrcNumElts+1};
2477 int MaxRange[2] = {-1, -1};
2479 for (unsigned i = 0; i != MaskNumElts; ++i) {
2485 if (Idx >= (int)SrcNumElts) {
2489 if (Idx > MaxRange[Input])
2490 MaxRange[Input] = Idx;
2491 if (Idx < MinRange[Input])
2492 MinRange[Input] = Idx;
2495 // Check if the access is smaller than the vector size and whether we can
2496 // find a reasonable extract index.
2497 int RangeUse[2] = { 2, 2 }; // 0 = Unused, 1 = Extract, 2 = Cannot extract.
2498 int StartIdx[2]; // StartIdx to extract from
2499 for (int Input=0; Input < 2; ++Input) {
2500 if (MinRange[Input] == (int)(SrcNumElts+1) && MaxRange[Input] == -1) {
2501 RangeUse[Input] = 0; // Unused
2502 StartIdx[Input] = 0;
2503 } else if (MaxRange[Input] - MinRange[Input] < (int)MaskNumElts) {
2504 // Fits within range but we should see if we can find a good
2505 // start index that is a multiple of the mask length.
2506 if (MaxRange[Input] < (int)MaskNumElts) {
2507 RangeUse[Input] = 1; // Extract from beginning of the vector
2508 StartIdx[Input] = 0;
2510 StartIdx[Input] = (MinRange[Input]/MaskNumElts)*MaskNumElts;
2511 if (MaxRange[Input] - StartIdx[Input] < (int)MaskNumElts &&
2512 StartIdx[Input] + MaskNumElts < SrcNumElts)
2513 RangeUse[Input] = 1; // Extract from a multiple of the mask length.
2518 if (RangeUse[0] == 0 && RangeUse[1] == 0) {
2519 setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
2522 else if (RangeUse[0] < 2 && RangeUse[1] < 2) {
2523 // Extract appropriate subvector and generate a vector shuffle
2524 for (int Input=0; Input < 2; ++Input) {
2525 SDValue& Src = Input == 0 ? Src1 : Src2;
2526 if (RangeUse[Input] == 0) {
2527 Src = DAG.getUNDEF(VT);
2529 Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, getCurDebugLoc(), VT,
2530 Src, DAG.getIntPtrConstant(StartIdx[Input]));
2533 // Calculate new mask.
2534 SmallVector<int, 8> MappedOps;
2535 for (unsigned i = 0; i != MaskNumElts; ++i) {
2538 MappedOps.push_back(Idx);
2539 else if (Idx < (int)SrcNumElts)
2540 MappedOps.push_back(Idx - StartIdx[0]);
2542 MappedOps.push_back(Idx - SrcNumElts - StartIdx[1] + MaskNumElts);
2544 setValue(&I, DAG.getVectorShuffle(VT, getCurDebugLoc(), Src1, Src2,
2550 // We can't use either concat vectors or extract subvectors, so fall back to
2551 // replacing the shuffle with a sequence of extract elements and a build vector.
2553 MVT EltVT = VT.getVectorElementType();
2554 MVT PtrVT = TLI.getPointerTy();
2555 SmallVector<SDValue,8> Ops;
2556 for (unsigned i = 0; i != MaskNumElts; ++i) {
2558 Ops.push_back(DAG.getUNDEF(EltVT));
2561 if (Idx < (int)SrcNumElts)
2562 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2563 EltVT, Src1, DAG.getConstant(Idx, PtrVT)));
2565 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurDebugLoc(),
2567 DAG.getConstant(Idx - SrcNumElts, PtrVT)));
2570 setValue(&I, DAG.getNode(ISD::BUILD_VECTOR, getCurDebugLoc(),
2571 VT, &Ops[0], Ops.size()));
2574 void SelectionDAGLowering::visitInsertValue(InsertValueInst &I) {
2575 const Value *Op0 = I.getOperand(0);
2576 const Value *Op1 = I.getOperand(1);
2577 const Type *AggTy = I.getType();
2578 const Type *ValTy = Op1->getType();
2579 bool IntoUndef = isa<UndefValue>(Op0);
2580 bool FromUndef = isa<UndefValue>(Op1);
2582 unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2583 I.idx_begin(), I.idx_end());
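// For example, inserting into member {1, 0} of the aggregate {i32, {i32, i32}}
// yields LinearIndex 1: exactly one scalar (the leading i32) precedes the
// insertion point in the flattened aggregate.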
2585 SmallVector<MVT, 4> AggValueVTs;
2586 ComputeValueVTs(TLI, AggTy, AggValueVTs);
2587 SmallVector<MVT, 4> ValValueVTs;
2588 ComputeValueVTs(TLI, ValTy, ValValueVTs);
2590 unsigned NumAggValues = AggValueVTs.size();
2591 unsigned NumValValues = ValValueVTs.size();
2592 SmallVector<SDValue, 4> Values(NumAggValues);
2594 SDValue Agg = getValue(Op0);
2595 SDValue Val = getValue(Op1);
2597 // Copy the beginning value(s) from the original aggregate.
2598 for (; i != LinearIndex; ++i)
2599 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2600 SDValue(Agg.getNode(), Agg.getResNo() + i);
2601 // Copy values from the inserted value(s).
2602 for (; i != LinearIndex + NumValValues; ++i)
2603 Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2604 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
2605 // Copy remaining value(s) from the original aggregate.
2606 for (; i != NumAggValues; ++i)
2607 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
2608 SDValue(Agg.getNode(), Agg.getResNo() + i);
2610 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2611 DAG.getVTList(&AggValueVTs[0], NumAggValues),
2612 &Values[0], NumAggValues));
2615 void SelectionDAGLowering::visitExtractValue(ExtractValueInst &I) {
2616 const Value *Op0 = I.getOperand(0);
2617 const Type *AggTy = Op0->getType();
2618 const Type *ValTy = I.getType();
2619 bool OutOfUndef = isa<UndefValue>(Op0);
2621 unsigned LinearIndex = ComputeLinearIndex(TLI, AggTy,
2622 I.idx_begin(), I.idx_end());
2624 SmallVector<MVT, 4> ValValueVTs;
2625 ComputeValueVTs(TLI, ValTy, ValValueVTs);
2627 unsigned NumValValues = ValValueVTs.size();
2628 SmallVector<SDValue, 4> Values(NumValValues);
2630 SDValue Agg = getValue(Op0);
2631 // Copy out the selected value(s).
2632 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
2633 Values[i - LinearIndex] =
2635 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
2636 SDValue(Agg.getNode(), Agg.getResNo() + i);
2638 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2639 DAG.getVTList(&ValValueVTs[0], NumValValues),
2640 &Values[0], NumValValues));
2644 void SelectionDAGLowering::visitGetElementPtr(User &I) {
2645 SDValue N = getValue(I.getOperand(0));
2646 const Type *Ty = I.getOperand(0)->getType();
2648 for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
2651 if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
2652 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
2655 uint64_t Offset = TD->getStructLayout(StTy)->getElementOffset(Field);
2656 N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2657 DAG.getIntPtrConstant(Offset));
2659 Ty = StTy->getElementType(Field);
2661 Ty = cast<SequentialType>(Ty)->getElementType();
2663 // If this is a constant subscript, handle it quickly.
2664 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
2665 if (CI->getZExtValue() == 0) continue;
2667 TD->getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
2669 unsigned PtrBits = TLI.getPointerTy().getSizeInBits();
2671 OffsVal = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2673 DAG.getConstant(Offs, MVT::i64));
2675 OffsVal = DAG.getIntPtrConstant(Offs);
2676 N = DAG.getNode(ISD::ADD, getCurDebugLoc(), N.getValueType(), N,
2681 // N = N + Idx * ElementSize;
2682 uint64_t ElementSize = TD->getTypeAllocSize(Ty);
2683 SDValue IdxN = getValue(Idx);
2685 // If the index is smaller or larger than intptr_t, truncate or extend it.
2687 if (IdxN.getValueType().bitsLT(N.getValueType()))
2688 IdxN = DAG.getNode(ISD::SIGN_EXTEND, getCurDebugLoc(),
2689 N.getValueType(), IdxN);
2690 else if (IdxN.getValueType().bitsGT(N.getValueType()))
2691 IdxN = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2692 N.getValueType(), IdxN);
2694 // If this is a multiply by a power of two, turn it into a shl
2695 // immediately. This is a very common case.
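// For example, indexing into an array of i32 elements (ElementSize = 4)
// emits "IdxN << 2" rather than a multiply by 4.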
2696 if (ElementSize != 1) {
2697 if (isPowerOf2_64(ElementSize)) {
2698 unsigned Amt = Log2_64(ElementSize);
2699 IdxN = DAG.getNode(ISD::SHL, getCurDebugLoc(),
2700 N.getValueType(), IdxN,
2701 DAG.getConstant(Amt, TLI.getPointerTy()));
2703 SDValue Scale = DAG.getIntPtrConstant(ElementSize);
2704 IdxN = DAG.getNode(ISD::MUL, getCurDebugLoc(),
2705 N.getValueType(), IdxN, Scale);
2709 N = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2710 N.getValueType(), N, IdxN);
2716 void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
2717 // If this is a fixed sized alloca in the entry block of the function,
2718 // allocate it statically on the stack.
2719 if (FuncInfo.StaticAllocaMap.count(&I))
2720 return; // getValue will auto-populate this.
2722 const Type *Ty = I.getAllocatedType();
2723 uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
2725 std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
2728 SDValue AllocSize = getValue(I.getArraySize());
2730 AllocSize = DAG.getNode(ISD::MUL, getCurDebugLoc(), AllocSize.getValueType(),
2732 DAG.getConstant(TySize, AllocSize.getValueType()));
2736 MVT IntPtr = TLI.getPointerTy();
2737 if (IntPtr.bitsLT(AllocSize.getValueType()))
2738 AllocSize = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(),
2740 else if (IntPtr.bitsGT(AllocSize.getValueType()))
2741 AllocSize = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(),
2744 // Handle alignment. If the requested alignment is less than or equal to
2745 // the stack alignment, ignore it. If the requested alignment is greater than
2746 // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
2747 unsigned StackAlign =
2748 TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
2749 if (Align <= StackAlign)
2752 // Round the size of the allocation up to the stack alignment size
2753 // by adding StackAlign-1 to the size.
2754 AllocSize = DAG.getNode(ISD::ADD, getCurDebugLoc(),
2755 AllocSize.getValueType(), AllocSize,
2756 DAG.getIntPtrConstant(StackAlign-1));
2757 // Mask out the low bits for alignment purposes.
2758 AllocSize = DAG.getNode(ISD::AND, getCurDebugLoc(),
2759 AllocSize.getValueType(), AllocSize,
2760 DAG.getIntPtrConstant(~(uint64_t)(StackAlign-1)));
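// For example, with StackAlign = 16 and a requested size of 20 bytes, the two
// nodes above compute (20 + 15) & ~15 = 32, the next multiple of the stack
// alignment.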
2762 SDValue Ops[] = { getRoot(), AllocSize, DAG.getIntPtrConstant(Align) };
2763 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
2764 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, getCurDebugLoc(),
2767 DAG.setRoot(DSA.getValue(1));
2769 // Inform the Frame Information that we have just allocated a variable-sized object.
2771 CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
2774 void SelectionDAGLowering::visitLoad(LoadInst &I) {
2775 const Value *SV = I.getOperand(0);
2776 SDValue Ptr = getValue(SV);
2778 const Type *Ty = I.getType();
2779 bool isVolatile = I.isVolatile();
2780 unsigned Alignment = I.getAlignment();
2782 SmallVector<MVT, 4> ValueVTs;
2783 SmallVector<uint64_t, 4> Offsets;
2784 ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
2785 unsigned NumValues = ValueVTs.size();
2790 bool ConstantMemory = false;
2792 // Serialize volatile loads with other side effects.
2794 else if (AA->pointsToConstantMemory(SV)) {
2795 // Do not serialize (non-volatile) loads of constant memory with anything.
2796 Root = DAG.getEntryNode();
2797 ConstantMemory = true;
2799 // Do not serialize non-volatile loads against each other.
2800 Root = DAG.getRoot();
2803 SmallVector<SDValue, 4> Values(NumValues);
2804 SmallVector<SDValue, 4> Chains(NumValues);
2805 MVT PtrVT = Ptr.getValueType();
2806 for (unsigned i = 0; i != NumValues; ++i) {
2807 SDValue L = DAG.getLoad(ValueVTs[i], getCurDebugLoc(), Root,
2808 DAG.getNode(ISD::ADD, getCurDebugLoc(),
2810 DAG.getConstant(Offsets[i], PtrVT)),
2812 isVolatile, Alignment);
2814 Chains[i] = L.getValue(1);
2817 if (!ConstantMemory) {
2818 SDValue Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
2820 &Chains[0], NumValues);
2824 PendingLoads.push_back(Chain);
2827 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurDebugLoc(),
2828 DAG.getVTList(&ValueVTs[0], NumValues),
2829 &Values[0], NumValues));
2833 void SelectionDAGLowering::visitStore(StoreInst &I) {
2834 Value *SrcV = I.getOperand(0);
2835 Value *PtrV = I.getOperand(1);
2837 SmallVector<MVT, 4> ValueVTs;
2838 SmallVector<uint64_t, 4> Offsets;
2839 ComputeValueVTs(TLI, SrcV->getType(), ValueVTs, &Offsets);
2840 unsigned NumValues = ValueVTs.size();
2844 // Get the lowered operands. Note that we do this after
2845 // checking if NumValues is zero, because with zero values
2846 // the operands won't have values in the map.
2847 SDValue Src = getValue(SrcV);
2848 SDValue Ptr = getValue(PtrV);
2850 SDValue Root = getRoot();
2851 SmallVector<SDValue, 4> Chains(NumValues);
2852 MVT PtrVT = Ptr.getValueType();
2853 bool isVolatile = I.isVolatile();
2854 unsigned Alignment = I.getAlignment();
2855 for (unsigned i = 0; i != NumValues; ++i)
2856 Chains[i] = DAG.getStore(Root, getCurDebugLoc(),
2857 SDValue(Src.getNode(), Src.getResNo() + i),
2858 DAG.getNode(ISD::ADD, getCurDebugLoc(),
2860 DAG.getConstant(Offsets[i], PtrVT)),
2862 isVolatile, Alignment);
2864 DAG.setRoot(DAG.getNode(ISD::TokenFactor, getCurDebugLoc(),
2865 MVT::Other, &Chains[0], NumValues));
2868 /// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC node.
2870 void SelectionDAGLowering::visitTargetIntrinsic(CallInst &I,
2871 unsigned Intrinsic) {
2872 bool HasChain = !I.doesNotAccessMemory();
2873 bool OnlyLoad = HasChain && I.onlyReadsMemory();
2875 // Build the operand list.
2876 SmallVector<SDValue, 8> Ops;
2877 if (HasChain) { // If this intrinsic has side-effects, chainify it.
2879 // We don't need to serialize loads against other loads.
2880 Ops.push_back(DAG.getRoot());
2882 Ops.push_back(getRoot());
2886 // Info is set by getTgtMemIntrinsic.
2887 TargetLowering::IntrinsicInfo Info;
2888 bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I, Intrinsic);
2890 // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
2891 if (!IsTgtIntrinsic)
2892 Ops.push_back(DAG.getConstant(Intrinsic, TLI.getPointerTy()));
2894 // Add all operands of the call to the operand list.
2895 for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
2896 SDValue Op = getValue(I.getOperand(i));
2897 assert(TLI.isTypeLegal(Op.getValueType()) &&
2898 "Intrinsic uses a non-legal type?");
2902 std::vector<MVT> VTArray;
2903 if (I.getType() != Type::VoidTy) {
2904 MVT VT = TLI.getValueType(I.getType());
2905 if (VT.isVector()) {
2906 const VectorType *DestTy = cast<VectorType>(I.getType());
2907 MVT EltVT = TLI.getValueType(DestTy->getElementType());
2909 VT = MVT::getVectorVT(EltVT, DestTy->getNumElements());
2910 assert(VT != MVT::Other && "Intrinsic uses a non-legal type?");
2913 assert(TLI.isTypeLegal(VT) && "Intrinsic uses a non-legal type?");
2914 VTArray.push_back(VT);
2917 VTArray.push_back(MVT::Other);
2919 SDVTList VTs = DAG.getVTList(&VTArray[0], VTArray.size());
2923 if (IsTgtIntrinsic) {
2924 // This is a target intrinsic that touches memory.
2925 Result = DAG.getMemIntrinsicNode(Info.opc, getCurDebugLoc(),
2926 VTs, &Ops[0], Ops.size(),
2927 Info.memVT, Info.ptrVal, Info.offset,
2928 Info.align, Info.vol,
2929 Info.readMem, Info.writeMem);
2932 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurDebugLoc(),
2933 VTs, &Ops[0], Ops.size());
2934 else if (I.getType() != Type::VoidTy)
2935 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurDebugLoc(),
2936 VTs, &Ops[0], Ops.size());
2938 Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurDebugLoc(),
2939 VTs, &Ops[0], Ops.size());
2942 SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
2944 PendingLoads.push_back(Chain);
2948 if (I.getType() != Type::VoidTy) {
2949 if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
2950 MVT VT = TLI.getValueType(PTy);
2951 Result = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), VT, Result);
2953 setValue(&I, Result);
2957 /// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
2958 static GlobalVariable *ExtractTypeInfo(Value *V) {
2959 V = V->stripPointerCasts();
2960 GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
2961 assert ((GV || isa<ConstantPointerNull>(V)) &&
2962 "TypeInfo must be a global variable or NULL");
2968 /// AddCatchInfo - Extract the personality and type infos from an eh.selector
2969 /// call, and add them to the specified machine basic block.
2970 void AddCatchInfo(CallInst &I, MachineModuleInfo *MMI,
2971 MachineBasicBlock *MBB) {
2972 // Inform the MachineModuleInfo of the personality for this landing pad.
2973 ConstantExpr *CE = cast<ConstantExpr>(I.getOperand(2));
2974 assert(CE->getOpcode() == Instruction::BitCast &&
2975 isa<Function>(CE->getOperand(0)) &&
2976 "Personality should be a function");
2977 MMI->addPersonality(MBB, cast<Function>(CE->getOperand(0)));
2979 // Gather all the type infos for this landing pad and pass them along to
2980 // MachineModuleInfo.
2981 std::vector<GlobalVariable *> TyInfo;
2982 unsigned N = I.getNumOperands();
2984 for (unsigned i = N - 1; i > 2; --i) {
2985 if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(i))) {
2986 unsigned FilterLength = CI->getZExtValue();
2987 unsigned FirstCatch = i + FilterLength + !FilterLength;
2988 assert (FirstCatch <= N && "Invalid filter length");
2990 if (FirstCatch < N) {
2991 TyInfo.reserve(N - FirstCatch);
2992 for (unsigned j = FirstCatch; j < N; ++j)
2993 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
2994 MMI->addCatchTypeInfo(MBB, TyInfo);
2998 if (!FilterLength) {
3000 MMI->addCleanup(MBB);
3003 TyInfo.reserve(FilterLength - 1);
3004 for (unsigned j = i + 1; j < FirstCatch; ++j)
3005 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
3006 MMI->addFilterTypeInfo(MBB, TyInfo);
3015 TyInfo.reserve(N - 3);
3016 for (unsigned j = 3; j < N; ++j)
3017 TyInfo.push_back(ExtractTypeInfo(I.getOperand(j)));
3018 MMI->addCatchTypeInfo(MBB, TyInfo);
3024 /// GetSignificand - Get the significand and build it into a floating-point
3025 /// number with an exponent of 1:
3027 ///   Op = (Op & 0x007fffff) | 0x3f800000;
3029 /// where Op is the i32 bit pattern of the floating point value.
3031 GetSignificand(SelectionDAG &DAG, SDValue Op, DebugLoc dl) {
3032 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3033 DAG.getConstant(0x007fffff, MVT::i32));
3034 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
3035 DAG.getConstant(0x3f800000, MVT::i32));
3036 return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t2);
3039 /// GetExponent - Get the exponent:
3041 /// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
3043 /// where Op is the i32 bit pattern of the floating point value.
3045 GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI,
3047 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
3048 DAG.getConstant(0x7f800000, MVT::i32));
3049 SDValue t1 = DAG.getNode(ISD::SRL, dl, MVT::i32, t0,
3050 DAG.getConstant(23, TLI.getPointerTy()));
3051 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
3052 DAG.getConstant(127, MVT::i32));
3053 return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
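// Worked example for the two helpers above: for Op = 0x41200000 (the bit
// pattern of 10.0f), GetExponent yields (float)(130 - 127) = 3.0f and
// GetSignificand yields the float with bits 0x3fa00000, i.e. 1.25f, so the
// original value factors as 1.25 * 2^3.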
3056 /// getF32Constant - Get a 32-bit floating point constant whose bits are given by Flt.
3058 getF32Constant(SelectionDAG &DAG, unsigned Flt) {
3059 return DAG.getConstantFP(APFloat(APInt(32, Flt)), MVT::f32);
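// For example, getF32Constant(DAG, 0x3f800000) materializes 1.0f, and
// getF32Constant(DAG, 0x3fb8aa3b) materializes approximately 1.4426950f
// (log2(e)), which visitExp below uses.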
3062 /// Inlined utility function to implement binary input atomic intrinsics for
3063 /// visitIntrinsicCall: I is the call instruction and
3064 /// Op is the associated NodeType for I.
3066 SelectionDAGLowering::implVisitBinaryAtomic(CallInst& I, ISD::NodeType Op) {
3067 SDValue Root = getRoot();
3069 DAG.getAtomic(Op, getCurDebugLoc(),
3070 getValue(I.getOperand(2)).getValueType().getSimpleVT(),
3072 getValue(I.getOperand(1)),
3073 getValue(I.getOperand(2)),
3076 DAG.setRoot(L.getValue(1));
3080 // implVisitAluOverflow - Lower arithmetic overflow intrinsics.
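// For example, an llvm.sadd.with.overflow call is expected to reach this
// helper with Op = ISD::SADDO, producing a two-result node: the sum in the
// operand type and an i1 overflow flag (matching the VTList built below).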
3082 SelectionDAGLowering::implVisitAluOverflow(CallInst &I, ISD::NodeType Op) {
3083 SDValue Op1 = getValue(I.getOperand(1));
3084 SDValue Op2 = getValue(I.getOperand(2));
3086 SDVTList VTs = DAG.getVTList(Op1.getValueType(), MVT::i1);
3087 SDValue Result = DAG.getNode(Op, getCurDebugLoc(), VTs, Op1, Op2);
3089 setValue(&I, Result);
3093 /// visitExp - Lower an exp intrinsic. Handles the special sequences for
3094 /// limited-precision mode.
3096 SelectionDAGLowering::visitExp(CallInst &I) {
3098 DebugLoc dl = getCurDebugLoc();
3100 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3101 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3102 SDValue Op = getValue(I.getOperand(1));
3104 // Put the exponent in the right bit position for later addition to the final result:
3107 // #define LOG2OFe 1.4426950f
3108 // IntegerPartOfX = ((int32_t)(X * LOG2OFe));
3109 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3110 getF32Constant(DAG, 0x3fb8aa3b));
3111 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3113 // FractionalPartOfX = (X * LOG2OFe) - (float)IntegerPartOfX;
3114 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3115 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3117 // IntegerPartOfX <<= 23;
3118 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3119 DAG.getConstant(23, TLI.getPointerTy()));
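// Shifting IntegerPartOfX left by 23 places it in the exponent field of an
// IEEE-754 single. Adding it, in the integer domain, to the bit pattern of
// 2^FractionalPartOfX computed below multiplies that value by
// 2^IntegerPartOfX, completing exp(X) = 2^IntegerPartOfX * 2^FractionalPartOfX
// (assuming no exponent overflow).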
3121 if (LimitFloatPrecision <= 6) {
3122 // For floating-point precision of 6:
3124 // TwoToFractionalPartOfX =
3126 // (0.735607626f + 0.252464424f * x) * x;
3128 // error 0.0144103317, which is 6 bits
3129 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3130 getF32Constant(DAG, 0x3e814304));
3131 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3132 getF32Constant(DAG, 0x3f3c50c8));
3133 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3134 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3135 getF32Constant(DAG, 0x3f7f5e7e));
3136 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t5);
3138 // Add the exponent into the result in integer domain.
3139 SDValue t6 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3140 TwoToFracPartOfX, IntegerPartOfX);
3142 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t6);
3143 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3144 // For floating-point precision of 12:
3146 // TwoToFractionalPartOfX =
3149 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3151 // 0.000107046256 error, which is 13 to 14 bits
3152 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3153 getF32Constant(DAG, 0x3da235e3));
3154 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3155 getF32Constant(DAG, 0x3e65b8f3));
3156 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3157 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3158 getF32Constant(DAG, 0x3f324b07));
3159 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3160 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3161 getF32Constant(DAG, 0x3f7ff8fd));
3162 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t7);
3164 // Add the exponent into the result in integer domain.
3165 SDValue t8 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3166 TwoToFracPartOfX, IntegerPartOfX);
3168 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t8);
3169 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3170 // For floating-point precision of 18:
3172 // TwoToFractionalPartOfX =
3176 // (0.554906021e-1f +
3177 // (0.961591928e-2f +
3178 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3180 // error 2.47208000*10^(-7), which is better than 18 bits
3181 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3182 getF32Constant(DAG, 0x3924b03e));
3183 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3184 getF32Constant(DAG, 0x3ab24b87));
3185 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3186 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3187 getF32Constant(DAG, 0x3c1d8c17));
3188 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3189 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3190 getF32Constant(DAG, 0x3d634a1d));
3191 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3192 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3193 getF32Constant(DAG, 0x3e75fe14));
3194 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3195 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3196 getF32Constant(DAG, 0x3f317234));
3197 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3198 SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3199 getF32Constant(DAG, 0x3f800000));
3200 SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,
3203 // Add the exponent into the result in integer domain.
3204 SDValue t14 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3205 TwoToFracPartOfX, IntegerPartOfX);
3207 result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t14);
3210 // No special expansion.
3211 result = DAG.getNode(ISD::FEXP, dl,
3212 getValue(I.getOperand(1)).getValueType(),
3213 getValue(I.getOperand(1)));
3216 setValue(&I, result);
3219 /// visitLog - Lower a log intrinsic. Handles the special sequences for
3220 /// limited-precision mode.
3222 SelectionDAGLowering::visitLog(CallInst &I) {
3224 DebugLoc dl = getCurDebugLoc();
3226 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3227 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3228 SDValue Op = getValue(I.getOperand(1));
3229 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3231 // Scale the exponent by log(2) [0.69314718f].
3232 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
3233 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3234 getF32Constant(DAG, 0x3f317218));
3236 // Get the significand and build it into a floating-point number with an exponent of 1.
3238 SDValue X = GetSignificand(DAG, Op1, dl);
3240 if (LimitFloatPrecision <= 6) {
3241 // For floating-point precision of 6:
3245 // (1.4034025f - 0.23903021f * x) * x;
3247 // error 0.0034276066, which is better than 8 bits
3248 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3249 getF32Constant(DAG, 0xbe74c456));
3250 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3251 getF32Constant(DAG, 0x3fb3a2b1));
3252 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3253 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3254 getF32Constant(DAG, 0x3f949a29));
3256 result = DAG.getNode(ISD::FADD, dl,
3257 MVT::f32, LogOfExponent, LogOfMantissa);
3258 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3259 // For floating-point precision of 12:
3265 // (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
3267 // error 0.000061011436, which is 14 bits
3268 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3269 getF32Constant(DAG, 0xbd67b6d6));
3270 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3271 getF32Constant(DAG, 0x3ee4f4b8));
3272 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3273 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3274 getF32Constant(DAG, 0x3fbc278b));
3275 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3276 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3277 getF32Constant(DAG, 0x40348e95));
3278 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3279 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3280 getF32Constant(DAG, 0x3fdef31a));
3282 result = DAG.getNode(ISD::FADD, dl,
3283 MVT::f32, LogOfExponent, LogOfMantissa);
3284 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3285 // For floating-point precision of 18:
3293 // (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
3295 // error 0.0000023660568, which is better than 18 bits
3296 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3297 getF32Constant(DAG, 0xbc91e5ac));
3298 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3299 getF32Constant(DAG, 0x3e4350aa));
3300 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3301 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3302 getF32Constant(DAG, 0x3f60d3e3));
3303 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3304 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3305 getF32Constant(DAG, 0x4011cdf0));
3306 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3307 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3308 getF32Constant(DAG, 0x406cfd1c));
3309 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3310 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3311 getF32Constant(DAG, 0x408797cb));
3312 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3313 SDValue LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3314 getF32Constant(DAG, 0x4006dcab));
3316 result = DAG.getNode(ISD::FADD, dl,
3317 MVT::f32, LogOfExponent, LogOfMantissa);
3320 // No special expansion.
3321 result = DAG.getNode(ISD::FLOG, dl,
3322 getValue(I.getOperand(1)).getValueType(),
3323 getValue(I.getOperand(1)));
3326 setValue(&I, result);
3329 /// visitLog2 - Lower a log2 intrinsic. Handles the special sequences for
3330 /// limited-precision mode.
3332 SelectionDAGLowering::visitLog2(CallInst &I) {
3334 DebugLoc dl = getCurDebugLoc();
3336 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3337 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3338 SDValue Op = getValue(I.getOperand(1));
3339 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3341 // Get the exponent.
3342 SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
3344 // Get the significand and build it into a floating-point number with
3345 // exponent of 1.
3346 SDValue X = GetSignificand(DAG, Op1, dl);
3348 // Different possible minimax approximations of significand in
3349 // floating-point for various degrees of accuracy over [1,2].
3350 if (LimitFloatPrecision <= 6) {
3351 // For floating-point precision of 6:
3353 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
3355 // error 0.0049451742, which is more than 7 bits
3356 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3357 getF32Constant(DAG, 0xbeb08fe0));
3358 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3359 getF32Constant(DAG, 0x40019463));
3360 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3361 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3362 getF32Constant(DAG, 0x3fd6633d));
3364 result = DAG.getNode(ISD::FADD, dl,
3365 MVT::f32, LogOfExponent, Log2ofMantissa);
3366 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3367 // For floating-point precision of 12:
3373 // (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
3375 // error 0.0000876136000, which is better than 13 bits
3376 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3377 getF32Constant(DAG, 0xbda7262e));
3378 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3379 getF32Constant(DAG, 0x3f25280b));
3380 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3381 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3382 getF32Constant(DAG, 0x4007b923));
3383 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3384 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3385 getF32Constant(DAG, 0x40823e2f));
3386 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3387 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3388 getF32Constant(DAG, 0x4020d29c));
3390 result = DAG.getNode(ISD::FADD, dl,
3391 MVT::f32, LogOfExponent, Log2ofMantissa);
3392 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3393 // For floating-point precision of 18:
3402 // 0.25691327e-1f * x) * x) * x) * x) * x) * x;
3404 // error 0.0000018516, which is better than 18 bits
3405 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3406 getF32Constant(DAG, 0xbcd2769e));
3407 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3408 getF32Constant(DAG, 0x3e8ce0b9));
3409 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3410 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3411 getF32Constant(DAG, 0x3fa22ae7));
3412 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3413 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3414 getF32Constant(DAG, 0x40525723));
3415 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3416 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
3417 getF32Constant(DAG, 0x40aaf200));
3418 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3419 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3420 getF32Constant(DAG, 0x40c39dad));
3421 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3422 SDValue Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
3423 getF32Constant(DAG, 0x4042902c));
3425 result = DAG.getNode(ISD::FADD, dl,
3426 MVT::f32, LogOfExponent, Log2ofMantissa);
3429 // No special expansion.
3430 result = DAG.getNode(ISD::FLOG2, dl,
3431 getValue(I.getOperand(1)).getValueType(),
3432 getValue(I.getOperand(1)));
3435 setValue(&I, result);
3438 /// visitLog10 - Lower a log10 intrinsic. Handles the special sequences for
3439 /// limited-precision mode.
3441 SelectionDAGLowering::visitLog10(CallInst &I) {
3443 DebugLoc dl = getCurDebugLoc();
3445 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3446 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3447 SDValue Op = getValue(I.getOperand(1));
3448 SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
3450 // Scale the exponent by log10(2) [0.30102999f].
3451 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
3452 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
3453 getF32Constant(DAG, 0x3e9a209a));
3455 // Get the significand and build it into a floating-point number with
3456 // exponent of 1.
3457 SDValue X = GetSignificand(DAG, Op1, dl);
3459 if (LimitFloatPrecision <= 6) {
3460 // For floating-point precision of 6:
3462 // Log10ofMantissa =
3464 // (0.60948995f - 0.10380950f * x) * x;
3466 // error 0.0014886165, which is 6 bits
3467 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3468 getF32Constant(DAG, 0xbdd49a13));
3469 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
3470 getF32Constant(DAG, 0x3f1c0789));
3471 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3472 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
3473 getF32Constant(DAG, 0x3f011300));
3475 result = DAG.getNode(ISD::FADD, dl,
3476 MVT::f32, LogOfExponent, Log10ofMantissa);
3477 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3478 // For floating-point precision of 12:
3480 // Log10ofMantissa =
3483 // (-0.31664806f + 0.47637168e-1f * x) * x) * x;
3485 // error 0.00019228036, which is better than 12 bits
3486 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3487 getF32Constant(DAG, 0x3d431f31));
3488 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3489 getF32Constant(DAG, 0x3ea21fb2));
3490 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3491 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3492 getF32Constant(DAG, 0x3f6ae232));
3493 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3494 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3495 getF32Constant(DAG, 0x3f25f7c3));
3497 result = DAG.getNode(ISD::FADD, dl,
3498 MVT::f32, LogOfExponent, Log10ofMantissa);
3499 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3500 // For floating-point precision of 18:
3502 // Log10ofMantissa =
3507 // (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
3509 // error 0.0000037995730, which is better than 18 bits
3510 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3511 getF32Constant(DAG, 0x3c5d51ce));
3512 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
3513 getF32Constant(DAG, 0x3e00685a));
3514 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
3515 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3516 getF32Constant(DAG, 0x3efb6798));
3517 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3518 SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
3519 getF32Constant(DAG, 0x3f88d192));
3520 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3521 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3522 getF32Constant(DAG, 0x3fc4316c));
3523 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3524 SDValue Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
3525 getF32Constant(DAG, 0x3f57ce70));
3527 result = DAG.getNode(ISD::FADD, dl,
3528 MVT::f32, LogOfExponent, Log10ofMantissa);
3531 // No special expansion.
3532 result = DAG.getNode(ISD::FLOG10, dl,
3533 getValue(I.getOperand(1)).getValueType(),
3534 getValue(I.getOperand(1)));
3537 setValue(&I, result);
3540 /// visitExp2 - Lower an exp2 intrinsic. Handles the special sequences for
3541 /// limited-precision mode.
3543 SelectionDAGLowering::visitExp2(CallInst &I) {
3545 DebugLoc dl = getCurDebugLoc();
3547 if (getValue(I.getOperand(1)).getValueType() == MVT::f32 &&
3548 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3549 SDValue Op = getValue(I.getOperand(1));
3551 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Op);
3553 // FractionalPartOfX = x - (float)IntegerPartOfX;
3554 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3555 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, Op, t1);
3557 // IntegerPartOfX <<= 23;
3558 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3559 DAG.getConstant(23, TLI.getPointerTy()));
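// 2^x is split as 2^IntegerPartOfX * 2^X with X = x - (float)IntegerPartOfX.
// The polynomials below approximate 2^X; the shifted integer part sits in the
// exponent bit positions (bits 23..30) so it can be folded in later with a
// plain integer add.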
3561 if (LimitFloatPrecision <= 6) {
3562 // For floating-point precision of 6:
3564 // TwoToFractionalPartOfX =
3566 // (0.735607626f + 0.252464424f * x) * x;
3568 // error 0.0144103317, which is 6 bits
3569 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3570 getF32Constant(DAG, 0x3e814304));
3571 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3572 getF32Constant(DAG, 0x3f3c50c8));
3573 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3574 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3575 getF32Constant(DAG, 0x3f7f5e7e));
3576 SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
3577 SDValue TwoToFractionalPartOfX =
3578 DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
3580 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3581 MVT::f32, TwoToFractionalPartOfX);
3582 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3583 // For floating-point precision of 12:
3585 // TwoToFractionalPartOfX =
3588 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3590 // error 0.000107046256, which is 13 to 14 bits
3591 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3592 getF32Constant(DAG, 0x3da235e3));
3593 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3594 getF32Constant(DAG, 0x3e65b8f3));
3595 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3596 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3597 getF32Constant(DAG, 0x3f324b07));
3598 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3599 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3600 getF32Constant(DAG, 0x3f7ff8fd));
3601 SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
3602 SDValue TwoToFractionalPartOfX =
3603 DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
3605 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3606 MVT::f32, TwoToFractionalPartOfX);
3607 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3608 // For floating-point precision of 18:
3610 // TwoToFractionalPartOfX =
3614 // (0.554906021e-1f +
3615 // (0.961591928e-2f +
3616 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3617 // error 2.47208000*10^(-7), which is better than 18 bits
3618 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3619 getF32Constant(DAG, 0x3924b03e));
3620 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3621 getF32Constant(DAG, 0x3ab24b87));
3622 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3623 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3624 getF32Constant(DAG, 0x3c1d8c17));
3625 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3626 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3627 getF32Constant(DAG, 0x3d634a1d));
3628 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3629 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3630 getF32Constant(DAG, 0x3e75fe14));
3631 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3632 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3633 getF32Constant(DAG, 0x3f317234));
3634 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3635 SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3636 getF32Constant(DAG, 0x3f800000));
3637 SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
3638 SDValue TwoToFractionalPartOfX =
3639 DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
3641 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3642 MVT::f32, TwoToFractionalPartOfX);
3645 // No special expansion.
3646 result = DAG.getNode(ISD::FEXP2, dl,
3647 getValue(I.getOperand(1)).getValueType(),
3648 getValue(I.getOperand(1)));
3651 setValue(&I, result);
3654 /// visitPow - Lower a pow intrinsic. Handles the special sequences for
3655 /// limited-precision mode with x == 10.0f.
3657 SelectionDAGLowering::visitPow(CallInst &I) {
3659 Value *Val = I.getOperand(1);
3660 DebugLoc dl = getCurDebugLoc();
3661 bool IsExp10 = false;
3663 if (getValue(Val).getValueType() == MVT::f32 &&
3664 getValue(I.getOperand(2)).getValueType() == MVT::f32 &&
3665 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3666 if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(Val))) {
3667 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3669 IsExp10 = CFP->getValueAPF().bitwiseIsEqual(Ten);
3674 if (IsExp10 && LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
3675 SDValue Op = getValue(I.getOperand(2));
3677 // Put the exponent in the right bit position for later addition to the
3678 // final result:
3680 // #define LOG2OF10 3.3219281f
3681 // IntegerPartOfX = (int32_t)(x * LOG2OF10);
3682 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
3683 getF32Constant(DAG, 0x40549a78));
3684 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
3686 // FractionalPartOfX = x - (float)IntegerPartOfX;
3687 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
3688 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
3690 // IntegerPartOfX <<= 23;
3691 IntegerPartOfX = DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
3692 DAG.getConstant(23, TLI.getPointerTy()));
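// From here on the lowering mirrors the exp2 expansion above: 10^x has been
// rewritten as 2^(x * log2(10)), so what remains is evaluating 2^t0 from its
// integer and fractional parts.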
3694 if (LimitFloatPrecision <= 6) {
3695 // For floating-point precision of 6:
3697 // twoToFractionalPartOfX =
3699 // (0.735607626f + 0.252464424f * x) * x;
3701 // error 0.0144103317, which is 6 bits
3702 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3703 getF32Constant(DAG, 0x3e814304));
3704 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3705 getF32Constant(DAG, 0x3f3c50c8));
3706 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3707 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3708 getF32Constant(DAG, 0x3f7f5e7e));
3709 SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
3710 SDValue TwoToFractionalPartOfX =
3711 DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
3713 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3714 MVT::f32, TwoToFractionalPartOfX);
3715 } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
3716 // For floating-point precision of 12:
3718 // TwoToFractionalPartOfX =
3721 // (0.224338339f + 0.792043434e-1f * x) * x) * x;
3723 // error 0.000107046256, which is 13 to 14 bits
3724 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3725 getF32Constant(DAG, 0x3da235e3));
3726 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3727 getF32Constant(DAG, 0x3e65b8f3));
3728 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3729 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3730 getF32Constant(DAG, 0x3f324b07));
3731 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3732 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3733 getF32Constant(DAG, 0x3f7ff8fd));
3734 SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
3735 SDValue TwoToFractionalPartOfX =
3736 DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
3738 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3739 MVT::f32, TwoToFractionalPartOfX);
3740 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
3741 // For floating-point precision of 18:
3743 // TwoToFractionalPartOfX =
3747 // (0.554906021e-1f +
3748 // (0.961591928e-2f +
3749 // (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
3750 // error 2.47208000*10^(-7), which is better than 18 bits
3751 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
3752 getF32Constant(DAG, 0x3924b03e));
3753 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
3754 getF32Constant(DAG, 0x3ab24b87));
3755 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
3756 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
3757 getF32Constant(DAG, 0x3c1d8c17));
3758 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
3759 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
3760 getF32Constant(DAG, 0x3d634a1d));
3761 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
3762 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
3763 getF32Constant(DAG, 0x3e75fe14));
3764 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
3765 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
3766 getF32Constant(DAG, 0x3f317234));
3767 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
3768 SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
3769 getF32Constant(DAG, 0x3f800000));
3770 SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
3771 SDValue TwoToFractionalPartOfX =
3772 DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
3774 result = DAG.getNode(ISD::BIT_CONVERT, dl,
3775 MVT::f32, TwoToFractionalPartOfX);
3778 // No special expansion.
3779 result = DAG.getNode(ISD::FPOW, dl,
3780 getValue(I.getOperand(1)).getValueType(),
3781 getValue(I.getOperand(1)),
3782 getValue(I.getOperand(2)));
3785 setValue(&I, result);
3788 /// visitIntrinsicCall - Lower the call to the specified intrinsic function. If
3789 /// we want to emit this as a call to a named external function, return the name;
3790 /// otherwise lower it and return null.
3792 SelectionDAGLowering::visitIntrinsicCall(CallInst &I, unsigned Intrinsic) {
3793 DebugLoc dl = getCurDebugLoc();
3794 switch (Intrinsic) {
3796 // By default, turn this into a target intrinsic node.
3797 visitTargetIntrinsic(I, Intrinsic);
3799 case Intrinsic::vastart: visitVAStart(I); return 0;
3800 case Intrinsic::vaend: visitVAEnd(I); return 0;
3801 case Intrinsic::vacopy: visitVACopy(I); return 0;
3802 case Intrinsic::returnaddress:
3803 setValue(&I, DAG.getNode(ISD::RETURNADDR, dl, TLI.getPointerTy(),
3804 getValue(I.getOperand(1))));
3806 case Intrinsic::frameaddress:
3807 setValue(&I, DAG.getNode(ISD::FRAMEADDR, dl, TLI.getPointerTy(),
3808 getValue(I.getOperand(1))));
3810 case Intrinsic::setjmp:
3811 return "_setjmp"+!TLI.usesUnderscoreSetJmp();
3813 case Intrinsic::longjmp:
3814 return "_longjmp"+!TLI.usesUnderscoreLongJmp();
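// Note: adding the boolean to the string literal above selects between the
// underscore-prefixed and plain names; when the target does not use the
// underscore form, the +1 skips the leading '_' and yields "setjmp"/"longjmp".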
3816 case Intrinsic::memcpy: {
3817 SDValue Op1 = getValue(I.getOperand(1));
3818 SDValue Op2 = getValue(I.getOperand(2));
3819 SDValue Op3 = getValue(I.getOperand(3));
3820 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3821 DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
3822 I.getOperand(1), 0, I.getOperand(2), 0));
3825 case Intrinsic::memset: {
3826 SDValue Op1 = getValue(I.getOperand(1));
3827 SDValue Op2 = getValue(I.getOperand(2));
3828 SDValue Op3 = getValue(I.getOperand(3));
3829 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3830 DAG.setRoot(DAG.getMemset(getRoot(), dl, Op1, Op2, Op3, Align,
3831 I.getOperand(1), 0));
3834 case Intrinsic::memmove: {
3835 SDValue Op1 = getValue(I.getOperand(1));
3836 SDValue Op2 = getValue(I.getOperand(2));
3837 SDValue Op3 = getValue(I.getOperand(3));
3838 unsigned Align = cast<ConstantInt>(I.getOperand(4))->getZExtValue();
3840 // If the source and destination are known to not be aliases, we can
3841 // lower memmove as memcpy.
3842 uint64_t Size = -1ULL;
3843 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op3))
3844 Size = C->getZExtValue();
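// If the length is not a compile-time constant, Size stays at -1ULL (treated
// as an unknown size), so the NoAlias check below is conservative and the
// memcpy rewrite simply won't fire unless AA can still prove no overlap.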
3845 if (AA->alias(I.getOperand(1), Size, I.getOperand(2), Size) ==
3846 AliasAnalysis::NoAlias) {
3847 DAG.setRoot(DAG.getMemcpy(getRoot(), dl, Op1, Op2, Op3, Align, false,
3848 I.getOperand(1), 0, I.getOperand(2), 0));
3852 DAG.setRoot(DAG.getMemmove(getRoot(), dl, Op1, Op2, Op3, Align,
3853 I.getOperand(1), 0, I.getOperand(2), 0));
3856 case Intrinsic::dbg_stoppoint: {
3857 DbgStopPointInst &SPI = cast<DbgStopPointInst>(I);
3858 if (isValidDebugInfoIntrinsic(SPI, CodeGenOpt::Default)) {
3859 MachineFunction &MF = DAG.getMachineFunction();
3860 DebugLoc Loc = ExtractDebugLocation(SPI, MF.getDebugLocInfo());
3861 setCurDebugLoc(Loc);
3863 if (OptLevel == CodeGenOpt::None)
3864 DAG.setRoot(DAG.getDbgStopPoint(Loc, getRoot(),
3871 case Intrinsic::dbg_region_start: {
3872 DwarfWriter *DW = DAG.getDwarfWriter();
3873 DbgRegionStartInst &RSI = cast<DbgRegionStartInst>(I);
3874 if (isValidDebugInfoIntrinsic(RSI, OptLevel) && DW
3875 && DW->ShouldEmitDwarfDebug()) {
3877 DW->RecordRegionStart(cast<GlobalVariable>(RSI.getContext()));
3878 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3879 getRoot(), LabelID));
3883 case Intrinsic::dbg_region_end: {
3884 DwarfWriter *DW = DAG.getDwarfWriter();
3885 DbgRegionEndInst &REI = cast<DbgRegionEndInst>(I);
3887 if (!isValidDebugInfoIntrinsic(REI, OptLevel) || !DW
3888 || !DW->ShouldEmitDwarfDebug())
3891 MachineFunction &MF = DAG.getMachineFunction();
3892 DISubprogram Subprogram(cast<GlobalVariable>(REI.getContext()));
3894 if (isInlinedFnEnd(REI, MF.getFunction())) {
3895 // This is the end of an inlined function. Debugging information for inlined
3896 // functions is not handled yet (it is only supported by FastISel).
3897 if (OptLevel == CodeGenOpt::None) {
3898 unsigned ID = DW->RecordInlinedFnEnd(Subprogram);
3900 // Returned ID is 0 if this is unbalanced "end of inlined
3901 // scope". This could happen if the optimizer eats dbg intrinsics or if the
3902 // "beginning of inlined scope" is not recognized due to missing
3903 // location info. In such cases, ignore this region.end.
3904 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3911 DW->RecordRegionEnd(cast<GlobalVariable>(REI.getContext()));
3912 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3913 getRoot(), LabelID));
3916 case Intrinsic::dbg_func_start: {
3917 DwarfWriter *DW = DAG.getDwarfWriter();
3918 DbgFuncStartInst &FSI = cast<DbgFuncStartInst>(I);
3919 if (!isValidDebugInfoIntrinsic(FSI, CodeGenOpt::None) || !DW
3920 || !DW->ShouldEmitDwarfDebug())
3923 MachineFunction &MF = DAG.getMachineFunction();
3924 // This is the beginning of an inlined function.
3925 if (isInlinedFnStart(FSI, MF.getFunction())) {
3926 if (OptLevel != CodeGenOpt::None)
3927 // FIXME: Debugging information for inlined functions is only
3928 // supported at CodeGenOpt::None.
3931 DebugLoc PrevLoc = CurDebugLoc;
3932 // If llvm.dbg.func.start is seen in a new block before any
3933 // llvm.dbg.stoppoint intrinsic, then the location info is unknown.
3934 // FIXME: Why is DebugLoc reset at the beginning of each block?
3935 if (PrevLoc.isUnknown())
3938 // Record the source line.
3939 setCurDebugLoc(ExtractDebugLocation(FSI, MF.getDebugLocInfo()));
3941 DebugLocTuple PrevLocTpl = MF.getDebugLocTuple(PrevLoc);
3942 DISubprogram SP(cast<GlobalVariable>(FSI.getSubprogram()));
3943 DICompileUnit CU(PrevLocTpl.CompileUnit);
3944 unsigned LabelID = DW->RecordInlinedFnStart(SP, CU,
3947 DAG.setRoot(DAG.getLabel(ISD::DBG_LABEL, getCurDebugLoc(),
3948 getRoot(), LabelID));
3952 // This is the beginning of a new function.
3953 MF.setDefaultDebugLoc(ExtractDebugLocation(FSI, MF.getDebugLocInfo()));
3955 // llvm.dbg.func_start also defines the beginning of the function scope.
3956 DW->RecordRegionStart(cast<GlobalVariable>(FSI.getSubprogram()));
3959 case Intrinsic::dbg_declare: {
3960 if (OptLevel != CodeGenOpt::None)
3961 // FIXME: Variable debug info is not supported here.
3964 DbgDeclareInst &DI = cast<DbgDeclareInst>(I);
3965 if (!isValidDebugInfoIntrinsic(DI, CodeGenOpt::None))
3968 Value *Variable = DI.getVariable();
3969 DAG.setRoot(DAG.getNode(ISD::DECLARE, dl, MVT::Other, getRoot(),
3970 getValue(DI.getAddress()), getValue(Variable)));
3973 case Intrinsic::eh_exception: {
3974 // Insert the EXCEPTIONADDR instruction.
3975 assert(CurMBB->isLandingPad() &&"Call to eh.exception not in landing pad!");
3976 SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
3978 Ops[0] = DAG.getRoot();
3979 SDValue Op = DAG.getNode(ISD::EXCEPTIONADDR, dl, VTs, Ops, 1);
3981 DAG.setRoot(Op.getValue(1));
3985 case Intrinsic::eh_selector_i32:
3986 case Intrinsic::eh_selector_i64: {
3987 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
3988 MVT VT = (Intrinsic == Intrinsic::eh_selector_i32 ?
3989 MVT::i32 : MVT::i64);
3992 if (CurMBB->isLandingPad())
3993 AddCatchInfo(I, MMI, CurMBB);
3996 FuncInfo.CatchInfoLost.insert(&I);
3998 // FIXME: Mark exception selector register as live in. Hack for PR1508.
3999 unsigned Reg = TLI.getExceptionSelectorRegister();
4000 if (Reg) CurMBB->addLiveIn(Reg);
4003 // Insert the EHSELECTION instruction.
4004 SDVTList VTs = DAG.getVTList(VT, MVT::Other);
4006 Ops[0] = getValue(I.getOperand(1));
4008 SDValue Op = DAG.getNode(ISD::EHSELECTION, dl, VTs, Ops, 2);
4010 DAG.setRoot(Op.getValue(1));
4012 setValue(&I, DAG.getConstant(0, VT));
4018 case Intrinsic::eh_typeid_for_i32:
4019 case Intrinsic::eh_typeid_for_i64: {
4020 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4021 MVT VT = (Intrinsic == Intrinsic::eh_typeid_for_i32 ?
4022 MVT::i32 : MVT::i64);
4025 // Find the type id for the given typeinfo.
4026 GlobalVariable *GV = ExtractTypeInfo(I.getOperand(1));
4028 unsigned TypeID = MMI->getTypeIDFor(GV);
4029 setValue(&I, DAG.getConstant(TypeID, VT));
4031 // Return something different to eh_selector.
4032 setValue(&I, DAG.getConstant(1, VT));
4038 case Intrinsic::eh_return_i32:
4039 case Intrinsic::eh_return_i64:
4040 if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
4041 MMI->setCallsEHReturn(true);
4042 DAG.setRoot(DAG.getNode(ISD::EH_RETURN, dl,
4045 getValue(I.getOperand(1)),
4046 getValue(I.getOperand(2))));
4048 setValue(&I, DAG.getConstant(0, TLI.getPointerTy()));
4052 case Intrinsic::eh_unwind_init:
4053 if (MachineModuleInfo *MMI = DAG.getMachineModuleInfo()) {
4054 MMI->setCallsUnwindInit(true);
4059 case Intrinsic::eh_dwarf_cfa: {
4060 MVT VT = getValue(I.getOperand(1)).getValueType();
4062 if (VT.bitsGT(TLI.getPointerTy()))
4063 CfaArg = DAG.getNode(ISD::TRUNCATE, dl,
4064 TLI.getPointerTy(), getValue(I.getOperand(1)));
4066 CfaArg = DAG.getNode(ISD::SIGN_EXTEND, dl,
4067 TLI.getPointerTy(), getValue(I.getOperand(1)));
4069 SDValue Offset = DAG.getNode(ISD::ADD, dl,
4071 DAG.getNode(ISD::FRAME_TO_ARGS_OFFSET, dl,
4072 TLI.getPointerTy()),
4074 setValue(&I, DAG.getNode(ISD::ADD, dl,
4076 DAG.getNode(ISD::FRAMEADDR, dl,
4079 TLI.getPointerTy())),
4084 case Intrinsic::convertff:
4085 case Intrinsic::convertfsi:
4086 case Intrinsic::convertfui:
4087 case Intrinsic::convertsif:
4088 case Intrinsic::convertuif:
4089 case Intrinsic::convertss:
4090 case Intrinsic::convertsu:
4091 case Intrinsic::convertus:
4092 case Intrinsic::convertuu: {
4093 ISD::CvtCode Code = ISD::CVT_INVALID;
4094 switch (Intrinsic) {
4095 case Intrinsic::convertff: Code = ISD::CVT_FF; break;
4096 case Intrinsic::convertfsi: Code = ISD::CVT_FS; break;
4097 case Intrinsic::convertfui: Code = ISD::CVT_FU; break;
4098 case Intrinsic::convertsif: Code = ISD::CVT_SF; break;
4099 case Intrinsic::convertuif: Code = ISD::CVT_UF; break;
4100 case Intrinsic::convertss: Code = ISD::CVT_SS; break;
4101 case Intrinsic::convertsu: Code = ISD::CVT_SU; break;
4102 case Intrinsic::convertus: Code = ISD::CVT_US; break;
4103 case Intrinsic::convertuu: Code = ISD::CVT_UU; break;
4105 MVT DestVT = TLI.getValueType(I.getType());
4106 Value* Op1 = I.getOperand(1);
4107 setValue(&I, DAG.getConvertRndSat(DestVT, getCurDebugLoc(), getValue(Op1),
4108 DAG.getValueType(DestVT),
4109 DAG.getValueType(getValue(Op1).getValueType()),
4110 getValue(I.getOperand(2)),
4111 getValue(I.getOperand(3)),
4116 case Intrinsic::sqrt:
4117 setValue(&I, DAG.getNode(ISD::FSQRT, dl,
4118 getValue(I.getOperand(1)).getValueType(),
4119 getValue(I.getOperand(1))));
4121 case Intrinsic::powi:
4122 setValue(&I, DAG.getNode(ISD::FPOWI, dl,
4123 getValue(I.getOperand(1)).getValueType(),
4124 getValue(I.getOperand(1)),
4125 getValue(I.getOperand(2))));
4127 case Intrinsic::sin:
4128 setValue(&I, DAG.getNode(ISD::FSIN, dl,
4129 getValue(I.getOperand(1)).getValueType(),
4130 getValue(I.getOperand(1))));
4132 case Intrinsic::cos:
4133 setValue(&I, DAG.getNode(ISD::FCOS, dl,
4134 getValue(I.getOperand(1)).getValueType(),
4135 getValue(I.getOperand(1))));
4137 case Intrinsic::log:
4140 case Intrinsic::log2:
4143 case Intrinsic::log10:
4146 case Intrinsic::exp:
4149 case Intrinsic::exp2:
4152 case Intrinsic::pow:
4155 case Intrinsic::pcmarker: {
4156 SDValue Tmp = getValue(I.getOperand(1));
4157 DAG.setRoot(DAG.getNode(ISD::PCMARKER, dl, MVT::Other, getRoot(), Tmp));
4160 case Intrinsic::readcyclecounter: {
4161 SDValue Op = getRoot();
4162 SDValue Tmp = DAG.getNode(ISD::READCYCLECOUNTER, dl,
4163 DAG.getVTList(MVT::i64, MVT::Other),
4166 DAG.setRoot(Tmp.getValue(1));
4169 case Intrinsic::bswap:
4170 setValue(&I, DAG.getNode(ISD::BSWAP, dl,
4171 getValue(I.getOperand(1)).getValueType(),
4172 getValue(I.getOperand(1))));
4174 case Intrinsic::cttz: {
4175 SDValue Arg = getValue(I.getOperand(1));
4176 MVT Ty = Arg.getValueType();
4177 SDValue result = DAG.getNode(ISD::CTTZ, dl, Ty, Arg);
4178 setValue(&I, result);
4181 case Intrinsic::ctlz: {
4182 SDValue Arg = getValue(I.getOperand(1));
4183 MVT Ty = Arg.getValueType();
4184 SDValue result = DAG.getNode(ISD::CTLZ, dl, Ty, Arg);
4185 setValue(&I, result);
4188 case Intrinsic::ctpop: {
4189 SDValue Arg = getValue(I.getOperand(1));
4190 MVT Ty = Arg.getValueType();
4191 SDValue result = DAG.getNode(ISD::CTPOP, dl, Ty, Arg);
4192 setValue(&I, result);
4195 case Intrinsic::stacksave: {
4196 SDValue Op = getRoot();
4197 SDValue Tmp = DAG.getNode(ISD::STACKSAVE, dl,
4198 DAG.getVTList(TLI.getPointerTy(), MVT::Other), &Op, 1);
4200 DAG.setRoot(Tmp.getValue(1));
4203 case Intrinsic::stackrestore: {
4204 SDValue Tmp = getValue(I.getOperand(1));
4205 DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, dl, MVT::Other, getRoot(), Tmp));
4208 case Intrinsic::stackprotector: {
4209 // Emit code into the DAG to store the stack guard onto the stack.
4210 MachineFunction &MF = DAG.getMachineFunction();
4211 MachineFrameInfo *MFI = MF.getFrameInfo();
4212 MVT PtrTy = TLI.getPointerTy();
4214 SDValue Src = getValue(I.getOperand(1)); // The guard's value.
4215 AllocaInst *Slot = cast<AllocaInst>(I.getOperand(2));
4217 int FI = FuncInfo.StaticAllocaMap[Slot];
4218 MFI->setStackProtectorIndex(FI);
4220 SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
4222 // Store the stack protector onto the stack.
4223 SDValue Result = DAG.getStore(getRoot(), getCurDebugLoc(), Src, FIN,
4224 PseudoSourceValue::getFixedStack(FI),
4226 setValue(&I, Result);
4227 DAG.setRoot(Result);
4230 case Intrinsic::var_annotation:
4231 // Discard annotate attributes
4234 case Intrinsic::init_trampoline: {
4235 const Function *F = cast<Function>(I.getOperand(2)->stripPointerCasts());
4239 Ops[1] = getValue(I.getOperand(1));
4240 Ops[2] = getValue(I.getOperand(2));
4241 Ops[3] = getValue(I.getOperand(3));
4242 Ops[4] = DAG.getSrcValue(I.getOperand(1));
4243 Ops[5] = DAG.getSrcValue(F);
4245 SDValue Tmp = DAG.getNode(ISD::TRAMPOLINE, dl,
4246 DAG.getVTList(TLI.getPointerTy(), MVT::Other),
4250 DAG.setRoot(Tmp.getValue(1));
4254 case Intrinsic::gcroot:
4256 Value *Alloca = I.getOperand(1);
4257 Constant *TypeMap = cast<Constant>(I.getOperand(2));
4259 FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
4260 GFI->addStackRoot(FI->getIndex(), TypeMap);
4264 case Intrinsic::gcread:
4265 case Intrinsic::gcwrite:
4266 llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
4269 case Intrinsic::flt_rounds: {
4270 setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, dl, MVT::i32));
4274 case Intrinsic::trap: {
4275 DAG.setRoot(DAG.getNode(ISD::TRAP, dl, MVT::Other, getRoot()));
4279 case Intrinsic::uadd_with_overflow:
4280 return implVisitAluOverflow(I, ISD::UADDO);
4281 case Intrinsic::sadd_with_overflow:
4282 return implVisitAluOverflow(I, ISD::SADDO);
4283 case Intrinsic::usub_with_overflow:
4284 return implVisitAluOverflow(I, ISD::USUBO);
4285 case Intrinsic::ssub_with_overflow:
4286 return implVisitAluOverflow(I, ISD::SSUBO);
4287 case Intrinsic::umul_with_overflow:
4288 return implVisitAluOverflow(I, ISD::UMULO);
4289 case Intrinsic::smul_with_overflow:
4290 return implVisitAluOverflow(I, ISD::SMULO);
4292 case Intrinsic::prefetch: {
4295 Ops[1] = getValue(I.getOperand(1));
4296 Ops[2] = getValue(I.getOperand(2));
4297 Ops[3] = getValue(I.getOperand(3));
4298 DAG.setRoot(DAG.getNode(ISD::PREFETCH, dl, MVT::Other, &Ops[0], 4));
4302 case Intrinsic::memory_barrier: {
4305 for (int x = 1; x < 6; ++x)
4306 Ops[x] = getValue(I.getOperand(x));
4308 DAG.setRoot(DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, &Ops[0], 6));
4311 case Intrinsic::atomic_cmp_swap: {
4312 SDValue Root = getRoot();
4314 DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, getCurDebugLoc(),
4315 getValue(I.getOperand(2)).getValueType().getSimpleVT(),
4317 getValue(I.getOperand(1)),
4318 getValue(I.getOperand(2)),
4319 getValue(I.getOperand(3)),
4322 DAG.setRoot(L.getValue(1));
4325 case Intrinsic::atomic_load_add:
4326 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD);
4327 case Intrinsic::atomic_load_sub:
4328 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB);
4329 case Intrinsic::atomic_load_or:
4330 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR);
4331 case Intrinsic::atomic_load_xor:
4332 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR);
4333 case Intrinsic::atomic_load_and:
4334 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND);
4335 case Intrinsic::atomic_load_nand:
4336 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND);
4337 case Intrinsic::atomic_load_max:
4338 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX);
4339 case Intrinsic::atomic_load_min:
4340 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN);
4341 case Intrinsic::atomic_load_umin:
4342 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN);
4343 case Intrinsic::atomic_load_umax:
4344 return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX);
4345 case Intrinsic::atomic_swap:
4346 return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP);
4351 void SelectionDAGLowering::LowerCallTo(CallSite CS, SDValue Callee,
4353 MachineBasicBlock *LandingPad) {
4354 const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
4355 const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
4356 MachineModuleInfo *MMI = DAG.getMachineModuleInfo();
4357 unsigned BeginLabel = 0, EndLabel = 0;
4359 TargetLowering::ArgListTy Args;
4360 TargetLowering::ArgListEntry Entry;
4361 Args.reserve(CS.arg_size());
4362 for (CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
4364 SDValue ArgNode = getValue(*i);
4365 Entry.Node = ArgNode; Entry.Ty = (*i)->getType();
4367 unsigned attrInd = i - CS.arg_begin() + 1;
4368 Entry.isSExt = CS.paramHasAttr(attrInd, Attribute::SExt);
4369 Entry.isZExt = CS.paramHasAttr(attrInd, Attribute::ZExt);
4370 Entry.isInReg = CS.paramHasAttr(attrInd, Attribute::InReg);
4371 Entry.isSRet = CS.paramHasAttr(attrInd, Attribute::StructRet);
4372 Entry.isNest = CS.paramHasAttr(attrInd, Attribute::Nest);
4373 Entry.isByVal = CS.paramHasAttr(attrInd, Attribute::ByVal);
4374 Entry.Alignment = CS.getParamAlignment(attrInd);
4375 Args.push_back(Entry);
4378 if (LandingPad && MMI) {
4379 // Insert a label before the invoke call to mark the try range. This can be
4380 // used to detect deletion of the invoke via the MachineModuleInfo.
4381 BeginLabel = MMI->NextLabelID();
4382 // Both PendingLoads and PendingExports must be flushed here;
4383 // this call might not return.
4385 DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
4386 getControlRoot(), BeginLabel));
4389 std::pair<SDValue,SDValue> Result =
4390 TLI.LowerCallTo(getRoot(), CS.getType(),
4391 CS.paramHasAttr(0, Attribute::SExt),
4392 CS.paramHasAttr(0, Attribute::ZExt), FTy->isVarArg(),
4393 CS.paramHasAttr(0, Attribute::InReg), FTy->getNumParams(),
4394 CS.getCallingConv(),
4395 IsTailCall && PerformTailCallOpt,
4396 Callee, Args, DAG, getCurDebugLoc());
4397 if (CS.getType() != Type::VoidTy)
4398 setValue(CS.getInstruction(), Result.first);
4399 DAG.setRoot(Result.second);
4401 if (LandingPad && MMI) {
4402 // Insert a label at the end of the invoke call to mark the try range. This
4403 // can be used to detect deletion of the invoke via the MachineModuleInfo.
4404 EndLabel = MMI->NextLabelID();
4405 DAG.setRoot(DAG.getLabel(ISD::EH_LABEL, getCurDebugLoc(),
4406 getRoot(), EndLabel));
4408 // Inform MachineModuleInfo of range.
4409 MMI->addInvoke(LandingPad, BeginLabel, EndLabel);
4414 void SelectionDAGLowering::visitCall(CallInst &I) {
4415 const char *RenameFn = 0;
4416 if (Function *F = I.getCalledFunction()) {
4417 if (F->isDeclaration()) {
4418 const TargetIntrinsicInfo *II = TLI.getTargetMachine().getIntrinsicInfo();
4420 if (unsigned IID = II->getIntrinsicID(F)) {
4421 RenameFn = visitIntrinsicCall(I, IID);
4426 if (unsigned IID = F->getIntrinsicID()) {
4427 RenameFn = visitIntrinsicCall(I, IID);
4433 // Check for well-known libc/libm calls. If the function is internal, it
4434 // can't be a library call.
4435 unsigned NameLen = F->getNameLen();
4436 if (!F->hasLocalLinkage() && NameLen) {
4437 const char *NameStr = F->getNameStart();
4438 if (NameStr[0] == 'c' &&
4439 ((NameLen == 8 && !strcmp(NameStr, "copysign")) ||
4440 (NameLen == 9 && !strcmp(NameStr, "copysignf")))) {
4441 if (I.getNumOperands() == 3 && // Basic sanity checks.
4442 I.getOperand(1)->getType()->isFloatingPoint() &&
4443 I.getType() == I.getOperand(1)->getType() &&
4444 I.getType() == I.getOperand(2)->getType()) {
4445 SDValue LHS = getValue(I.getOperand(1));
4446 SDValue RHS = getValue(I.getOperand(2));
4447 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurDebugLoc(),
4448 LHS.getValueType(), LHS, RHS));
4451 } else if (NameStr[0] == 'f' &&
4452 ((NameLen == 4 && !strcmp(NameStr, "fabs")) ||
4453 (NameLen == 5 && !strcmp(NameStr, "fabsf")) ||
4454 (NameLen == 5 && !strcmp(NameStr, "fabsl")))) {
4455 if (I.getNumOperands() == 2 && // Basic sanity checks.
4456 I.getOperand(1)->getType()->isFloatingPoint() &&
4457 I.getType() == I.getOperand(1)->getType()) {
4458 SDValue Tmp = getValue(I.getOperand(1));
4459 setValue(&I, DAG.getNode(ISD::FABS, getCurDebugLoc(),
4460 Tmp.getValueType(), Tmp));
4463 } else if (NameStr[0] == 's' &&
4464 ((NameLen == 3 && !strcmp(NameStr, "sin")) ||
4465 (NameLen == 4 && !strcmp(NameStr, "sinf")) ||
4466 (NameLen == 4 && !strcmp(NameStr, "sinl")))) {
4467 if (I.getNumOperands() == 2 && // Basic sanity checks.
4468 I.getOperand(1)->getType()->isFloatingPoint() &&
4469 I.getType() == I.getOperand(1)->getType()) {
4470 SDValue Tmp = getValue(I.getOperand(1));
4471 setValue(&I, DAG.getNode(ISD::FSIN, getCurDebugLoc(),
4472 Tmp.getValueType(), Tmp));
4475 } else if (NameStr[0] == 'c' &&
4476 ((NameLen == 3 && !strcmp(NameStr, "cos")) ||
4477 (NameLen == 4 && !strcmp(NameStr, "cosf")) ||
4478 (NameLen == 4 && !strcmp(NameStr, "cosl")))) {
4479 if (I.getNumOperands() == 2 && // Basic sanity checks.
4480 I.getOperand(1)->getType()->isFloatingPoint() &&
4481 I.getType() == I.getOperand(1)->getType()) {
4482 SDValue Tmp = getValue(I.getOperand(1));
4483 setValue(&I, DAG.getNode(ISD::FCOS, getCurDebugLoc(),
4484 Tmp.getValueType(), Tmp));
4489 } else if (isa<InlineAsm>(I.getOperand(0))) {
4496 Callee = getValue(I.getOperand(0));
4498 Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
4500 LowerCallTo(&I, Callee, I.isTailCall());
4504 /// getCopyFromRegs - Emit a series of CopyFromReg nodes that copies from
4505 /// this value and returns the result as a ValueVT value. This uses
4506 /// Chain/Flag as the input and updates them for the output Chain/Flag.
4507 /// If the Flag pointer is NULL, no flag is used.
4508 SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, DebugLoc dl,
4510 SDValue *Flag) const {
4511 // Assemble the legal parts into the final values.
4512 SmallVector<SDValue, 4> Values(ValueVTs.size());
4513 SmallVector<SDValue, 8> Parts;
4514 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
4515 // Copy the legal parts from the registers.
4516 MVT ValueVT = ValueVTs[Value];
4517 unsigned NumRegs = TLI->getNumRegisters(ValueVT);
4518 MVT RegisterVT = RegVTs[Value];
4520 Parts.resize(NumRegs);
4521 for (unsigned i = 0; i != NumRegs; ++i) {
4524 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
4526 P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
4527 *Flag = P.getValue(2);
4529 Chain = P.getValue(1);
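// CopyFromReg yields the value plus an updated chain (and an updated flag when
// one is threaded through); carrying them forward keeps consecutive copies
// ordered.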
4531 // If the source register was virtual and if we know something about it,
4532 // add an assert node.
4533 if (TargetRegisterInfo::isVirtualRegister(Regs[Part+i]) &&
4534 RegisterVT.isInteger() && !RegisterVT.isVector()) {
4535 unsigned SlotNo = Regs[Part+i]-TargetRegisterInfo::FirstVirtualRegister;
4536 FunctionLoweringInfo &FLI = DAG.getFunctionLoweringInfo();
4537 if (FLI.LiveOutRegInfo.size() > SlotNo) {
4538 FunctionLoweringInfo::LiveOutInfo &LOI = FLI.LiveOutRegInfo[SlotNo];
4540 unsigned RegSize = RegisterVT.getSizeInBits();
4541 unsigned NumSignBits = LOI.NumSignBits;
4542 unsigned NumZeroBits = LOI.KnownZero.countLeadingOnes();
4544 // FIXME: We capture more information than the dag can represent. For
4545 // now, just use the tightest assertzext/assertsext possible.
4547 MVT FromVT(MVT::Other);
4548 if (NumSignBits == RegSize)
4549 isSExt = true, FromVT = MVT::i1; // ASSERT SEXT 1
4550 else if (NumZeroBits >= RegSize-1)
4551 isSExt = false, FromVT = MVT::i1; // ASSERT ZEXT 1
4552 else if (NumSignBits > RegSize-8)
4553 isSExt = true, FromVT = MVT::i8; // ASSERT SEXT 8
4554 else if (NumZeroBits >= RegSize-8)
4555 isSExt = false, FromVT = MVT::i8; // ASSERT ZEXT 8
4556 else if (NumSignBits > RegSize-16)
4557 isSExt = true, FromVT = MVT::i16; // ASSERT SEXT 16
4558 else if (NumZeroBits >= RegSize-16)
4559 isSExt = false, FromVT = MVT::i16; // ASSERT ZEXT 16
4560 else if (NumSignBits > RegSize-32)
4561 isSExt = true, FromVT = MVT::i32; // ASSERT SEXT 32
4562 else if (NumZeroBits >= RegSize-32)
4563 isSExt = false, FromVT = MVT::i32; // ASSERT ZEXT 32
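// For example, when every bit duplicates the sign bit (NumSignBits == RegSize)
// the value is a sign-extended i1; each later case admits a wider source type,
// so the ladder ends up with the tightest assertion the known bits justify.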
4565 if (FromVT != MVT::Other) {
4566 P = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
4567 RegisterVT, P, DAG.getValueType(FromVT));
4576 Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(),
4577 NumRegs, RegisterVT, ValueVT);
4582 return DAG.getNode(ISD::MERGE_VALUES, dl,
4583 DAG.getVTList(&ValueVTs[0], ValueVTs.size()),
4584 &Values[0], ValueVTs.size());
4587 /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the
4588 /// specified value into the registers specified by this object. This uses
4589 /// Chain/Flag as the input and updates them for the output Chain/Flag.
4590 /// If the Flag pointer is NULL, no flag is used.
4591 void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl,
4592 SDValue &Chain, SDValue *Flag) const {
4593 // Get the list of the value's legal parts.
4594 unsigned NumRegs = Regs.size();
4595 SmallVector<SDValue, 8> Parts(NumRegs);
4596 for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
4597 MVT ValueVT = ValueVTs[Value];
4598 unsigned NumParts = TLI->getNumRegisters(ValueVT);
4599 MVT RegisterVT = RegVTs[Value];
4601 getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value),
4602 &Parts[Part], NumParts, RegisterVT);
4606 // Copy the parts into the registers.
4607 SmallVector<SDValue, 8> Chains(NumRegs);
4608 for (unsigned i = 0; i != NumRegs; ++i) {
4611 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
4613 Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
4614 *Flag = Part.getValue(1);
4616 Chains[i] = Part.getValue(0);
4619 if (NumRegs == 1 || Flag)
4620 // If NumRegs > 1 && Flag is used, then the use of the last CopyToReg is
4621 // flagged to it. That is, the CopyToReg nodes and the user are considered
4622 // a single scheduling unit. If we create a TokenFactor and return it as
4623 // chain, then the TokenFactor is both a predecessor (operand) of the
4624 // user as well as a successor (the TF operands are flagged to the user).
4625 // c1, f1 = CopyToReg
4626 // c2, f2 = CopyToReg
4627 // c3 = TokenFactor c1, c2
4630 Chain = Chains[NumRegs-1];
4632 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Chains[0], NumRegs);
4635 /// AddInlineAsmOperands - Add this value to the specified inlineasm node
4636 /// operand list. This adds the code marker and includes the number of
4637 /// values added into it.
4638 void RegsForValue::AddInlineAsmOperands(unsigned Code,
4639 bool HasMatching,unsigned MatchingIdx,
4641 std::vector<SDValue> &Ops) const {
4642 MVT IntPtrTy = DAG.getTargetLoweringInfo().getPointerTy();
4643 assert(Regs.size() < (1 << 13) && "Too many inline asm outputs!");
4644 unsigned Flag = Code | (Regs.size() << 3);
4646 Flag |= 0x80000000 | (MatchingIdx << 16);
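// The flag word packs the operand code in the low 3 bits and the register
// count in bits 3..15 (hence the < (1 << 13) assert above); when there is a
// matching operand, bit 31 is set and the matching index is stored starting
// at bit 16.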
4647 Ops.push_back(DAG.getTargetConstant(Flag, IntPtrTy));
4648 for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
4649 unsigned NumRegs = TLI->getNumRegisters(ValueVTs[Value]);
4650 MVT RegisterVT = RegVTs[Value];
4651 for (unsigned i = 0; i != NumRegs; ++i) {
4652 assert(Reg < Regs.size() && "Mismatch in # registers expected");
4653 Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
4658 /// isAllocatableRegister - If the specified register is safe to allocate,
4659 /// i.e. it isn't a stack pointer or some other special register, return the
4660 /// register class for the register. Otherwise, return null.
4661 static const TargetRegisterClass *
4662 isAllocatableRegister(unsigned Reg, MachineFunction &MF,
4663 const TargetLowering &TLI,
4664 const TargetRegisterInfo *TRI) {
4665 MVT FoundVT = MVT::Other;
4666 const TargetRegisterClass *FoundRC = 0;
4667 for (TargetRegisterInfo::regclass_iterator RCI = TRI->regclass_begin(),
4668 E = TRI->regclass_end(); RCI != E; ++RCI) {
4669 MVT ThisVT = MVT::Other;
4671 const TargetRegisterClass *RC = *RCI;
4672 // If none of the value types for this register class are valid, we
4673 // can't use it. For example, 64-bit reg classes on 32-bit targets.
4674 for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
4676 if (TLI.isTypeLegal(*I)) {
4677 // If we have already found this register in a different register class,
4678 // choose the one with the largest VT specified. For example, on
4679 // PowerPC, we favor f64 register classes over f32.
4680 if (FoundVT == MVT::Other || FoundVT.bitsLT(*I)) {
4687 if (ThisVT == MVT::Other) continue;
4689 // NOTE: This isn't ideal. In particular, this might allocate the
4690 // frame pointer in functions that need it (due to them not being taken
4691 // out of allocation, because a variable sized allocation hasn't been seen
4692 // yet). This is a slight code pessimization, but should still work.
4693 for (TargetRegisterClass::iterator I = RC->allocation_order_begin(MF),
4694 E = RC->allocation_order_end(MF); I != E; ++I)
4696 // We found a matching register class. Keep looking at others in case
4697 // we find one with larger registers that this physreg is also in.
4708 /// AsmOperandInfo - This contains information for each constraint that we are
4709 /// lowering.
4710 class VISIBILITY_HIDDEN SDISelAsmOperandInfo :
4711 public TargetLowering::AsmOperandInfo {
4713 /// CallOperand - If this is the result output operand or a clobber,
4714 /// this is null; otherwise it is the incoming operand to the CallInst.
4715 /// This gets modified as the asm is processed.
4716 SDValue CallOperand;
4718 /// AssignedRegs - If this is a register or register class operand, this
4719 /// contains the set of registers corresponding to the operand.
4720 RegsForValue AssignedRegs;
4722 explicit SDISelAsmOperandInfo(const InlineAsm::ConstraintInfo &info)
4723 : TargetLowering::AsmOperandInfo(info), CallOperand(0,0) {
4726 /// MarkAllocatedRegs - Once AssignedRegs is set, mark the assigned registers
4727 /// busy in OutputRegs/InputRegs.
4728 void MarkAllocatedRegs(bool isOutReg, bool isInReg,
4729 std::set<unsigned> &OutputRegs,
4730 std::set<unsigned> &InputRegs,
4731 const TargetRegisterInfo &TRI) const {
4733 for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
4734 MarkRegAndAliases(AssignedRegs.Regs[i], OutputRegs, TRI);
4737 for (unsigned i = 0, e = AssignedRegs.Regs.size(); i != e; ++i)
4738 MarkRegAndAliases(AssignedRegs.Regs[i], InputRegs, TRI);
4742 /// getCallOperandValMVT - Return the MVT of the Value* that this operand
4743 /// corresponds to. If there is no Value* for this operand, it returns
4744 /// MVT::Other.
4745 MVT getCallOperandValMVT(const TargetLowering &TLI,
4746 const TargetData *TD) const {
4747 if (CallOperandVal == 0) return MVT::Other;
4749 if (isa<BasicBlock>(CallOperandVal))
4750 return TLI.getPointerTy();
4752 const llvm::Type *OpTy = CallOperandVal->getType();
4754 // If this is an indirect operand, the operand is a pointer to the
4755 // accessed type.
4757 OpTy = cast<PointerType>(OpTy)->getElementType();
4759 // If OpTy is not a single value, it may be a struct/union that we
4760 // can tile with integers.
4761 if (!OpTy->isSingleValueType() && OpTy->isSized()) {
4762 unsigned BitSize = TD->getTypeSizeInBits(OpTy);
4771 OpTy = IntegerType::get(BitSize);
4776 return TLI.getValueType(OpTy, true);
4780 /// MarkRegAndAliases - Mark the specified register and all aliases in the
4781 /// specified set.
4782 static void MarkRegAndAliases(unsigned Reg, std::set<unsigned> &Regs,
4783 const TargetRegisterInfo &TRI) {
4784 assert(TargetRegisterInfo::isPhysicalRegister(Reg) && "Isn't a physreg");
4786 if (const unsigned *Aliases = TRI.getAliasSet(Reg))
4787 for (; *Aliases; ++Aliases)
4788 Regs.insert(*Aliases);
4791 } // end llvm namespace.
4794 /// GetRegistersForValue - Assign registers (virtual or physical) for the
4795 /// specified operand. We prefer to assign virtual registers, to allow the
4796 /// register allocator to handle the assignment process. However, if the asm uses
4797 /// features that we can't model on machineinstrs, we have SDISel do the
4798 /// allocation. This produces generally horrible, but correct, code.
4800 /// OpInfo describes the operand.
4801 /// Input and OutputRegs are the set of already allocated physical registers.
4803 void SelectionDAGLowering::
4804 GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
4805 std::set<unsigned> &OutputRegs,
4806 std::set<unsigned> &InputRegs) {
4807 // Compute whether this value requires an input register, an output register,
4808 // or both.
4809 bool isOutReg = false;
4810 bool isInReg = false;
4811 switch (OpInfo.Type) {
4812 case InlineAsm::isOutput:
4815 // If there is an input constraint that matches this, we need to reserve
4816 // the input register so no other inputs allocate to it.
4817 isInReg = OpInfo.hasMatchingInput();
4819 case InlineAsm::isInput:
4823 case InlineAsm::isClobber:
4830 MachineFunction &MF = DAG.getMachineFunction();
4831 SmallVector<unsigned, 4> Regs;
4833 // If this is a constraint for a single physreg, or a constraint for a
4834 // register class, find it.
4835 std::pair<unsigned, const TargetRegisterClass*> PhysReg =
4836 TLI.getRegForInlineAsmConstraint(OpInfo.ConstraintCode,
4837 OpInfo.ConstraintVT);
4839 unsigned NumRegs = 1;
4840 if (OpInfo.ConstraintVT != MVT::Other) {
4841 // If this is an FP input in an integer register (or vice versa), insert a bit
4842 // cast of the input value. More generally, handle any case where the input
4843 // value disagrees with the register class we plan to stick this in.
4844 if (OpInfo.Type == InlineAsm::isInput &&
4845 PhysReg.second && !PhysReg.second->hasType(OpInfo.ConstraintVT)) {
4846 // Try to convert to the first MVT that the reg class contains. If the
4847 // types are identical size, use a bitcast to convert (e.g. two differing
4848 // vector types).
4849 MVT RegVT = *PhysReg.second->vt_begin();
4850 if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
4851 OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
4852 RegVT, OpInfo.CallOperand);
4853 OpInfo.ConstraintVT = RegVT;
4854 } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
4855 // If the input is a FP value and we want it in FP registers, do a
4856 // bitcast to the corresponding integer type. This turns an f64 value
4857 // into i64, which can be passed with two i32 values on a 32-bit
4858 // machine.
4859 RegVT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
4860 OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
4861 RegVT, OpInfo.CallOperand);
4862 OpInfo.ConstraintVT = RegVT;
4866 NumRegs = TLI.getNumRegisters(OpInfo.ConstraintVT);
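// A single constraint can require several registers when its value type is
// wider than one register of the chosen class (e.g. an i64 operand on a
// 32-bit target expands to two registers).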
4870 MVT ValueVT = OpInfo.ConstraintVT;
4872 // If this is a constraint for a specific physical register, like {r17},
4873 // assign it now.
4874 if (unsigned AssignedReg = PhysReg.first) {
4875 const TargetRegisterClass *RC = PhysReg.second;
4876 if (OpInfo.ConstraintVT == MVT::Other)
4877 ValueVT = *RC->vt_begin();
4879 // Get the actual register value type. This is important, because the user
4880 // may have asked for (e.g.) the AX register in i32 type. We need to
4881 // remember that AX is actually i16 to get the right extension.
4882 RegVT = *RC->vt_begin();
4884 // This is an explicit reference to a physical register.
4885 Regs.push_back(AssignedReg);
4887 // If this is an expanded reference, add the rest of the regs to Regs.
4889 TargetRegisterClass::iterator I = RC->begin();
4890 for (; *I != AssignedReg; ++I)
4891 assert(I != RC->end() && "Didn't find reg!");
4893 // Already added the first reg.
4895 for (; NumRegs; --NumRegs, ++I) {
4896 assert(I != RC->end() && "Ran out of registers to allocate!");
4900 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
4901 const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
4902 OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
4906 // Otherwise, if this was a reference to an LLVM register class, create vregs
4907 // for this reference.
4908 if (const TargetRegisterClass *RC = PhysReg.second) {
4909 RegVT = *RC->vt_begin();
4910 if (OpInfo.ConstraintVT == MVT::Other)
4913 // Create the appropriate number of virtual registers.
4914 MachineRegisterInfo &RegInfo = MF.getRegInfo();
4915 for (; NumRegs; --NumRegs)
4916 Regs.push_back(RegInfo.createVirtualRegister(RC));
4918 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, RegVT, ValueVT);
4922 // This is a reference to a register class that doesn't directly correspond
4923 // to an LLVM register class. Allocate NumRegs consecutive, available,
4924 // registers from the class.
4925 std::vector<unsigned> RegClassRegs
4926 = TLI.getRegClassForInlineAsmConstraint(OpInfo.ConstraintCode,
4927 OpInfo.ConstraintVT);
4929 const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
4930 unsigned NumAllocated = 0;
4931 for (unsigned i = 0, e = RegClassRegs.size(); i != e; ++i) {
4932 unsigned Reg = RegClassRegs[i];
4933 // See if this register is available.
4934 if ((isOutReg && OutputRegs.count(Reg)) || // Already used.
4935 (isInReg && InputRegs.count(Reg))) { // Already used.
4936 // Make sure we find consecutive registers.
4941 // Check to see if this register is allocatable (i.e. don't give out the
4942 // stack pointer).
4943 const TargetRegisterClass *RC = isAllocatableRegister(Reg, MF, TLI, TRI);
4944 if (!RC) { // Couldn't allocate this register.
4945 // Reset NumAllocated to make sure we return consecutive registers.
4950 // Okay, this register is good, we can use it.
4953 // If we allocated enough consecutive registers, succeed.
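// The run of NumAllocated free registers ends at index i, so it begins at
// i - NumAllocated + 1; mark that whole range as used below.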
4954 if (NumAllocated == NumRegs) {
4955 unsigned RegStart = (i-NumAllocated)+1;
4956 unsigned RegEnd = i+1;
4957 // Mark all of the allocated registers used.
4958 for (unsigned i = RegStart; i != RegEnd; ++i)
4959 Regs.push_back(RegClassRegs[i]);
4961 OpInfo.AssignedRegs = RegsForValue(TLI, Regs, *RC->vt_begin(),
4962 OpInfo.ConstraintVT);
4963 OpInfo.MarkAllocatedRegs(isOutReg, isInReg, OutputRegs, InputRegs, *TRI);
4968 // Otherwise, we couldn't allocate enough registers for this.
4971 /// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
4972 /// processed uses a memory 'm' constraint.
4974 hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
4975 const TargetLowering &TLI) {
4976 for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
4977 InlineAsm::ConstraintInfo &CI = CInfos[i];
4978 for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
4979 TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
4980 if (CType == TargetLowering::C_Memory)
4981 return true;
4982 }
4984 // Indirect operand accesses access memory.
4985 if (CI.isIndirect)
4986 return true;
4987 }
4989 return false;
4990 }
4992 /// visitInlineAsm - Handle a call to an InlineAsm object.
4994 void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
4995 InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
4997 /// ConstraintOperands - Information about all of the constraints.
4998 std::vector<SDISelAsmOperandInfo> ConstraintOperands;
5000 std::set<unsigned> OutputRegs, InputRegs;
5002 // Do a prepass over the constraints, canonicalizing them, and building up the
5003 // ConstraintOperands list.
5004 std::vector<InlineAsm::ConstraintInfo>
5005 ConstraintInfos = IA->ParseConstraints();
5007 bool hasMemory = hasInlineAsmMemConstraint(ConstraintInfos, TLI);
5009 SDValue Chain, Flag;
5011 // We won't need to flush pending loads if this asm doesn't touch
5012 // memory and is nonvolatile.
5013 if (hasMemory || IA->hasSideEffects())
5014 Chain = getRoot();
5015 else
5016 Chain = DAG.getRoot();
5018 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
5019 unsigned ResNo = 0; // ResNo - The result number of the next output.
5020 for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
5021 ConstraintOperands.push_back(SDISelAsmOperandInfo(ConstraintInfos[i]));
5022 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
5024 MVT OpVT = MVT::Other;
5026 // Compute the value type for each operand.
5027 switch (OpInfo.Type) {
5028 case InlineAsm::isOutput:
5029 // Indirect outputs just consume an argument.
5030 if (OpInfo.isIndirect) {
5031 OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
5032 break;
5033 }
5035 // The return value of the call is this value. As such, there is no
5036 // corresponding argument.
5037 assert(CS.getType() != Type::VoidTy && "Bad inline asm!");
5038 if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
5039 OpVT = TLI.getValueType(STy->getElementType(ResNo));
5040 } else {
5041 assert(ResNo == 0 && "Asm only has one result!");
5042 OpVT = TLI.getValueType(CS.getType());
5043 }
5044 ++ResNo;
5045 break;
5046 case InlineAsm::isInput:
5047 OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
5048 break;
5049 case InlineAsm::isClobber:
5050 break;
5051 }
5054 // If this is an input or an indirect output, process the call argument.
5055 // BasicBlocks are labels, currently appearing only in asm's.
5056 if (OpInfo.CallOperandVal) {
5057 if (BasicBlock *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
5058 OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
5059 } else {
5060 OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
5061 }
5063 OpVT = OpInfo.getCallOperandValMVT(TLI, TD);
5064 }
5066 OpInfo.ConstraintVT = OpVT;
5067 }
5069 // Second pass over the constraints: compute which constraint option to use
5070 // and assign registers to constraints that want a specific physreg.
5071 for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
5072 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5074 // If this is an output operand with a matching input operand, look up the
5075 // matching input. If their types mismatch, e.g. one is an integer, the
5076 // other is floating point, or their sizes are different, flag it as an
5077 // error.
5078 if (OpInfo.hasMatchingInput()) {
5079 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
5080 if (OpInfo.ConstraintVT != Input.ConstraintVT) {
5081 if ((OpInfo.ConstraintVT.isInteger() !=
5082 Input.ConstraintVT.isInteger()) ||
5083 (OpInfo.ConstraintVT.getSizeInBits() !=
5084 Input.ConstraintVT.getSizeInBits())) {
5085 llvm_report_error("llvm: error: Unsupported asm: input constraint"
5086 " with a matching output constraint of incompatible"
5089 Input.ConstraintVT = OpInfo.ConstraintVT;
5093 // Compute the constraint code and ConstraintType to use.
5094 TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, hasMemory, &DAG);
5096 // If this is a memory input, and if the operand is not indirect, do what we
5097 // need to in order to provide an address for the memory input.
5098 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
5099 !OpInfo.isIndirect) {
5100 assert(OpInfo.Type == InlineAsm::isInput &&
5101 "Can only indirectify direct input operands!");
5103 // Memory operands really want the address of the value. If we don't have
5104 // an indirect input, put it in the constpool if we can, otherwise spill
5105 // it to a stack slot.
5107 // If the operand is a float, integer, or vector constant, spill to a
5108 // constant pool entry to get its address.
5109 Value *OpVal = OpInfo.CallOperandVal;
5110 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
5111 isa<ConstantVector>(OpVal)) {
5112 OpInfo.CallOperand = DAG.getConstantPool(cast<Constant>(OpVal),
5113 TLI.getPointerTy());
5114 } else {
5115 // Otherwise, create a stack slot and emit a store to it before the
5116 // asm.
5117 const Type *Ty = OpVal->getType();
5118 uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
5119 unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(Ty);
5120 MachineFunction &MF = DAG.getMachineFunction();
5121 int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align);
5122 SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
5123 Chain = DAG.getStore(Chain, getCurDebugLoc(),
5124 OpInfo.CallOperand, StackSlot, NULL, 0);
5125 OpInfo.CallOperand = StackSlot;
5126 }
5128 // There is no longer a Value* corresponding to this operand.
5129 OpInfo.CallOperandVal = 0;
5130 // It is now an indirect operand.
5131 OpInfo.isIndirect = true;
5132 }
5134 // If this constraint is for a specific register, allocate it before
5135 // anything else.
5136 if (OpInfo.ConstraintType == TargetLowering::C_Register)
5137 GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
5138 }
5139 ConstraintInfos.clear();
5142 // Third pass - Loop over all of the operands, assigning virtual or physregs
5143 // to register class operands.
5144 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
5145 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5147 // C_Register operands have already been allocated, Other/Memory don't need
5148 // to be allocated.
5149 if (OpInfo.ConstraintType == TargetLowering::C_RegisterClass)
5150 GetRegistersForValue(OpInfo, OutputRegs, InputRegs);
5151 }
5153 // AsmNodeOperands - The operands for the ISD::INLINEASM node.
5154 std::vector<SDValue> AsmNodeOperands;
5155 AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
5156 AsmNodeOperands.push_back(
5157 DAG.getTargetExternalSymbol(IA->getAsmString().c_str(), MVT::Other));
5160 // Loop over all of the inputs, copying the operand values into the
5161 // appropriate registers and processing the output regs.
5162 RegsForValue RetValRegs;
5164 // IndirectStoresToEmit - The set of stores to emit after the inline asm node.
5165 std::vector<std::pair<RegsForValue, Value*> > IndirectStoresToEmit;
5167 for (unsigned i = 0, e = ConstraintOperands.size(); i != e; ++i) {
5168 SDISelAsmOperandInfo &OpInfo = ConstraintOperands[i];
5170 switch (OpInfo.Type) {
5171 case InlineAsm::isOutput: {
5172 if (OpInfo.ConstraintType != TargetLowering::C_RegisterClass &&
5173 OpInfo.ConstraintType != TargetLowering::C_Register) {
5174 // Memory output, or 'other' output (e.g. 'X' constraint).
5175 assert(OpInfo.isIndirect && "Memory output must be indirect operand");
5177 // Add information to the INLINEASM node to know about this output.
5178 unsigned ResOpType = 4/*MEM*/ | (1<<3);
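// The operand flag word packs the operand kind into its low three bits
// (4 = memory) and the number of following operands into the bits above;
// a memory output contributes a single address operand.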
5179 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5180 TLI.getPointerTy()));
5181 AsmNodeOperands.push_back(OpInfo.CallOperand);
5182 break;
5183 }
5185 // Otherwise, this is a register or register class output.
5187 // Copy the output from the appropriate register. Find a register that
5188 // we can use.
5189 if (OpInfo.AssignedRegs.Regs.empty()) {
5190 llvm_report_error("llvm: error: Couldn't allocate output reg for"
5191 " constraint '" + OpInfo.ConstraintCode + "'!");
5194 // If this is an indirect operand, store through the pointer after the
5196 if (OpInfo.isIndirect) {
5197 IndirectStoresToEmit.push_back(std::make_pair(OpInfo.AssignedRegs,
5198 OpInfo.CallOperandVal));
5199 } else {
5200 // This is the result value of the call.
5201 assert(CS.getType() != Type::VoidTy && "Bad inline asm!");
5202 // Concatenate this output onto the outputs list.
5203 RetValRegs.append(OpInfo.AssignedRegs);
5204 }
5206 // Add information to the INLINEASM node to know that this register is
5207 // set.
5208 OpInfo.AssignedRegs.AddInlineAsmOperands(OpInfo.isEarlyClobber ?
5209 6 /* EARLYCLOBBER REGDEF */ :
5210 2 /* REGDEF */,
5211 false,
5212 0,
5213 DAG, AsmNodeOperands);
5214 break;
5215 }
5216 case InlineAsm::isInput: {
5217 SDValue InOperandVal = OpInfo.CallOperand;
5219 if (OpInfo.isMatchingInputConstraint()) { // Matching constraint?
5220 // If this is required to match an output register we have already set,
5221 // just use its register.
5222 unsigned OperandNo = OpInfo.getMatchedOperand();
5224 // Scan until we find the definition we already emitted of this operand.
5225 // When we find it, create a RegsForValue operand.
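// Operand 0 of the INLINEASM node is the chain and operand 1 is the asm
// string, so the scan over operand definitions starts at index 2.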
5226 unsigned CurOp = 2; // The first operand.
5227 for (; OperandNo; --OperandNo) {
5228 // Advance to the next operand.
5229 unsigned OpFlag =
5230 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
5231 assert(((OpFlag & 7) == 2 /*REGDEF*/ ||
5232 (OpFlag & 7) == 6 /*EARLYCLOBBER REGDEF*/ ||
5233 (OpFlag & 7) == 4 /*MEM*/) &&
5234 "Skipped past definitions?");
5235 CurOp += InlineAsm::getNumOperandRegisters(OpFlag)+1;
5236 }
5238 unsigned OpFlag =
5239 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
5240 if ((OpFlag & 7) == 2 /*REGDEF*/
5241 || (OpFlag & 7) == 6 /* EARLYCLOBBER REGDEF */) {
5242 // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
5243 if (OpInfo.isIndirect) {
5244 llvm_report_error("llvm: error: "
5245 "Don't know how to handle tied indirect "
5246 "register inputs yet!");
5248 RegsForValue MatchedRegs;
5249 MatchedRegs.TLI = &TLI;
5250 MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());
5251 MVT RegVT = AsmNodeOperands[CurOp+1].getValueType();
5252 MatchedRegs.RegVTs.push_back(RegVT);
5253 MachineRegisterInfo &RegInfo = DAG.getMachineFunction().getRegInfo();
5254 for (unsigned i = 0, e = InlineAsm::getNumOperandRegisters(OpFlag);
5255 i != e; ++i)
5256 MatchedRegs.Regs.
5257 push_back(RegInfo.createVirtualRegister(TLI.getRegClassFor(RegVT)));
5259 // Use the produced MatchedRegs object to copy the operand value into the matched registers.
5260 MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
5261 Chain, &Flag);
5262 MatchedRegs.AddInlineAsmOperands(1 /*REGUSE*/,
5263 true, OpInfo.getMatchedOperand(),
5264 DAG, AsmNodeOperands);
5265 break;
5266 } else {
5267 assert(((OpFlag & 7) == 4) && "Unknown matching constraint!");
5268 assert((InlineAsm::getNumOperandRegisters(OpFlag)) == 1 &&
5269 "Unexpected number of operands");
5270 // Add information to the INLINEASM node to know about this input.
5271 // See InlineAsm.h isUseOperandTiedToDef.
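// Bit 31 marks this use as tied to a def; bits 16 and up record the index
// of the output operand it matches.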
5272 OpFlag |= 0x80000000 | (OpInfo.getMatchedOperand() << 16);
5273 AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlag,
5274 TLI.getPointerTy()));
5275 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
5276 break;
5277 }
5278 }
5280 if (OpInfo.ConstraintType == TargetLowering::C_Other) {
5281 assert(!OpInfo.isIndirect &&
5282 "Don't know how to handle indirect other inputs yet!");
5284 std::vector<SDValue> Ops;
5285 TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode[0],
5286 hasMemory, Ops, DAG);
5287 if (Ops.empty()) {
5288 llvm_report_error("llvm: error: Invalid operand for inline asm"
5289 " constraint '" + OpInfo.ConstraintCode + "'!");
5290 }
5292 // Add information to the INLINEASM node to know about this input.
5293 unsigned ResOpType = 3 /*IMM*/ | (Ops.size() << 3);
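// Immediate operands use kind 3; the count field records how many lowered
// SDValues follow for this constraint.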
5294 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5295 TLI.getPointerTy()));
5296 AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
5297 break;
5298 } else if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
5299 assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
5300 assert(InOperandVal.getValueType() == TLI.getPointerTy() &&
5301 "Memory operands expect pointer values");
5303 // Add information to the INLINEASM node to know about this input.
5304 unsigned ResOpType = 4/*MEM*/ | (1<<3);
5305 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
5306 TLI.getPointerTy()));
5307 AsmNodeOperands.push_back(InOperandVal);
5308 break;
5309 }
5311 assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
5312 OpInfo.ConstraintType == TargetLowering::C_Register) &&
5313 "Unknown constraint type!");
5314 assert(!OpInfo.isIndirect &&
5315 "Don't know how to handle indirect register inputs yet!");
5317 // Copy the input into the appropriate registers.
5318 if (OpInfo.AssignedRegs.Regs.empty()) {
5319 llvm_report_error("llvm: error: Couldn't allocate input reg for"
5320 " constraint '"+ OpInfo.ConstraintCode +"'!");
5323 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(),
5326 OpInfo.AssignedRegs.AddInlineAsmOperands(1/*REGUSE*/, false, 0,
5327 DAG, AsmNodeOperands);
5330 case InlineAsm::isClobber: {
5331 // Add the clobbered value to the operand list, so that the register
5332 // allocator is aware that the physreg got clobbered.
5333 if (!OpInfo.AssignedRegs.Regs.empty())
5334 OpInfo.AssignedRegs.AddInlineAsmOperands(6 /* EARLYCLOBBER REGDEF */,
5335 false, 0, DAG, AsmNodeOperands);
5336 break;
5337 }
5338 }
5339 }
5341 // Finish up input operands.
5342 AsmNodeOperands[0] = Chain;
5343 if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
5345 Chain = DAG.getNode(ISD::INLINEASM, getCurDebugLoc(),
5346 DAG.getVTList(MVT::Other, MVT::Flag),
5347 &AsmNodeOperands[0], AsmNodeOperands.size());
5348 Flag = Chain.getValue(1);
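// Result 0 of the INLINEASM node is the output chain; result 1 is the flag
// value that ties the register copies below to the asm node.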
5350 // If this asm returns a register value, copy the result from that register
5351 // and set it as the value of the call.
5352 if (!RetValRegs.Regs.empty()) {
5353 SDValue Val = RetValRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
5354 Chain, &Flag);
5356 // FIXME: Why don't we do this for inline asms with MRVs?
5357 if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
5358 MVT ResultType = TLI.getValueType(CS.getType());
5360 // If any of the results of the inline asm is a vector, it may have the
5361 // wrong width/num elts. This can happen for register classes that can
5362 // contain multiple different value types. The preg or vreg allocated may
5363 // not have the same VT as was expected. Convert it to the right type
5364 // with bit_convert.
5365 if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
5366 Val = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
5367 ResultType, Val);
5369 } else if (ResultType != Val.getValueType() &&
5370 ResultType.isInteger() && Val.getValueType().isInteger()) {
5371 // If a result value was tied to an input value, the computed result may
5372 // have a wider width than the expected result. Extract the relevant
5373 // portion.
5374 Val = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), ResultType, Val);
5375 }
5377 assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
5378 }
5380 setValue(CS.getInstruction(), Val);
5381 // Don't need to use this as a chain in this case.
5382 if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())
5383 return;
5384 }
5386 std::vector<std::pair<SDValue, Value*> > StoresToEmit;
5388 // Process indirect outputs, first output all of the flagged copies out of
5389 // the physregs.
5390 for (unsigned i = 0, e = IndirectStoresToEmit.size(); i != e; ++i) {
5391 RegsForValue &OutRegs = IndirectStoresToEmit[i].first;
5392 Value *Ptr = IndirectStoresToEmit[i].second;
5393 SDValue OutVal = OutRegs.getCopyFromRegs(DAG, getCurDebugLoc(),
5394 Chain, &Flag);
5395 StoresToEmit.push_back(std::make_pair(OutVal, Ptr));
5396 }
5399 // Emit the non-flagged stores from the physregs.
5400 SmallVector<SDValue, 8> OutChains;
5401 for (unsigned i = 0, e = StoresToEmit.size(); i != e; ++i)
5402 OutChains.push_back(DAG.getStore(Chain, getCurDebugLoc(),
5403 StoresToEmit[i].first,
5404 getValue(StoresToEmit[i].second),
5405 StoresToEmit[i].second, 0));
5406 if (!OutChains.empty())
5407 Chain = DAG.getNode(ISD::TokenFactor, getCurDebugLoc(), MVT::Other,
5408 &OutChains[0], OutChains.size());
5410 DAG.setRoot(Chain);
5411 }
5413 void SelectionDAGLowering::visitMalloc(MallocInst &I) {
5414 SDValue Src = getValue(I.getOperand(0));
5416 // Scale up by the type size in the original i32 type width. Various
5417 // mid-level optimizers may make assumptions about demanded bits etc from the
5418 // i32-ness of the operand: we do not want to promote to i64 and then
5419 // multiply on 64-bit targets.
5420 // FIXME: Malloc inst should go away: PR715.
5421 uint64_t ElementSize = TD->getTypeAllocSize(I.getType()->getElementType());
5422 if (ElementSize != 1)
5423 Src = DAG.getNode(ISD::MUL, getCurDebugLoc(), Src.getValueType(),
5424 Src, DAG.getConstant(ElementSize, Src.getValueType()));
5426 MVT IntPtr = TLI.getPointerTy();
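// The libcall expects the allocation size as a pointer-sized integer, so
// truncate or zero-extend the scaled size to the target's pointer width.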
5428 if (IntPtr.bitsLT(Src.getValueType()))
5429 Src = DAG.getNode(ISD::TRUNCATE, getCurDebugLoc(), IntPtr, Src);
5430 else if (IntPtr.bitsGT(Src.getValueType()))
5431 Src = DAG.getNode(ISD::ZERO_EXTEND, getCurDebugLoc(), IntPtr, Src);
5433 TargetLowering::ArgListTy Args;
5434 TargetLowering::ArgListEntry Entry;
5435 Entry.Node = Src;
5436 Entry.Ty = TLI.getTargetData()->getIntPtrType();
5437 Args.push_back(Entry);
5439 std::pair<SDValue,SDValue> Result =
5440 TLI.LowerCallTo(getRoot(), I.getType(), false, false, false, false,
5441 0, CallingConv::C, PerformTailCallOpt,
5442 DAG.getExternalSymbol("malloc", IntPtr),
5443 Args, DAG, getCurDebugLoc());
5444 setValue(&I, Result.first); // Pointers always fit in registers
5445 DAG.setRoot(Result.second);
5446 }
5448 void SelectionDAGLowering::visitFree(FreeInst &I) {
5449 TargetLowering::ArgListTy Args;
5450 TargetLowering::ArgListEntry Entry;
5451 Entry.Node = getValue(I.getOperand(0));
5452 Entry.Ty = TLI.getTargetData()->getIntPtrType();
5453 Args.push_back(Entry);
5454 MVT IntPtr = TLI.getPointerTy();
5455 std::pair<SDValue,SDValue> Result =
5456 TLI.LowerCallTo(getRoot(), Type::VoidTy, false, false, false, false,
5457 0, CallingConv::C, PerformTailCallOpt,
5458 DAG.getExternalSymbol("free", IntPtr), Args, DAG,
5460 DAG.setRoot(Result.second);
5463 void SelectionDAGLowering::visitVAStart(CallInst &I) {
5464 DAG.setRoot(DAG.getNode(ISD::VASTART, getCurDebugLoc(),
5465 MVT::Other, getRoot(),
5466 getValue(I.getOperand(1)),
5467 DAG.getSrcValue(I.getOperand(1))));
5468 }
5470 void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
5471 SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurDebugLoc(),
5472 getRoot(), getValue(I.getOperand(0)),
5473 DAG.getSrcValue(I.getOperand(0)));
5474 setValue(&I, V);
5475 DAG.setRoot(V.getValue(1));
5476 }
5478 void SelectionDAGLowering::visitVAEnd(CallInst &I) {
5479 DAG.setRoot(DAG.getNode(ISD::VAEND, getCurDebugLoc(),
5480 MVT::Other, getRoot(),
5481 getValue(I.getOperand(1)),
5482 DAG.getSrcValue(I.getOperand(1))));
5483 }
5485 void SelectionDAGLowering::visitVACopy(CallInst &I) {
5486 DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurDebugLoc(),
5487 MVT::Other, getRoot(),
5488 getValue(I.getOperand(1)),
5489 getValue(I.getOperand(2)),
5490 DAG.getSrcValue(I.getOperand(1)),
5491 DAG.getSrcValue(I.getOperand(2))));
5492 }
5494 /// TargetLowering::LowerArguments - This is the default LowerArguments
5495 /// implementation, which just inserts a FORMAL_ARGUMENTS node. FIXME: When all
5496 /// targets are migrated to using FORMAL_ARGUMENTS, this hook should be
5497 /// integrated into SDISel.
5498 void TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG,
5499 SmallVectorImpl<SDValue> &ArgValues,
5500 DebugLoc dl) {
5501 // Add CC# and isVararg as operands to the FORMAL_ARGUMENTS node.
5502 SmallVector<SDValue, 3+16> Ops;
5503 Ops.push_back(DAG.getRoot());
5504 Ops.push_back(DAG.getConstant(F.getCallingConv(), getPointerTy()));
5505 Ops.push_back(DAG.getConstant(F.isVarArg(), getPointerTy()));
5507 // Add one result value for each formal argument.
5508 SmallVector<MVT, 16> RetVals;
5509 unsigned j = 1;
5510 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
5511 I != E; ++I, ++j) {
5512 SmallVector<MVT, 4> ValueVTs;
5513 ComputeValueVTs(*this, I->getType(), ValueVTs);
5514 for (unsigned Value = 0, NumValues = ValueVTs.size();
5515 Value != NumValues; ++Value) {
5516 MVT VT = ValueVTs[Value];
5517 const Type *ArgTy = VT.getTypeForMVT(*DAG.getContext());
5518 ISD::ArgFlagsTy Flags;
5519 unsigned OriginalAlignment =
5520 getTargetData()->getABITypeAlignment(ArgTy);
5522 if (F.paramHasAttr(j, Attribute::ZExt))
5523 Flags.setZExt();
5524 if (F.paramHasAttr(j, Attribute::SExt))
5525 Flags.setSExt();
5526 if (F.paramHasAttr(j, Attribute::InReg))
5527 Flags.setInReg();
5528 if (F.paramHasAttr(j, Attribute::StructRet))
5529 Flags.setSRet();
5530 if (F.paramHasAttr(j, Attribute::ByVal)) {
5531 Flags.setByVal();
5532 const PointerType *Ty = cast<PointerType>(I->getType());
5533 const Type *ElementTy = Ty->getElementType();
5534 unsigned FrameAlign = getByValTypeAlignment(ElementTy);
5535 unsigned FrameSize = getTargetData()->getTypeAllocSize(ElementTy);
5536 // For ByVal, alignment should be passed from FE. BE will guess if
5537 // this info is not there but there are cases it cannot get right.
5538 if (F.getParamAlignment(j))
5539 FrameAlign = F.getParamAlignment(j);
5540 Flags.setByValAlign(FrameAlign);
5541 Flags.setByValSize(FrameSize);
5542 }
5543 if (F.paramHasAttr(j, Attribute::Nest))
5544 Flags.setNest();
5545 Flags.setOrigAlign(OriginalAlignment);
5547 MVT RegisterVT = getRegisterType(VT);
5548 unsigned NumRegs = getNumRegisters(VT);
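// A value that is not legal as a single register is split into NumRegs
// pieces of type RegisterVT; each piece gets its own result and flags entry.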
5549 for (unsigned i = 0; i != NumRegs; ++i) {
5550 RetVals.push_back(RegisterVT);
5551 ISD::ArgFlagsTy MyFlags = Flags;
5552 if (NumRegs > 1 && i == 0)
5553 MyFlags.setSplit();
5554 // if it isn't first piece, alignment must be 1
5555 else if (i > 0)
5556 MyFlags.setOrigAlign(1);
5557 Ops.push_back(DAG.getArgFlags(MyFlags));
5558 }
5559 }
5560 }
5562 RetVals.push_back(MVT::Other);
5564 // Create the node.
5565 SDNode *Result = DAG.getNode(ISD::FORMAL_ARGUMENTS, dl,
5566 DAG.getVTList(&RetVals[0], RetVals.size()),
5567 &Ops[0], Ops.size()).getNode();
5569 // Prelower FORMAL_ARGUMENTS. This isn't required for functionality, but
5570 // allows exposing the loads that may be part of the argument access to the
5571 // first DAGCombiner pass.
5572 SDValue TmpRes = LowerOperation(SDValue(Result, 0), DAG);
5574 // The number of results should match up, except that the lowered one may have
5575 // an extra flag result.
5576 assert((Result->getNumValues() == TmpRes.getNode()->getNumValues() ||
5577 (Result->getNumValues()+1 == TmpRes.getNode()->getNumValues() &&
5578 TmpRes.getValue(Result->getNumValues()).getValueType() == MVT::Flag))
5579 && "Lowering produced unexpected number of results!");
5581 // The FORMAL_ARGUMENTS node itself is likely no longer needed.
5582 if (Result != TmpRes.getNode() && Result->use_empty()) {
5583 HandleSDNode Dummy(DAG.getRoot());
5584 DAG.RemoveDeadNode(Result);
5585 }
5587 Result = TmpRes.getNode();
5589 unsigned NumArgRegs = Result->getNumValues() - 1;
5590 DAG.setRoot(SDValue(Result, NumArgRegs));
5592 // Set up the return result vector.
5593 unsigned i = 0;
5594 unsigned Idx = 1;
5595 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
5596 ++I, ++Idx) {
5597 SmallVector<MVT, 4> ValueVTs;
5598 ComputeValueVTs(*this, I->getType(), ValueVTs);
5599 for (unsigned Value = 0, NumValues = ValueVTs.size();
5600 Value != NumValues; ++Value) {
5601 MVT VT = ValueVTs[Value];
5602 MVT PartVT = getRegisterType(VT);
5604 unsigned NumParts = getNumRegisters(VT);
5605 SmallVector<SDValue, 4> Parts(NumParts);
5606 for (unsigned j = 0; j != NumParts; ++j)
5607 Parts[j] = SDValue(Result, i++);
5609 ISD::NodeType AssertOp = ISD::DELETED_NODE;
5610 if (F.paramHasAttr(Idx, Attribute::SExt))
5611 AssertOp = ISD::AssertSext;
5612 else if (F.paramHasAttr(Idx, Attribute::ZExt))
5613 AssertOp = ISD::AssertZext;
5615 ArgValues.push_back(getCopyFromParts(DAG, dl, &Parts[0], NumParts,
5616 PartVT, VT, AssertOp));
5617 }
5618 }
5619 assert(i == NumArgRegs && "Argument register count mismatch!");
5620 }
5623 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
5624 /// implementation, which just inserts an ISD::CALL node, which is later custom
5625 /// lowered by the target to something concrete. FIXME: When all targets are
5626 /// migrated to using ISD::CALL, this hook should be integrated into SDISel.
5627 std::pair<SDValue, SDValue>
5628 TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
5629 bool RetSExt, bool RetZExt, bool isVarArg,
5630 bool isInreg, unsigned NumFixedArgs,
5631 unsigned CallingConv, bool isTailCall,
5632 SDValue Callee,
5633 ArgListTy &Args, SelectionDAG &DAG, DebugLoc dl) {
5634 assert((!isTailCall || PerformTailCallOpt) &&
5635 "isTailCall set when tail-call optimizations are disabled!");
5637 SmallVector<SDValue, 32> Ops;
5638 Ops.push_back(Chain); // Op#0 - Chain
5639 Ops.push_back(Callee);
5641 // Handle all of the outgoing arguments.
5642 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
5643 SmallVector<MVT, 4> ValueVTs;
5644 ComputeValueVTs(*this, Args[i].Ty, ValueVTs);
5645 for (unsigned Value = 0, NumValues = ValueVTs.size();
5646 Value != NumValues; ++Value) {
5647 MVT VT = ValueVTs[Value];
5648 const Type *ArgTy = VT.getTypeForMVT(*DAG.getContext());
5649 SDValue Op = SDValue(Args[i].Node.getNode(),
5650 Args[i].Node.getResNo() + Value);
5651 ISD::ArgFlagsTy Flags;
5652 unsigned OriginalAlignment =
5653 getTargetData()->getABITypeAlignment(ArgTy);
5655 if (Args[i].isZExt)
5656 Flags.setZExt();
5657 if (Args[i].isSExt)
5658 Flags.setSExt();
5659 if (Args[i].isInReg)
5660 Flags.setInReg();
5661 if (Args[i].isSRet)
5662 Flags.setSRet();
5663 if (Args[i].isByVal) {
5664 Flags.setByVal();
5665 const PointerType *Ty = cast<PointerType>(Args[i].Ty);
5666 const Type *ElementTy = Ty->getElementType();
5667 unsigned FrameAlign = getByValTypeAlignment(ElementTy);
5668 unsigned FrameSize = getTargetData()->getTypeAllocSize(ElementTy);
5669 // For ByVal, alignment should come from FE. BE will guess if this
5670 // info is not there but there are cases it cannot get right.
5671 if (Args[i].Alignment)
5672 FrameAlign = Args[i].Alignment;
5673 Flags.setByValAlign(FrameAlign);
5674 Flags.setByValSize(FrameSize);
5675 }
5676 if (Args[i].isNest)
5677 Flags.setNest();
5678 Flags.setOrigAlign(OriginalAlignment);
5680 MVT PartVT = getRegisterType(VT);
5681 unsigned NumParts = getNumRegisters(VT);
5682 SmallVector<SDValue, 4> Parts(NumParts);
5683 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
5685 if (Args[i].isSExt)
5686 ExtendKind = ISD::SIGN_EXTEND;
5687 else if (Args[i].isZExt)
5688 ExtendKind = ISD::ZERO_EXTEND;
5690 getCopyToParts(DAG, dl, Op, &Parts[0], NumParts, PartVT, ExtendKind);
5692 for (unsigned i = 0; i != NumParts; ++i) {
5693 // if it isn't first piece, alignment must be 1
5694 ISD::ArgFlagsTy MyFlags = Flags;
5695 if (NumParts > 1 && i == 0)
5696 MyFlags.setSplit();
5697 else if (i != 0)
5698 MyFlags.setOrigAlign(1);
5700 Ops.push_back(Parts[i]);
5701 Ops.push_back(DAG.getArgFlags(MyFlags));
5702 }
5703 }
5704 }
5706 // Figure out the result value types. We start by making a list of
5707 // the potentially illegal return value types.
5708 SmallVector<MVT, 4> LoweredRetTys;
5709 SmallVector<MVT, 4> RetTys;
5710 ComputeValueVTs(*this, RetTy, RetTys);
5712 // Then we translate that to a list of legal types.
5713 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
5714 MVT VT = RetTys[I];
5715 MVT RegisterVT = getRegisterType(VT);
5716 unsigned NumRegs = getNumRegisters(VT);
5717 for (unsigned i = 0; i != NumRegs; ++i)
5718 LoweredRetTys.push_back(RegisterVT);
5719 }
5721 LoweredRetTys.push_back(MVT::Other); // Always has a chain.
5723 // Create the CALL node.
5724 SDValue Res = DAG.getCall(CallingConv, dl,
5725 isVarArg, isTailCall, isInreg,
5726 DAG.getVTList(&LoweredRetTys[0],
5727 LoweredRetTys.size()),
5728 &Ops[0], Ops.size(), NumFixedArgs
5729 );
5730 Chain = Res.getValue(LoweredRetTys.size() - 1);
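// LoweredRetTys ends with MVT::Other, so the last value produced by the CALL
// node is its output chain.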
5732 // Gather up the call result into a single value.
5733 if (RetTy != Type::VoidTy && !RetTys.empty()) {
5734 ISD::NodeType AssertOp = ISD::DELETED_NODE;
5736 if (RetSExt)
5737 AssertOp = ISD::AssertSext;
5738 else if (RetZExt)
5739 AssertOp = ISD::AssertZext;
5741 SmallVector<SDValue, 4> ReturnValues;
5742 unsigned RegNo = 0;
5743 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
5744 MVT VT = RetTys[I];
5745 MVT RegisterVT = getRegisterType(VT);
5746 unsigned NumRegs = getNumRegisters(VT);
5747 unsigned RegNoEnd = NumRegs + RegNo;
5748 SmallVector<SDValue, 4> Results;
5749 for (; RegNo != RegNoEnd; ++RegNo)
5750 Results.push_back(Res.getValue(RegNo));
5751 SDValue ReturnValue =
5752 getCopyFromParts(DAG, dl, &Results[0], NumRegs, RegisterVT, VT,
5753 AssertOp);
5754 ReturnValues.push_back(ReturnValue);
5755 }
5756 Res = DAG.getNode(ISD::MERGE_VALUES, dl,
5757 DAG.getVTList(&RetTys[0], RetTys.size()),
5758 &ReturnValues[0], ReturnValues.size());
5759 }
5761 return std::make_pair(Res, Chain);
5762 }
5764 void TargetLowering::LowerOperationWrapper(SDNode *N,
5765 SmallVectorImpl<SDValue> &Results,
5766 SelectionDAG &DAG) {
5767 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
5768 if (Res.getNode())
5769 Results.push_back(Res);
5770 }
5772 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
5773 llvm_unreachable("LowerOperation not implemented for this target!");
5774 return SDValue();
5775 }
5778 void SelectionDAGLowering::CopyValueToVirtualRegister(Value *V, unsigned Reg) {
5779 SDValue Op = getValue(V);
5780 assert((Op.getOpcode() != ISD::CopyFromReg ||
5781 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
5782 "Copy from a reg to the same reg!");
5783 assert(!TargetRegisterInfo::isPhysicalRegister(Reg) && "Is a physreg");
5785 RegsForValue RFV(TLI, Reg, V->getType());
5786 SDValue Chain = DAG.getEntryNode();
5787 RFV.getCopyToRegs(Op, DAG, getCurDebugLoc(), Chain, 0);
5788 PendingExports.push_back(Chain);
5789 }
5791 #include "llvm/CodeGen/SelectionDAGISel.h"
5793 void SelectionDAGISel::
5794 LowerArguments(BasicBlock *LLVMBB) {
5795 // If this is the entry block, emit arguments.
5796 Function &F = *LLVMBB->getParent();
5797 SDValue OldRoot = SDL->DAG.getRoot();
5798 SmallVector<SDValue, 16> Args;
5799 TLI.LowerArguments(F, SDL->DAG, Args, SDL->getCurDebugLoc());
5801 unsigned a = 0;
5802 for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end();
5803 AI != E; ++AI) {
5804 SmallVector<MVT, 4> ValueVTs;
5805 ComputeValueVTs(TLI, AI->getType(), ValueVTs);
5806 unsigned NumValues = ValueVTs.size();
5807 if (!AI->use_empty()) {
5808 SDL->setValue(AI, SDL->DAG.getMergeValues(&Args[a], NumValues,
5809 SDL->getCurDebugLoc()));
5810 // If this argument is live outside of the entry block, insert a copy from
5811 // wherever we got it to the vreg that other BB's will reference it as.
5812 SDL->CopyToExportRegsIfNeeded(AI);
5813 }
5814 a += NumValues;
5815 }
5817 // Finally, if the target has anything special to do, allow it to do so.
5818 // FIXME: this should insert code into the DAG!
5819 EmitFunctionEntryCode(F, SDL->DAG.getMachineFunction());
5820 }
5822 /// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
5823 /// ensure constants are generated when needed. Remember the virtual registers
5824 /// that need to be added to the Machine PHI nodes as input. We cannot just
5825 /// directly add them, because expansion might result in multiple MBB's for one
5826 /// BB. As such, the start of the BB might correspond to a different MBB than
5827 /// the end.
5828 ///
5829 void
5830 SelectionDAGISel::HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB) {
5831 TerminatorInst *TI = LLVMBB->getTerminator();
5833 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
5835 // Check successor nodes' PHI nodes that expect a constant to be available
5836 // from this block.
5837 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
5838 BasicBlock *SuccBB = TI->getSuccessor(succ);
5839 if (!isa<PHINode>(SuccBB->begin())) continue;
5840 MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
5842 // If this terminator has multiple identical successors (common for
5843 // switches), only handle each succ once.
5844 if (!SuccsHandled.insert(SuccMBB)) continue;
5846 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
5847 PHINode *PN;
5849 // At this point we know that there is a 1-1 correspondence between LLVM PHI
5850 // nodes and Machine PHI nodes, but the incoming operands have not been
5851 // emitted yet.
5852 for (BasicBlock::iterator I = SuccBB->begin();
5853 (PN = dyn_cast<PHINode>(I)); ++I) {
5854 // Ignore dead phi's.
5855 if (PN->use_empty()) continue;
5857 unsigned Reg;
5858 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
5860 if (Constant *C = dyn_cast<Constant>(PHIOp)) {
5861 unsigned &RegOut = SDL->ConstantsOut[C];
5862 if (RegOut == 0) {
5863 RegOut = FuncInfo->CreateRegForValue(C);
5864 SDL->CopyValueToVirtualRegister(C, RegOut);
5865 }
5866 Reg = RegOut;
5867 } else {
5868 Reg = FuncInfo->ValueMap[PHIOp];
5869 if (Reg == 0) {
5870 assert(isa<AllocaInst>(PHIOp) &&
5871 FuncInfo->StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
5872 "Didn't codegen value into a register!??");
5873 Reg = FuncInfo->CreateRegForValue(PHIOp);
5874 SDL->CopyValueToVirtualRegister(PHIOp, Reg);
5875 }
5876 }
5878 // Remember that this register needs to be added to the machine PHI node as
5879 // the input for this MBB.
5880 SmallVector<MVT, 4> ValueVTs;
5881 ComputeValueVTs(TLI, PN->getType(), ValueVTs);
5882 for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
5883 MVT VT = ValueVTs[vti];
5884 unsigned NumRegisters = TLI.getNumRegisters(VT);
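// Each legal register piece of the PHI's type has its own machine PHI node,
// so record one (PHI, vreg) pair per register and advance MBBI accordingly.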
5885 for (unsigned i = 0, e = NumRegisters; i != e; ++i)
5886 SDL->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
5887 Reg += NumRegisters;
5888 }
5889 }
5890 }
5891 SDL->ConstantsOut.clear();
5892 }
5894 /// This is the Fast-ISel version of HandlePHINodesInSuccessorBlocks. It only
5895 /// supports legal types, and it emits MachineInstrs directly instead of
5896 /// creating SelectionDAG nodes.
5898 bool
5899 SelectionDAGISel::HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB,
5900 FastISel *F) {
5901 TerminatorInst *TI = LLVMBB->getTerminator();
5903 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
5904 unsigned OrigNumPHINodesToUpdate = SDL->PHINodesToUpdate.size();
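// Remember the current size so the list can be truncated back to it if a PHI
// turns out to be unhandled and we have to fall back to SelectionDAG.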
5906 // Check successor nodes' PHI nodes that expect a constant to be available
5907 // from this block.
5908 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
5909 BasicBlock *SuccBB = TI->getSuccessor(succ);
5910 if (!isa<PHINode>(SuccBB->begin())) continue;
5911 MachineBasicBlock *SuccMBB = FuncInfo->MBBMap[SuccBB];
5913 // If this terminator has multiple identical successors (common for
5914 // switches), only handle each succ once.
5915 if (!SuccsHandled.insert(SuccMBB)) continue;
5917 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
5918 PHINode *PN;
5920 // At this point we know that there is a 1-1 correspondence between LLVM PHI
5921 // nodes and Machine PHI nodes, but the incoming operands have not been
5922 // emitted yet.
5923 for (BasicBlock::iterator I = SuccBB->begin();
5924 (PN = dyn_cast<PHINode>(I)); ++I) {
5925 // Ignore dead phi's.
5926 if (PN->use_empty()) continue;
5928 // Only handle legal types. Two interesting things to note here. First,
5929 // by bailing out early, we may leave behind some dead instructions,
5930 // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
5931 // own moves. Second, this check is necessary because FastISel doesn't
5932 // use CreateRegForValue to create registers, so it always creates
5933 // exactly one register for each non-void instruction.
5934 MVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
5935 if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
5936 // Promote MVT::i1.
5937 if (VT == MVT::i1)
5938 VT = TLI.getTypeToTransformTo(VT);
5939 else {
5940 SDL->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
5941 return false;
5942 }
5943 }
5945 Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
5947 unsigned Reg = F->getRegForValue(PHIOp);
5948 if (Reg == 0) {
5949 SDL->PHINodesToUpdate.resize(OrigNumPHINodesToUpdate);
5950 return false;
5951 }
5952 SDL->PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));